The following 50 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.ConvTranspose2d().
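Before the project snippets, here is a minimal standalone sketch of the layer itself; the channel counts and sizes are illustrative only and do not come from any of the projects below. It shows the basic call signature and the output-size arithmetic that the examples rely on.

import torch
import torch.nn as nn

# With dilation=1, the spatial output size of ConvTranspose2d is:
#   H_out = (H_in - 1) * stride - 2 * padding + kernel_size + output_padding
up = nn.ConvTranspose2d(in_channels=64, out_channels=32,
                        kernel_size=4, stride=2, padding=1)

x = torch.randn(1, 64, 16, 16)   # N x C x H x W
y = up(x)
print(y.shape)                   # torch.Size([1, 32, 32, 32]): (16-1)*2 - 2*1 + 4 = 32

The kernel_size=4, stride=2, padding=1 combination doubles the spatial size exactly, which is why it recurs in many of the generator and decoder examples below.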
def __init__(self):
    super(ImageTransformNet, self).__init__()
    self.conv1 = nn.Conv2d(3, 32, kernel_size=9, stride=1, padding=4)
    self.bn1 = nn.BatchNorm2d(32)
    self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1)
    self.bn2 = nn.BatchNorm2d(64)
    self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
    self.bn3 = nn.BatchNorm2d(128)
    self.res1 = ResidualBlock(128, 128)
    self.res2 = ResidualBlock(128, 128)
    self.res3 = ResidualBlock(128, 128)
    self.res4 = ResidualBlock(128, 128)
    self.res5 = ResidualBlock(128, 128)
    self.conv4 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
    self.bn4 = nn.BatchNorm2d(64)
    self.conv5 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
    self.bn5 = nn.BatchNorm2d(32)
    self.conv6 = nn.ConvTranspose2d(32, 3, kernel_size=9, stride=1, padding=4)
def __init__(self, prior_size):
    super(DCGenerator, self).__init__()
    self.prior_size = prior_size
    self.linear1 = nn.Linear(prior_size, 4*4*512)
    # 4x4 --> 8x8
    self.deconv1 = nn.ConvTranspose2d(512, 256, (5, 5))
    # Batch normalization
    self.bn1 = nn.BatchNorm2d(256)
    # 8x8 --> 16x16, stride 2
    self.deconv2 = nn.ConvTranspose2d(256, 128, (5, 5), stride=(2, 2), padding=(2, 2), output_padding=(1, 1))
    # Batch normalization
    self.bn2 = nn.BatchNorm2d(128)
    # 16x16 --> 32x32, stride 2
    self.deconv3 = nn.ConvTranspose2d(128, 3, (5, 5), stride=(2, 2), padding=(2, 2), output_padding=(1, 1))
def __init__(self):
    super(StylePart, self).__init__()
    self.conv1 = nn.Conv2d(3, 32, kernel_size=9, stride=1, padding=4)
    self.bn1 = nn.BatchNorm2d(32)
    self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=1)
    self.bn2 = nn.BatchNorm2d(64)
    self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)
    self.bn3 = nn.BatchNorm2d(128)
    self.res1 = ResBlock(128)
    self.res2 = ResBlock(128)
    self.res3 = ResBlock(128)
    self.res4 = ResBlock(128)
    self.res5 = ResBlock(128)
    self.deconv1 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
    self.bn4 = nn.BatchNorm2d(64)
    self.deconv2 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
    self.bn5 = nn.BatchNorm2d(32)
    self.deconv3 = nn.Conv2d(32, 3, kernel_size=9, stride=1, padding=4)
def buildNetGbg(self, nsize):
    # take vector as input, and output bgimg
    net = nn.Sequential()
    size_map = 1
    name = str(size_map)
    net.add_module('convt' + name, nn.ConvTranspose2d(nz, ngf * 4, 4, 4, 0, bias=True))
    net.add_module('bn' + name, nn.BatchNorm2d(ngf * 4))
    net.add_module('relu' + name, nn.ReLU(True))
    size_map = 4
    depth_in = 4 * ngf
    depth_out = 2 * ngf
    while size_map < nsize / 2:
        name = str(size_map)
        net.add_module('convt' + name, nn.ConvTranspose2d(depth_in, depth_out, 4, 2, 1, bias=True))
        net.add_module('bn' + name, nn.BatchNorm2d(depth_out))
        net.add_module('relu' + name, nn.ReLU(True))
        depth_in = depth_out
        depth_out = max(depth_in // 2, 64)
        size_map = size_map * 2
    return net, depth_in
def buildNetGfg(self, nsize):
    # take vector as input, and output fgimg and fgmask
    net = nn.Sequential()
    size_map = 1
    name = str(size_map)
    net.add_module('convt' + name, nn.ConvTranspose2d(nz, ngf * 8, 4, 4, 0, bias=False))
    net.add_module('bn' + name, nn.BatchNorm2d(ngf * 8))
    net.add_module('relu' + name, nn.ReLU(True))
    size_map = 4
    depth_in = 8 * ngf
    depth_out = 4 * ngf
    while size_map < nsize / 2:
        name = str(size_map)
        net.add_module('convt' + name, nn.ConvTranspose2d(depth_in, depth_out, 4, 2, 1, bias=False))
        net.add_module('bn' + name, nn.BatchNorm2d(depth_out))
        net.add_module('relu' + name, nn.ReLU(True))
        depth_in = depth_out
        depth_out = max(depth_in // 2, 64)
        size_map = size_map * 2
    return net, depth_in
def __init__(self, num_classes, pretrained=True):
    super(FCN32VGG, self).__init__()
    vgg = models.vgg16()
    if pretrained:
        vgg.load_state_dict(torch.load(vgg16_caffe_path))
    features, classifier = list(vgg.features.children()), list(vgg.classifier.children())

    features[0].padding = (100, 100)

    for f in features:
        if 'MaxPool' in f.__class__.__name__:
            f.ceil_mode = True
        elif 'ReLU' in f.__class__.__name__:
            f.inplace = True

    self.features5 = nn.Sequential(*features)

    fc6 = nn.Conv2d(512, 4096, kernel_size=7)
    fc6.weight.data.copy_(classifier[0].weight.data.view(4096, 512, 7, 7))
    fc6.bias.data.copy_(classifier[0].bias.data)
    fc7 = nn.Conv2d(4096, 4096, kernel_size=1)
    fc7.weight.data.copy_(classifier[3].weight.data.view(4096, 4096, 1, 1))
    fc7.bias.data.copy_(classifier[3].bias.data)
    score_fr = nn.Conv2d(4096, num_classes, kernel_size=1)
    score_fr.weight.data.zero_()
    score_fr.bias.data.zero_()
    self.score_fr = nn.Sequential(
        fc6, nn.ReLU(inplace=True), nn.Dropout(),
        fc7, nn.ReLU(inplace=True), nn.Dropout(),
        score_fr
    )
    self.upscore = nn.ConvTranspose2d(num_classes, num_classes, kernel_size=64, stride=32, bias=False)
    self.upscore.weight.data.copy_(get_upsampling_weight(num_classes, num_classes, 64))
def __init__(self, in_channels, out_channels, num_conv_layers):
    super(_DecoderBlock, self).__init__()
    middle_channels = in_channels // 2
    layers = [
        nn.ConvTranspose2d(in_channels, in_channels, kernel_size=2, stride=2),
        nn.Conv2d(in_channels, middle_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(middle_channels),
        nn.ReLU(inplace=True),
    ]
    layers += [
        nn.Conv2d(middle_channels, middle_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(middle_channels),
        nn.ReLU(inplace=True),
    ] * (num_conv_layers - 2)
    layers += [
        nn.Conv2d(middle_channels, out_channels, kernel_size=3, padding=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    self.decode = nn.Sequential(*layers)
def __init__(self, num_points=2048):
    super(PointGenPSG, self).__init__()
    self.num_points = num_points
    self.fc1 = nn.Linear(100, 256)
    self.fc2 = nn.Linear(256, 512)
    self.fc3 = nn.Linear(512, 1024)
    self.fc4 = nn.Linear(1024, self.num_points // 4 * 3 * 1)
    self.th = nn.Tanh()
    self.conv1 = nn.ConvTranspose2d(100, 1024, (2, 3))
    self.conv2 = nn.ConvTranspose2d(1024, 512, 4, 2, 1)
    self.conv3 = nn.ConvTranspose2d(512, 256, 4, 2, 1)
    self.conv4 = nn.ConvTranspose2d(256, 128, 4, 2, 1)
    self.conv5 = nn.ConvTranspose2d(128, 3, 4, 2, 1)
    self.bn1 = torch.nn.BatchNorm2d(1024)
    self.bn2 = torch.nn.BatchNorm2d(512)
    self.bn3 = torch.nn.BatchNorm2d(256)
    self.bn4 = torch.nn.BatchNorm2d(128)
    self.bn5 = torch.nn.BatchNorm2d(3)
def make_conv_layer(layer_list, in_dim, out_dim, back_conv, batch_norm=True, activation='ReLU', k_s_p=[4, 2, 1]):
    k, s, p = k_s_p[0], k_s_p[1], k_s_p[2]
    if back_conv == False:
        layer_list.append(nn.Conv2d(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))
    elif back_conv == True:
        layer_list.append(nn.ConvTranspose2d(in_dim, out_dim, kernel_size=k, stride=s, padding=p, bias=False))
    if batch_norm == True:
        layer_list.append(nn.BatchNorm2d(out_dim))
    if activation == 'ReLU':
        layer_list.append(nn.ReLU(True))
    elif activation == 'Sigmoid':
        layer_list.append(nn.Sigmoid())
    elif activation == 'Tanh':
        layer_list.append(nn.Tanh())
    elif activation == 'LeakyReLU':
        layer_list.append(nn.LeakyReLU(0.2, inplace=True))
    return layer_list
def __init__(self, params, nclasses, encoder):
    super().__init__()
    self.encoder = encoder
    self.pooling_modules = []

    for mod in self.encoder.modules():
        try:
            if mod.other.downsample:
                self.pooling_modules.append(mod.other)
        except AttributeError:
            pass

    self.layers = []
    for i, params in enumerate(params):
        if params['upsample']:
            params['pooling_module'] = self.pooling_modules.pop(-1)
        layer = DecoderModule(**params)
        self.layers.append(layer)
        layer_name = 'decoder{:02d}'.format(i)
        super().__setattr__(layer_name, layer)

    self.output_conv = nn.ConvTranspose2d(16, nclasses, 2, stride=2, padding=0, output_padding=0, bias=True)
def __init__(self, numClasses, prelus=False):
    super().__init__()
    self.upsampler7 = upsamplerA(128, 128)
    self.conv7 = nonBt1d(128, 128, 3, 0.1, prelus, 1)
    self.upsampler6 = upsamplerA(128, 128)
    self.conv6 = nonBt1d(128, 128, 3, 0.1, prelus, 1)
    self.upsampler5 = upsamplerA(128, 128)
    self.conv5 = nonBt1d(128, 128, 3, 0.1, prelus, 1)
    self.upsampler4 = upsamplerA(128, 128)
    self.conv4 = nonBt1d(128, 128, 3, 0.1, prelus, 1)
    self.upsampler3 = upsamplerA(128, 128)
    self.conv3 = nonBt1d(128, 128, 3, 0.1, prelus, 1)
    self.upsampler2 = upsamplerA(128, 64)
    self.conv2a = nonBt1d(64, 64, 3, 0.1, prelus, 2)
    self.conv2b = nonBt1d(64, 64, 3, 0.1, prelus, 4)
    self.upsampler1 = upsamplerB(64, numClasses)
    self.conv1a = nonBt1d(numClasses, numClasses, 3, 0.1, prelus, 2)
    self.conv1b = nonBt1d(numClasses, numClasses, 3, 0.1, prelus, 4)
    self.conv1c = nonBt1d(numClasses, numClasses, 3, 0.1, prelus, 8)
    self.convFinal = nn.ConvTranspose2d(numClasses, numClasses, 2, stride=2)
def __init__(self, ngpu):
    super(_netG, self).__init__()
    self.ngpu = ngpu
    self.main = nn.Sequential(
        # input is Z, going into a convolution
        nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
        nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        # state size. (ngf*8) x 4 x 4
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
        # state size. (ngf*4) x 8 x 8
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        # state size. (ngf*2) x 16 x 16
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        # state size. (ngf) x 32 x 32
        nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
        nn.Tanh()
        # state size. (nc) x 64 x 64
    )
def get_parameters(model, bias=False):
    import torch.nn as nn
    modules_skipped = (
        nn.ReLU,
        nn.MaxPool2d,
        nn.Dropout2d,
        nn.Sequential,
        torchfcn.models.FCN32s,
        torchfcn.models.FCN16s,
        torchfcn.models.FCN8s,
    )
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            if bias:
                yield m.bias
            else:
                yield m.weight
        elif isinstance(m, nn.ConvTranspose2d):
            # weight is frozen because it is just a bilinear upsampling
            if bias:
                assert m.bias is None
        elif isinstance(m, modules_skipped):
            continue
        else:
            raise ValueError('Unexpected module: %s' % str(m))
def __init__(self):
    super(Generator, self).__init__()
    self.main = nn.Sequential(
        nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
        nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        nn.ConvTranspose2d(ngf * 2, ngf * 1, 4, 2, 1, bias=False),
        nn.BatchNorm2d(ngf * 1),
        nn.ReLU(True),
        nn.ConvTranspose2d(ngf * 1, nc, 4, 2, 1, bias=False),
        nn.Tanh()
    )
    self.apply(weights_init)
    self.optimizer = optim.Adam(self.parameters(), lr=learning_rate, betas=(beta_1, beta_2))
    #self.optimizer = optim.RMSprop(self.parameters(), lr=learning_rate, alpha=beta_2)
def __init__(self, nc, ngf, hidden_size, condition=False, condition_size=0):
    super(Decoder, self).__init__()
    self.condition = condition
    self.decode_cond = nn.ConvTranspose2d(condition_size, ngf, kernel_size=8, stride=1, padding=0)  # 1
    self.decode = nn.ConvTranspose2d(hidden_size, ngf, kernel_size=8, stride=1, padding=0)  # 8
    self.dconv6 = deconv_block(ngf * 2, ngf)  # 16
    self.dconv5 = deconv_block(ngf, ngf)  # 32
    self.dconv4 = deconv_block(ngf, ngf)  # 64
    self.dconv3 = deconv_block(ngf, ngf)  # 128
    #self.dconv2 = deconv_block(ngf, ngf)  # 256
    self.dconv1 = nn.Sequential(
        nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1),
        nn.ELU(True),
        nn.Conv2d(ngf, ngf, kernel_size=3, stride=1, padding=1),
        nn.ELU(True),
        nn.Conv2d(ngf, nc, kernel_size=3, stride=1, padding=1),
        nn.Tanh())
def __init__(self, ngpu):
    super(_netG, self).__init__()
    self.ngpu = ngpu
    self.main = nn.Sequential(
        # b, nz, 1, 1
        nn.ConvTranspose2d(nz, 28 * 28, 1, stride=1, padding=0, bias=False),  # b, 28*28, 1, 1
        nn.BatchNorm2d(28 * 28),
        nn.ReLU(True),
        nn.ConvTranspose2d(28 * 28, 14 * 14, 2, stride=2, padding=0, bias=False),  # b, 14*14, 2, 2
        nn.BatchNorm2d(14 * 14),
        nn.ReLU(True),
        nn.ConvTranspose2d(14 * 14, 7 * 7, 2, stride=2, padding=0, bias=False),  # b, 7*7, 4, 4
        nn.BatchNorm2d(7 * 7),
        nn.ReLU(True),
        nn.ConvTranspose2d(7 * 7, 1, 7, stride=7, padding=0, bias=False),  # b, 1, 28, 28
        nn.Sigmoid()
    )
def __init__(self):
    super(G, self).__init__()
    self.main = nn.Sequential(
        nn.ConvTranspose2d(74, 1024, 1, 1, bias=False),
        nn.BatchNorm2d(1024),
        nn.ReLU(True),
        nn.ConvTranspose2d(1024, 128, 7, 1, bias=False),
        nn.BatchNorm2d(128),
        nn.ReLU(True),
        nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(True),
        nn.ConvTranspose2d(64, 1, 4, 2, 1, bias=False),
        nn.Sigmoid()
    )
def __init__(self):
    super(Generator, self).__init__()
    self.model = nn.Sequential(
        nn.Linear(z_dim + 10, 4*4*256),
        nn.LeakyReLU()
    )
    self.cnn = nn.Sequential(
        nn.ConvTranspose2d(256, 128, 3, stride=2, padding=0, output_padding=0),
        nn.LeakyReLU(),
        nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=0),
        nn.LeakyReLU(),
        nn.ConvTranspose2d(64, 64, 3, stride=2, padding=2, output_padding=1),
        nn.LeakyReLU(),
        nn.Conv2d(64, 3, 3, stride=1, padding=1),
        nn.Tanh()
    )
def __init__(self):
    super(Generator, self).__init__()
    self.model = nn.Sequential(
        nn.Linear(100, 4*4*256),
        nn.LeakyReLU()
    )
    self.cnn = nn.Sequential(
        nn.ConvTranspose2d(256, 128, 3, stride=2, padding=0, output_padding=0),
        nn.LeakyReLU(),
        nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=0),
        nn.LeakyReLU(),
        nn.ConvTranspose2d(64, 64, 3, stride=2, padding=2, output_padding=1),
        nn.LeakyReLU(),
        nn.Conv2d(64, 3, 3, stride=1, padding=1),
        nn.Tanh()
    )
def __init__(self):
    super(Generator, self).__init__()
    self.layer1 = nn.Sequential(
        nn.Conv2d(1, 16, stride=2, kernel_size=4, padding=1),  # 28*28 -> 14*14
        nn.BatchNorm2d(16),
        nn.LeakyReLU()
    )
    self.layer2 = nn.Sequential(
        nn.Conv2d(16, 16, stride=1, kernel_size=3, padding=1),  # 14*14 -> 14*14
        nn.BatchNorm2d(16),
        nn.LeakyReLU()
    )
    self.layer3 = nn.Sequential(
        nn.ConvTranspose2d(16, 1, stride=2, kernel_size=4, padding=1),  # 14*14 -> 28*28
        nn.Tanh()
    )
def __init__(self):
    super(Decoder, self).__init__()
    self.main = nn.Sequential(
        # input is Z, going into a convolution
        nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
        #nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        # state size. (ngf*8) x 4 x 4
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        #nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
        # state size. (ngf*4) x 8 x 8
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        #nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        # state size. (ngf*2) x 16 x 16
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        #nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        # state size. (ngf) x 32 x 32
        nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
        nn.Tanh()
        # state size. (nc) x 64 x 64
    )
def __init__(self, ngpu):
    super(NetG, self).__init__()
    self.ngpu = ngpu
    self.main = nn.Sequential(
        # input is Z, going into a convolution
        nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
        #nn.BatchNorm2d(ngf * 8),
        nn.ReLU(True),
        # state size. (ngf*8) x 4 x 4
        nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
        #nn.BatchNorm2d(ngf * 4),
        nn.ReLU(True),
        # state size. (ngf*4) x 8 x 8
        nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
        #nn.BatchNorm2d(ngf * 2),
        nn.ReLU(True),
        # state size. (ngf*2) x 16 x 16
        nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
        #nn.BatchNorm2d(ngf),
        nn.ReLU(True),
        # state size. (ngf) x 32 x 32
        nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
        nn.Tanh()
        # state size. (nc) x 64 x 64
    )
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
    super(deconv2DBatchNorm, self).__init__()
    self.dcb_unit = nn.Sequential(
        nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                           padding=padding, stride=stride, bias=bias),
        nn.BatchNorm2d(int(n_filters)),
    )
def __init__(self, in_channels, n_filters, k_size, stride, padding, bias=True):
    super(deconv2DBatchNormRelu, self).__init__()
    self.dcbr_unit = nn.Sequential(
        nn.ConvTranspose2d(int(in_channels), int(n_filters), kernel_size=k_size,
                           padding=padding, stride=stride, bias=bias),
        nn.BatchNorm2d(int(n_filters)),
        nn.ReLU(inplace=True),
    )
def __init__(self, in_size, out_size, is_deconv):
    super(unetUp, self).__init__()
    self.conv = unetConv2(in_size, out_size, False)
    if is_deconv:
        self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
    else:
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
def _initialize_weights(self):
    vgg16 = torchvision.models.vgg16(pretrained=True)
    for m in self.modules():
        if isinstance(m, nn.ConvTranspose2d):
            assert m.kernel_size[0] == m.kernel_size[1]
            m.weight.data = weight_init.kaiming_normal(m.weight.data)
    for a, b in zip(vgg16.features, self.features):
        if (isinstance(a, nn.Conv2d) and isinstance(b, nn.Conv2d)):
            b.weight.data = a.weight.data
            b.bias.data = a.bias.data
    for i in [0, 3]:
        a, b = vgg16.classifier[i], self.classifier[i]
        b.weight.data = a.weight.data.view(b.weight.size())
        b.bias.data = a.bias.data.view(b.bias.size())
def U_weight_init(ms):
    for m in ms.modules():
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data, a=0.2)
        elif classname.find('ConvTranspose2d') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
            print('worked!')  # TODO: kill this
        elif classname.find('BatchNorm') != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)
        elif classname.find('Linear') != -1:
            m.weight.data = init.kaiming_normal(m.weight.data)
def dconv_norm_act(in_dim, out_dim, kernel_size, stride, padding=0,
                   output_padding=0, norm=nn.BatchNorm2d, relu=nn.ReLU):
    return nn.Sequential(
        nn.ConvTranspose2d(in_dim, out_dim, kernel_size, stride,
                           padding, output_padding, bias=False),
        norm(out_dim),
        relu())
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True): """Custom deconvolutional layer for simplicity.""" layers = [] layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad, bias=False)) if bn: layers.append(nn.BatchNorm2d(c_out)) return nn.Sequential(*layers)
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d,
             use_dropout=False, n_blocks=6, gpu_ids=[]):
    assert(n_blocks >= 0)
    super(ResnetGenerator, self).__init__()
    self.input_nc = input_nc
    self.output_nc = output_nc
    self.ngf = ngf
    self.gpu_ids = gpu_ids

    model = [nn.Conv2d(input_nc, ngf, kernel_size=7, padding=3),
             norm_layer(ngf, affine=True),
             nn.ReLU(True)]

    n_downsampling = 2
    for i in range(n_downsampling):
        mult = 2**i
        model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                            stride=2, padding=1),
                  norm_layer(ngf * mult * 2, affine=True),
                  nn.ReLU(True)]

    mult = 2**n_downsampling
    for i in range(n_blocks):
        model += [ResnetBlock(ngf * mult, 'zero', norm_layer=norm_layer, use_dropout=use_dropout)]

    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                     kernel_size=3, stride=2,
                                     padding=1, output_padding=1),
                  norm_layer(int(ngf * mult / 2), affine=True),
                  nn.ReLU(True)]

    model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
    model += [nn.Tanh()]

    self.model = nn.Sequential(*model)
def __init__(self, outer_nc, inner_nc, submodule=None,
             outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
    super(UnetSkipConnectionBlock, self).__init__()
    self.outermost = outermost

    downconv = nn.Conv2d(outer_nc, inner_nc, kernel_size=4, stride=2, padding=1)
    downrelu = nn.LeakyReLU(0.2, True)
    downnorm = norm_layer(inner_nc, affine=True)
    uprelu = nn.ReLU(True)
    upnorm = norm_layer(outer_nc, affine=True)

    if outermost:
        upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
        down = [downconv]
        up = [uprelu, upconv, nn.Tanh()]
        model = down + [submodule] + up
    elif innermost:
        upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1)
        down = [downrelu, downconv]
        up = [uprelu, upconv, upnorm]
        model = down + up
    else:
        upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
        down = [downrelu, downconv, downnorm]
        up = [uprelu, upconv, upnorm]
        if use_dropout:
            model = down + [submodule] + up + [nn.Dropout(0.5)]
        else:
            model = down + [submodule] + up

    self.model = nn.Sequential(*model)
def test_ConvTranspose2d_output_size(self):
    m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
    i = Variable(torch.randn(2, 3, 6, 6))
    for h in range(15, 22):
        for w in range(15, 22):
            if 18 <= h <= 20 and 18 <= w <= 20:
                size = (h, w)
                if h == 19:
                    size = torch.LongStorage(size)
                elif h == 2:
                    size = torch.LongStorage((2, 4) + size)
                m(i, output_size=(h, w))
            else:
                self.assertRaises(ValueError, lambda: m(i, (h, w)))
def __init__(self, n_channel_input, n_channel_output, n_filters):
    super(G, self).__init__()
    self.conv1 = nn.Conv2d(n_channel_input, n_filters, 4, 2, 1)
    self.conv2 = nn.Conv2d(n_filters, n_filters * 2, 4, 2, 1)
    self.conv3 = nn.Conv2d(n_filters * 2, n_filters * 4, 4, 2, 1)
    self.conv4 = nn.Conv2d(n_filters * 4, n_filters * 8, 4, 2, 1)
    self.conv5 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
    self.conv6 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
    self.conv7 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
    self.conv8 = nn.Conv2d(n_filters * 8, n_filters * 8, 4, 2, 1)
    self.deconv1 = nn.ConvTranspose2d(n_filters * 8, n_filters * 8, 4, 2, 1)
    self.deconv2 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
    self.deconv3 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
    self.deconv4 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 8, 4, 2, 1)
    self.deconv5 = nn.ConvTranspose2d(n_filters * 8 * 2, n_filters * 4, 4, 2, 1)
    self.deconv6 = nn.ConvTranspose2d(n_filters * 4 * 2, n_filters * 2, 4, 2, 1)
    self.deconv7 = nn.ConvTranspose2d(n_filters * 2 * 2, n_filters, 4, 2, 1)
    self.deconv8 = nn.ConvTranspose2d(n_filters * 2, n_channel_output, 4, 2, 1)
    self.batch_norm = nn.BatchNorm2d(n_filters)
    self.batch_norm2 = nn.BatchNorm2d(n_filters * 2)
    self.batch_norm4 = nn.BatchNorm2d(n_filters * 4)
    self.batch_norm8 = nn.BatchNorm2d(n_filters * 8)
    self.leaky_relu = nn.LeakyReLU(0.2, True)
    self.relu = nn.ReLU(True)
    self.dropout = nn.Dropout(0.5)
    self.tanh = nn.Tanh()
def __init__(self, out_h, out_w, channel_dims, z_dim=100):
    super().__init__()
    assert len(channel_dims) == 4, "length of channel dims should be 4"
    conv1_dim, conv2_dim, conv3_dim, conv4_dim = channel_dims
    conv1_h, conv2_h, conv3_h, conv4_h = map(conv_size, [(out_h, step) for step in [4, 3, 2, 1]])
    conv1_w, conv2_w, conv3_w, conv4_w = map(conv_size, [(out_w, step) for step in [4, 3, 2, 1]])
    self.fc = nn.Linear(z_dim, conv1_dim*conv1_h*conv1_w)
    self.deconvs = nn.Sequential(
        nn.BatchNorm2d(conv1_dim),
        nn.ReLU(),
        nn.ConvTranspose2d(conv1_dim, conv2_dim, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(conv2_dim),
        nn.ReLU(),
        nn.ConvTranspose2d(conv2_dim, conv3_dim, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(conv3_dim),
        nn.ReLU(),
        nn.ConvTranspose2d(conv3_dim, conv4_dim, kernel_size=4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(conv4_dim),
        nn.ReLU(),
        nn.ConvTranspose2d(conv4_dim, 3, kernel_size=4, stride=2, padding=1, bias=False),
        nn.Tanh(),
    )
    self.conv1_size = (conv1_dim, conv1_h, conv1_w)
    self._init_weight()
def _init_weight(self):
    self.fc.weight.data.normal_(.0, 0.02)
    for layer in self.deconvs:
        if isinstance(layer, nn.ConvTranspose2d):
            layer.weight.data.normal_(.0, 0.02)
        if isinstance(layer, nn.BatchNorm2d):
            layer.weight.data.normal_(1., 0.02)
            layer.bias.data.fill_(0)
def __init__(self, in_channels, out_channels):
    super(Generator, self).__init__()
    self.c0 = nn.Conv2d(in_channels, 64, 4, stride=2, padding=1)
    self.c1 = nn.Conv2d(64, 128, 4, stride=2, padding=1)
    self.c2 = nn.Conv2d(128, 256, 4, stride=2, padding=1)
    self.c3 = nn.Conv2d(256, 512, 4, stride=2, padding=1)
    self.c4 = nn.Conv2d(512, 512, 4, stride=2, padding=1)
    self.c5 = nn.Conv2d(512, 512, 4, stride=2, padding=1)
    self.c6 = nn.Conv2d(512, 512, 4, stride=2, padding=1)
    self.c7 = nn.Conv2d(512, 512, 4, stride=2, padding=1)
    self.d7 = nn.ConvTranspose2d(512, 512, 4, stride=2, padding=1)
    self.d6 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1)
    self.d5 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1)
    self.d4 = nn.ConvTranspose2d(1024, 512, 4, stride=2, padding=1)
    self.d3 = nn.ConvTranspose2d(1024, 256, 4, stride=2, padding=1)
    self.d2 = nn.ConvTranspose2d(512, 128, 4, stride=2, padding=1)
    self.d1 = nn.ConvTranspose2d(256, 64, 4, stride=2, padding=1)
    self.d0 = nn.ConvTranspose2d(128, out_channels, 4, stride=2, padding=1)
    self.bnc1 = nn.BatchNorm2d(128)
    self.bnc2 = nn.BatchNorm2d(256)
    self.bnc3 = nn.BatchNorm2d(512)
    self.bnc4 = nn.BatchNorm2d(512)
    self.bnc5 = nn.BatchNorm2d(512)
    self.bnc6 = nn.BatchNorm2d(512)
    self.bnd7 = nn.BatchNorm2d(512)
    self.bnd6 = nn.BatchNorm2d(512)
    self.bnd5 = nn.BatchNorm2d(512)
    self.bnd4 = nn.BatchNorm2d(512)
    self.bnd3 = nn.BatchNorm2d(256)
    self.bnd2 = nn.BatchNorm2d(128)
    self.bnd1 = nn.BatchNorm2d(64)
def __init__(self, isize, nc, k=100, ngf=64):
    super(Decoder, self).__init__()
    assert isize % 16 == 0, "isize has to be a multiple of 16"

    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2

    main = nn.Sequential()
    main.add_module('initial.{0}-{1}.convt'.format(k, cngf),
                    nn.ConvTranspose2d(k, cngf, 4, 1, 0, bias=False))
    main.add_module('initial.{0}.batchnorm'.format(cngf),
                    nn.BatchNorm2d(cngf))
    main.add_module('initial.{0}.relu'.format(cngf),
                    nn.ReLU(True))

    csize = 4
    while csize < isize // 2:
        main.add_module('pyramid.{0}-{1}.convt'.format(cngf, cngf // 2),
                        nn.ConvTranspose2d(cngf, cngf // 2, 4, 2, 1, bias=False))
        main.add_module('pyramid.{0}.batchnorm'.format(cngf // 2),
                        nn.BatchNorm2d(cngf // 2))
        main.add_module('pyramid.{0}.relu'.format(cngf // 2),
                        nn.ReLU(True))
        cngf = cngf // 2
        csize = csize * 2

    main.add_module('final.{0}-{1}.convt'.format(cngf, nc),
                    nn.ConvTranspose2d(cngf, nc, 4, 2, 1, bias=False))
    main.add_module('final.{0}.tanh'.format(nc),
                    nn.Tanh())
    self.main = main
def deconv(c_in, c_out, k_size, stride=2, pad=1, bn=True): """Custom deconvolutional layer for simplicity.""" layers = [] layers.append(nn.ConvTranspose2d(c_in, c_out, k_size, stride, pad)) if bn: layers.append(nn.BatchNorm2d(c_out)) return nn.Sequential(*layers)
def deconv(in_planes, out_planes):
    return nn.Sequential(
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=False),
        nn.LeakyReLU(0.1, inplace=True)
    )
def __init__(self, batchNorm=True):
    super(FlowNetS, self).__init__()

    self.batchNorm = batchNorm
    self.conv1 = conv(self.batchNorm, 6, 64, kernel_size=7, stride=2)
    self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
    self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
    self.conv3_1 = conv(self.batchNorm, 256, 256)
    self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
    self.conv4_1 = conv(self.batchNorm, 512, 512)
    self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
    self.conv5_1 = conv(self.batchNorm, 512, 512)
    self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
    self.conv6_1 = conv(self.batchNorm, 1024, 1024)

    self.deconv5 = deconv(1024, 512)
    self.deconv4 = deconv(1026, 256)
    self.deconv3 = deconv(770, 128)
    self.deconv2 = deconv(386, 64)

    self.predict_flow6 = predict_flow(1024)
    self.predict_flow5 = predict_flow(1026)
    self.predict_flow4 = predict_flow(770)
    self.predict_flow3 = predict_flow(386)
    self.predict_flow2 = predict_flow(194)

    self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
    self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
    self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
    self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
            kaiming_normal(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
def upconv2x2(in_channels, out_channels, mode='transpose'):
    if mode == 'transpose':
        return nn.ConvTranspose2d(
            in_channels,
            out_channels,
            kernel_size=2,
            stride=2)
    else:
        # out_channels is always going to be the same
        # as in_channels
        return nn.Sequential(
            nn.Upsample(mode='bilinear', scale_factor=2),
            conv1x1(in_channels, out_channels))
def __init__(self, bn=False, num_classes=10):
    super(CMTL, self).__init__()

    self.num_classes = num_classes
    self.base_layer = nn.Sequential(Conv2d(1, 16, 9, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn))

    self.hl_prior_1 = nn.Sequential(Conv2d(32, 16, 9, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(16, 32, 7, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(32, 16, 7, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(16, 8, 7, same_padding=True, NL='prelu', bn=bn))

    self.hl_prior_2 = nn.Sequential(nn.AdaptiveMaxPool2d((32, 32)),
                                    Conv2d(8, 4, 1, same_padding=True, NL='prelu', bn=bn))

    self.hl_prior_fc1 = FC(4 * 1024, 512, NL='prelu')
    self.hl_prior_fc2 = FC(512, 256, NL='prelu')
    self.hl_prior_fc3 = FC(256, self.num_classes, NL='prelu')

    self.de_stage_1 = nn.Sequential(Conv2d(32, 20, 7, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(20, 40, 5, same_padding=True, NL='prelu', bn=bn),
                                    nn.MaxPool2d(2),
                                    Conv2d(40, 20, 5, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(20, 10, 5, same_padding=True, NL='prelu', bn=bn))

    self.de_stage_2 = nn.Sequential(Conv2d(18, 24, 3, same_padding=True, NL='prelu', bn=bn),
                                    Conv2d(24, 32, 3, same_padding=True, NL='prelu', bn=bn),
                                    nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1, output_padding=0, bias=True),
                                    nn.PReLU(),
                                    nn.ConvTranspose2d(16, 8, 4, stride=2, padding=1, output_padding=0, bias=True),
                                    nn.PReLU(),
                                    Conv2d(8, 1, 1, same_padding=True, NL='relu', bn=bn))
def __init__(self, in_: int, out: int, *, bn=True, activation='relu', up='upconv'):
    super().__init__()
    self.l1 = Conv3BN(in_, out, bn, activation)
    self.l2 = Conv3BN(out, out, bn, activation)
    if up == 'upconv':
        self.up = nn.ConvTranspose2d(in_, out, 2, stride=2)
    elif up == 'upsample':
        self.up = nn.Upsample(scale_factor=2)
def __init__(self, in_: int, out: int, *, bn=True, activation='relu', up='upsample'):
    super().__init__()
    self.l1 = Conv3BN(in_, out, bn, activation)
    self.l2 = Conv3BN(out, out, bn, activation)
    self.l3 = Conv3BN(out, out, bn, activation)
    if up == 'upconv':
        self.up = nn.ConvTranspose2d(in_, out, 2, stride=2)
    elif up == 'upsample':
        self.up = nn.Upsample(scale_factor=2)
def __init__(self, in_: int, out: int, *, bn=True, activation='relu', up='upsample'):
    super().__init__()
    self.l1 = Conv3BN(in_, out, bn, activation)
    self.l2 = Conv3BN(out, out, bn, activation)
    self.l3 = Conv3BN(out, out, bn, activation)
    self.l4 = Conv3BN(out, out, bn, activation)
    if up == 'upconv':
        self.up = nn.ConvTranspose2d(in_, out, 2, stride=2)
    elif up == 'upsample':
        self.up = nn.Upsample(scale_factor=2)