The following 14 code examples, extracted from open-source Python projects, illustrate how to use torch.nn.PixelShuffle().
def __init__(self, down_factor, in_dim, num_classes):
    """Dense Upsampling Convolution module: conv -> BN -> ReLU -> PixelShuffle.

    The 3x3 conv expands the feature map to (down_factor ** 2) * num_classes
    channels so that PixelShuffle can trade channel depth for a
    down_factor-times larger spatial resolution.
    """
    super(_DenseUpsamplingConvModule, self).__init__()
    out_channels = num_classes * down_factor * down_factor
    self.conv = nn.Conv2d(in_dim, out_channels, kernel_size=3, padding=1)
    self.bn = nn.BatchNorm2d(out_channels)
    self.relu = nn.ReLU(inplace=True)
    self.pixel_shuffle = nn.PixelShuffle(down_factor)
def __init__(self, inplanes, planes, upscale_factor=2):
    """Dense Upsampling Convolution block.

    `planes` should be divisible by upscale_factor ** 2 for the final
    PixelShuffle to be valid (PixelShuffle divides channels by r^2).
    """
    super(DUC, self).__init__()
    self.relu = nn.ReLU()
    self.conv = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1)
    self.bn = nn.BatchNorm2d(planes)
    self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
def test_pixel_shuffle(self):
    """Check PixelShuffle's output layout and its gradient on random shapes."""
    batch_size = random.randint(1, 3)
    upscale_factor = random.randint(2, 5)
    # channel count must be a multiple of upscale_factor ** 2
    channels = random.randint(1, 4) * upscale_factor ** 2
    height = random.randint(5, 10)
    width = random.randint(5, 10)

    x = Variable(
        torch.Tensor(batch_size, channels, height, width).uniform_(),
        requires_grad=True)
    shuffle = nn.PixelShuffle(upscale_factor)
    y = shuffle(x)
    self._verify_pixel_shuffle(x.data, y.data, upscale_factor)

    # PixelShuffle is a pure permutation of elements, so backpropagating the
    # output values must place the same values back at the input positions.
    y.backward(y.data)
    self.assertEqual(x.data, x.grad.data)
def __init__(self, upscale_factor):
    """ESPCN-style sub-pixel CNN for single-channel super-resolution.

    Four convs keep spatial size ('same' padding); the last one emits
    upscale_factor ** 2 channels for PixelShuffle to turn into one
    upscaled output channel.
    """
    super(Net, self).__init__()
    self.relu = nn.ReLU()
    self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
    self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
    self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
    self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
    self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
    self._initialize_weights()
def __init__(self, n_channels=64, upsample=2):
    """Sub-pixel convolution upsampler: 3x3 conv followed by PixelShuffle.

    The conv multiplies the channel count by upsample ** 2 so PixelShuffle
    can rearrange channels into an `upsample`-times larger feature map,
    leaving n_channels output channels.
    """
    super(SubPixelConv, self).__init__()
    self.n_channels = n_channels
    self.upsample = upsample
    self.out_channels = self.upsample * self.upsample * self.n_channels
    self.conv = nn.Conv2d(
        in_channels=self.n_channels,
        out_channels=self.out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False)
    self.upsample_net = nn.PixelShuffle(self.upsample)
    self.relu = nn.ReLU(inplace=True)
def __init__(self, upscale_factor, inplace=False):
    """ESPCN-style super-resolution net (single input channel).

    Same architecture as the classic PyTorch super-resolution example;
    `inplace` only controls whether the shared ReLU mutates its input.
    """
    super(SuperResolutionNet, self).__init__()
    self.relu = nn.ReLU(inplace=inplace)
    self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
    self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
    self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
    self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
    self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
    self._initialize_weights()
def __init__(self, in_channels, out_channels):
    """2x upsampling block: conv to 4x channels -> PixelShuffle(2) -> BN."""
    super(PixelShuffleBlock, self).__init__()
    # 4 == 2 ** 2: exactly the factor PixelShuffle(2) consumes
    self.conv = nn.Conv2d(
        in_channels, out_channels * 4,
        kernel_size=3, stride=1, padding=1, bias=False)
    self.pixel_shuffle = nn.PixelShuffle(2)
    self.bn = nn.BatchNorm2d(out_channels)
def __init__(self, n=64, f=3, upscale_factor=2):
    """Sub-pixel upscaling layer with a Xavier-initialized conv.

    n: input channel count; f: (odd) kernel size, padded for 'same' output.
    """
    super().__init__()
    self.conv = nn.Conv2d(
        in_channels=n,
        out_channels=n * upscale_factor ** 2,
        kernel_size=f,
        stride=1,
        padding=(f - 1) // 2)
    xavier_normal(self.conv.weight)
    self.pixsf = nn.PixelShuffle(upscale_factor)
def __init__(self, ngf=32):
    """Sketch-colorization generator: U-Net-style encoder plus ResNeXt
    "tunnel" decoder stages, each upsampled 2x via conv + PixelShuffle.

    NOTE(review): channel counts (e.g. to3 taking ngf*3, tunnel3 taking
    ngf*8) suggest forward() concatenates skip/hint features before each
    stage — confirm against the forward pass, which is not visible here.
    """
    super(def_netG, self).__init__()

    # hint branch (4-channel color hints) and grayscale-sketch encoder
    self.toH = nn.Sequential(
        nn.Conv2d(4, ngf, kernel_size=7, stride=1, padding=3),
        nn.LeakyReLU(0.2, True))
    self.to0 = nn.Sequential(
        nn.Conv2d(1, ngf // 2, kernel_size=3, stride=1, padding=1),  # 512
        nn.LeakyReLU(0.2, True))
    self.to1 = nn.Sequential(
        nn.Conv2d(ngf // 2, ngf, kernel_size=4, stride=2, padding=1),  # 256
        nn.LeakyReLU(0.2, True))
    self.to2 = nn.Sequential(
        nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1),  # 128
        nn.LeakyReLU(0.2, True))
    self.to3 = nn.Sequential(
        nn.Conv2d(ngf * 3, ngf * 4, kernel_size=4, stride=2, padding=1),  # 64
        nn.LeakyReLU(0.2, True))
    self.to4 = nn.Sequential(
        nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1),  # 32
        nn.LeakyReLU(0.2, True))

    # deepest stage: 10 ResNeXt blocks, then a 2x sub-pixel upsample
    blocks = [ResNeXtBottleneck(ngf * 8, ngf * 8, cardinality=32, dilate=1)
              for _ in range(10)]
    blocks += [nn.Conv2d(ngf * 8, ngf * 4 * 4, kernel_size=3, stride=1, padding=1),
               nn.PixelShuffle(2),
               nn.LeakyReLU(0.2, True)]
    self.tunnel4 = nn.Sequential(*blocks)

    # stage 3: reduce fused features to ngf*4, 6 blocks, upsample to ngf*2
    blocks = nn.Sequential(
        *[ResNeXtBottleneck(ngf * 4, ngf * 4, cardinality=16, dilate=1)
          for _ in range(6)])
    self.tunnel3 = nn.Sequential(
        nn.Conv2d(ngf * 8, ngf * 4, kernel_size=3, stride=1, padding=1),
        nn.LeakyReLU(0.2, True),
        blocks,
        nn.Conv2d(ngf * 4, ngf * 2 * 4, kernel_size=3, stride=1, padding=1),
        nn.PixelShuffle(2),
        nn.LeakyReLU(0.2, True))

    # stage 2: 4 blocks, upsample to ngf
    blocks = nn.Sequential(
        *[ResNeXtBottleneck(ngf * 2, ngf * 2, cardinality=8, dilate=1)
          for _ in range(4)])
    self.tunnel2 = nn.Sequential(
        nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1),
        nn.LeakyReLU(0.2, True),
        blocks,
        nn.Conv2d(ngf * 2, ngf * 4, kernel_size=3, stride=1, padding=1),
        nn.PixelShuffle(2),
        nn.LeakyReLU(0.2, True))

    # stage 1: 2 blocks, upsample to ngf // 2 (matches to0's width)
    blocks = nn.Sequential(
        *[ResNeXtBottleneck(ngf, ngf, cardinality=4, dilate=1)
          for _ in range(2)])
    self.tunnel1 = nn.Sequential(
        nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1),
        nn.LeakyReLU(0.2, True),
        blocks,
        nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),
        nn.PixelShuffle(2),
        nn.LeakyReLU(0.2, True))

    self.exit = nn.Conv2d(ngf, 3, kernel_size=3, stride=1, padding=1)
def build_layers(img_sz, img_fm, init_fm, max_fm, n_layers, n_attr, n_skip,
                 deconv_method, instance_norm, enc_dropout, dec_dropout):
    """
    Build auto-encoder layers.

    Returns (enc_layers, dec_layers): one nn.Sequential per level, with the
    decoder list ordered deepest-first so dec_layers[k] mirrors
    enc_layers[-1 - k]. Each decoder level's input width accounts for the
    n_attr attribute channels and, on skip-connected levels, a concatenated
    copy of the encoder features.
    """
    assert init_fm <= max_fm
    assert n_skip <= n_layers - 1
    assert np.log2(img_sz).is_integer()
    assert n_layers <= int(np.log2(img_sz))
    assert type(instance_norm) is bool
    assert 0 <= enc_dropout < 1
    assert 0 <= dec_dropout < 1

    norm_fn = nn.InstanceNorm2d if instance_norm else nn.BatchNorm2d
    enc_layers, dec_layers = [], []
    n_in, n_out = img_fm, init_fm

    for i in range(n_layers):
        # levels in [n_layers - n_skip - 1, n_layers - 1) carry a skip
        # connection, which doubles the decoder's feature input
        has_skip = n_layers - (n_skip + 1) <= i < n_layers - 1
        n_dec_in = n_out + n_attr + (n_out if has_skip else 0)
        n_dec_out = n_in

        # --- encoder level: strided conv halves the spatial size ---
        enc = [nn.Conv2d(n_in, n_out, 4, 2, 1)]
        if i > 0:
            enc.append(norm_fn(n_out, affine=True))
        enc.append(nn.LeakyReLU(0.2, inplace=True))
        if enc_dropout > 0:
            enc.append(nn.Dropout(enc_dropout))

        # --- decoder level: 2x upsampling by the chosen method ---
        if deconv_method == 'upsampling':
            dec = [nn.UpsamplingNearest2d(scale_factor=2),
                   nn.Conv2d(n_dec_in, n_dec_out, 3, 1, 1)]
        elif deconv_method == 'convtranspose':
            dec = [nn.ConvTranspose2d(n_dec_in, n_dec_out, 4, 2, 1, bias=False)]
        else:
            assert deconv_method == 'pixelshuffle'
            dec = [nn.Conv2d(n_dec_in, n_dec_out * 4, 3, 1, 1),
                   nn.PixelShuffle(2)]
        if i > 0:
            dec.append(norm_fn(n_dec_out, affine=True))
            # dropout only on the deepest decoder levels
            if dec_dropout > 0 and i >= n_layers - 3:
                dec.append(nn.Dropout(dec_dropout))
            dec.append(nn.ReLU(inplace=True))
        else:
            dec.append(nn.Tanh())  # outermost level maps back to image range

        n_in, n_out = n_out, min(2 * n_out, max_fm)
        enc_layers.append(nn.Sequential(*enc))
        dec_layers.insert(0, nn.Sequential(*dec))

    return enc_layers, dec_layers
def __init__(self, net_opts):
    """Three-conv sub-pixel super-resolution net (ESPCN-style).

    net_opts: mapping with key 'upscale_factor'. Each conv uses 'same'
    padding ((k - 1) // 2) and is passed through self._initialize_weights.
    """
    super().__init__()
    upscale_factor = net_opts['upscale_factor']
    self.tanh = nn.Tanh()

    # conv1: 1 -> 64 channels, 5x5
    self.conv1 = nn.Conv2d(
        in_channels=1, out_channels=64,
        kernel_size=5, stride=1, padding=2)
    self._initialize_weights(self.conv1)

    # conv2: 64 -> 32 channels, 3x3
    self.conv2 = nn.Conv2d(
        in_channels=64, out_channels=32,
        kernel_size=3, stride=1, padding=1)
    self._initialize_weights(self.conv2)

    # conv3: 32 -> upscale_factor ** 2 channels, 3x3 (feeds PixelShuffle)
    self.conv3 = nn.Conv2d(
        in_channels=32, out_channels=upscale_factor ** 2,
        kernel_size=3, stride=1, padding=1)
    self._initialize_weights(self.conv3)

    self.pixsf = nn.PixelShuffle(upscale_factor)