The following 11 code examples, extracted from open source Python projects, illustrate how to use ops.deconv2d().
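For context, most of the examples below rely on a DCGAN-style helper that wraps tf.nn.conv2d_transpose. The sketch below is an assumption about what such an ops.deconv2d typically looks like (this page does not include the actual ops.py, and the exact signature varies across projects: some take an opts dict first, others accept a with_w flag or a reuse argument).

# Minimal sketch of a common DCGAN-style deconv2d helper (an assumption, not the actual ops.py).
import tensorflow as tf

def deconv2d(input_, output_shape, k_h=5, k_w=5, d_h=2, d_w=2,
             stddev=0.02, name="deconv2d", with_w=False):
    with tf.variable_scope(name):
        # Filter shape for conv2d_transpose: [height, width, out_channels, in_channels].
        w = tf.get_variable(
            'w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        deconv = tf.nn.conv2d_transpose(
            input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1])
        biases = tf.get_variable(
            'biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.bias_add(deconv, biases)
        if with_w:
            return deconv, w, biases
        return deconv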
def generator(hparams, z, scope_name, train, reuse):

    with tf.variable_scope(scope_name) as scope:
        if reuse:
            scope.reuse_variables()

        output_size = 64
        s = output_size
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

        g_bn0 = ops.batch_norm(name='g_bn0')
        g_bn1 = ops.batch_norm(name='g_bn1')
        g_bn2 = ops.batch_norm(name='g_bn2')
        g_bn3 = ops.batch_norm(name='g_bn3')

        # project `z` and reshape
        h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'),
                        [-1, s16, s16, hparams.gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(h0, train=train))

        h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')
        h1 = tf.nn.relu(g_bn1(h1, train=train))

        h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')
        h2 = tf.nn.relu(g_bn2(h2, train=train))

        h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')
        h3 = tf.nn.relu(g_bn3(h3, train=train))

        h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')
        x_gen = tf.nn.tanh(h4)

        return x_gen
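A hypothetical usage sketch for the generator above, showing how the reuse flag lets a training graph and a sampling graph share weights. The hparams fields (batch_size, gf_dim, c_dim) come from the example itself; the latent dimension of 100 is assumed.

# Hypothetical usage (z_dim=100 is an assumption; hparams fields are taken from the example).
z = tf.random_uniform([hparams.batch_size, 100], minval=-1.0, maxval=1.0)
x_train = generator(hparams, z, 'gen', train=True, reuse=False)
x_sample = generator(hparams, z, 'gen', train=False, reuse=True)  # reuses the same variables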
def generator(hparams, z, train, reuse):

    if reuse:
        tf.get_variable_scope().reuse_variables()

    output_size = 64
    s = output_size
    s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

    g_bn0 = ops.batch_norm(name='g_bn0')
    g_bn1 = ops.batch_norm(name='g_bn1')
    g_bn2 = ops.batch_norm(name='g_bn2')
    g_bn3 = ops.batch_norm(name='g_bn3')

    # project `z` and reshape
    h0 = tf.reshape(ops.linear(z, hparams.gf_dim*8*s16*s16, 'g_h0_lin'),
                    [-1, s16, s16, hparams.gf_dim * 8])
    h0 = tf.nn.relu(g_bn0(h0, train=train))

    h1 = ops.deconv2d(h0, [hparams.batch_size, s8, s8, hparams.gf_dim*4], name='g_h1')
    h1 = tf.nn.relu(g_bn1(h1, train=train))

    h2 = ops.deconv2d(h1, [hparams.batch_size, s4, s4, hparams.gf_dim*2], name='g_h2')
    h2 = tf.nn.relu(g_bn2(h2, train=train))

    h3 = ops.deconv2d(h2, [hparams.batch_size, s2, s2, hparams.gf_dim*1], name='g_h3')
    h3 = tf.nn.relu(g_bn3(h3, train=train))

    h4 = ops.deconv2d(h3, [hparams.batch_size, s, s, hparams.c_dim], name='g_h4')
    x_gen = tf.nn.tanh(h4)

    return x_gen
def GeneratorCNN(z, config, reuse=None):
    '''
    maps z to 64x64 images with values in [-1,1]
    uses batch normalization internally
    '''

    # trying to get around batch_size like this:
    batch_size = tf.shape(z)[0]
    # batch_size = tf.placeholder_with_default(64, [], 'bs')

    with tf.variable_scope("generator", reuse=reuse) as vs:
        g_bn0 = batch_norm(name='g_bn0')
        g_bn1 = batch_norm(name='g_bn1')
        g_bn2 = batch_norm(name='g_bn2')
        g_bn3 = batch_norm(name='g_bn3')

        s_h, s_w = config.gf_dim, config.gf_dim  # 64, 64
        s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
        s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
        s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
        s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

        # project `z` and reshape
        z_, self_h0_w, self_h0_b = linear(
            z, config.gf_dim*8*s_h16*s_w16, 'g_h0_lin', with_w=True)

        self_h0 = tf.reshape(z_, [-1, s_h16, s_w16, config.gf_dim * 8])
        h0 = tf.nn.relu(g_bn0(self_h0))

        h1, h1_w, h1_b = deconv2d(
            h0, [batch_size, s_h8, s_w8, config.gf_dim*4], name='g_h1', with_w=True)
        h1 = tf.nn.relu(g_bn1(h1))

        h2, h2_w, h2_b = deconv2d(
            h1, [batch_size, s_h4, s_w4, config.gf_dim*2], name='g_h2', with_w=True)
        h2 = tf.nn.relu(g_bn2(h2))

        h3, h3_w, h3_b = deconv2d(
            h2, [batch_size, s_h2, s_w2, config.gf_dim*1], name='g_h3', with_w=True)
        h3 = tf.nn.relu(g_bn3(h3))

        h4, h4_w, h4_b = deconv2d(
            h3, [batch_size, s_h, s_w, config.c_dim], name='g_h4', with_w=True)
        out = tf.nn.tanh(h4)

    variables = tf.contrib.framework.get_variables(vs)
    return out, variables
def __call__(self, z, y):
    """
    :param z: 2D [batch_size, z_dim]
    :param y: 2D [batch_size, y_dim]
    :return:
    """
    batch_size, y_dim = y.get_shape().as_list()
    batch_size_, z_dim = z.get_shape().as_list()
    assert batch_size == batch_size_

    h1_size = int(self._output_size / 4)
    h2_size = int(self._output_size / 2)

    with tf.variable_scope(self._name):
        yb = tf.reshape(y, shape=[-1, 1, 1, y_dim])  # (100, 1, 1, 10)
        z = tf.concat([z, y], axis=1)  # (batch_size=100, y_dim+z_dim=110)

        h0 = tf.nn.relu(
            ops.batch_norm(
                ops.fc(z, self._fc_dim, reuse=self._reuse, name='g_fc0'),
                is_training=self._is_training, reuse=self._reuse, name_scope='g_bn0'
            )
        )
        h0 = tf.concat([h0, y], axis=1)  # (batch_size=100, fc_dim+y_dim=794)

        h1 = tf.nn.relu(
            ops.batch_norm(
                ops.fc(h0, self._ngf*h1_size*h1_size, reuse=self._reuse, name='g_fc1'),
                is_training=self._is_training, reuse=self._reuse, name_scope='g_bn1'
            )
        )
        h1 = tf.reshape(h1, shape=[-1, h1_size, h1_size, self._ngf])
        h1 = tf.concat([h1, yb*tf.ones([batch_size, h1_size, h1_size, y_dim])], axis=3)  # (100, 7, 7, 522)

        h2 = tf.nn.relu(
            ops.batch_norm(
                ops.deconv2d(h1, self._ngf, reuse=self._reuse, name='g_conv2'),
                is_training=self._is_training, reuse=self._reuse, name_scope='g_bn2'
            )
        )
        h2 = tf.concat([h2, yb*tf.ones([batch_size, h2_size, h2_size, y_dim])], axis=3)  # (100, 14, 14, 522)

        h3 = tf.nn.sigmoid(
            ops.deconv2d(h2, self._channel_dim, reuse=self._reuse, name='g_conv3')
        )  # TODO DIMENSION??? SHRINK

    self._reuse = True
    return h3  # (100, 28, 28, 1)
def generator(self, opts, noise, is_training, reuse=False):
    """Generator function, suitable for simple picture experiments.

    Args:
        noise: [num_points, dim] array, where dim is dimensionality of the
            latent noise space.
        is_training: bool, defines whether to use batch_norm in the train
            or test mode.
    Returns:
        [num_points, dim1, dim2, dim3] array, where the first coordinate
        indexes the points, which all are of the shape (dim1, dim2, dim3).
    """

    output_shape = self._data.data_shape  # (dim1, dim2, dim3)
    # Computing the number of noise vectors on-the-go
    dim1 = tf.shape(noise)[0]
    num_filters = opts['g_num_filters']

    with tf.variable_scope("GENERATOR", reuse=reuse):

        height = output_shape[0] / 4
        width = output_shape[1] / 4
        h0 = ops.linear(opts, noise, num_filters * height * width, scope='h0_lin')
        h0 = tf.reshape(h0, [-1, height, width, num_filters])
        h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        # h0 = tf.nn.relu(h0)
        h0 = ops.lrelu(h0)

        _out_shape = [dim1, height * 2, width * 2, num_filters / 2]
        # for 28 x 28 does 7 x 7 --> 14 x 14
        h1 = ops.deconv2d(opts, h0, _out_shape, scope='h1_deconv')
        h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        # h1 = tf.nn.relu(h1)
        h1 = ops.lrelu(h1)

        _out_shape = [dim1, height * 4, width * 4, num_filters / 4]
        # for 28 x 28 does 14 x 14 --> 28 x 28
        h2 = ops.deconv2d(opts, h1, _out_shape, scope='h2_deconv')
        h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        # h2 = tf.nn.relu(h2)
        h2 = ops.lrelu(h2)

        _out_shape = [dim1] + list(output_shape)
        # data_shape[0] x data_shape[1] x ? -> data_shape
        h3 = ops.deconv2d(opts, h2, _out_shape, d_h=1, d_w=1, scope='h3_deconv')
        h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')

    if opts['input_normalize_sym']:
        return tf.nn.tanh(h3)
    else:
        return tf.nn.sigmoid(h3)
def generator(self, opts, noise, is_training, reuse=False):
    """Generator function, suitable for bigger simple pictures.

    Args:
        noise: [num_points, dim] array, where dim is dimensionality of the
            latent noise space.
        is_training: bool, defines whether to use batch_norm in the train
            or test mode.
    Returns:
        [num_points, dim1, dim2, dim3] array, where the first coordinate
        indexes the points, which all are of the shape (dim1, dim2, dim3).
    """

    output_shape = self._data.data_shape  # (dim1, dim2, dim3)
    # Computing the number of noise vectors on-the-go
    dim1 = tf.shape(noise)[0]
    num_filters = opts['g_num_filters']

    with tf.variable_scope("GENERATOR", reuse=reuse):

        height = output_shape[0] / 16
        width = output_shape[1] / 16
        h0 = ops.linear(opts, noise, num_filters * height * width, scope='h0_lin')
        h0 = tf.reshape(h0, [-1, height, width, num_filters])
        h0 = ops.batch_norm(opts, h0, is_training, reuse, scope='bn_layer1')
        h0 = tf.nn.relu(h0)

        _out_shape = [dim1, height * 2, width * 2, num_filters / 2]
        # for 128 x 128 does 8 x 8 --> 16 x 16
        h1 = ops.deconv2d(opts, h0, _out_shape, scope='h1_deconv')
        h1 = ops.batch_norm(opts, h1, is_training, reuse, scope='bn_layer2')
        h1 = tf.nn.relu(h1)

        _out_shape = [dim1, height * 4, width * 4, num_filters / 4]
        # for 128 x 128 does 16 x 16 --> 32 x 32
        h2 = ops.deconv2d(opts, h1, _out_shape, scope='h2_deconv')
        h2 = ops.batch_norm(opts, h2, is_training, reuse, scope='bn_layer3')
        h2 = tf.nn.relu(h2)

        _out_shape = [dim1, height * 8, width * 8, num_filters / 8]
        # for 128 x 128 does 32 x 32 --> 64 x 64
        h3 = ops.deconv2d(opts, h2, _out_shape, scope='h3_deconv')
        h3 = ops.batch_norm(opts, h3, is_training, reuse, scope='bn_layer4')
        h3 = tf.nn.relu(h3)

        _out_shape = [dim1, height * 16, width * 16, num_filters / 16]
        # for 128 x 128 does 64 x 64 --> 128 x 128
        h4 = ops.deconv2d(opts, h3, _out_shape, scope='h4_deconv')
        h4 = ops.batch_norm(opts, h4, is_training, reuse, scope='bn_layer5')
        h4 = tf.nn.relu(h4)

        _out_shape = [dim1] + list(output_shape)
        # data_shape[0] x data_shape[1] x ? -> data_shape
        h5 = ops.deconv2d(opts, h4, _out_shape, d_h=1, d_w=1, scope='h5_deconv')
        h5 = ops.batch_norm(opts, h5, is_training, reuse, scope='bn_layer6')

    if opts['input_normalize_sym']:
        return tf.nn.tanh(h5)
    else:
        return tf.nn.sigmoid(h5)
def dcgan_like_arch(self, opts, noise, is_training, reuse, keep_prob):
    output_shape = self._data.data_shape
    num_units = opts['g_num_filters']

    batch_size = tf.shape(noise)[0]
    num_layers = opts['g_num_layers']
    if opts['g_arch'] == 'dcgan':
        height = output_shape[0] / 2**num_layers
        width = output_shape[1] / 2**num_layers
    elif opts['g_arch'] == 'dcgan_mod':
        height = output_shape[0] / 2**(num_layers - 1)
        width = output_shape[1] / 2**(num_layers - 1)
    else:
        assert False

    h0 = ops.linear(
        opts, noise, num_units * height * width, scope='h0_lin')
    h0 = tf.reshape(h0, [-1, height, width, num_units])
    h0 = tf.nn.relu(h0)
    layer_x = h0
    for i in xrange(num_layers - 1):
        scale = 2**(i + 1)
        if opts['g_stride1_deconv']:
            # Sylvain, I'm worried about this part!
            _out_shape = [batch_size, height * scale / 2,
                          width * scale / 2, num_units / scale * 2]
            layer_x = ops.deconv2d(
                opts, layer_x, _out_shape,
                d_h=1, d_w=1, scope='h%d_deconv_1x1' % i)
            layer_x = tf.nn.relu(layer_x)
        _out_shape = [batch_size, height * scale, width * scale, num_units / scale]
        layer_x = ops.deconv2d(opts, layer_x, _out_shape, scope='h%d_deconv' % i)
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
        layer_x = tf.nn.relu(layer_x)
        if opts['dropout']:
            _keep_prob = tf.minimum(
                1., 0.9 - (0.9 - keep_prob) * float(i + 1) / (num_layers - 1))
            layer_x = tf.nn.dropout(layer_x, _keep_prob)

    _out_shape = [batch_size] + list(output_shape)
    if opts['g_arch'] == 'dcgan':
        last_h = ops.deconv2d(
            opts, layer_x, _out_shape, scope='hlast_deconv')
    elif opts['g_arch'] == 'dcgan_mod':
        last_h = ops.deconv2d(
            opts, layer_x, _out_shape, d_h=1, d_w=1, scope='hlast_deconv')
    else:
        assert False

    if opts['input_normalize_sym']:
        return tf.nn.tanh(last_h)
    else:
        return tf.nn.sigmoid(last_h)
def ali_deconv(self, opts, noise, is_training, reuse, keep_prob):
    output_shape = self._data.data_shape

    batch_size = tf.shape(noise)[0]
    noise_size = int(noise.get_shape()[1])
    data_height = output_shape[0]
    data_width = output_shape[1]
    data_channels = output_shape[2]

    noise = tf.reshape(noise, [-1, 1, 1, noise_size])

    num_units = opts['g_num_filters']
    layer_params = []
    layer_params.append([4, 1, num_units])
    layer_params.append([4, 2, num_units / 2])
    layer_params.append([4, 1, num_units / 4])
    layer_params.append([4, 2, num_units / 8])
    layer_params.append([5, 1, num_units / 8])
    # For convolution: (n - k) / stride + 1 = s
    # For transposed: (s - 1) * stride + k = n
    layer_x = noise
    height = 1
    width = 1
    for i, (kernel, stride, channels) in enumerate(layer_params):
        height = (height - 1) * stride + kernel
        width = height
        layer_x = ops.deconv2d(
            opts, layer_x, [batch_size, height, width, channels],
            d_h=stride, d_w=stride, scope='h%d_deconv' % i,
            conv_filters_dim=kernel, padding='VALID')
        if opts['batch_norm']:
            layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bn%d' % i)
        layer_x = ops.lrelu(layer_x, 0.1)
    assert height == data_height
    assert width == data_width

    # Then two 1x1 convolutions.
    layer_x = ops.conv2d(opts, layer_x, num_units / 8, d_h=1, d_w=1,
                         scope='conv2d_1x1', conv_filters_dim=1)
    if opts['batch_norm']:
        layer_x = ops.batch_norm(opts, layer_x, is_training, reuse, scope='bnlast')
    layer_x = ops.lrelu(layer_x, 0.1)
    layer_x = ops.conv2d(opts, layer_x, data_channels, d_h=1, d_w=1,
                         scope='conv2d_1x1_2', conv_filters_dim=1)

    if opts['input_normalize_sym']:
        return tf.nn.tanh(layer_x)
    else:
        return tf.nn.sigmoid(layer_x)
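The layer_params list together with the transposed-convolution size formula (s - 1) * stride + k determines the final spatial size, which is what the asserts check against the data shape. A quick sanity check, assuming 32x32 inputs (an assumption; the real shape comes from self._data.data_shape):

# Worked check of the output-size formula used above, assuming 32x32 data.
layer_params = [(4, 1), (4, 2), (4, 1), (4, 2), (5, 1)]  # (kernel, stride) per deconv layer
height = 1
for kernel, stride in layer_params:
    height = (height - 1) * stride + kernel
print(height)  # 1 -> 4 -> 10 -> 13 -> 28 -> 32, so the asserts pass for 32x32 images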
def generator(self, z, y=None, is_train=True, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    s = self.output_size
    if np.mod(s, 16) == 0:
        s2, s4, s8, s16 = int(s/2), int(s/4), int(s/8), int(s/16)

        # project `z` and reshape
        self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*s16*s16, 'g_h0_lin', with_w=True)

        self.h0 = tf.reshape(self.z_, [-1, s16, s16, self.gf_dim * 8])
        h0 = tf.nn.relu(self.g_bn0(self.h0, train=is_train))

        self.h1, self.h1_w, self.h1_b = deconv2d(
            h0, [self.batch_size, s8, s8, self.gf_dim*4], name='g_h1', with_w=True)
        h1 = tf.nn.relu(self.g_bn1(self.h1, train=is_train))

        h2, self.h2_w, self.h2_b = deconv2d(
            h1, [self.batch_size, s4, s4, self.gf_dim*2], name='g_h2', with_w=True)
        h2 = tf.nn.relu(self.g_bn2(h2, train=is_train))

        h3, self.h3_w, self.h3_b = deconv2d(
            h2, [self.batch_size, s2, s2, self.gf_dim*1], name='g_h3', with_w=True)
        h3 = tf.nn.relu(self.g_bn3(h3, train=is_train))

        h4, self.h4_w, self.h4_b = deconv2d(
            h3, [self.batch_size, s, s, self.c_dim], name='g_h4', with_w=True)

        return tf.nn.tanh(h4)
    else:
        s = self.output_size
        s2, s4 = int(s/2), int(s/4)

        self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*2*s4*s4, 'g_h0_lin', with_w=True)

        self.h0 = tf.reshape(self.z_, [-1, s4, s4, self.gf_dim * 2])
        h0 = tf.nn.relu(self.g_bn0(self.h0, train=is_train))

        self.h1, self.h1_w, self.h1_b = deconv2d(
            h0, [self.batch_size, s2, s2, self.gf_dim*1], name='g_h1', with_w=True)
        h1 = tf.nn.relu(self.g_bn1(self.h1, train=is_train))

        h2, self.h2_w, self.h2_b = deconv2d(
            h1, [self.batch_size, s, s, self.c_dim], name='g_h2', with_w=True)

        return tf.nn.tanh(h2)