We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.nnet.conv.conv2d().
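Before the extracted examples, here is a minimal, self-contained sketch of the legacy conv.conv2d call (later Theano releases deprecate it in favor of theano.tensor.nnet.conv2d). The shapes, variable names, and values are illustrative assumptions, not taken from any of the projects below:

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.tensor.nnet import conv

    x = T.tensor4('x')                      # (batch, channels, height, width)
    w_values = np.random.randn(2, 1, 3, 3).astype(theano.config.floatX)
    w = theano.shared(w_values, name='w')   # (n_filters, channels, fh, fw)

    # 'valid' convolution: output is (batch, n_filters, H - fh + 1, W - fw + 1)
    y = conv.conv2d(input=x, filters=w,
                    filter_shape=(2, 1, 3, 3),
                    image_shape=(None, 1, 28, 28),
                    border_mode='valid')

    f = theano.function([x], y)
    out = f(np.random.randn(4, 1, 28, 28).astype(theano.config.floatX))
    print(out.shape)                        # (4, 2, 26, 26)

The same call pattern, a 4D input of (batch, channels, height, width) convolved with a 4D filter bank, recurs throughout the examples below.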
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def __init__(self, input, params_W, params_b, filter_shape, image_shape, poolsize=(2, 2)):
    assert image_shape[1] == filter_shape[1]
    self.input = input
    self.W = params_W
    self.b = params_b
    # convolve input feature maps with filters
    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape
    )
    # downsample each feature map with max-pooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
def eval(self, inp):
    # input = self.pad(inp.eval())
    results = theano.tensor.nnet.conv2d(inp, self.W, border_mode='full')
    biased = results + self.b.dimshuffle('x', 0, 'x', 'x')
    result = theano.tensor.nnet.sigmoid(biased)
    return result

# test = CNNlayer((1,1,4,4), (1,1,3,3))
# inp = np.array([[0.0,0,0,0],[0,1,0,0], [0,0,0,0], [0,0,0,0]])
# weight = np.array([[1,.2, 0],[.4,.5, 0], [0,0,0]])
# test.setW(weight)
# weight_2 = np.array([list(weight[i][::-1]) for i in range(len(weight))])
# weight_3 = weight_2[::-1]
# print(inp)
# print((weight_3))
# print("their", conv.conv2d(inp, weight).eval())
def encoder(tparams, layer0_input, filter_shape, pool_size, options, prefix='cnn_d'):
    """
    filter_shape: (number of filters, num input feature maps, filter height, filter width)
    image_shape: (batch_size, num input feature maps, image height, image width)
    """
    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix, 'W')],
                           filter_shape=filter_shape)
    if options['cnn_activation'] == 'tanh':
        conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x'))
        # ignore_border=False is very important here
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=False)
    elif options['cnn_activation'] == 'linear':
        conv_out2 = conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x')
        output = downsample.max_pool_2d(input=conv_out2, ds=pool_size, ignore_border=False)
    else:
        print(' Wrong specification of activation function in CNN')
    return output.flatten(2)
def test_broadcast_grad():
    # rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    # x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    # sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius + 1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5 * filter_1d ** 2 / sigma ** 2)
    filter_1d = filter_1d / filter_1d.sum()

    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma)
def local_conv2d_cpu(node):
    if not isinstance(node.op, AbstractConv2d):
        return None

    img, kern = node.inputs
    if (not isinstance(img.type, TensorType) or
            not isinstance(kern.type, TensorType)):
        return None
    if node.op.border_mode not in ['full', 'valid']:
        return None
    if not node.op.filter_flip:
        # Not tested yet
        return None

    rval = conv2d(img, kern,
                  node.op.imshp, node.op.kshp,
                  border_mode=node.op.border_mode,
                  subsample=node.op.subsample)

    copy_stack_trace(node.outputs[0], rval)
    return [rval]
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = None  # (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def predict_maxpool(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        # return the rectified feature maps before pooling
        return ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = theano.tensor.signal.pool.pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = theano.tensor.signal.pool.pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = theano.tensor.signal.pool.pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def output(self, x, a):
    x = T.reshape(x, (-1, self.n_inputs, self.height, self.width))
    return T.tanh(conv2d(x, self.W) + self.b.dimshuffle('x', 0, 'x', 'x'))
def __init__(self, rng, input, image_shape, filter_shape, poolsize=(2, 2)):
    # the number of input feature maps must match between image and filters
    assert image_shape[1] == filter_shape[1]

    fan_in = np.prod(filter_shape[1:])
    fan_out = filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize)
    W_bound = np.sqrt(6.0 / (fan_in + fan_out))
    self.W = theano.shared(
        np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                   dtype=theano.config.floatX),  # @UndefinedVariable
        borrow=True)
    b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)  # @UndefinedVariable
    self.b = theano.shared(value=b_values, borrow=True)

    # convolve input feature maps with filters
    conv_out = conv.conv2d(
        input=input, filters=self.W, filter_shape=filter_shape, image_shape=image_shape)

    # downsample each feature map individually via max-pooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out, ds=poolsize, ignore_border=True)

    # apply the nonlinearity with a broadcasted bias
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
def set_output(self):
    if sum(self._padding) > 0:
        padded_input = tensor.alloc(0.0,  # value to fill the tensor
                                    self._input_shape[0],
                                    self._input_shape[1],
                                    self._input_shape[2] + 2 * self._padding[2],
                                    self._input_shape[3] + 2 * self._padding[3])
        padded_input = tensor.set_subtensor(
            padded_input[:, :,
                         self._padding[2]:self._padding[2] + self._input_shape[2],
                         self._padding[3]:self._padding[3] + self._input_shape[3]],
            self._prev_layer.output)
        padded_input_shape = [self._input_shape[0],
                              self._input_shape[1],
                              self._input_shape[2] + 2 * self._padding[2],
                              self._input_shape[3] + 2 * self._padding[3]]
    else:
        padded_input = self._prev_layer.output
        padded_input_shape = self._input_shape

    conv_out = conv.conv2d(
        input=padded_input,
        filters=self.W.val,
        filter_shape=self._filter_shape,
        image_shape=np.asarray(padded_input_shape, dtype=np.int16),
        border_mode='valid')

    # Add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map width & height.
    self._output = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
def conv(self, input, filters, image_shape, filter_shape):
    return conv.conv2d(input=input, filters=filters,
                       image_shape=image_shape, filter_shape=filter_shape,
                       border_mode=self.border_mode, subsample=self.subsample)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    self.inpt = inpt.reshape(self.image_shape)
    conv_out = conv.conv2d(
        input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
        image_shape=self.image_shape)
    pooled_out = downsample.max_pool_2d(
        input=conv_out, ds=self.poolsize, ignore_border=True)
    self.output = self.activation_fn(
        pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.output_dropout = self.output  # no dropout in the convolutional layers
def conv_and_pool(input_expr, w, convs_mult, p_drop_conv):
    conv_w = w
    if convs_mult == 2:
        conv_w = T.concatenate([w, w[:, :, ::-1, ::-1]], axis=0)
    elif convs_mult == 4:
        conv_w = T.concatenate([w, w[:, :, ::-1], w[:, :, :, ::-1],
                                w[:, :, ::-1, ::-1]], axis=0)
    e1 = rectify(conv2d(input_expr, conv_w))
    e2 = max_pool_2d(e1, (2, 2), ignore_border=False)
    return dropout(e2, p_drop_conv)
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hid, convs_mult):
    l1 = conv_and_pool(X, w, convs_mult, p_drop_conv)
    l2 = conv_and_pool(l1, w2, convs_mult, p_drop_conv)
    l3 = conv_and_pool(l2, w3, convs_mult, p_drop_conv)

    l4 = rectify(conv2d(l3, w4))
    l4 = dropout(l4, p_drop_hid)
    l4 = T.flatten(l4, outdim=2)

    pyx = nn.nonlinearities.softmax(T.dot(l4, w_o))
    return pyx
def fprop(self, input, return_output_preactivation=False):
    # TODO: could be faster if pooling was done here instead
    conv_out = conv.conv2d(input, filters=self.W, border_mode=self.border_mode)
    pre_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
    output = self.activation_fct(pre_output)
    if return_output_preactivation:
        return output, pre_output
    return output
def __init__(self, rng, input, input_shape, filter_shape, pool_shape=(2, 2)):
    """
    Convolution + max-pooling layer.
    :param input: input tensor
    :param input_shape: shape of the input, (batch_size, image_channel, image_width, image_height)
    :param filter_shape: shape of the filters, (filter_count, filter_channel, filter_width, filter_height)
    :param pool_shape: pooling size
    :return:
    """
    # assert input_shape[1] == filter_shape[1]
    self.input = input
    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.pool_shape = pool_shape

    # initialize the weights uniformly within the fan-in/fan-out bound
    n_in = numpy.prod(input_shape[1:])
    n_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) // numpy.prod(pool_shape))
    weight_max = numpy.sqrt(6. / (n_in + n_out))
    self.w = theano.shared(
        numpy.asarray(
            rng.uniform(low=-weight_max, high=weight_max, size=filter_shape),
            dtype=theano.config.floatX
        ),
        borrow=True
    )
    self.b = theano.shared(numpy.zeros((filter_shape[0],), dtype=theano.config.floatX), borrow=True)
    self.params = [self.w, self.b]

    # calculate the output
    self.conv_out = conv2d(
        input=self.input,
        filters=self.w,
        filter_shape=self.filter_shape,
        image_shape=self.input_shape
    )
    self.pool_out = pool_2d(
        input=self.conv_out,
        ds=pool_shape,
        ignore_border=True
    )
    self.output = T.tanh(self.pool_out + self.b.dimshuffle('x', 0, 'x', 'x'))
def model(X, w, w2, w3, w35, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    # l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    # l2 = dropout(l2, p_drop_conv)

    l3 = rectify(conv2d(l2, w3))
    # l3 = dropout(l3, p_drop_conv)

    l35a = rectify(conv2d(l3, w35))
    l35b = max_pool_2d(l35a, (2, 2))
    l35 = T.flatten(l35b, outdim=2)
    # l35 = dropout(l35, p_drop_conv)

    l4 = rectify(T.dot(l35, w4))
    # l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l35, l4, pyx
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), stride=(1, 1)):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input

    fan_in = np.prod(filter_shape[1:])
    fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
    W_bound = np.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(
        np.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX
        ),
        borrow=True
    )
    b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)

    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape,
        subsample=stride
    )
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
    assert image_shape[1] == filter_shape[1]
    self.input = input

    # there are "num input feature maps * filter height * filter width"
    # inputs to each hidden unit
    fan_in = numpy.prod(filter_shape[1:])
    # each unit in the lower layer receives a gradient from:
    # "num output feature maps * filter height * filter width" / pooling size
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
               numpy.prod(poolsize))

    # initialize weights with random weights
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(numpy.asarray(
        rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
        dtype=theano.config.floatX), borrow=True)

    # the bias is a 1D tensor -- one bias per output feature map
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)

    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input, filters=self.W,
                           filter_shape=filter_shape, image_shape=image_shape)

    # downsample each feature map individually, using maxpooling
    pooled_out = pool.pool_2d(input=conv_out, ds=poolsize, ignore_border=True)

    self.output = T.maximum(0.0, pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

    # store parameters of this layer
    self.params = [self.W, self.b]
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
    assert image_shape[1] == filter_shape[1]
    self.input = input

    # there are "num input feature maps * filter height * filter width"
    # inputs to each hidden unit
    fan_in = numpy.prod(filter_shape[1:])
    # each unit in the lower layer receives a gradient from:
    # "num output feature maps * filter height * filter width" / pooling size
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
               numpy.prod(poolsize))

    # initialize weights with random weights
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(numpy.asarray(
        rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
        dtype=theano.config.floatX), borrow=True)

    # the bias is a 1D tensor -- one bias per output feature map
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)

    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input, filters=self.W,
                           filter_shape=filter_shape, image_shape=image_shape)

    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(input=conv_out, ds=poolsize, ignore_border=True)

    self.output = T.maximum(0.0, pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

    # store parameters of this layer
    self.params = [self.W, self.b]
def convolutionalLayer(inpu, feature_map, batch, length, window, dim, prefix, params, names):
    down = window // 2
    up = window - down - 1
    zodown = T.zeros((batch, 1, down, dim), dtype=theano.config.floatX)
    zoup = T.zeros((batch, 1, up, dim), dtype=theano.config.floatX)

    inps = T.cast(T.concatenate([zoup, inpu, zodown], axis=2), dtype=theano.config.floatX)

    fan_in = window * dim
    fan_out = feature_map * window * dim / length  # (length - window + 1)

    filter_shape = (feature_map, 1, window, dim)
    image_shape = (batch, 1, length + down + up, dim)

    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    conv_W = theano.shared(numpy.random.uniform(low=-W_bound, high=W_bound,
                                                size=filter_shape).astype(theano.config.floatX))
    conv_b = theano.shared(numpy.zeros(filter_shape[0], dtype=theano.config.floatX))

    # bundle
    params += [conv_W, conv_b]
    names += [prefix + '_convL_W_' + str(window), prefix + '_convL_b_' + str(window)]

    conv_out = conv.conv2d(input=inps, filters=conv_W,
                           filter_shape=filter_shape, image_shape=image_shape)
    conv_out = T.tanh(conv_out + conv_b.dimshuffle('x', 0, 'x', 'x'))

    return conv_out.dimshuffle(0, 2, 1, 3).flatten(3)
def LeNetConvPoolLayer(inps, feature_map, batch, length, window, dim, prefix, params, names):
    fan_in = window * dim
    fan_out = feature_map * window * dim / (length - window + 1)

    filter_shape = (feature_map, 1, window, dim)
    image_shape = (batch, 1, length, dim)
    pool_size = (length - window + 1, 1)

    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    conv_W = theano.shared(numpy.random.uniform(low=-W_bound, high=W_bound,
                                                size=filter_shape).astype(theano.config.floatX))
    conv_b = theano.shared(numpy.zeros(filter_shape[0], dtype=theano.config.floatX))

    # bundle
    params += [conv_W, conv_b]
    names += [prefix + '_conv_W_' + str(window), prefix + '_conv_b_' + str(window)]

    conv_out = conv.conv2d(input=inps, filters=conv_W,
                           filter_shape=filter_shape, image_shape=image_shape)
    conv_out_act = T.tanh(conv_out + conv_b.dimshuffle('x', 0, 'x', 'x'))
    conv_output = downsample.max_pool_2d(input=conv_out_act, ds=pool_size, ignore_border=True)

    return conv_output.flatten(2)
def convolve1d_4D_conv2d(input, W, mode='full'):
    conv_out, _ = theano.scan(
        fn=lambda i: conv2d(input[:, :, :, i:i + 1], W[:, :, :, i:i + 1], border_mode=mode),
        outputs_info=None,
        sequences=[T.arange(0, W.shape[3])])
    conv_out = conv_out.flatten(ndim=4).dimshuffle(1, 2, 3, 0)
    return conv_out
def convolve1d_4D_conv2d_image(input, W, mode='full'):
    return conv2d(input, W, border_mode='valid')
def output_func(self, input):
    return conv.conv2d(input, self.W, border_mode='valid',
                       filter_shape=self.filter_shape, image_shape=self.input_shape)

# def Conv2dMaxPool(rng, filter_shape, activation):
#     conv = Conv2dLayer(rng, filter_shape)
#     nonlinearity = NonLinearityLayer(activation=activation)
#     pooling = MaxPoolLayer()
#     layer = FeedForwardNet(layers=[])
#     return layer
def encoder(tparams, layer0_input, filter_shape, pool_size, prefix='cnn_encoder'):
    """
    filter_shape: (number of filters, num input feature maps, filter height, filter width)
    image_shape: (batch_size, num input feature maps, image height, image width)
    """
    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix, 'W')],
                           filter_shape=filter_shape)
    conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x'))
    output = pool.pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=True)
    return output.flatten(2)
def __call__(self, input):
    s, f = self.input_shape, self.filter_shape
    hzeros = T.basic.zeros((s[0], s[1], (f[2] - 1) // 2, s[3]),
                           dtype=theano.config.floatX)
    vzeros = T.basic.zeros((s[0], s[1], s[2] + (f[2] - 1), (f[3] - 1) // 2),
                           dtype=theano.config.floatX)
    input = T.concatenate([hzeros, input, hzeros], axis=2)
    input = T.concatenate([vzeros, input, vzeros], axis=3)
    input = conv.conv2d(
        input=input,
        filters=self.W,
        border_mode='valid')
    return input + self.b.dimshuffle('x', 0, 'x', 'x')
def inv(self, output):
    output = output - self.b.dimshuffle('x', 0, 'x', 'x')
    s, f = self.output_shape, self.filter_shape
    hzeros = T.basic.zeros((s[0], s[1], (f[2] - 1) // 2, s[3]),
                           dtype=theano.config.floatX)
    vzeros = T.basic.zeros((s[0], s[1], s[2] + (f[2] - 1), (f[3] - 1) // 2),
                           dtype=theano.config.floatX)
    output = T.concatenate([hzeros, output, hzeros], axis=2)
    output = T.concatenate([vzeros, output, vzeros], axis=3)
    # flip the filters and swap the input/output channel axes to invert the convolution
    output = conv.conv2d(
        input=output.dimshuffle(0, 1, 2, 3),
        filters=self.W.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1],
        border_mode='valid')
    return output
def __call__(self, input):
    s, f = self.input_shape, self.filter_shape
    zeros = T.basic.zeros((s[0], s[1], (f[2] - 1) // 2), dtype=theano.config.floatX)
    input = T.concatenate([zeros, input, zeros], axis=2)
    # add a broadcastable width axis so the 1D signal can go through conv2d
    input = conv.conv2d(
        input=input.dimshuffle(0, 1, 2, 'x'),
        filters=self.W.dimshuffle(0, 1, 2, 'x'),
        border_mode='valid')[:, :, :, 0]
    return input + self.b.dimshuffle('x', 0, 'x')
def inv(self, output):
    output = output - self.b.dimshuffle('x', 0, 'x')
    s, f = self.output_shape, self.filter_shape
    zeros = T.basic.zeros((s[0], s[1], (f[2] - 1) // 2), dtype=theano.config.floatX)
    output = T.concatenate([zeros, output, zeros], axis=2)
    # flipped, channel-swapped filters invert the forward 1D convolution
    output = conv.conv2d(
        input=output.dimshuffle(0, 1, 2, 'x'),
        filters=self.W.dimshuffle(1, 0, 2, 'x')[:, :, ::-1],
        border_mode='valid')[:, :, :, 0]
    return output