We extracted the following 8 code examples from open-source Python projects; they illustrate how to use theano.tensor.ceil().
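As a quick illustration of the operator itself (a minimal sketch, not taken from any of the projects below), T.ceil() builds a symbolic elementwise ceiling that can be compiled with theano.function():

import theano
import theano.tensor as T

x = T.dvector('x')                    # symbolic vector of doubles
f = theano.function([x], T.ceil(x))   # compile the elementwise ceiling
print(f([1.2, -0.5, 3.0]))            # -> [ 2. -0.  3.]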
def dynamic_k_max_pooling(input, sent_sizes, k_max_factor, k_max_final):
    """
    k_max_factor -- multiplied by sentence_sizes gives the value of kmax for each sentence
    """
    # Unroll input into (batch_size x nchannels x nwords) x ndim
    nbatches, nchannels, nwords, ndim = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
    x = input.dimshuffle(0, 1, 3, 2)

    sent_sizes = T.cast(T.ceil(sent_sizes * k_max_factor), dtype='int32')
    sent_sizes = T.maximum(sent_sizes, k_max_final)
    # sent_sizes_matrix = T.repeat(sent_sizes, nwords, axis=1)
    sent_sizes_matrix = T.repeat(sent_sizes.dimshuffle(0, 'x'), nwords, axis=1)

    idx = T.arange(nwords).dimshuffle('x', 0)
    idx_matrix = T.repeat(idx, nbatches, axis=0)

    sent_sizes_mask = T.lt(idx_matrix, sent_sizes_matrix)[:, ::-1]

    neighborsArgSorted = T.argsort(x, axis=3)
    neighborsArgSorted_masked = ((neighborsArgSorted + 1) * sent_sizes_mask.dimshuffle(0, 'x', 'x', 1)) - 1
    neighborsArgSorted_masked_sorted = neighborsArgSorted_masked.sort(axis=3)

    nwords_max = T.cast(T.ceil(nwords * k_max_factor), 'int32')
    # print nwords_max.eval()
    neighborsArgSorted_masked_sorted_clipped = neighborsArgSorted_masked_sorted[:, :, :, -nwords_max:]

    ax0 = T.repeat(T.arange(nbatches), nchannels * ndim * nwords_max)
    ax1 = T.repeat(T.arange(nchannels), ndim * nwords_max).dimshuffle('x', 0)
    ax1 = T.repeat(ax1, nbatches, axis=0).flatten()
    ax2 = T.repeat(T.arange(ndim), nwords_max, axis=0).dimshuffle('x', 'x', 0)
    ax2 = T.repeat(ax2, nchannels, axis=1)
    ax2 = T.repeat(ax2, nbatches, axis=0).flatten()
    ax3 = neighborsArgSorted_masked_sorted_clipped.flatten()

    pooled_out = x[ax0, ax1, ax2, ax3]
    pooled_out = pooled_out.reshape((nbatches, nchannels, ndim, nwords_max)).dimshuffle(0, 1, 3, 2)

    return pooled_out
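In this example T.ceil() computes the per-sentence pooling size: each sentence length is scaled by k_max_factor, rounded up, and then clamped from below by k_max_final. A rough numeric sketch of just that calculation (the values are made up for illustration):

import numpy as np

sent_sizes = np.array([7, 12, 25])       # hypothetical sentence lengths
k_max_factor, k_max_final = 0.5, 4       # hypothetical hyperparameters
k = np.maximum(np.ceil(sent_sizes * k_max_factor).astype('int32'), k_max_final)
print(k)                                 # -> [ 4  6 13]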
def ceil(x):
    """
    Elemwise ceiling of `x`.
    """
    # see decorator for function body
def test_complex(self):
    self.assertRaises(TypeError, tensor.ceil, tensor.zvector())
def pool(self, x, mode, pool_size, strides, padding=(0, 0)):
    if strides is None:
        strides = pool_size
    assert len(strides) == len(pool_size)
    do2D = len(pool_size) == 2

    if mode == 'avg':
        mode = 'average_exc_pad'

    # Theano requires symmetric padding; when the two sides' padding is
    # unequal we pad with the larger one and truncate the output below.
    max_padding = list(padding)
    for i, p in enumerate(padding):
        if isinstance(p, tuple):
            assert p[1] == p[0] + 1
            max_padding[i] = p[1]
        else:
            max_padding[i] = p

    if do2D:
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=max_padding, mode=mode)
    else:
        # pool over HW
        pool_out = pool.pool_2d(x.dimshuffle(0, 1, 4, 2, 3),
                                ds=pool_size[:2], st=strides[:2],
                                ignore_border=True,
                                padding=max_padding[:2], mode=mode)
        # pool over Z
        pool_out = pool.pool_2d(pool_out.dimshuffle(0, 1, 3, 4, 2),
                                ds=(1, pool_size[2]), st=(1, strides[2]),
                                ignore_border=True,
                                padding=(0, max_padding[2]), mode=mode)

    # Theano might output more than the expected output shape (due to max padding),
    # so we truncate the extra entries here.
    exp_l = []
    for i in range(len(strides)):
        l = T.ceil(self.cast(x.shape[i + 2], _FLOATX) / strides[i])
        exp_l.append(self.cast(l, 'int32'))

    if do2D:
        return pool_out[:, :, :exp_l[0], :exp_l[1]]
    else:
        return pool_out[:, :, :exp_l[0], :exp_l[1], :exp_l[2]]
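Here T.ceil() recovers the expected pooled length along each spatial axis, ceil(input_length / stride), so that the possibly over-padded Theano output can be sliced back to that size. A back-of-the-envelope check of the same formula (the dimensions are made up):

import math

input_len, stride = 7, 2
expected = int(math.ceil(float(input_len) / stride))   # expected pooled length
print(expected)                                        # -> 4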
def get_all_signals(self, input_, corruption_type='round', rng=None):
    scale = self.get_scale()
    scaled_input = input_ * scale
    if corruption_type == 'round':
        # deterministic rounding: epsilon moves the signal to the nearest integer
        epsilon = tt.round(scaled_input) - scaled_input
    elif corruption_type == 'randround':
        # stochastic rounding: round up with probability equal to the fractional part
        rng = get_theano_rng(rng)
        epsilon = tt.where(rng.uniform(scaled_input.shape) > (scaled_input % 1),
                           tt.floor(scaled_input), tt.ceil(scaled_input)) - scaled_input
        print('STOCH ROUNDING')
    elif corruption_type == 'rand':
        # additive uniform noise in [-0.5, 0.5)
        rng = get_theano_rng(1234)
        epsilon = rng.uniform(scaled_input.shape) - .5
    else:
        raise Exception('Unknown corruption_type: %s' % (corruption_type, ))
    spikes = scaled_input + epsilon
    output = spikes / scale
    signals = dict(
        input=input_,
        scaled_input=scaled_input,
        spikes=spikes,
        epsilon=epsilon,
        output=output,
        )
    return signals
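The 'randround' branch above uses tt.ceil() together with tt.floor() to perform stochastic rounding: a value is rounded up with probability equal to its fractional part, which makes the rounding unbiased in expectation. A small NumPy sketch of the same idea (not part of the original project):

import numpy as np

rng = np.random.RandomState(0)
x = np.full(100000, 2.3)                     # values with fractional part 0.3
rounded = np.where(rng.uniform(size=x.shape) > (x % 1), np.floor(x), np.ceil(x))
print(rounded.mean())                        # close to 2.3 on average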