The following 7 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.where().
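theano.tensor.where(cond, ift, iff) selects elementwise from ift where cond is true and from iff otherwise; it is an alias of theano.tensor.switch(). The recurring pattern in the examples below is building a 0/1 padding mask from a matrix of embedding indices. A minimal, self-contained sketch of that pattern (illustrative only, not taken from any of the projects below):

import numpy as np
import theano
import theano.tensor as T

idx = T.imatrix('idx')  # batch of embedding indices, 0 marks padding
mask = T.where(T.eq(idx, 0), np.float32(0.0), np.float32(1.0))

f = theano.function([idx], mask)
print(f(np.array([[11, 21, 43, 0, 0],
                  [234, 543, 0, 0, 0]], dtype='int32')))
# [[1. 1. 1. 0. 0.]
#  [1. 1. 1. 0. 0.]]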
def get_output_for(self, inputs, **kwargs):
    '''
    First layer is a batch of embedding indices:
        [[11, 21, 43, 0, 0],
         [234, 543, 0, 0, 0],
         ...]
    Second layer are the embeddings:
        [[[.02, .01, ...], [.004, .005, ...], ..., [.0, .0, ...], [.0, .0, ...]],
         [[...], ...]]
    '''
    return T.where(T.eq(inputs[0], 0),
                   np.float32(0.0),
                   np.float32(1.0)).dimshuffle((0, 1, 'x')) * inputs[1]
def get_output_for(self, inputs, **kwargs):
    return T.where(T.eq(inputs[0], 0),
                   np.float32(0.0),
                   np.float32(1.0)).dimshuffle((0, 1, 'x')) * inputs[1]
def get_output_for(self, inputs, **kwargs):
    '''
    Take the exp() of all inputs, and divide by the total.
    '''
    exps = T.where(T.eq(inputs[0], 0), np.float32(0.0), np.float32(1.0)) * T.exp(inputs[1])
    return exps / (exps.sum(axis=1).dimshuffle((0, 'x')) + 1e-6)
def get_output_for(self, inputs, **kwargs):
    '''
    Take the exp() of all inputs, and divide by the total.
    '''
    tmp_mask = T.where(T.eq(inputs[0], 0), np.float32(0.0), np.float32(1.0))
    exps = tmp_mask * T.exp(inputs[1])
    nums = tmp_mask.sum(axis=1)
    return (exps / (exps.sum(axis=1).dimshuffle((0, 'x')) + 1e-6)) * nums.dimshuffle((0, 'x'))
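The two examples above combine the padding mask with exp() to form a masked softmax over the non-padded positions. A standalone sketch of that pattern, assuming a float32 score matrix scores and an index matrix idx where 0 marks padding (both names are illustrative):

import numpy as np
import theano
import theano.tensor as T

idx = T.imatrix('idx')
scores = T.fmatrix('scores')

mask = T.where(T.eq(idx, 0), np.float32(0.0), np.float32(1.0))
exps = mask * T.exp(scores)  # zero out padded positions
masked_softmax = exps / (exps.sum(axis=1).dimshuffle((0, 'x')) + 1e-6)

f = theano.function([idx, scores], masked_softmax)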
def get_output_for(self, inputs, **kwargs):
    '''
    First layer is a batch of matrices of embedding indices:
    Second layer are the corresponding embeddings:
    '''
    return T.where(T.eq(inputs[0], 0),
                   np.float32(0.0),
                   np.float32(1.0)).dimshuffle((0, 1, 2, 'x')) * inputs[1]
def __init__(self, rng, rstream, x, y, setting):  # add cost
    """
    Constructing the mlp model.

    Arguments:
        rng, rstream - random streams
    """
    self.paramsEle = []
    self.paramsHyper = []
    self.layers = [ll.InputLayer((None, 3, 28, 28))]
    self.layers.append(ll.ReshapeLayer(self.layers[-1], (None, 3*28*28)))
    penalty = 0.
    for num in [1000, 1000, 1000, 10]:  # TODO: refactor it later
        self.layers.append(DenseLayerWithReg(setting, self.layers[-1], num_units=num))
        self.paramsEle += self.layers[-1].W
        self.paramsEle += self.layers[-1].b
        if setting.regL2 is not None:
            tempL2 = self.layers[-1].L2 * T.sqr(self.layers[-1].W)
            penalty += T.sum(tempL2)
            self.paramsHyper += self.layers[-1].L2

    self.y = self.layers[-1].output
    self.prediction = T.argmax(self.y, axis=1)
    self.penalty = penalty if penalty != 0. else T.constant(0.)

    def stable(x, stabilize=True):
        if stabilize:
            x = T.where(T.isnan(x), 1000., x)
            x = T.where(T.isinf(x), 1000., x)
        return x

    if setting.cost == 'categorical_crossentropy':
        def costFun1(y, label):
            return stable(-T.log(y[T.arange(label.shape[0]), label]),
                          stabilize=True)
    else:
        raise NotImplementedError

    def costFunT1(*args, **kwargs):
        return T.mean(costFun1(*args, **kwargs))

    # cost function
    self.trainCost = costFunT1(self.y, y)
    self.classError = T.mean(T.cast(T.neq(self.guessLabel, y), 'float32'))
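The nested stable() helper above uses T.where to replace NaN/Inf entries of the per-example loss with a large finite constant. The same idiom in isolation (a sketch, not part of the project code):

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
x_clean = T.where(T.isnan(x), 1000., x)
x_clean = T.where(T.isinf(x_clean), 1000., x_clean)

f = theano.function([x], x_clean)
print(f(np.array([0.5, np.nan, np.inf], dtype='float32')))
# -> [0.5, 1000., 1000.]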
def get_all_signals(self, input_, corruption_type='round', rng=None):
    scale = self.get_scale()
    scaled_input = input_ * scale
    if corruption_type == 'round':
        epsilon = tt.round(scaled_input) - scaled_input
    elif corruption_type == 'randround':
        rng = get_theano_rng(rng)
        epsilon = tt.where(rng.uniform(scaled_input.shape) > (scaled_input % 1),
                           tt.floor(scaled_input),
                           tt.ceil(scaled_input)) - scaled_input
        print('STOCH ROUNDING')
    elif corruption_type == 'rand':
        rng = get_theano_rng(1234)
        epsilon = rng.uniform(scaled_input.shape) - .5
    else:
        raise Exception('fdsfsd')
    spikes = scaled_input + epsilon
    output = spikes / scale
    signals = dict(
        input=input_,
        scaled_input=scaled_input,
        spikes=spikes,
        epsilon=epsilon,
        output=output,
        )
    return signals
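The 'randround' branch above implements stochastic rounding: a value is rounded up with probability equal to its fractional part, and down otherwise. A minimal sketch of that idiom in isolation, assuming an MRG_RandomStreams object in place of the project's get_theano_rng() helper:

import theano
import theano.tensor as tt
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=1234)
x = tt.fvector('x')
# Round up with probability equal to the fractional part, otherwise round down.
rounded = tt.where(srng.uniform(x.shape) > (x % 1), tt.floor(x), tt.ceil(x))
f = theano.function([x], rounded)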