The following 10 code examples, extracted from open source Python projects, illustrate how to use theano.tensor.true_div().
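As a quick orientation before the extracted examples (this short snippet is ours, not one of the ten): true_div performs elementwise "true" division and upcasts integer inputs to a floating-point result, in contrast to int_div (the // operator).

import theano
import theano.tensor as T

a = T.lscalar('a')
b = T.lscalar('b')
f = theano.function([a, b], T.true_div(a, b))
print(f(5, 3))  # 1.666..., a float64 result from two int64 inputs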
def fit(self, weights, o_error, tpo):
    gradients = T.grad(o_error, weights)
    updates = []
    for c, v, w, g in zip(self.t_cache, self.t_velocity, weights, gradients):
        # momentum step: decay the old velocity and take a gradient step
        new_velocity = T.sub(T.mul(tpo["momentum_rate"], v),
                             T.mul(tpo["learn_rate"], g))
        # RMSprop cache: running average of the squared gradient
        new_cache = T.add(T.mul(tpo["decay_rate"], c),
                          T.mul(T.sub(1, tpo["decay_rate"]), T.sqr(g)))
        # apply the velocity, then the cache-normalised gradient step
        # (0.1**8 == 1e-8 keeps the denominator away from zero)
        new_weights = T.sub(T.add(w, new_velocity),
                            T.true_div(T.mul(g, tpo["learn_rate"]),
                                       T.sqrt(T.add(new_cache, 0.1**8))))
        updates.append((w, new_weights))
        updates.append((v, new_velocity))
        updates.append((c, new_cache))
    return updates

###### Nesterov momentum ########################################
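A minimal usage sketch for the fit() method above (the Holder class, the weight, and the loss below are our assumptions; the original project only shows the method): it wires the returned update list into a theano.function.

import numpy
import theano
import theano.tensor as T

class Holder(object):
    # hypothetical container providing the shared state fit() expects
    def __init__(self, weights):
        self.t_cache = [theano.shared(w.get_value() * 0.) for w in weights]
        self.t_velocity = [theano.shared(w.get_value() * 0.) for w in weights]

W = theano.shared(numpy.ones((2, 2)), name='W')
x = T.dmatrix('x')
o_error = T.sqr(T.dot(x, W)).sum()
tpo = {"momentum_rate": 0.9, "learn_rate": 0.01, "decay_rate": 0.95}
updates = fit(Holder([W]), [W], o_error, tpo)  # calling the method as a plain function
train = theano.function([x], o_error, updates=updates)
train(numpy.ones((2, 2)))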
def test_local_div_switch_sink(self):
    c = T.dscalar()
    idx = 0
    for condition in [(T.dmatrix('cond'), self.condm),
                      (T.dvector('cond'), self.condv),
                      (T.dscalar('cond'), self.conds)]:
        for x in [(T.dmatrix('x'), self.xm),
                  (T.dvector('x'), self.xv),
                  (T.dscalar('x'), self.xs)]:
            y = T.true_div(
                T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
                T.switch(condition[0] > 0, 1. * x[0], T.log(c) * x[0]))
            f = theano.function([condition[0], x[0], c], [y],
                                mode=self.mode)
            if type(condition[1]) is list:
                for i in xrange(len(condition[1])):
                    res = f(condition[1][i], x[1], -1)
                    assert (res == numpy.asarray(
                        self.resm[idx][i])).sum() == self.resm[idx][i].size
            else:
                res = f(condition[1], x[1], -1)
                assert (res == numpy.asarray(
                    self.resm[idx])).sum() == self.resm[idx].size
            idx += 1
def test_local_zero_div():
    """Tests 0/x -> 0"""
    mode = theano.compile.mode.get_default_mode().including("local_zero_div")
    for t in (T.scalar, T.ivector, T.ftensor4):
        x = t('x')
        for op in (T.int_div, T.true_div):
            y = op(0, x)
            g = optimize(FunctionGraph([x], [y]))
            # the division should be gone
            divs = [node for node in g.toposort()
                    if isinstance(node.op, T.elemwise.Elemwise) and
                    isinstance(node.op.scalar_op, type(op.scalar_op))]
            assert len(divs) == 0
            # the output type should match the unoptimized one
            output = g.outputs[0]
            assert output.ndim == y.ndim
            assert output.type == y.type
            # and the output should be zero
            assert theano.tensor.get_scalar_constant_value(output) == 0
def test_impls(self):
    i = iscalar()
    ii = lscalar()
    d = dscalar()
    f = fscalar()
    c = cscalar()

    assert numpy.allclose(function([i, d], i / d)(5, 7.0), (5.0 / 7.0))
    assert numpy.allclose(function([i, d], d / i)(5, 7.0), (7.0 / 5.0))
    assert numpy.allclose(function([i, f], i / f)(5, 11.0), (5.0 / 11.0))
    assert numpy.allclose(function([i, f], f / i)(5, 11.0), (11.0 / 5.0))
    assert numpy.allclose(function([i, ii], i // ii)(5, 3), (5 // 3))
    assert numpy.allclose(function([i, ii], ii // i)(5, 3), (3 // 5))
    assert numpy.allclose(function([i, ii], true_div(i, ii))(5, 3),
                          (5. / 3.))
    assert numpy.allclose(function([i, ii], true_div(ii, i))(5, 3),
                          (3. / 5.))
    assert numpy.allclose(function([i, c], i / c)(5, numpy.complex(5, 3)),
                          (5. / (5 + 3j)))
    assert numpy.allclose(function([i, c], c / i)(5, numpy.complex(5, 3)),
                          ((5 + 3j) / 5.))
def SlopeLin(slope):
    """
    Linear unit with different slopes

    :param slope: slope of negative quadrant
    :return: x if x > 0 else x/slope
    """
    import theano.tensor as T

    def inner(x):
        return T.switch(T.gt(x, 0), x, T.true_div(x, slope))
    return inner
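A usage sketch for SlopeLin (our addition, assuming it is used as a standalone activation): the returned closure can be applied to any tensor, for example as a Lasagne nonlinearity.

import numpy
import theano
import theano.tensor as T

leaky = SlopeLin(10.0)                 # negative inputs are divided by 10
x = T.dvector('x')
f = theano.function([x], leaky(x))
print(f(numpy.array([-5.0, 2.0])))     # -> [-0.5  2. ]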
def SlopeLin2(x, slope):
    """
    Linear unit with different slopes

    :param slope: slope of negative quadrant
    :return: x if x > 0 else x/slope
    """
    import theano.tensor as T
    return T.switch(T.gt(x, 0), x, T.true_div(x, slope))
def get_output_for(self, inputs, deterministic=False, **kwargs):
    alpha, beta = inputs
    # return 2*T.true_div(alpha,T.add(alpha,beta)+1e-8)-1
    return 2 * (alpha / (alpha + beta + 1e-8)) - 1

# Convenience Function to produce a residual pre-activation MDCL block
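A quick numeric check (plain Python, our addition) of what the expression computes: alpha/(alpha+beta) is the mean of a Beta(alpha, beta) distribution in (0, 1), and the layer rescales it to (-1, 1); the 1e-8 term only guards against division by zero.

alpha, beta = 2.0, 6.0
mean = alpha / (alpha + beta + 1e-8)   # 0.25, in (0, 1)
print(2 * mean - 1)                    # -0.5, in (-1, 1)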
def avg_pool(input_layer, **kwargs):
    # hack to work around https://github.com/Theano/Theano/issues/3776
    norm = nn.layers.ExpressionLayer(input_layer, lambda X: T.ones_like(X))
    norm = nn.layers.Pool2DLayer(norm, mode='average_inc_pad', **kwargs)
    l = nn.layers.Pool2DLayer(input_layer, mode='average_inc_pad', **kwargs)
    l = nn.layers.ElemwiseMergeLayer([l, norm], T.true_div)
    return l
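A pure-NumPy sketch (our addition; the 2x2 window overlapping the border is an assumed example) of the correction the final ElemwiseMergeLayer performs: pooling T.ones_like(X) with 'average_inc_pad' yields the fraction of non-padding cells in each window, so the true_div rescales border windows as if the zero padding had been excluded.

import numpy

# a 2x2 pooling window whose second row and column fall on zero padding
window = numpy.array([[4.0, 0.0],
                      [0.0, 0.0]])
raw_avg = window.mean()                       # 1.0, diluted by the padding
frac_valid = numpy.array([[1.0, 0.0],
                          [0.0, 0.0]]).mean() # 0.25, the pooled ones
print(raw_avg / frac_valid)                   # 4.0, the padding-free average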
def test_elemwise(self):
    # float Ops
    mats = theano.tensor.matrices('cabxy')
    c, a, b, x, y = mats
    s1 = T.switch(c, a, b)
    s2 = T.switch(c, x, y)
    for op in (T.add, T.sub, T.mul, T.true_div, T.int_div, T.floor_div,
               T.minimum, T.maximum, T.gt, T.lt, T.ge, T.le, T.eq, T.neq,
               T.pow):
        g = optimize(FunctionGraph(mats, [op(s1, s2)]))
        assert str(g).count('Switch') == 1
    # integer Ops
    mats = theano.tensor.imatrices('cabxy')
    c, a, b, x, y = mats
    s1 = T.switch(c, a, b)
    s2 = T.switch(c, x, y)
    for op in (T.and_, T.or_, T.xor, T.bitwise_and, T.bitwise_or,
               T.bitwise_xor):
        g = optimize(FunctionGraph(mats, [op(s1, s2)]))
        assert str(g).count('Switch') == 1
    # add/mul with more than two inputs
    u, v = theano.tensor.matrices('uv')
    s3 = T.switch(c, u, v)
    for op in (T.add, T.mul):
        g = optimize(FunctionGraph(mats + [u, v], [op(s1, s2, s3)]))
        assert str(g).count('Switch') == 1
def test_local_upcast_elemwise_constant_inputs():
    s = dvector("s")
    x = tensor.sum(tensor.log(10 ** s))
    f = function([s], [tensor.grad(x, s)])
    f([-42, -2.1, -1, -0.5, 0, 0.2, 1, 2, 12])

    # This tests a corner case where the optimization should not be applied.
    old = theano.config.floatX
    theano.config.floatX = 'float32'
    try:
        v = lvector()
        function([v], theano.tensor.basic.true_div(v, 2))
    finally:
        theano.config.floatX = old