We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.dmatrix().
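Before the extracted examples, here is a minimal illustrative sketch (not taken from any of the projects below) of the pattern most of these snippets share: declare a symbolic float64 matrix with theano.tensor.dmatrix(), build an expression from it, compile the graph with theano.function, and call the result on a NumPy array.

import numpy as np
import theano
import theano.tensor as T

# Declare a symbolic float64 matrix; the name 'x' only labels debug output.
x = T.dmatrix('x')
# Build a symbolic elementwise expression.
y = x ** 2
# Compile the symbolic graph into a callable function.
f = theano.function([x], y)
# Evaluate on a concrete float64 NumPy array (matching the dmatrix dtype).
print(f(np.array([[1.0, 2.0], [3.0, 4.0]])))  # [[ 1.  4.] [ 9. 16.]]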
def test_ConcatenatedReLU():
    unit = ConcatenatedReLU()
    assert len(unit.getpara()) == 0

    x = T.dmatrix()
    y = unit.forward((x,))
    f = theano.function(inputs=[x, ], outputs=y, allow_input_downcast=True)

    result = f(np.array([[1.0, ], ]))[0]
    assert abs(result[0, 0] - 1.0) < 0.01
    assert abs(result[0, 1]) < 0.01

    result = f(np.array([[-1.0, ], ]))[0]
    assert abs(result[0, 0]) < 0.01
    assert abs(result[0, 1] - 1.0) < 0.01

    inputsize = [128, 2, 2, 2]
    outputsize = unit.forwardSize((inputsize,))[0]
    assert inputsize[0] == outputsize[0]
    assert inputsize[1] * 2 == outputsize[1]
    assert inputsize[2] == outputsize[2]
    assert inputsize[3] == outputsize[3]
def max_pooling(matrix, pool_size):
    """
    Applies max-pooling to the given matrix for the specified pool_size.
    Only the maximum value in each pool is kept to construct the result.

    :param matrix: input matrix
    :param pool_size: pooling cell size
    :return: max-pooled output
    """
    """
    t_input = tensor.dmatrix('input')
    pool_out = ds.max_pool_2d(t_input, pool_size, ignore_border=True)
    pool_f = theano.function([t_input], pool_out)
    return pool_f(matrix)
    """
    pass
def test_max_pool_2d_2D(self):
    rng = numpy.random.RandomState(utt.fetch_seed())
    maxpoolshps = ((1, 1), (3, 2))
    imval = rng.rand(4, 5)
    images = tensor.dmatrix()

    for maxpoolshp, ignore_border, mode in product(maxpoolshps,
                                                   [True, False],
                                                   ['max', 'sum',
                                                    'average_inc_pad',
                                                    'average_exc_pad']):
        # print 'maxpoolshp =', maxpoolshp
        # print 'ignore_border =', ignore_border
        numpy_output_val = self.numpy_max_pool_2d(imval, maxpoolshp,
                                                  ignore_border, mode=mode)
        output = pool_2d(images, maxpoolshp, ignore_border, mode=mode)
        output_val = function([images], output)(imval)
        utt.assert_allclose(output_val, numpy_output_val)

        def mp(input):
            return pool_2d(input, maxpoolshp, ignore_border, mode=mode)
        utt.verify_grad(mp, [imval], rng=rng)
def test_infer_shape(self):
    adscal = dscalar()
    bdscal = dscalar()
    adscal_val = numpy.random.rand()
    bdscal_val = numpy.random.rand() + 1
    out = theano.tensor.opt.assert_op(adscal, bdscal)
    self._compile_and_check([adscal, bdscal], [out],
                            [adscal_val, bdscal_val], Assert)

    admat = dmatrix()
    admat_val = numpy.random.rand(3, 4)
    adscal_val += 1
    out = theano.tensor.opt.assert_op(admat, adscal, bdscal)
    self._compile_and_check([admat, adscal, bdscal], [out],
                            [admat_val, adscal_val, bdscal_val], Assert)
def test_eq(self):
    x = T.dmatrix()
    y = T.dmatrix()
    f = theano.function([x, y], T.eq(x, y), mode=self.mode)
    vx = numpy.random.rand(5, 4)
    vy = numpy.random.rand(5, 4)
    f(vx, vy)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, T.Elemwise)
    assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)
    f2 = theano.function([x], T.eq(x, x), mode=self.mode)
    assert numpy.all(f2(vx) == numpy.ones((5, 4)))
    topo2 = f2.maker.fgraph.toposort()
    # Shape_i{1}(<TensorType(float64, matrix)>),
    # Shape_i{0}(<TensorType(float64, matrix)>),
    # Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0)
    assert len(topo2) == 3
    assert isinstance(topo2[-1].op, T.Alloc)
def test_neq(self):
    x = T.dmatrix()
    y = T.dmatrix()
    f = theano.function([x, y], T.neq(x, y), mode=self.mode)
    vx = numpy.random.rand(5, 4)
    vy = numpy.random.rand(5, 4)
    f(vx, vy)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, T.Elemwise)
    assert isinstance(topo[0].op.scalar_op, theano.scalar.NEQ)
    f2 = theano.function([x], T.neq(x, x), mode=self.mode)
    assert numpy.all(f2(vx) == numpy.zeros((5, 4)))
    topo2 = f2.maker.fgraph.toposort()
    assert len(topo2) == 3
    assert isinstance(topo2[-1].op, T.Alloc)
def test_mul(self):
    x = T.dmatrix()
    y = T.dmatrix()
    f = theano.function([x], T.mul(x), mode=self.mode)
    vx = numpy.random.rand(5, 4)
    vy = numpy.random.rand(5, 4)
    f(vx)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert topo[0].op == deep_copy_op
    f2 = theano.function([x, y], T.mul(x, y), mode=self.mode)
    assert numpy.all(f2(vx, vy) == vx * vy)
    topo2 = f2.maker.fgraph.toposort()
    assert len(topo2) == 1
    assert isinstance(topo2[0].op, T.Elemwise)
    assert isinstance(topo2[0].op.scalar_op, theano.scalar.Mul)
def test(self):
    x = T.fmatrix()
    o = T.Elemwise(scal.Cast(scal.Scalar("float64")))(x.astype("float64"))
    f = theano.function([x], o, mode=self.mode)
    dx = numpy.random.rand(5, 4).astype("float32")
    f(dx)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, T.Elemwise)

    x = T.dmatrix()
    o = T.Elemwise(scal.Cast(scal.Scalar("float32")))(x.astype("float32"))
    f = theano.function([x], o, mode=self.mode)
    dx = numpy.random.rand(5, 4)
    f(dx)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, T.Elemwise)
def test_local_div_switch_sink(self):
    c = T.dscalar()
    idx = 0
    for condition in [(T.dmatrix('cond'), self.condm),
                      (T.dvector('cond'), self.condv),
                      (T.dscalar('cond'), self.conds)]:
        for x in [(T.dmatrix('x'), self.xm),
                  (T.dvector('x'), self.xv),
                  (T.dscalar('x'), self.xs)]:
            y = T.true_div(
                T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
                T.switch(condition[0] > 0, 1. * x[0], T.log(c) * x[0]))
            f = theano.function([condition[0], x[0], c], [y], mode=self.mode)
            if type(condition[1]) is list:
                for i in xrange(len(condition[1])):
                    res = f(condition[1][i], x[1], -1)
                    assert (res == numpy.asarray(
                        self.resm[idx][i])).sum() == self.resm[idx][i].size
            else:
                res = f(condition[1], x[1], -1)
                assert (res == numpy.asarray(
                    self.resm[idx])).sum() == self.resm[idx].size
            idx += 1
def test_adv_sub(self):
    admat = dmatrix()
    aivec = lvector()
    bivec = lvector()

    admat_val = rand(5, 4)
    aivec_val = [1, 3, 2]
    bivec_val = [0, 3, 3]
    self._compile_and_check([admat, aivec, bivec],
                            [admat[aivec, bivec]],
                            [admat_val, aivec_val, bivec_val],
                            AdvancedSubtensor)

    # Test cases that aren't implemented, but make sure they do not crash.
    self._compile_and_check([admat, aivec],
                            [admat[aivec, 1:3]],
                            [admat_val, aivec_val],
                            AdvancedSubtensor,
                            check_topo=False)
    self._compile_and_check([admat, aivec],
                            [admat[1:3, aivec]],
                            [admat_val, aivec_val],
                            AdvancedSubtensor,
                            check_topo=False)
def setUp(self):
    self.iv = T.tensor(dtype='int32', broadcastable=(False,))
    self.fv = T.tensor(dtype='float32', broadcastable=(False,))
    self.fv1 = T.tensor(dtype='float32', broadcastable=(True,))
    self.dv = T.tensor(dtype='float64', broadcastable=(False,))
    self.dv1 = T.tensor(dtype='float64', broadcastable=(True,))
    self.cv = T.tensor(dtype='complex64', broadcastable=(False,))
    self.zv = T.tensor(dtype='complex128', broadcastable=(False,))

    self.fv_2 = T.tensor(dtype='float32', broadcastable=(False,))
    self.fv1_2 = T.tensor(dtype='float32', broadcastable=(True,))
    self.dv_2 = T.tensor(dtype='float64', broadcastable=(False,))
    self.dv1_2 = T.tensor(dtype='float64', broadcastable=(True,))
    self.cv_2 = T.tensor(dtype='complex64', broadcastable=(False,))
    self.zv_2 = T.tensor(dtype='complex128', broadcastable=(False,))

    self.fm = T.fmatrix()
    self.dm = T.dmatrix()
    self.cm = T.cmatrix()
    self.zm = T.zmatrix()

    self.fa = T.fscalar()
    self.da = T.dscalar()
    self.ca = T.cscalar()
    self.za = T.zscalar()
def test_batched_dot():
    first = theano.tensor.tensor3("first")
    second = theano.tensor.tensor3("second")
    output = theano.tensor.basic.batched_dot(first, second)
    first_val = numpy.random.rand(10, 10, 20).astype(config.floatX)
    second_val = numpy.random.rand(10, 20, 5).astype(config.floatX)
    result_fn = theano.function([first, second], output)
    result = result_fn(first_val, second_val)
    assert result.shape[0] == first_val.shape[0]
    assert result.shape[1] == first_val.shape[1]
    assert result.shape[2] == second_val.shape[2]

    first_mat = theano.tensor.dmatrix("first")
    second_mat = theano.tensor.dmatrix("second")
    output = theano.tensor.basic.batched_dot(first_mat, second_mat)
    first_mat_val = numpy.random.rand(10, 10).astype(config.floatX)
    second_mat_val = numpy.random.rand(10, 10).astype(config.floatX)
    result_fn = theano.function([first_mat, second_mat], output)
    result = result_fn(first_mat_val, second_mat_val)
    assert result.shape[0] == first_mat_val.shape[0]
def test_scalar_axes(self):
    # Test matrix-matrix
    amat = fmatrix()
    bmat = dmatrix()
    # We leave bmat at float64 to test a mix of float32 and float64.
    axes = 1
    aval = rand(4, 5).astype('float32')
    bval = rand(5, 3)
    c = tensordot(amat, bmat, axes)
    f3 = inplace_func([amat, bmat], c)
    self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                   f3(aval, bval)))
    utt.verify_grad(self.TensorDot(axes), [aval, bval])

    # Test tensor-tensor
    amat = tensor3()
    bmat = tensor3()
    axes = 2
    aval = rand(3, 4, 5)
    bval = rand(4, 5, 3)
    c = tensordot(amat, bmat, axes)
    f3 = inplace_func([amat, bmat], c)
    self.assertTrue(numpy.allclose(numpy.tensordot(aval, bval, axes),
                                   f3(aval, bval)))
    utt.verify_grad(self.TensorDot(axes), [aval, bval])
def test_infer_shape(self):
    x = dmatrix('x')
    x.tag.test_value = np.zeros((2, 2))
    y = dvector('y')
    y.tag.test_value = [0, 0]

    def infer_shape(node, shapes):
        x, y = shapes
        return [y]

    @as_op([dmatrix, dvector], dvector, infer_shape)
    def cumprod_plus(x, y):
        return np.cumprod(x) + y

    self._compile_and_check([x, y], [cumprod_plus(x, y)],
                            [[[1.5, 5], [2, 2]], [1, 100, 2, 200]],
                            cumprod_plus.__class__, warn=False)
def test_borrow_input(self):
    """
    Tests that the contract for io.In is respected. When borrow=False, it
    should be impossible for outputs to be aliased to the input variables
    provided by the user, either through a view-map or a destroy map.
    New tests should be added in the future when borrow=True is implemented.
    """
    a = T.dmatrix()
    aval = numpy.random.rand(3, 3)

    # when borrow=False, test that a destroy map cannot alias output to input
    f = theano.function([In(a, borrow=False)], Out(a + 1, borrow=True))
    assert numpy.all(f(aval) == aval + 1)
    assert not numpy.may_share_memory(aval, f(aval))

    # when borrow=False, test that a viewmap cannot alias output to input
    f = theano.function([In(a, borrow=False)], Out(a[0, :], borrow=True))
    assert numpy.all(f(aval) == aval[0, :])
    assert not numpy.may_share_memory(aval, f(aval))
def test_borrow_output(self):
    a = T.dmatrix()
    f = function([a], Out(a, borrow=False))
    o = N.ones((3, 3))
    assert o is not f(o)  # function no longer permits aliasing outputs to inputs

    f = function([a], Out(a * 4, borrow=False))
    o = N.ones((3, 3))
    four = f(o)
    assert numpy.all(four == 4)
    f(o + .1)  # should not clobber the memory used to store four
    assert numpy.all(four == 4)

    f = function([a], Out(a * 4, borrow=True),
                 mode=theano.Mode('c|py_nogc', 'fast_run'))
    o = N.ones((3, 3))
    four = f(o)
    assert numpy.all(four == 4)
    f(o + .1)  # should clobber the memory used to store four
    if theano.config.cxx:
        assert not numpy.all(four == 4)
    else:
        # The Elemwise.perform method doesn't reuse memory,
        # as some numpy versions don't support that correctly.
        assert numpy.all(four == 4)
def test_shared(self):
    # CHECK: two functions (f1 and f2) can share w
    w = shared(numpy.random.rand(2, 2), 'w')
    wval = w.get_value(borrow=False)

    x = dmatrix()
    out1 = w + x
    out2 = w * x
    f1 = pfunc([x], [out1])
    f2 = pfunc([x], [out2])
    xval = numpy.random.rand(2, 2)
    assert numpy.all(f1(xval) == xval + wval)
    assert numpy.all(f2(xval) == xval * wval)

    # CHECK: updating a shared value
    f3 = pfunc([x], out1, updates=[(w, (w - 1))])
    # f3 changes the value of w
    assert numpy.all(f3(xval) == xval + wval)
    # this same value is read by f1
    assert numpy.all(f1(xval) == xval + (wval - 1))

    w.set_value(w.get_value(borrow=True) * 10, borrow=True)
    # this same value is read by f1
    assert numpy.all(f1(xval) == xval + w.get_value(borrow=True))
def test_incsubtensor_mixed():
    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost)
def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None):
    if rng is None:
        rng = 0
    if isinstance(rng, integer_types):
        rng = np.random.RandomState(rng)
    self.rng = rng
    self.nfeatures = nfeatures
    self.noutputs = noutputs
    self.nhiddens = nhiddens

    x = T.dmatrix('x')

    wh = th.shared(self.rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
    bh = th.shared(np.zeros(nhiddens), borrow=True)
    h = T.nnet.sigmoid(T.dot(x, wh) + bh)

    wy = th.shared(self.rng.normal(0, 1, (nhiddens, noutputs)))
    by = th.shared(np.zeros(noutputs), borrow=True)
    y = T.nnet.softmax(T.dot(h, wy) + by)

    self.inputs = [x]
    self.outputs = [y]
def sigmoid(a):
    x = T.dmatrix('x')
    s = 1 / (1 + T.exp(-x))
    logistic = theano.function([x], s)  # performs elementwise operations
    result = logistic([a])  # the logistic value for every element of the input
    return result

# sample input
def tanh(a):
    x = T.dmatrix('x')
    p, q = T.exp(x) - T.exp(-x), T.exp(x) + T.exp(-x)
    t = p / q
    tanhfunc = theano.function([x], t)
    result = tanhfunc([a])
    return result

# sample input
def __init__(self, a_n_x, a_n_y):
    """Class constructor.

    Args:
      a_n_x (int): number of underlying classifiers
      a_n_y (int): number of classes to predict

    """
    self.n_x = a_n_x
    self.n_y = a_n_y
    # define the network
    # input matrix
    self.x = TT.dmatrix(name="x")
    # mapping from input to output vector
    self.X2Y = self._init_X2Y()
    self.y_bias = theano.shared(value=HE_UNIFORM((1, self.n_y)),
                                name="y_bias")
    # prediction vector
    self.y_pred = TT.nnet.softmax(
        TT.tensordot(self.x, self.X2Y, ((1, 0), (2, 1))) + self.y_bias)
    # predicted label
    self.y_lbl = TT.argmax(self.y_pred, axis=1)[0]
    self._predict = theano.function([self.x], [self.y_lbl, self.y_pred],
                                    name="predict")
    # define trainable parameters
    self._params = [self.X2Y, self.y_bias]
def get_compressed_data(self, data):
    x = T.dmatrix('x')
    x = data
    y = self.get_hidden_values(x)
    # z = self.get_reconstructed_input(y)
    return y

# Function for quick testing of the AE, with default values
def test_multilayer_conv(self):
    # fixed parameters
    bsize = 10  # batch size
    imshp = (5, 5)
    kshp = ((3, 3), (2, 2))
    nkerns = (3, 6)  # per output pixel
    ssizes = (((1, 1), (2, 2)),)
    convmodes = ('full',)  # 'valid',)

    # symbolic stuff
    kerns = [tensor.dmatrix(), tensor.dmatrix()]
    input = tensor.dmatrix()
    rng = numpy.random.RandomState(3423489)

    # build actual input images
    img2d = numpy.arange(bsize * numpy.prod(imshp)).reshape((bsize,) + imshp)
    img1d = img2d.reshape(bsize, -1)

    for mode in ('FAST_COMPILE', 'FAST_RUN'):
        for conv_mode in convmodes:
            for ss in ssizes:
                l1hid, l1shp = sp.convolve(kerns[0], kshp[0],
                                           nkerns[0], input, imshp, ss[0],
                                           mode=conv_mode)
                l1propup = function([kerns[0], input], l1hid, mode=mode)
                # l1kernvals = numpy.random.rand(nkerns[0], numpy.prod(kshp[0]))
                l1kernvals = numpy.arange(nkerns[0] *
                                          numpy.prod(kshp[0])).reshape(
                    nkerns[0], numpy.prod(kshp[0]))
                l1hidval = l1propup(l1kernvals, img1d)

                # actual values
                l2hid, l2shp = sp.convolve(kerns[1], kshp[1],
                                           nkerns[1], l1hid, l1shp, ss[1],
                                           mode=conv_mode)
                l2propup = function([kerns[1], l1hid], l2hid, mode=mode)
                # l2kernvals = numpy.random.rand(nkerns[1], numpy.prod(kshp[1]) * nkerns[0])
                l2kernvals = numpy.arange(nkerns[1] * numpy.prod(kshp[1]) *
                                          nkerns[0]).reshape(
                    nkerns[1], numpy.prod(kshp[1]) * nkerns[0])
                # for debugging, we bring things back to integers
                l1hidval = numpy.arange(numpy.size(l1hidval)).reshape(
                    l1hidval.shape)
                l2hidval = l2propup(l2kernvals, l1hidval)
def test_maxpool(self):
    # generate flattened images
    maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6))
    imval = numpy.random.rand(4, 5, 10, 10)

    images = tensor.dmatrix()
    for maxpoolshp in maxpoolshps:
        # symbolic stuff
        output, outshp = sp.max_pool(images, imval.shape[1:], maxpoolshp)
        f = function([images, ], [output, ])
        output_val = f(imval.reshape(imval.shape[0], -1))

        # numeric verification
        my_output_val = numpy.zeros((imval.shape[0], imval.shape[1],
                                     imval.shape[2] // maxpoolshp[0],
                                     imval.shape[3] // maxpoolshp[1]))
        assert numpy.prod(my_output_val.shape[1:]) == numpy.prod(
            numpy.r_[imval.shape[1], outshp])

        for n in range(imval.shape[0]):
            for k in range(imval.shape[1]):
                for i in range(imval.shape[2] // maxpoolshp[0]):
                    for j in range(imval.shape[3] // maxpoolshp[1]):
                        ii, jj = i * maxpoolshp[0], j * maxpoolshp[1]
                        patch = imval[n, k, ii:ii + maxpoolshp[0],
                                      jj:jj + maxpoolshp[1]]
                        my_output_val[n, k, i, j] = numpy.max(patch)
        my_output_val = my_output_val.reshape(imval.shape[0], -1)
        assert numpy.all(output_val == my_output_val)

        def mp(input):
            output, outshp = sp.max_pool(input, imval.shape[1:], maxpoolshp)
            return output
        utt.verify_grad(mp, [imval.reshape(imval.shape[0], -1)])
def test_const_type_in_mul_canonizer():
    input = dmatrix()
    w = dmatrix()
    visb = dvector()
    hidb = dvector()
    betas = dvector()
    a = dvector()

    def sigm(x):
        return 1. / (1 + tensor.exp(-x))

    hid = sigm((tensor.dot(w, input) + hidb) * betas)

    vis_gauss1 = (tensor.dot(w.T, hid) + visb) * betas / (2 * a * a)
    vis_gauss2 = (tensor.dot(w.T, hid) + visb) * betas / (2. * a * a)

    f1 = function([input, w, visb, hidb, betas, a], vis_gauss1)
    f2 = function([input, w, visb, hidb, betas, a], vis_gauss2)

    ival = numpy.random.rand(5, 5)
    wval = numpy.random.rand(5, 5)
    visbval = numpy.random.rand(5)
    hidbval = numpy.random.rand(5)
    betaval = numpy.random.rand(5)
    aval = numpy.random.rand(5)

    utt.assert_allclose(
        f2(ival, wval, visbval, hidbval, betaval, aval),
        f1(ival, wval, visbval, hidbval, betaval, aval))
def test_local_mul_switch_sink(self):
    c = T.dscalar()
    idx = 0
    for condition in [(T.dmatrix('cond'), self.condm),
                      (T.dvector('cond'), self.condv),
                      (T.dscalar('cond'), self.conds)]:
        for x in [(T.dmatrix('x'), self.xm),
                  (T.dvector('x'), self.xv),
                  (T.dscalar('x'), self.xs)]:
            y = T.mul(
                T.switch(condition[0] > 0, 1. * x[0], 0. * x[0]),
                T.switch(condition[0] > 0, 1. * x[0], T.log(c) * x[0]))
            f = theano.function([condition[0], x[0], c], [y], mode=self.mode)
            if type(condition[1]) is list:
                for i in xrange(len(condition[1])):
                    res = f(condition[1][i], x[1], -1)
                    assert (res == numpy.asarray(
                        self.resm[idx][i])).sum() == self.resm[idx][i].size
            else:
                res = f(condition[1], x[1], -1)
                assert (res == numpy.asarray(
                    self.resm[idx])).sum() == self.resm[idx].size
            idx += 1

    # This case caused a missed optimization in the past.
    x = T.dscalar('x')
    y = T.switch(x < 7, x, T.sqrt(x - 7))
    f = theano.function([x], T.grad(y, x), self.mode)
    assert f(5) == 1, f(5)
def test1(self):
    a = tensor.dmatrix()
    w = sort(a)
    f = theano.function([a], w)
    assert np.allclose(f(self.m_val), np.sort(self.m_val))
def test2(self):
    a = tensor.dmatrix()
    axis = tensor.scalar()
    w = sort(a, axis)
    f = theano.function([a, axis], w)
    for axis_val in 0, 1:
        gv = f(self.m_val, axis_val)
        gt = np.sort(self.m_val, axis_val)
        assert np.allclose(gv, gt)
def test_None(self):
    a = tensor.dmatrix()
    l = sort(a, None)
    f = theano.function([a], l)
    gv = f(self.m_val)
    gt = np.sort(self.m_val, None)
    assert np.allclose(gv, gt)
def test_searchsortedOp_on_no_1d_inp(self):
    no_1d = T.dmatrix('no_1d')
    self.assertRaises(ValueError, searchsorted, no_1d, self.v)
    self.assertRaises(ValueError, searchsorted, self.x, self.v, sorter=no_1d)
def test_infer_shape(self):
    z = tensor.dtensor3()
    x = tensor.dmatrix()
    y = tensor.dscalar()
    self._compile_and_check([x, y], [self.op(x, y)],
                            [numpy.random.rand(8, 5),
                             numpy.random.rand()],
                            self.op_class)
    self._compile_and_check([z, y], [self.op(z, y)],
                            # must be square when nd>2
                            [numpy.random.rand(8, 8, 8),
                             numpy.random.rand()],
                            self.op_class, warn=False)
def test_infer_shape(self):
    x = tensor.dmatrix()
    y = tensor.dscalar()
    z = tensor.iscalar()
    for test_offset in (-5, -4, -1, 0, 1, 4, 5):
        self._compile_and_check([x, y, z], [self.op(x, y, z)],
                                [numpy.random.rand(8, 5),
                                 numpy.random.rand(),
                                 test_offset],
                                self.op_class)
        self._compile_and_check([x, y, z], [self.op(x, y, z)],
                                [numpy.random.rand(5, 8),
                                 numpy.random.rand(),
                                 test_offset],
                                self.op_class)
def test_copy():
    x = tt.dmatrix('x')
    data = np.random.rand(5, 5)
    y = x.copy(name='y')
    f = theano.function([x], y)
    assert_equal(f(data), data)
    assert_string_equal(y.name, 'y')
def test_None_dimShuffle_replace():
    # tests replacing None usage in subtensor with dimshuffle
    #
    # tests whenever None is used in subtensor to reshape a variable, it is
    # replaced by dimshuffle. If the replacement is done properly, Subtensor op
    # (or any of its variants) should not be used anymore.

    x = tt.dmatrix('x')
    y = x[:, None, :]
    f = theano.function([x], y)
    for elem in f.maker.fgraph.toposort():
        assert type(elem.op) not in [Subtensor, AdvancedSubtensor,
                                     AdvancedSubtensor1, IncSubtensor,
                                     AdvancedIncSubtensor,
                                     AdvancedIncSubtensor1]

    x = tt.tensor3('x')
    y1 = x[:, :, None, :]
    y2 = x[None, :, :, None, :]
    y3 = x[:, :, None, :, None, None]
    f = theano.function([x], [y1, y2, y3])
    for elem in f.maker.fgraph.toposort():
        assert type(elem.op) not in [Subtensor, AdvancedSubtensor,
                                     AdvancedSubtensor1, IncSubtensor,
                                     AdvancedIncSubtensor,
                                     AdvancedIncSubtensor1]
def setUp(self):
    self.rng = numpy.random.RandomState(seed=utt.fetch_seed())
    self.s = tensor.iscalar()
    self.v = tensor.fvector()
    self.m = tensor.dmatrix()
    self.t = tensor.ctensor3()
    self.adv1q = tensor.lvector()  # advanced 1d query
def setUp(self):
    self.s = iscalar()
    self.v = fvector()
    self.m = dmatrix()
    self.t = ctensor3()
    self.ft4 = ftensor4()

    self.ix1 = lvector()  # advanced 1d query
    self.ix12 = lvector()
    self.ix2 = lmatrix()
    self.ixr = lrow()
def test_multiple_inplace(self):
    x = tensor.dmatrix('x')
    y = tensor.dvector('y')
    z = tensor.dvector('z')
    f = theano.function([x, y, z],
                        [tensor.dot(y, x), tensor.dot(z, x)],
                        mode=mode_blas_opt)
    vx = numpy.random.rand(3, 3)
    vy = numpy.random.rand(3)
    vz = numpy.random.rand(3)
    out = f(vx, vy, vz)
    assert numpy.allclose(out[0], numpy.dot(vy, vx))
    assert numpy.allclose(out[1], numpy.dot(vz, vx))
    assert len([n for n in f.maker.fgraph.apply_nodes
                if isinstance(n.op, tensor.AllocEmpty)]) == 2
def test_zero_gradient_shape(self):
    """Ensure that a zero gradient has the proper shape."""
    x = dmatrix()
    f = theano.function([x], grad(dscalar(), x,
                                  disconnected_inputs='ignore'))
    a = numpy.ones((3, 7))
    self.assertTrue((f(a) == 0).all())      # Zero gradient.
    self.assertTrue(a.shape == f(a).shape)  # With proper shape.
def test_make_column_matrix_broadcastable():
    # The goal of the operation made by `b` is to ensure the second dimension
    # of the column matrix is broadcastable.
    a = tensor.dmatrix()
    b = a.reshape((a.shape[0], )).dimshuffle(0, 'x')
    f = function([a], b)
    assert (f(numpy.zeros((3, 1))) + numpy.ones(2) == numpy.ones((3, 2))).all()
def test_flatten_outdimNone():
    a = dmatrix()
    c = flatten(a)
    f = inplace_func([a], c)
    a_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
    c_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float64')
    assert numpy.all(f(a_val) == c_val)
    f = inplace_func([a], c)
    assert numpy.all(f(a_val) == c_val)

    utt.verify_grad(flatten, [a_val])
def test_flatten_outdim1():
    a = dmatrix()
    c = flatten(a, 1)
    f = inplace_func([a], c)
    a_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
    c_val = theano._asarray([0, 1, 2, 3, 4, 5], dtype='float64')
    assert numpy.all(f(a_val) == c_val)
    f = inplace_func([a], c)
    assert numpy.all(f(a_val) == c_val)

    utt.verify_grad(flatten, [a_val])
def test_flatten_outdim2():
    a = dmatrix()
    c = flatten(a, 2)
    f = inplace_func([a], c)
    a_val = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float64')
    assert numpy.all(f(a_val) == a_val)
    f = inplace_func([a], c)
    assert numpy.all(f(a_val) == a_val)

    flatten_2 = partial(flatten, outdim=2)
    utt.verify_grad(flatten_2, [a_val])
def test_reshape_member_fn():
    x = dmatrix()
    y = x.reshape((4, 5, 6))
    assert y.owner.op == Reshape(3)
def test_simple_2d(self):
    """Increments or sets part of a tensor by a scalar using a full slice and
    a partial slice depending on a scalar.
    """
    a = tt.dmatrix()
    increment = tt.dscalar()
    sl1 = slice(None)
    sl2_end = tt.lscalar()
    sl2 = slice(sl2_end)

    for do_set in [False, True]:
        if do_set:
            resut = tt.set_subtensor(a[sl1, sl2], increment)
        else:
            resut = tt.inc_subtensor(a[sl1, sl2], increment)

        f = theano.function([a, increment, sl2_end], resut)

        val_a = numpy.ones((5, 5))
        val_inc = 2.3
        val_sl2_end = 2

        result = f(val_a, val_inc, val_sl2_end)

        expected_result = numpy.copy(val_a)
        if do_set:
            expected_result[:, :val_sl2_end] = val_inc
        else:
            expected_result[:, :val_sl2_end] += val_inc
        utt.assert_allclose(result, expected_result)
def test_perform(self):
    a = tensor.dmatrix()
    f = theano.function([a], self.op(a, n=10, axis=0))
    a = numpy.random.rand(8, 6)
    assert numpy.allclose(f(a), numpy.fft.fft(a, 10, 0))
def test_wrong_input(self):
    """
    Make sure errors are raised when image and kernel are not 4D tensors
    """
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                      'valid', input=T.dmatrix())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                      'valid', filters=T.dvector())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8), (4, 2, 5, 5),
                      'valid', input=T.dtensor3())
def test_wrong_input(self):
    """
    Make sure errors are raised when image and kernel are not 5D tensors
    """
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8, 8),
                      (4, 2, 5, 5, 5), 'valid', input=T.dmatrix())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8, 8),
                      (4, 2, 5, 5, 5), 'valid', filters=T.dvector())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8, 8),
                      (4, 2, 5, 5, 5), 'valid', input=T.dtensor3())
    self.assertRaises(Exception, self.validate, (3, 2, 8, 8, 8),
                      (4, 2, 5, 5, 5), 'valid', input=T.dtensor4())
def test_2arg(self):
    x = dmatrix('x')
    x.tag.test_value = np.zeros((2, 2))
    y = dvector('y')
    y.tag.test_value = [0, 0]

    @as_op([dmatrix, dvector], dvector)
    def cumprod_plus(x, y):
        return np.cumprod(x) + y

    fn = function([x, y], cumprod_plus(x, y))
    r = fn([[1.5, 5], [2, 2]], [1, 100, 2, 200])
    r0 = np.array([2.5, 107.5, 17., 230.])
    assert allclose(r, r0), (r, r0)