The following 50 code examples, extracted from open-source Python projects, demonstrate how to use theano.tensor.TensorType().
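Before the examples, a quick orientation: T.TensorType(dtype, broadcastable) builds a type object from a dtype and a per-dimension broadcastable pattern, and calling that type object produces a symbolic variable. A minimal sketch (variable names here are illustrative, not taken from any of the projects below):

import theano
import theano.tensor as T

# Two-dimensional float32 type; False marks a dimension as not broadcastable.
matrix_type = T.TensorType('float32', (False, False))
x = matrix_type('x')  # symbolic matrix named 'x'
y = matrix_type('y')  # the type object can be reused

f = theano.function([x, y], x + y)

This is the pattern the snippets below rely on, whether declaring placeholders, 5D inputs, or the output types of custom Ops.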
def placeholder(shape=None, ndim=None, dtype=_FLOATX, sparse=False, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if shape is None and ndim is None:
        raise Exception('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def __init__(self):
    metric_names = ['Loss', 'L2', 'Accuracy']
    super(Fr3dNetTrainer, self).__init__(metric_names)

    tensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
    input_var = tensor5('inputs')
    target_var = T.ivector('targets')

    logging.info("Defining network")
    net = fr3dnet.define_network(input_var)
    self.network = net
    train_fn, val_fn, l_r = fr3dnet.define_updates(net, input_var, target_var)
    self.train_fn = train_fn
    self.val_fn = val_fn
    self.l_r = l_r
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
    """Instantiate an input data placeholder variable.
    """
    if dtype is None:
        dtype = floatx()
    if shape is None and ndim is None:
        raise ValueError('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    else:
        shape = tuple([None for _ in range(ndim)])
    name = _prepare_name(name, 'placeholder')
    broadcast = (False,) * ndim
    if sparse:
        _assert_sparse_module()
        x = th_sparse_module.csr_matrix(name=name, dtype=dtype)
    else:
        x = T.TensorType(dtype, broadcast)(name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    return x
def test_op_sd(self):
    for format in sparse.sparse_formats:
        for dtype in sparse.all_dtypes:
            variable, data = sparse_random_inputs(format,
                                                  shape=(10, 10),
                                                  out_dtype=dtype,
                                                  n=2,
                                                  p=0.1)
            variable[1] = tensor.TensorType(dtype=dtype,
                                            broadcastable=(False, False))()
            data[1] = data[1].toarray()

            f = theano.function(variable, self.op(*variable))

            tested = f(*data)
            expected = numpy.dot(data[0].toarray(), data[1])

            assert tested.format == format
            assert tested.dtype == expected.dtype
            tested = tested.toarray()
            utt.assert_allclose(tested, expected)
def _is_sparse_variable(x):
    """
    Returns
    -------
    boolean
        True iff x is a L{SparseVariable} (and not a L{tensor.TensorType},
        for instance).

    """
    if not isinstance(x, gof.Variable):
        raise NotImplementedError("this function should only be called on "
                                  "*variables* (of type sparse.SparseType "
                                  "or tensor.TensorType, for instance), not ",
                                  x)
    return isinstance(x.type, SparseType)
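As a hedged illustration of what this predicate distinguishes (assuming theano.sparse is importable, which requires scipy; the variable names are made up for this sketch):

import theano.sparse
import theano.tensor as T

sv = theano.sparse.csr_matrix('sv')  # sv.type is a SparseType
dv = T.matrix('dv')                  # dv.type is a TensorType

# _is_sparse_variable(sv) -> True
# _is_sparse_variable(dv) -> False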
def test_infer_shape(self):
    for s_left, s_right in [((5, 6), (5, 6)),
                            ((5, 6), (5, 1)),
                            ((5, 6), (1, 6)),
                            ((5, 1), (5, 6)),
                            ((1, 6), (5, 6)),
                            ((2, 3, 4, 5), (2, 3, 4, 5)),
                            ((2, 3, 4, 5), (2, 3, 1, 5)),
                            ((2, 3, 4, 5), (1, 3, 4, 5)),
                            ((2, 1, 4, 5), (2, 3, 4, 5)),
                            ((2, 3, 4, 1), (2, 3, 4, 5))]:
        dtype = theano.config.floatX
        t_left = TensorType(dtype, [(entry == 1) for entry in s_left])()
        t_right = TensorType(dtype, [(entry == 1) for entry in s_right])()
        t_left_val = numpy.zeros(s_left, dtype=dtype)
        t_right_val = numpy.zeros(s_right, dtype=dtype)
        self._compile_and_check([t_left, t_right],
                                [Elemwise(scalar.add)(t_left, t_right)],
                                [t_left_val, t_right_val], Elemwise)
def test_recursive_lift(self):
    v = T.vector(dtype="float64")
    m = T.matrix(dtype="float64")
    out = ((v + 42) * (m + 84)).T
    g = FunctionGraph([v, m], [out])
    init_str_g = ("[InplaceDimShuffle{1,0}(Elemwise{mul,no_inplace}"
                  "(InplaceDimShuffle{x,0}(Elemwise{add,no_inplace}"
                  "(<TensorType(float64, vector)>, "
                  "InplaceDimShuffle{x}(TensorConstant{42}))), "
                  "Elemwise{add,no_inplace}"
                  "(<TensorType(float64, matrix)>, "
                  "InplaceDimShuffle{x,x}(TensorConstant{84}))))]")
    self.assertTrue(str(g) == init_str_g)
    new_out = local_dimshuffle_lift.transform(g.outputs[0].owner)[0]
    new_g = FunctionGraph(g.inputs, [new_out])
    opt_str_g = ("[Elemwise{mul,no_inplace}(Elemwise{add,no_inplace}"
                 "(InplaceDimShuffle{0,x}(<TensorType(float64, vector)>), "
                 "InplaceDimShuffle{x,x}(TensorConstant{42})), "
                 "Elemwise{add,no_inplace}(InplaceDimShuffle{1,0}"
                 "(<TensorType(float64, matrix)>), "
                 "InplaceDimShuffle{x,x}(TensorConstant{84})))]")
    self.assertTrue(str(new_g) == opt_str_g)

    # Check stacktrace was copied over correctly after opt was applied
    self.assertTrue(check_stack_trace(new_g, ops_to_check='all'))
def test_eq(self):
    x = T.dmatrix()
    y = T.dmatrix()
    f = theano.function([x, y], T.eq(x, y), mode=self.mode)
    vx = numpy.random.rand(5, 4)
    vy = numpy.random.rand(5, 4)
    f(vx, vy)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, T.Elemwise)
    assert isinstance(topo[0].op.scalar_op, theano.scalar.EQ)
    f2 = theano.function([x], T.eq(x, x), mode=self.mode)
    assert numpy.all(f2(vx) == numpy.ones((5, 4)))
    topo2 = f2.maker.fgraph.toposort()
    # Expected graph:
    # Shape_i{1}(<TensorType(float64, matrix)>),
    # Shape_i{0}(<TensorType(float64, matrix)>),
    # Alloc([[1]], Shape_i{0}.0, Shape_i{1}.0)
    assert len(topo2) == 3
    assert isinstance(topo2[-1].op, T.Alloc)
def test_local_reduce_broadcast_some_0(self):
    for fct in [tensor.sum, tensor.all, tensor.any, tensor.prod,
                tensor.max, tensor.min]:
        x = T.TensorType('int64', (True, False, True))()
        f = theano.function([x], [fct(x, axis=[0, 1])], mode=self.mode)

        order = f.maker.fgraph.toposort()
        assert 1 == sum([isinstance(node.op, T.CAReduce)
                         for node in order])

        node = [node for node in order
                if isinstance(node.op, tensor.CAReduce)][0]

        op = node.op
        assert isinstance(op, T.CAReduce)
        # -- the leading broadcastable dimension has been dropped
        #    by the local_reduce_broadcastable optimization;
        #    now summation is over the original x's dimension 1.
        assert node.inputs[0].ndim == 2, node
        assert op.axis == (0,), op.axis
def test_local_log_sum_exp1():
    # Tests if optimization is applied by checking the presence of the maximum
    x = tensor3('x')
    check_max_log_sum_exp(x, axis=(0,), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(1,), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(2,), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(0, 1), dimshuffle_op=None)
    check_max_log_sum_exp(x, axis=(0, 1, 2), dimshuffle_op=None)

    # If a transpose is applied to the sum
    transpose_op = DimShuffle((False, False), (1, 0))
    check_max_log_sum_exp(x, axis=2, dimshuffle_op=transpose_op)

    # If the sum is performed with keepdims=True
    x = TensorType(dtype='floatX', broadcastable=(False, True, False))('x')
    sum_keepdims_op = x.sum(axis=(0, 1), keepdims=True).owner.op
    check_max_log_sum_exp(x, axis=(0, 1), dimshuffle_op=sum_keepdims_op)
def test_inc_adv_subtensor_w_2vec(self):
    if inplace_increment is None:
        raise inplace_increment_missing

    subt = self.m[self.ix1, self.ix12]
    a = inc_subtensor(subt, subt)

    typ = tensor.TensorType(self.m.type.dtype,
                            self.ix2.type.broadcastable)
    assert a.type == typ, (a.type, typ)

    f = theano.function([self.m, self.ix1, self.ix12], a,
                        allow_input_downcast=True)

    aval = f([[.4, .9, .1],
              [5, 6, 7],
              [.5, .3, .15]],
             [1, 2, 1],
             [0, 1, 0])
    assert numpy.allclose(aval,
                          [[.4, .9, .1],
                           [5 * 3, 6, 7],
                           [.5, .3 * 2, .15]]), aval
def test_tensor_values_eq_approx():
    # test that inf, -inf and nan equal themselves
    a = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
    assert TensorType.values_eq_approx(a, a)

    # test that inf and -inf don't equal each other
    b = numpy.asarray([numpy.inf, -1, 0, 1, numpy.inf, numpy.nan])
    assert not TensorType.values_eq_approx(a, b)
    b = numpy.asarray([-numpy.inf, -1, 0, 1, -numpy.inf, numpy.nan])
    assert not TensorType.values_eq_approx(a, b)

    # test allow_remove_inf
    b = numpy.asarray([numpy.inf, -1, 0, 1, 5, numpy.nan])
    assert TensorType.values_eq_approx(a, b, allow_remove_inf=True)
    b = numpy.asarray([numpy.inf, -1, 0, 1, 5, 6])
    assert not TensorType.values_eq_approx(a, b, allow_remove_inf=True)

    # test allow_remove_nan
    b = numpy.asarray([numpy.inf, -1, 0, 1, 5, numpy.nan])
    assert not TensorType.values_eq_approx(a, b, allow_remove_nan=False)
    b = numpy.asarray([-numpy.inf, -1, 0, 1, numpy.inf, 6])
    assert not TensorType.values_eq_approx(a, b, allow_remove_nan=False)
def test_flatten_broadcastable():
    # Ensure that the broadcastable pattern of the output is coherent with
    # that of the input

    inp = TensorType('float64', (False, False, False, False))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, False)

    inp = TensorType('float64', (False, False, False, True))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, False)

    inp = TensorType('float64', (False, True, False, True))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, False)

    inp = TensorType('float64', (False, True, True, True))()
    out = flatten(inp, outdim=2)
    assert out.broadcastable == (False, True)

    inp = TensorType('float64', (True, False, True, True))()
    out = flatten(inp, outdim=3)
    assert out.broadcastable == (True, False, True)
def local_abstractconv_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv2d):
        return None
    img, kern = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(kern.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1]
    rval = CorrMM(border_mode=node.op.border_mode,
                  subsample=node.op.subsample,
                  filter_dilation=node.op.filter_dilation)(img, kern)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv3d_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv3d):
        return None
    img, kern = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(kern.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = Corr3dMM(border_mode=node.op.border_mode,
                    subsample=node.op.subsample,
                    filter_dilation=node.op.filter_dilation)(img, kern)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv3d_gradweight_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv3d_gradWeights):
        return None
    img, topgrad, shape = node.inputs
    if not isinstance(img.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    rval = Corr3dMM_gradWeights(border_mode=node.op.border_mode,
                                subsample=node.op.subsample,
                                filter_dilation=node.op.filter_dilation)(
        img, topgrad, shape)
    copy_stack_trace(node.outputs[0], rval)

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        rval = rval[:, :, ::-1, ::-1, ::-1]
    rval = theano.tensor.patternbroadcast(rval,
                                          node.outputs[0].broadcastable)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv_gradinputs_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv2d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1]
    rval = CorrMM_gradInputs(border_mode=node.op.border_mode,
                             subsample=node.op.subsample,
                             filter_dilation=node.op.filter_dilation)(
        kern, topgrad, shape)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_abstractconv3d_gradinputs_gemm(node):
    if theano.config.cxx == "" or not theano.config.blas.ldflags:
        return
    if not isinstance(node.op, AbstractConv3d_gradInputs):
        return None
    kern, topgrad, shape = node.inputs
    if not isinstance(kern.type, TensorType) or \
            not isinstance(topgrad.type, TensorType):
        return None

    # need to flip the kernel if necessary
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]
    rval = Corr3dMM_gradInputs(border_mode=node.op.border_mode,
                               subsample=node.op.subsample,
                               filter_dilation=node.op.filter_dilation)(
        kern, topgrad, shape)
    copy_stack_trace(node.outputs[0], rval)

    return [rval]
def local_conv2d_cpu(node):
    if not isinstance(node.op, AbstractConv2d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType) or
         not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['full', 'valid']:
        return None
    if not node.op.filter_flip:
        # Not tested yet
        return None

    rval = conv2d(img, kern,
                  node.op.imshp, node.op.kshp,
                  border_mode=node.op.border_mode,
                  subsample=node.op.subsample)

    copy_stack_trace(node.outputs[0], rval)
    return [rval]
def make_node(self, img, topgrad, shape=None):
    img = as_tensor_variable(img)
    topgrad = as_tensor_variable(topgrad)
    img, topgrad = self.as_common_dtype(img, topgrad)
    if img.type.ndim != 5:
        raise TypeError('img must be 5D tensor')
    if topgrad.type.ndim != 5:
        raise TypeError('topgrad must be 5D tensor')
    if self.subsample != (1, 1, 1) or self.border_mode == "half":
        if shape is None:
            raise ValueError('shape must be given if subsample != (1, 1, 1)'
                             ' or border_mode == "half"')
        height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                              as_tensor_variable(shape[1]).astype('int64'),
                              as_tensor_variable(shape[2]).astype('int64')]
    else:
        height_width_depth = []

    broadcastable = [topgrad.type.broadcastable[1],
                     img.type.broadcastable[1],
                     False, False, False]
    dtype = img.type.dtype
    return Apply(self, [img, topgrad] + height_width_depth,
                 [TensorType(dtype, broadcastable)()])
def make_node(self, kern, topgrad, shape=None):
    kern = as_tensor_variable(kern)
    topgrad = as_tensor_variable(topgrad)
    kern, topgrad = self.as_common_dtype(kern, topgrad)
    if kern.type.ndim != 5:
        raise TypeError('kern must be 5D tensor')
    if topgrad.type.ndim != 5:
        raise TypeError('topgrad must be 5D tensor')
    if self.subsample != (1, 1, 1) and shape is None:
        raise ValueError('shape must be given if subsample != (1, 1, 1)')
    if self.subsample != (1, 1, 1):
        height_width_depth = [as_tensor_variable(shape[0]).astype('int64'),
                              as_tensor_variable(shape[1]).astype('int64'),
                              as_tensor_variable(shape[2]).astype('int64')]
    else:
        height_width_depth = []

    broadcastable = [topgrad.type.broadcastable[0],
                     kern.type.broadcastable[1],
                     False, False, False]
    dtype = kern.type.dtype
    return Apply(self, [kern, topgrad] + height_width_depth,
                 [TensorType(dtype, broadcastable)()])
def make_node(self, kern, topgrad, shape=None):
    kern = as_tensor_variable(kern)
    topgrad = as_tensor_variable(topgrad)
    kern, topgrad = self.as_common_dtype(kern, topgrad)
    if kern.type.ndim != 4:
        raise TypeError('kern must be 4D tensor')
    if topgrad.type.ndim != 4:
        raise TypeError('topgrad must be 4D tensor')
    if self.subsample != (1, 1) and shape is None:
        raise ValueError('shape must be given if subsample != (1, 1)')
    if self.subsample != (1, 1):
        height_width = [as_tensor_variable(shape[0]).astype('int64'),
                        as_tensor_variable(shape[1]).astype('int64')]
    else:
        height_width = []

    broadcastable = [topgrad.type.broadcastable[0],
                     kern.type.broadcastable[1],
                     False, False]
    dtype = kern.type.dtype
    return Apply(self, [kern, topgrad] + height_width,
                 [TensorType(dtype, broadcastable)()])
def uniform(random_state, size=None, low=0.0, high=1.0, ndim=None,
            dtype=None):
    """
    Sample from a uniform distribution between low and high.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.

    If dtype is not specified, it will be inferred from the dtype of
    low and high, but will be at least as precise as floatX.

    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, low.dtype,
                                   high.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction('uniform',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
def normal(random_state, size=None, avg=0.0, std=1.0, ndim=None,
           dtype=None):
    """
    Sample from a normal distribution centered on avg with the specified
    standard deviation (std).

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of avg and std.

    If dtype is not specified, it will be inferred from the dtype of
    avg and std, but will be at least as precise as floatX.

    """
    avg = tensor.as_tensor_variable(avg)
    std = tensor.as_tensor_variable(std)
    if dtype is None:
        dtype = tensor.scal.upcast(theano.config.floatX, avg.dtype,
                                   std.dtype)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, avg, std)
    op = RandomFunction('normal',
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, avg, std)
def random_integers(random_state, size=None, low=0, high=1, ndim=None,
                    dtype='int64'):
    """
    Sample a random integer between low and high, both inclusive.

    If the size argument is ambiguous on the number of dimensions, ndim
    may be a plain integer to supplement the missing information.

    If size is None, the output shape will be determined by the shapes
    of low and high.

    """
    low = tensor.as_tensor_variable(low)
    high = tensor.as_tensor_variable(high)
    ndim, size, bcast = _infer_ndim_bcast(ndim, size, low, high)
    op = RandomFunction(random_integers_helper,
                        tensor.TensorType(dtype=dtype, broadcastable=bcast))
    return op(random_state, size, low, high)
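The three samplers above all follow the same recipe: infer the ndim/size/broadcast pattern from the arguments, build a TensorType describing the output, and wrap it in a RandomFunction Op. In user code they are normally reached through RandomStreams rather than called directly with a raw random_state; a brief sketch of that usage (the seed value is arbitrary):

import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
u = srng.uniform(size=(2, 2), low=0.0, high=1.0)      # backed by 'uniform'
n = srng.normal(size=(2, 2), avg=0.0, std=1.0)        # backed by 'normal'
i = srng.random_integers(size=(2, 2), low=0, high=9)  # backed by the helper

f = theano.function([], [u, n, i])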
def test_ctors(self):
    if theano.configdefaults.python_int_bitwidth() == 32:
        assert shared(7).type == theano.tensor.iscalar, shared(7).type
    else:
        assert shared(7).type == theano.tensor.lscalar, shared(7).type
    assert shared(7.0).type == theano.tensor.dscalar
    assert shared(numpy.float32(7)).type == theano.tensor.fscalar

    # test tensor constructor
    b = shared(numpy.zeros((5, 5), dtype='int32'))
    assert b.type == TensorType('int32', broadcastable=[False, False])
    b = shared(numpy.random.rand(4, 5))
    assert b.type == TensorType('float64', broadcastable=[False, False])
    b = shared(numpy.random.rand(5, 1, 2))
    assert b.type == TensorType('float64',
                                broadcastable=[False, False, False])

    assert shared([]).type == generic

    def badfunc():
        shared(7, bad_kw=False)
    self.assertRaises(TypeError, badfunc)
def test_append_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatrix = T.matrix()
    z = Append()(mySymbolicMatricesList, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         In(mySymbolicMatrix, borrow=True, mutable=True)],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], y), [x, y]))
def test_extend_inplace(self):
    mySymbolicMatricesList1 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatricesList2 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    z = Extend()(mySymbolicMatricesList1, mySymbolicMatricesList2)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList1, borrow=True,
                            mutable=True),
                         mySymbolicMatricesList2],
                        z, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
def test_insert_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicIndex = T.scalar(dtype='int64')
    mySymbolicMatrix = T.matrix()
    z = Insert()(mySymbolicMatricesList, mySymbolicIndex, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         mySymbolicIndex, mySymbolicMatrix],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y),
        [x, y]))
def test_remove_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatrix = T.matrix()
    z = Remove()(mySymbolicMatricesList, mySymbolicMatrix)
    m = theano.compile.mode.get_default_mode().including(
        "typed_list_inplace_opt")
    f = theano.function([In(mySymbolicMatricesList, borrow=True,
                            mutable=True),
                         In(mySymbolicMatrix, borrow=True, mutable=True)],
                        z, accept_inplace=True, mode=m)
    self.assertTrue(f.maker.fgraph.toposort()[0].op.inplace)
    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])
    self.assertTrue(numpy.array_equal(f([x, y], y), [x]))
def test_type_equality(self):
    """
    Typed list types should only be equal when they contain the same
    theano variable types.
    """
    # list of matrices
    myType1 = TypedListType(T.TensorType(theano.config.floatX,
                                         (False, False)))
    # list of matrices
    myType2 = TypedListType(T.TensorType(theano.config.floatX,
                                         (False, False)))
    # list of scalars
    myType3 = TypedListType(T.TensorType(theano.config.floatX,
                                         ()))

    self.assertTrue(myType2 == myType1)
    self.assertFalse(myType3 == myType1)
def test_sanity_check_slice(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicSlice = SliceType()()

    z = GetItem()(mySymbolicMatricesList, mySymbolicSlice)

    self.assertFalse(isinstance(z, T.TensorVariable))

    f = theano.function([mySymbolicMatricesList, mySymbolicSlice], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(f([x], slice(0, 1, 1)), [x]))
def test_sanity_check_single(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicScalar = T.scalar(dtype='int64')

    z = GetItem()(mySymbolicMatricesList, mySymbolicScalar)

    f = theano.function([mySymbolicMatricesList, mySymbolicScalar], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(0, dtype='int64')), x))
def test_constant_input(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()

    z = GetItem()(mySymbolicMatricesList, 0)

    f = theano.function([mySymbolicMatricesList], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(f([x]), x))

    z = GetItem()(mySymbolicMatricesList, slice(0, 1, 1))

    f = theano.function([mySymbolicMatricesList], z)

    self.assertTrue(numpy.array_equal(f([x]), [x]))
def test_sanity_check(self):
    mySymbolicMatricesList1 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatricesList2 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()

    z = Extend()(mySymbolicMatricesList1, mySymbolicMatricesList2)

    f = theano.function([mySymbolicMatricesList1, mySymbolicMatricesList2],
                        z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
def test_interface(self):
    mySymbolicMatricesList1 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    mySymbolicMatricesList2 = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()

    z = mySymbolicMatricesList1.extend(mySymbolicMatricesList2)

    f = theano.function([mySymbolicMatricesList1, mySymbolicMatricesList2],
                        z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(f([x], [y]), [x, y]))
def test_inplace(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    myMatrix = T.matrix()
    myScalar = T.scalar(dtype='int64')

    z = Insert(True)(mySymbolicMatricesList, myScalar, myMatrix)

    f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z,
                        accept_inplace=True)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y),
        [x, y]))
def test_sanity_check(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    myMatrix = T.matrix()
    myScalar = T.scalar(dtype='int64')

    z = Insert()(mySymbolicMatricesList, myScalar, myMatrix)

    f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y),
        [x, y]))
def test_interface(self):
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()
    myMatrix = T.matrix()
    myScalar = T.scalar(dtype='int64')

    z = mySymbolicMatricesList.insert(myScalar, myMatrix)

    f = theano.function([mySymbolicMatricesList, myScalar, myMatrix], z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(numpy.array_equal(
        f([x], numpy.asarray(1, dtype='int64'), y),
        [x, y]))
def test_non_tensor_type(self):
    mySymbolicNestedMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)), 1)()
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()

    z = Index()(mySymbolicNestedMatricesList, mySymbolicMatricesList)

    f = theano.function([mySymbolicNestedMatricesList,
                         mySymbolicMatricesList],
                        z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(f([[x, y], [x, y, y]], [x, y]) == 0)
def test_non_tensor_type(self):
    mySymbolicNestedMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)), 1)()
    mySymbolicMatricesList = TypedListType(T.TensorType(
        theano.config.floatX, (False, False)))()

    z = Count()(mySymbolicNestedMatricesList, mySymbolicMatricesList)

    f = theano.function([mySymbolicNestedMatricesList,
                         mySymbolicMatricesList],
                        z)

    x = rand_ranged_matrix(-1000, 1000, [100, 101])
    y = rand_ranged_matrix(-1000, 1000, [100, 101])

    self.assertTrue(f([[x, y], [x, y, y]], [x, y]) == 1)
def test_cdata():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    i = TensorType('float32', (False,))()
    c = ProdOp()(i)
    i2 = GetOp()(c)
    mode = None
    if theano.config.mode == "FAST_COMPILE":
        mode = "FAST_RUN"

    # This should be a passthrough function for vectors
    f = theano.function([i], i2, mode=mode)

    v = numpy.random.randn(9).astype('float32')

    v2 = f(v)
    assert (v2 == v).all()
def test_maxpool():
    """TODO: test the gpu version!!!
    """
    for d0, d1, r_true, r_false in [(4, 4,
                                     [[[[5, 7], [13, 15]]]],
                                     [[[[5, 7], [13, 15]]]]),
                                    (5, 5,
                                     [[[[6, 8], [16, 18], [21, 23]]]],
                                     [[[[6, 8, 9], [16, 18, 19],
                                        [21, 23, 24]]]])]:
        for border, ret in [(True, r_true), (False, r_false)]:
            ret = numpy.array(ret)
            a = tcn.blas.Pool((2, 2), border)
            dmatrix4 = tensor.TensorType("float32",
                                         (False, False, False, False))
            b = dmatrix4()
            f = pfunc([b], [a(b)], mode=mode_with_gpu)

            bval = numpy.arange(0, d0 * d1).reshape(1, 1, d0, d1)
            r = f(bval)[0]
            # print bval, bval.shape, border
            # print r, r.shape
            assert (ret == r).all()
def local_gpu_extract_diagonal(node):
    """
    extract_diagonal(host_from_gpu()) -> host_from_gpu(extract_diagonal)
    gpu_from_host(extract_diagonal) -> extract_diagonal(gpu_from_host)

    """
    if (isinstance(node.op, nlinalg.ExtractDiag) and
            isinstance(node.inputs[0].type, theano.tensor.TensorType)):
        inp = node.inputs[0]
        if inp.owner and isinstance(inp.owner.op, HostFromGpu):
            return [host_from_gpu(nlinalg.extract_diag(
                as_cuda_ndarray_variable(inp)))]
    if isinstance(node.op, GpuFromHost):
        host_input = node.inputs[0]
        if (host_input.owner and
                isinstance(host_input.owner.op, nlinalg.ExtractDiag) and
                isinstance(host_input.owner.inputs[0].type,
                           theano.tensor.TensorType)):
            diag_node = host_input.owner
            return [nlinalg.extract_diag(
                as_cuda_ndarray_variable(diag_node.inputs[0]))]
    return False
def build_encoder_network(num_inputs, num_hidden):
    input = T.TensorType('float32', [None] * 3)('input')
    B, L = input.shape[0:2]

    l_in = InputLayer((None, max_seq_len, num_inputs))
    l_mask = InputLayer(shape=(None, max_seq_len))
    l_enc = MyLSTMLayer(l_in, num_hidden, mask_input=l_mask,
                        grad_clipping=grad_clip,
                        nonlinearity=lasagne.nonlinearities.rectify,
                        only_return_final=True)
    params = lasagne.layers.get_all_params(l_enc)

    hid_out, _ = lasagne.layers.get_output(l_enc, {l_in: input})
    tvars = [input, l_mask.input_var]
    return hid_out, tvars, theano.function(tvars, hid_out), params
def placeholder(shape=None, ndim=None, dtype=_FLOATX, name=None):
    '''Instantiate an input data placeholder variable.
    '''
    if shape is None and ndim is None:
        raise Exception('Specify either a shape or ndim value.')
    if shape is not None:
        ndim = len(shape)
    broadcast = (False,) * ndim
    # ====== Modify add name prefix ====== #
    global _PLACEHOLDER_ID
    name_prefix = 'ID.%02d.' % _PLACEHOLDER_ID
    _PLACEHOLDER_ID += 1
    if name is None:
        name = ''
    name = name_prefix + name
    placeholder = T.TensorType(dtype, broadcast)(name)
    # store the predefined shape of placeholder
    _PLACEHOLDER_SHAPE[name] = \
        [None for _ in range(ndim)] if shape is None else shape
    return placeholder