The following five code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.bvector().
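Before the project code, a minimal sketch of the basics may help: tensor.bvector() creates a symbolic 1-D vector of dtype int8. The snippet below is purely illustrative (the variable name mask is made up, not taken from any of the projects that follow):

import numpy
import theano
import theano.tensor as tensor

mask = tensor.bvector('mask')   # symbolic 1-D vector, dtype int8
doubled = mask * 2              # ordinary symbolic arithmetic
f = theano.function([mask], doubled)

print(f(numpy.array([0, 1, 1], dtype='int8')))  # [0 2 2]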
def test_param_allow_downcast_int(self):
    a = tensor.wvector('a')  # int16
    b = tensor.bvector('b')  # int8
    c = tensor.bscalar('c')  # int8
    f = pfunc([In(a, allow_downcast=True),
               In(b, allow_downcast=False),
               In(c, allow_downcast=None)],
              (a + b + c))

    # Both values are in range. Since they're not ndarrays (but lists),
    # they will be converted, and their value checked.
    assert numpy.all(f([3], [6], 1) == 10)

    # Values are in range, but a dtype too large has explicitly been given
    # For performance reasons, no check of the data is explicitly performed
    # (It might be OK to change this in the future.)
    self.assertRaises(TypeError, f,
                      [3], numpy.array([6], dtype='int16'), 1)

    # Value too big for a, silently ignored
    assert numpy.all(f([2 ** 20], numpy.ones(1, dtype='int8'), 1) == 2)

    # Value too big for b, raises TypeError
    self.assertRaises(TypeError, f, [3], [312], 1)

    # Value too big for c, raises TypeError
    self.assertRaises(TypeError, f, [3], [6], 806)
def test_allow_input_downcast_int(self):
    a = tensor.wvector('a')  # int16
    b = tensor.bvector('b')  # int8
    c = tensor.bscalar('c')  # int8

    f = pfunc([a, b, c], (a + b + c), allow_input_downcast=True)
    # Value too big for a, b, or c, silently ignored
    assert f([2 ** 20], [1], 0) == 1
    assert f([3], [312], 0) == 59
    assert f([3], [1], 806) == 42

    g = pfunc([a, b, c], (a + b + c), allow_input_downcast=False)
    # All values are in range. Since they're not ndarrays (but lists
    # or scalars), they will be converted, and their value checked.
    assert numpy.all(g([3], [6], 0) == 9)

    # Values are in range, but a dtype too large has explicitly been given
    # For performance reasons, no check of the data is explicitly performed
    # (It might be OK to change this in the future.)
    self.assertRaises(TypeError, g,
                      [3], numpy.array([6], dtype='int16'), 0)

    # Value too big for b, raises TypeError
    self.assertRaises(TypeError, g, [3], [312], 0)

    h = pfunc([a, b, c], (a + b + c))  # Default: allow_input_downcast=None
    # Everything here should behave like with False
    assert numpy.all(h([3], [6], 0) == 9)
    self.assertRaises(TypeError, h,
                      [3], numpy.array([6], dtype='int16'), 0)
    self.assertRaises(TypeError, h, [3], [312], 0)
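Both tests above exercise the same rule: when an input is declared int8 via bvector, Theano only downcasts Python lists and scalars into it if downcasting is explicitly allowed; otherwise an out-of-range value raises TypeError. Here is a standalone sketch of that behavior, using theano.function, which accepts the same allow_input_downcast flag (the variable names are made up):

import theano
import theano.tensor as tensor

b = tensor.bvector('b')   # int8
out = b + 1

lenient = theano.function([b], out, allow_input_downcast=True)
strict = theano.function([b], out, allow_input_downcast=False)

print(lenient([312]))  # 312 wraps around to 56 in int8, so this prints [57]
try:
    strict([312])      # 312 does not fit in int8
except TypeError:
    print('rejected')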
def __init__(self, state_format, actions_number, gamma=0.99, learning_rate=0.00025,
             ddqn=False, **kwargs):
    self.inputs = dict()
    self.learning_rate = learning_rate
    architecture = kwargs
    self.loss_history = []
    self.misc_state_included = (state_format["s_misc"] > 0)
    self.gamma = np.float64(gamma)

    self.inputs["S0"] = tensor.tensor4("S0")
    self.inputs["S1"] = tensor.tensor4("S1")
    self.inputs["A"] = tensor.ivector("Action")
    self.inputs["R"] = tensor.vector("Reward")
    self.inputs["Nonterminal"] = tensor.bvector("Nonterminal")

    if self.misc_state_included:
        self.inputs["S0_misc"] = tensor.matrix("S0_misc")
        self.inputs["S1_misc"] = tensor.matrix("S1_misc")
        self.misc_len = state_format["s_misc"]
    else:
        self.misc_len = None

    # save it for the evaluation reshape
    # TODO get rid of this?
    self.single_image_input_shape = (1,) + tuple(state_format["s_img"])

    architecture["img_input_shape"] = (None,) + tuple(state_format["s_img"])
    architecture["misc_len"] = self.misc_len
    architecture["output_size"] = actions_number

    if self.misc_state_included:
        self.network, input_layers, _ = self._initialize_network(
            img_input=self.inputs["S0"],
            misc_input=self.inputs["S0_misc"],
            **architecture)
        self.frozen_network, _, alternate_inputs = self._initialize_network(
            img_input=self.inputs["S1"],
            misc_input=self.inputs["S1_misc"],
            **architecture)
    else:
        self.network, input_layers, _ = self._initialize_network(
            img_input=self.inputs["S0"], **architecture)
        self.frozen_network, _, alternate_inputs = self._initialize_network(
            img_input=self.inputs["S1"], **architecture)

    self.alternate_input_mappings = {}
    for layer, input in zip(input_layers, alternate_inputs):
        self.alternate_input_mappings[layer] = input

    # print "Network initialized."
    self._compile(ddqn)
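In this agent, the int8 vector Nonterminal acts as a 0/1 mask over a batch of transitions. The _compile method is not shown above, but in a standard DQN update such a mask zeroes the bootstrap term for terminal states, roughly as in this sketch (q_next_max and target are assumed names for illustration, not the project's own):

import theano.tensor as tensor

r = tensor.vector('Reward')
nonterminal = tensor.bvector('Nonterminal')  # 1 where the episode continues, 0 at terminal steps
q_next_max = tensor.vector('q_next_max')     # max_a Q_frozen(S1, a), assumed computed elsewhere
gamma = 0.99

# Terminal transitions keep only the immediate reward;
# nonterminal ones add the discounted value of the best next action.
target = r + gamma * nonterminal * q_next_max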
def load_model(modeldir, options, model, modelfn, loss_type):
    NB_CHANNELS, NB_FRAMES, NB_FEATURES, NB_CLASSES, BATCH_SIZE, removeMean, \
        divideStd, TEST_LABELS, doAugment, feature_type = options['NB_CHANNELS'], \
        options['NB_FRAMES'], \
        options['NB_FEATURES'], \
        options['NB_CLASSES'], \
        options['BATCH_SIZE'], \
        options['CENTER_DATA'], \
        options['REDUCE_DATA'], \
        options['TEST_LABELS'], \
        options['AUGMENT'], \
        options['FEATURE_TYPE']

    print 'OPTIONS: ', options

    # Prepare Theano variables for inputs and targets
    # input_var = T.tensor4('inputs', dtype='float32')
    input_var = T.tensor4('inputs')
    # using int8 (bvector) for the targets does not work:
    # target_var = T.bvector('targets')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    # use batchnorm?
    network, input_layer, output_layer_1 = build_densenet(
        input_shape=(None, NB_CHANNELS, NB_FRAMES, NB_FEATURES),
        input_var=input_var,
        classes=NB_CLASSES,
        depth=19,
        first_output=32,
        growth_rate=15,
        num_blocks=3,
        dropout=0,
        feature_type=feature_type)

    print("Loading model...")
    with np.load(modelfn) as f:
        single_array = [f['arr_%d' % i] for i in range(len(f.files))]
        param_values = [el for el in single_array[0]]
    lasagne.layers.set_all_param_values(network, param_values)

    print 'INFO: total number of layers:', len(lasagne.layers.get_all_layers(network))
    print("INFO: number of parameters in model: %d"
          % lasagne.layers.count_params(network, trainable=True))

    # replace all the nonlinearities of the network:
    relu = lasagne.nonlinearities.rectify
    relu_layers = [layer for layer in lasagne.layers.get_all_layers(network)
                   if getattr(layer, 'nonlinearity', None) is relu]
    modded_relu = GuidedBackprop(relu)  # important: only instantiate this once!
    for layer in relu_layers:
        layer.nonlinearity = modded_relu

    return network, input_layer, output_layer_1
def __init__(self, game_params, arch_params, solver_params, trained_model, sn_dir):
    params = None
    if trained_model:
        params = common.load_params(trained_model)

    self.lr_func = create_learning_rate_func(solver_params)

    self.x_h_0 = tt.fvector('x_h_0')
    self.v_h_0 = tt.fvector('v_h_0')
    self.t_h_0 = tt.fvector('t_h_0')
    self.x_t_0 = tt.fmatrix('x_t_0')
    self.v_t_0 = tt.fmatrix('v_t_0')
    self.a_t_0 = tt.fmatrix('a_t_0')
    self.t_t_0 = tt.fvector('t_t_0')
    self.time_steps = tt.fvector('t_0')
    self.exist = tt.bvector('exist')
    self.is_leader = tt.fvector('is_leader')
    self.x_goal = tt.fvector('x_goal')
    self.turn_vec_h = tt.fvector('turn_vec_h')
    self.turn_vec_t = tt.fvector('turn_vec_t')
    self.n_steps = tt.iscalar('n_steps')
    self.lr = tt.fscalar('lr')

    self.sn_dir = sn_dir
    self.game_params = game_params
    self.arch_params = arch_params
    self.solver_params = solver_params

    self.model = CONTROLLER(self.x_h_0, self.v_h_0, self.t_h_0,
                            self.x_t_0, self.v_t_0, self.a_t_0, self.t_t_0,
                            self.time_steps, self.exist, self.is_leader,
                            self.x_goal, self.turn_vec_h, self.turn_vec_t,
                            self.n_steps, self.lr,
                            self.game_params, self.arch_params,
                            self.solver_params, params)