We extracted the following 5 code examples from open source Python projects to illustrate how to use theano.tensor.btensor3().
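Before the project examples, here is a minimal standalone sketch (not taken from the projects below; the variable names are illustrative only). T.btensor3() declares a symbolic 3D tensor of dtype int8 ("b" for byte), which the examples use for one-hot chord-type input of shape (batch, time, 12):

import numpy as np
import theano
import theano.tensor as T

# Symbolic 3D int8 tensor ("b" = byte); the concrete shape is only fixed at call time.
chord_types = T.btensor3("chord_types")

# Count the active chord-type bits per (batch, time) step.
active_per_step = chord_types.sum(axis=-1)

count_fn = theano.function([chord_types], active_per_step, allow_input_downcast=True)

# Example input with the (batch, time, 12) layout used in the snippets below.
x = np.zeros((2, 4, 12), dtype=np.int8)
x[:, :, 0] = 1
print(count_fn(x).shape)  # (2, 4)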
def setup_encode(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    # dimensions: (batch, time)
    relative_posns = [T.imatrix() for _ in self.encodings]
    # dimensions: (batch, time, output_data)
    encoded_melodies = [T.btensor3() for _ in self.encodings]
    n_batch, n_time = chord_roots.shape

    all_activations = []
    for encoding, enc_lstmstack, encoded_melody, relative_pos in zip(self.encodings, self.enc_lstmstacks, encoded_melodies, relative_posns):
        activations = enc_lstmstack.do_preprocess_scan(timestep=T.tile(T.arange(n_time), (n_batch,1)),
                                                       relative_position=relative_pos,
                                                       cur_chord_type=chord_types,
                                                       cur_chord_root=chord_roots,
                                                       cur_input=encoded_melody,
                                                       deterministic_dropout=True)
        all_activations.append(activations)
    reduced_activations = functools.reduce((lambda x, y: x + y), all_activations)
    strengths, vects = self.qman.get_strengths_and_vects(reduced_activations)

    self.encode_fun = theano.function(
        inputs=[chord_types, chord_roots] + relative_posns + encoded_melodies,
        outputs=[strengths, vects],
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
def setup_decode(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    # dimensions: (batch, time)
    feat_strengths = T.fmatrix()
    # dimensions: (batch, time, feature_size)
    feat_vects = T.ftensor3()
    n_batch, n_time = chord_roots.shape

    features = QueueManager.queue_transform(feat_strengths, feat_vects)

    specs = [lstmstack.prepare_sample_scan(start_pos=T.alloc(np.array(encoding.STARTING_POSITION, np.int32), (n_batch)),
                                           start_out=T.tile(encoding.initial_encoded_form(), (n_batch,1)),
                                           timestep=T.tile(T.arange(n_time), (n_batch,1)),
                                           cur_chord_type=chord_types,
                                           cur_chord_root=chord_roots,
                                           cur_feature=features,
                                           deterministic_dropout=True)
             for lstmstack, encoding in zip(self.dec_lstmstacks, self.encodings)]

    updates, all_chosen, all_probs, indiv_probs = helper_generate_from_spec(specs, self.dec_lstmstacks, self.encodings, self.srng, n_batch, n_time, self.bounds)

    self.decode_fun = theano.function(
        inputs=[chord_roots, chord_types, feat_strengths, feat_vects],
        updates=updates,
        outputs=all_chosen,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))

    self.decode_visualize_fun = theano.function(
        inputs=[chord_roots, chord_types, feat_strengths, feat_vects],
        updates=updates,
        outputs=[all_chosen, all_probs] + indiv_probs + [features],
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
def setup_generate(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    n_batch, n_time = chord_roots.shape

    specs = [lstmstack.prepare_sample_scan(start_pos=T.alloc(np.array(encoding.STARTING_POSITION, np.int32), (n_batch)),
                                           start_out=T.tile(encoding.initial_encoded_form(), (n_batch,1)),
                                           timestep=T.tile(T.arange(n_time), (n_batch,1)),
                                           cur_chord_type=chord_types,
                                           cur_chord_root=chord_roots,
                                           deterministic_dropout=True)
             for lstmstack, encoding in zip(self.lstmstacks, self.encodings)]

    updates, all_chosen, all_probs, indiv_probs = helper_generate_from_spec(specs, self.lstmstacks, self.encodings, self.srng, n_batch, n_time, self.bounds, self.normalize_artic_only)

    self.generate_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=all_chosen,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))

    self.generate_visualize_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=[all_chosen, all_probs] + indiv_probs,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
def setup_train(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    # dimensions: (batch, time)
    relative_pos = T.imatrix()
    # dimensions: (batch, time, output_data)
    encoded_melody = T.btensor3()
    # dimensions: (batch, time)
    correct_notes = T.imatrix()
    n_batch, n_time = relative_pos.shape

    def _build(det_dropout):
        activations = self.lstmstack.do_preprocess_scan(timestep=T.tile(T.arange(n_time), (n_batch,1)),
                                                        relative_position=relative_pos,
                                                        cur_chord_type=chord_types,
                                                        cur_chord_root=chord_roots,
                                                        last_output=T.concatenate([T.tile(self.encoding.initial_encoded_form(), (n_batch,1,1)),
                                                                                   encoded_melody[:,:-1,:]], 1),
                                                        deterministic_dropout=det_dropout)
        out_probs = self.encoding.decode_to_probs(activations, relative_pos, self.bounds.lowbound, self.bounds.highbound)
        return Encoding.compute_loss(out_probs, correct_notes, True)

    train_loss, train_info = _build(False)
    updates = Adam(train_loss, self.params, lr=self.learning_rate_var)

    eval_loss, eval_info = _build(True)

    self.loss_info_keys = list(train_info.keys())

    self.update_fun = theano.function(
        inputs=[chord_types, chord_roots, relative_pos, encoded_melody, correct_notes],
        outputs=[train_loss] + list(train_info.values()),
        updates=updates,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))

    self.eval_fun = theano.function(
        inputs=[chord_types, chord_roots, relative_pos, encoded_melody, correct_notes],
        outputs=[eval_loss] + list(eval_info.values()),
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
def setup_generate(self):
    # dimensions: (batch, time, 12)
    chord_types = T.btensor3()
    # dimensions: (batch, time)
    chord_roots = T.imatrix()
    n_batch, n_time = chord_roots.shape

    spec = self.lstmstack.prepare_sample_scan(start_pos=T.alloc(np.array(self.encoding.STARTING_POSITION, np.int32), (n_batch)),
                                              start_out=T.tile(self.encoding.initial_encoded_form(), (n_batch,1)),
                                              timestep=T.tile(T.arange(n_time), (n_batch,1)),
                                              cur_chord_type=chord_types,
                                              cur_chord_root=chord_roots,
                                              deterministic_dropout=True)

    def _scan_fn(*inputs):
        # inputs is [ spec_sequences..., last_absolute_position, spec_taps..., spec_non_sequences... ]
        inputs = list(inputs)
        last_absolute_chosen = inputs.pop(len(spec.sequences))
        scan_rout = self.lstmstack.sample_scan_routine(spec, *inputs)
        last_rel_pos, last_out, cur_kwargs = scan_rout.send(None)
        new_pos = self.encoding.get_new_relative_position(last_absolute_chosen, last_rel_pos, last_out,
                                                          self.bounds.lowbound, self.bounds.highbound, **cur_kwargs)
        addtl_kwargs = {
            "last_output": last_out
        }
        out_activations = scan_rout.send((new_pos, addtl_kwargs))
        out_probs = self.encoding.decode_to_probs(out_activations, new_pos, self.bounds.lowbound, self.bounds.highbound)
        sampled_note = Encoding.sample_absolute_probs(self.srng, out_probs)
        encoded_output = self.encoding.note_to_encoding(sampled_note, new_pos, self.bounds.lowbound, self.bounds.highbound)
        scan_outputs = scan_rout.send(encoded_output)
        scan_rout.close()
        return [sampled_note, out_probs] + scan_outputs

    outputs_info = [{"initial": T.zeros((n_batch,), 'int32'), "taps": [-1]}, None] + spec.outputs_info
    result, updates = theano.scan(fn=_scan_fn, sequences=spec.sequences, non_sequences=spec.non_sequences, outputs_info=outputs_info)
    all_chosen = result[0].dimshuffle((1,0))
    all_probs = result[1].dimshuffle((1,0,2))

    self.generate_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=all_chosen,
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))

    self.generate_visualize_fun = theano.function(
        inputs=[chord_roots, chord_types],
        updates=updates,
        outputs=[all_chosen, all_probs],
        allow_input_downcast=True,
        mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
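The last example drives sampling by hand with theano.scan, feeding each step's sampled note back in through outputs_info with taps=[-1]. That recurrence pattern can be isolated in a few lines; the following is a toy running-sum sketch under the same scan conventions (it assumes nothing from the model code above beyond Theano and NumPy, and the names are illustrative):

import numpy as np
import theano
import theano.tensor as T

xs = T.imatrix("xs")  # dimensions: (batch, time), like chord_roots above

def step(x_t, prev_acc):
    # prev_acc is the previous step's output, supplied because of taps=[-1].
    return prev_acc + x_t

# scan iterates over the leading axis, so move time to the front first.
acc, updates = theano.scan(
    fn=step,
    sequences=xs.dimshuffle((1, 0)),
    outputs_info=[{"initial": T.zeros((xs.shape[0],), "int32"), "taps": [-1]}])

# Transpose back to (batch, time), mirroring result[0].dimshuffle((1,0)) above.
running_sum = theano.function([xs], acc.dimshuffle((1, 0)), updates=updates)

print(running_sum(np.arange(8, dtype=np.int32).reshape(2, 4)))
# [[ 0  1  3  6]
#  [ 4  9 15 22]]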