The following code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.squeeze().
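As a quick orientation before the project snippets (a minimal sketch, not taken from any of the projects below): theano.tensor.squeeze only removes dimensions that Theano knows to be broadcastable, so a size-1 axis usually has to be marked with T.addbroadcast first, which is exactly what several of the snippets below do.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')           # symbolic input of shape (batch, 1, features)
xb = T.addbroadcast(x, 1)    # declare axis 1 as broadcastable (length 1)
y = T.squeeze(xb)            # squeeze drops the broadcastable axis
f = theano.function([x], y)
print(f(np.zeros((4, 1, 3), dtype=theano.config.floatX)).shape)  # -> (4, 3)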
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    shape = list(x.shape)
    shape.pop(axis)
    return T.reshape(x, tuple(shape))
def squeeze(self, x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    x = T.addbroadcast(x, axis)
    return T.squeeze(x)
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".
    """
    # TODO: `keras_shape` inference.
    shape = list(x.shape)
    shape.pop(axis)
    return T.reshape(x, tuple(shape))
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    # TODO: `keras_shape` inference.
    shape = list(x.shape)
    shape.pop(axis)
    return T.reshape(x, tuple(shape))
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    x = T.addbroadcast(x, axis)
    return T.squeeze(x)
def squeeze(x, axis):
    """Remove a 1-dimension from the tensor at index "axis".
    """
    shape = list(x.shape)
    shape.pop(axis)
    y = T.reshape(x, tuple(shape))
    if hasattr(x, '_keras_shape'):
        kshape = list(x._keras_shape)
        kshape.pop(axis)
        y._keras_shape = tuple(kshape)
    return y
def get_output(self, go_backwards=False, train=False):
    self.reset_states(train.shape[0])
    inputs = train.dimshuffle((1, 0, 2))
    results, _ = theano.scan(
        self.step,
        sequences=inputs,
        outputs_info=[self.states[0], self.states[1]],
        go_backwards=go_backwards)
    '''
    # deal with Theano API inconsistency
    if type(results) is list:
        outputs = results[0]
        states = results[1:]
    else:
        outputs = results
        states = []
    outputs = T.squeeze(outputs)
    last_output = outputs[-1]
    '''
    #outputs = np.asarray(results)[:,0]
    #outputs = T.squeeze(outputs)
    #outputs = outputs.dimshuffle((1, 0, 2))
    #states = [T.squeeze(state[-1]) for state in states]
    #return last_output, outputs, states
    outputs = results[0]
    outputs = T.squeeze(outputs)
    outputs = outputs.dimshuffle((1, 0, 2))
    return outputs
def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    broadcastable = x.broadcastable[:axis] + x.broadcastable[axis+1:]
    x = T.patternbroadcast(x, [i == axis for i in range(x.type.ndim)])
    x = T.squeeze(x)
    x = T.patternbroadcast(x, broadcastable)
    return x
def conv1d(x, kernel, strides=1, padding='valid',
           data_format=None, dilation_rate=1):
    """1D convolution.

    # Arguments
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of "channels_last", "channels_first".
        dilation_rate: integer.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ', data_format)

    if hasattr(kernel, '_keras_shape'):
        kernel_shape = kernel._keras_shape
    else:
        kernel_shape = None
    if padding == 'causal':
        # causal (dilated) convolution:
        if not kernel_shape:
            raise AttributeError('Causal padding requires kernel._keras_shape set.')
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'

    if hasattr(x, '_keras_shape'):
        shape = x._keras_shape
    else:
        shape = None
    if data_format == 'channels_last':
        # original shape: (batch, length, input_dim)
        # add dim to x to have (batch, length, 1, input_dim)
        x = expand_dims(x, 2)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], 1, shape[2])
    else:
        # original shape: (batch, input_dim, length)
        # add dim to x to have (batch, input_dim, length, 1)
        x = expand_dims(x, 3)
        # update x._keras_shape
        if shape is not None:
            x._keras_shape = (shape[0], shape[1], shape[2], 1)
    # update dilation rate, strides
    dilation_rate = (dilation_rate, 1)
    strides = (strides, 1)
    # add dim to kernel (always same format independently of data_format)
    # i.e. (rows, 1, input_depth, depth)
    kernel = expand_dims(kernel, 1)
    output = conv2d(x, kernel, strides=strides, padding=padding,
                    data_format=data_format, dilation_rate=dilation_rate)
    # remove added dim
    if data_format == 'channels_last':
        output = squeeze(output, 2)
    else:
        output = squeeze(output, 3)
    return output
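The conv1d wrapper above realizes 1D convolution by inserting a dummy spatial axis, running a 2D convolution, and squeezing the axis back out. Below is a minimal plain-Theano sketch of that same trick (channels-first layout; the shapes and names are illustrative assumptions, not taken from the snippet):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')                              # (batch, channels, length)
# filter bank: (filters, channels, kernel_length, 1), dummy width of 1
w = theano.shared(np.random.randn(8, 3, 5, 1).astype(theano.config.floatX))

x4 = x.dimshuffle(0, 1, 2, 'x')                 # add a dummy axis -> (batch, channels, length, 1)
y4 = T.nnet.conv2d(x4, w, border_mode='valid')  # 2D convolution over (length, 1)
y = T.squeeze(T.addbroadcast(y4, 3))            # mark the dummy axis broadcastable, then drop it

f = theano.function([x], y)
print(f(np.zeros((2, 3, 20), dtype=theano.config.floatX)).shape)  # -> (2, 8, 16)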