The following code examples, extracted from open-source Python projects, illustrate how to use keras.backend.batch_flatten().
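As a quick orientation before the examples: K.batch_flatten keeps the batch axis and flattens every other axis into one. A minimal sketch (the shapes here are arbitrary illustrations):

import numpy as np
from keras import backend as K

# A (2, 3, 4, 5) tensor becomes (2, 60): the batch axis survives,
# everything else is collapsed.
x = K.variable(np.random.rand(2, 3, 4, 5))
flat = K.batch_flatten(x)
print(K.eval(flat).shape)  # (2, 60)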
def get_initial_states(self, onto_nse_input, input_mask=None):
    input_to_read = onto_nse_input  # (batch_size, num_words, num_senses, num_hyps, output_dim + 1)
    memory_input = input_to_read[:, :, :, :, :-1]  # (batch_size, num_words, num_senses, num_hyps, output_dim)
    if input_mask is None:
        mem_0 = K.mean(memory_input, axis=(2, 3))  # (batch_size, num_words, output_dim)
    else:
        memory_mask = input_mask
        if K.ndim(onto_nse_input) != K.ndim(input_mask):
            memory_mask = K.expand_dims(input_mask)
        memory_mask = K.cast(memory_mask / (K.sum(memory_mask) + K.epsilon()), 'float32')
        mem_0 = K.sum(memory_input * memory_mask, axis=(2, 3))  # (batch_size, num_words, output_dim)
    flattened_mem_0 = K.batch_flatten(mem_0)
    initial_states = self.reader.get_initial_states(input_to_read)
    initial_states += [flattened_mem_0]
    return initial_states
def get_initial_states(self, nse_input, input_mask=None):
    '''
    This method produces the 'read' mask for all timesteps and initializes the memory slot mem_0.

    Input: nse_input (batch_size, input_length, input_dim)
    Output: list[Tensor]:
        h_0 (batch_size, output_dim)
        c_0 (batch_size, output_dim)
        flattened_mem_0 (batch_size, input_length * output_dim)

    While this method simply copies the input to mem_0, variants that inherit from this
    class can do something fancier.
    '''
    input_to_read = nse_input
    mem_0 = input_to_read
    flattened_mem_0 = K.batch_flatten(mem_0)
    initial_states = self.reader.get_initial_states(nse_input)
    initial_states += [flattened_mem_0]
    return initial_states
def get_initial_states(self, nse_input, input_mask=None):
    '''
    The read input in MMA-NSE will be of shape (batch_size, read_input_length*2, input_dim),
    a concatenation of the actual input to this NSE and the output from a different NSE.
    The latter is used to initialize the shared memory; the former is passed to the read
    LSTM and also used to initialize the current memory.
    '''
    input_length = K.shape(nse_input)[1]
    read_input_length = input_length // 2  # integer division; `/` would yield a float under Python 3
    input_to_read = nse_input[:, :read_input_length, :]
    initial_shared_memory = K.batch_flatten(nse_input[:, read_input_length:, :])
    mem_0 = K.batch_flatten(input_to_read)
    o_mask = self.reader.compute_mask(input_to_read, input_mask)
    reader_states = self.reader.get_initial_states(nse_input)
    initial_states = reader_states + [mem_0, initial_shared_memory]
    return initial_states, o_mask
def gram_matrix(x):
    """
    Computes the outer product of the input tensor x.

    Input
    -----
    - x: input tensor of shape (C x H x W)

    Returns
    -------
    - x . x^T

    Note that this can be computed efficiently if x is reshaped
    as a tensor of shape (C x H*W).
    """
    # assert K.ndim(x) == 3
    if K.image_dim_ordering() == 'th':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))
def neur_coef(y_true, y_pred):
    """
    Calculate the neurality coefficient of the two input tensors.
    If the Dice coefficient is > 0.5 for a single sample, the neurality
    coefficient is 1; it is 0 in all other cases.

    Keyword arguments:
    y_true -- Keras tensor containing the ground truth
    y_pred -- Keras tensor containing the prediction
    """
    y_true = K.batch_flatten(y_true)
    y_pred = K.batch_flatten(y_pred)
    intersection = y_true * y_pred * 1.0
    int_list = K.sum(intersection, axis=-1)
    y_list = K.sum(y_true, axis=-1) * 1.0
    y_pred_list = K.sum(y_pred, axis=-1) * 1.0
    # `smooth` is a module-level smoothing constant defined elsewhere in the file.
    score = K.mean(K.round((2.0 * int_list + smooth) / (y_pred_list + y_list + smooth)))
    return score
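A quick way to sanity-check this metric is to evaluate it on random binary masks. This is a sketch only; it assumes `smooth` is the module-level constant the metric relies on (set to 1.0 here for illustration):

import numpy as np
from keras import backend as K

smooth = 1.0  # assumed module-level constant

y_t = K.variable(np.round(np.random.rand(4, 16, 16, 1)))
y_p = K.variable(np.round(np.random.rand(4, 16, 16, 1)))
print(K.eval(neur_coef(y_t, y_p)))  # fraction of samples whose Dice score rounds to 1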
def step(self, inputs, states):
    vP_t = inputs
    hP_tm1 = states[0]
    _ = states[1:3]  # ignore internal dropout/masks
    vP, WP_v, WPP_v, v, W_g2 = states[3:8]
    vP_mask, = states[8:]

    WP_v_Dot = K.dot(vP, WP_v)
    WPP_v_Dot = K.dot(K.expand_dims(vP_t, axis=1), WPP_v)

    s_t_hat = K.tanh(WPP_v_Dot + WP_v_Dot)
    s_t = K.dot(s_t_hat, v)
    s_t = K.batch_flatten(s_t)
    a_t = softmax(s_t, mask=vP_mask, axis=1)
    c_t = K.batch_dot(a_t, vP, axes=[1, 1])

    GRU_inputs = K.concatenate([vP_t, c_t])
    g = K.sigmoid(K.dot(GRU_inputs, W_g2))
    GRU_inputs = g * GRU_inputs
    hP_t, s = super(SelfAttnGRU, self).step(GRU_inputs, states)

    return hP_t, s
def step(self, inputs, states):
    # input
    ha_tm1 = states[0]  # (B, 2H)
    _ = states[1:3]  # ignore internal dropout/masks
    hP, WP_h, Wa_h, v = states[3:7]  # (B, P, 2H)
    hP_mask, = states[7:8]

    WP_h_Dot = K.dot(hP, WP_h)  # (B, P, H)
    Wa_h_Dot = K.dot(K.expand_dims(ha_tm1, axis=1), Wa_h)  # (B, 1, H)

    s_t_hat = K.tanh(WP_h_Dot + Wa_h_Dot)  # (B, P, H)
    s_t = K.dot(s_t_hat, v)  # (B, P, 1)
    s_t = K.batch_flatten(s_t)  # (B, P)
    a_t = softmax(s_t, mask=hP_mask, axis=1)  # (B, P)

    c_t = K.batch_dot(hP, a_t, axes=[1, 1])  # (B, 2H)

    GRU_inputs = c_t
    ha_t, (ha_t_,) = super(PointerGRU, self).step(GRU_inputs, states)

    return a_t, [ha_t]
def call(self, inputs, mask=None):
    assert isinstance(inputs, list) and len(inputs) == 5
    uQ, WQ_u, WQ_v, v, VQ_r = inputs
    uQ_mask = mask[0] if mask is not None else None

    ones = K.ones_like(K.sum(uQ, axis=1, keepdims=True))  # (B, 1, 2H)

    s_hat = K.dot(uQ, WQ_u)
    s_hat += K.dot(ones, K.dot(WQ_v, VQ_r))
    s_hat = K.tanh(s_hat)
    s = K.dot(s_hat, v)
    s = K.batch_flatten(s)

    a = softmax(s, mask=uQ_mask, axis=1)
    rQ = K.batch_dot(uQ, a, axes=[1, 1])

    return rQ
def step(self, inputs, states):
    uP_t = inputs
    vP_tm1 = states[0]
    _ = states[1:3]  # ignore internal dropout/masks
    uQ, WQ_u, WP_v, WP_u, v, W_g1 = states[3:9]
    uQ_mask, = states[9:10]

    WQ_u_Dot = K.dot(uQ, WQ_u)  # WQ_u
    WP_v_Dot = K.dot(K.expand_dims(vP_tm1, axis=1), WP_v)  # WP_v
    WP_u_Dot = K.dot(K.expand_dims(uP_t, axis=1), WP_u)  # WP_u

    s_t_hat = K.tanh(WQ_u_Dot + WP_v_Dot + WP_u_Dot)
    s_t = K.dot(s_t_hat, v)  # v
    s_t = K.batch_flatten(s_t)
    a_t = softmax(s_t, mask=uQ_mask, axis=1)
    c_t = K.batch_dot(a_t, uQ, axes=[1, 1])

    GRU_inputs = K.concatenate([uP_t, c_t])
    g = K.sigmoid(K.dot(GRU_inputs, W_g1))  # W_g1
    GRU_inputs = g * GRU_inputs

    vP_t, s = super(QuestionAttnGRU, self).step(GRU_inputs, states)
    return vP_t, s
def step(self, x, states):
    h, c, att = self._step(x, states)
    if self.return_attention:
        # Flatten the attention to (batch_size, senses * hyps)
        return K.batch_flatten(att), [h, c]
    else:
        return h, [h, c]
def step(self, input_t, states):
    '''
    This method is a step function that updates the memory at each time step and produces
    a new output vector (Equations 1 to 6 in the paper). The memory state is flattened
    because K.rnn requires all states to have the same number of dimensions as the output,
    since it uses the same mask for the output and the states.

    Inputs:
        input_t (batch_size, input_dim)
        states (list[Tensor]):
            flattened_mem_tm1 (batch_size, input_length * output_dim)
            writer_h_tm1 (batch_size, output_dim)
            writer_c_tm1 (batch_size, output_dim)

    Outputs:
        h_t (batch_size, output_dim)
        flattened_mem_t (batch_size, input_length * output_dim)
    '''
    reader_states, flattened_mem_tm1, writer_states = self.split_states(states)
    input_mem_shape = K.shape(flattened_mem_tm1)
    # Integer division: `/` would produce a float under Python 3.
    mem_tm1_shape = (input_mem_shape[0], input_mem_shape[1] // self.output_dim, self.output_dim)
    mem_tm1 = K.reshape(flattened_mem_tm1, mem_tm1_shape)  # (batch_size, input_length, output_dim)
    reader_constants = self.reader.get_constants(input_t)  # Does not depend on input_t, see init.
    reader_states = reader_states[:2] + reader_constants + reader_states[2:]
    o_t, [_, reader_c_t] = self.reader.step(input_t, reader_states)  # o_t, reader_c_t: (batch_size, output_dim)
    z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
    c_t = self.compose_memory_and_output([o_t, m_rt])
    # Collect the variables needed to directly call the writer's step function.
    writer_constants = self.writer.get_constants(c_t)  # Returns dropouts for W and U (all 1s, see init).
    writer_states += writer_constants
    # Call the writer's step function (Equation 5).
    h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states)  # h_t, writer_c_t: (batch_size, output_dim)
    mem_t = self.update_memory(z_t, h_t, mem_tm1)
    flattened_mem_t = K.batch_flatten(mem_t)
    return h_t, [o_t, reader_c_t, flattened_mem_t, h_t, writer_c_t]
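The flatten-then-reshape trick this docstring describes can be checked in isolation. A minimal sketch with made-up shapes, showing that batch_flatten followed by K.reshape is a lossless round trip:

import numpy as np
from keras import backend as K

mem = K.variable(np.random.rand(2, 5, 8))  # (batch_size, input_length, output_dim)
flat = K.batch_flatten(mem)                # (batch_size, input_length * output_dim)
back = K.reshape(flat, K.shape(mem))       # recover (batch_size, input_length, output_dim)
print(K.eval(back).shape)  # (2, 5, 8)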
def gram_matrix(x):
    if K.image_dim_ordering() == "th":
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
def gram_matrix(x):
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram
def gram(x):
    # Flatten each channel
    flat = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    # Compute outer products of channel features with themselves
    gram = K.dot(flat, K.transpose(flat))
    return gram

# The "style loss":
# how much do the Gram matrices of the reference and generated activations differ?
# (using the mean square difference)
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == 'th':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# The "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the Gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image.
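For context, Gram matrices like the one above typically feed a style loss of the following shape. This sketch is modeled on the standard Keras neural-style example; `channels` and `size` (image height times width) are assumed constants, shown here with placeholder defaults:

def style_loss(style, combination, channels=3, size=400 * 400):
    # Scaled mean squared difference between the two Gram matrices,
    # following the normalization in Gatys et al.
    S = gram_matrix(style)
    C = gram_matrix(combination)
    return K.sum(K.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))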
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# The "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the Gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image.
def call(self, x, training=None):
    flatten_inputs = K.batch_flatten(x)
    output = self.dense_layer.call(flatten_inputs)
    if self.batch_normalize:
        output = self.batchnorm_layer.call(output)
    output = self.activation_layer.call(output)
    return output
def gram_matrix(x, norm_by_channels=False):
    '''
    Returns the Gram matrix of the tensor x.
    '''
    if K.ndim(x) == 3:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
        shape = K.shape(x)
        H, W, C = shape[0], shape[1], shape[2]  # input is (H, W, C)
        gram = K.dot(features, K.transpose(features))
    elif K.ndim(x) == 4:
        # Swap from (B, H, W, C) to (B, C, H, W)
        x = K.permute_dimensions(x, (0, 3, 1, 2))
        shape = K.shape(x)
        B, C, H, W = shape[0], shape[1], shape[2], shape[3]
        # Reshape as a batch of 2D matrices with vectorized channels
        features = K.reshape(x, K.stack([B, C, H * W]))
        # This is a batch of Gram matrices (B, C, C).
        gram = K.batch_dot(features, features, axes=2)
    else:
        raise ValueError('The input tensor should be either a 3d (H, W, C) '
                         'or 4d (B, H, W, C) tensor.')
    # Normalize the Gram matrix
    if norm_by_channels:
        denominator = C * H * W  # Normalization from Johnson
    else:
        denominator = H * W  # Normalization from Google
    gram = gram / K.cast(denominator, x.dtype)

    return gram
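Hypothetical usage of the batched branch, with made-up shapes: a (B, H, W, C) activation tensor yields one C x C Gram matrix per sample.

import numpy as np
from keras import backend as K

feats = K.variable(np.random.rand(8, 32, 32, 64))  # (B, H, W, C)
grams = gram_matrix(feats, norm_by_channels=True)  # (B, 64, 64), scaled by 1/(C*H*W)
print(K.eval(grams).shape)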
def call(self, x, mask=None):
    # Cast the boolean gate to float so it can be multiplied with x.
    s = K.cast(K.dot(x, self.W) > 0, K.floatx())
    x = K.expand_dims(x, 1) * K.expand_dims(s, 2)
    return K.reshape(x, (x.shape[0], -1))  # equivalently: K.batch_flatten(x)
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == "th":
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# The "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the Gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image.
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features - 1, K.transpose(features - 1))
    return gram
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == "th":
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features - 1, K.transpose(features - 1))
    return gram

# The "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the Gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image.
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram

# The "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the Gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image.
def call(self, inputs):
    target, wrt = inputs
    grads = K.gradients(target, wrt)
    assert len(grads) == 1
    grad = grads[0]
    return K.sqrt(K.sum(K.batch_flatten(K.square(grad)), axis=1, keepdims=True))
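The computation above is a per-sample L2 norm of a gradient. A standalone sketch of the same expression, under the assumption of a graph-mode (TF1-era) Keras backend like the rest of these examples; the loss and variable here are made up for illustration:

import numpy as np
from keras import backend as K

x = K.variable(np.random.rand(3, 4, 5))
target = K.sum(K.square(x))                  # toy scalar loss; its gradient is 2*x
grad = K.gradients(target, [x])[0]
norm = K.sqrt(K.sum(K.batch_flatten(K.square(grad)), axis=1, keepdims=True))
print(K.eval(norm).shape)  # (3, 1): one gradient norm per sample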
def call(self, x, mask=None):
    return K.batch_flatten(x)
def gram_matrix(x):
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# The style loss is then the (scaled, squared) Frobenius norm of the difference
# between the Gram matrices of the style and combination images.
def gram_matrix(x):
    '''
    Returns the Gram matrix of an input image.
    The 1/(2*N*M) factor is moved into gram_matrix to simplify style_loss.
    '''
    features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features)) / (x.get_shape().num_elements() * 2)
def gramian(filters):
    c_filters = K.batch_flatten(K.permute_dimensions(K.squeeze(filters, axis=0),
                                                     pattern=(2, 0, 1)))
    return K.dot(c_filters, K.transpose(c_filters))
def gram_matrix(x):
    features = backend.batch_flatten(backend.permute_dimensions(x, (2, 0, 1)))
    gram = backend.dot(features, backend.transpose(features))
    return gram
def compute_mask(self, x, mask=None):
    if mask is None or not self.return_mask:
        return None
    return K.batch_flatten(mask)