The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.transpose().
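For orientation, here is a minimal sketch (not taken from any of the source projects) of what K.transpose does: it swaps the axes of a 2-D tensor, and most snippets below pair it with K.dot to form Gram-style products. It assumes Keras with the TensorFlow backend; the extracted snippets themselves also rely on imports from their source projects, typically import numpy as np, from keras import backend as K, and import tensorflow as tf.

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1., 2., 3.],
                         [4., 5., 6.]]))  # shape (2, 3)
xT = K.transpose(x)                       # shape (3, 2)
print(K.int_shape(xT))                    # (3, 2)
print(K.eval(K.dot(x, xT)))               # (2, 2) Gram-style product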
def deprocess(img4d):
    img = img4d.copy()
    if K.image_dim_ordering() == "th":
        # (B, C, H, W)
        img = img.reshape((img4d.shape[1], img4d.shape[2], img4d.shape[3]))
        # (C, H, W) -> (H, W, C)
        img = img.transpose((1, 2, 0))
    else:
        # (B, H, W, C)
        img = img.reshape((img4d.shape[1], img4d.shape[2], img4d.shape[3]))
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    # BGR -> RGB
    img = img[:, :, ::-1]
    img = np.clip(img, 0, 255).astype("uint8")
    return img
def categorical_crossentropy_3d(y_true, y_predicted):
    """
    Computes categorical cross-entropy loss for a softmax distribution in a hot-encoded 3D array
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true : keras.placeholder [batches, dim0, dim1, dim2]
        Placeholder for data holding the ground-truth labels encoded in a one-hot representation
    y_predicted : keras.placeholder [batches, channels, dim0, dim1, dim2]
        Placeholder for data holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(y_true_flatten)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
def deprocess_image(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# get tensor representations of our images
def deprocess_image(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# get tensor representations of our images
def get_gradcam(image, model, layer_name, mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])  # spatial global avg pool
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image, 0])[0]
def call(self, x):
    s, s_hat = x

    # Compute the variables defined in the class comment
    S2 = K.sum(s)
    S1 = s_hat[0, 1]
    N = s_hat[0, 0]

    # Compute the unbiased weights
    a2 = (S1 + S2) / N / s

    # Compute the biased weights and the scaling factor t
    a1 = K.pow(a2, self.k)
    sT = K.transpose(s)
    t = K.dot(sT, a2) / K.dot(sT, a1)

    return K.stop_gradient([a1 * t])[0]
def deprocess_image(x): if K.image_dim_ordering() == "th": x = x.reshape((3, img_width, img_height)) x = x.transpose((1, 2, 0)) else: x = x.reshape((img_width, img_height, 3)) x[:, :, 0] += 103.939 x[:, :, 1] += 116.779 x[:, :, 2] += 123.68 # BGR -> RGB x = x[:, :, ::-1] x = np.clip(x, 0, 255).astype('uint8') return x # util function to preserve image color
def deprocess_image(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # BGR to RGB
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# util function to preserve image color
def preprocess_image(image_path, load_dims=False, style_image=False):
    global img_WIDTH, img_HEIGHT, aspect_ratio, b_scale_ratio_height, b_scale_ratio_width
    img = imread(image_path, mode="RGB")  # Prevents crashes due to PNG images (ARGB)
    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = img_HEIGHT / img_WIDTH
        if style_image:
            b_scale_ratio_width = float(img.shape[0]) / img_WIDTH
            b_scale_ratio_height = float(img.shape[1]) / img_HEIGHT

    img = imresize(img, (img_width, img_height))
    img = img.transpose((2, 0, 1)).astype('float64')
    img = np.expand_dims(img, axis=0)
    return img

# util function to convert a tensor into a valid image
def gram_matrix(x): """ Computes the outer-product of the input tensor x. Input ----- - x: input tensor of shape (C x H x W) Returns ------- - x . x^T Note that this can be computed efficiently if x is reshaped as a tensor of shape (C x H*W). """ # assert K.ndim(x) == 3 if K.image_dim_ordering() == 'th': features = K.batch_flatten(x) else: features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1))) return K.dot(features, K.transpose(features))
def __init__(self, vocab_size, sequence_size, setting=None, checkpoint_path="", temperature=10, tying=False):
    super().__init__(vocab_size, sequence_size, setting, checkpoint_path)
    self.temperature = temperature
    self.tying = tying
    self.gamma = self.setting.gamma

    if tying:
        self.model.pop()  # remove activation
        self.model.pop()  # remove projection (use self embedding)
        self.model.add(Lambda(lambda x: K.dot(x, K.transpose(self.embedding.embeddings))))
        self.model.add(Activation("softmax"))
def augmented_loss(self, y_true, y_pred):
    _y_pred = Activation("softmax")(y_pred)
    loss = K.categorical_crossentropy(_y_pred, y_true)

    # y is (batch x seq x vocab)
    y_indexes = K.argmax(y_true, axis=2)  # turn one hot to index. (batch x seq)
    y_vectors = self.embedding(y_indexes)  # lookup the vector (batch x seq x vector_length)

    #v_length = self.setting.vector_length
    #y_vectors = K.reshape(y_vectors, (-1, v_length))
    #y_t = K.map_fn(lambda v: K.dot(self.embedding.embeddings, K.reshape(v, (-1, 1))), y_vectors)
    #y_t = K.squeeze(y_t, axis=2)  # unknown but necessary operation
    #y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))

    # vector x embedding dot products (batch x seq x vocab)
    y_t = tf.tensordot(y_vectors, K.transpose(self.embedding.embeddings), 1)
    y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))  # explicitly set shape
    y_t = K.softmax(y_t / self.temperature)
    _y_pred_t = Activation("softmax")(y_pred / self.temperature)
    aug_loss = kullback_leibler_divergence(y_t, _y_pred_t)
    loss += (self.gamma * self.temperature) * aug_loss
    return loss
def kde_entropy(output, var):
    # Kernel density estimate of entropy, in nats
    dims = K.cast(K.shape(output)[1], K.floatx())
    N = K.cast(K.shape(output)[0], K.floatx())

    normconst = (dims / 2.0) * K.log(2 * np.pi * var)

    # get dists matrix
    x2 = K.expand_dims(K.sum(K.square(output), axis=1), 1)
    dists = x2 + K.transpose(x2) - 2 * K.dot(output, K.transpose(output))
    dists = dists / (2 * var)

    lprobs = logsumexp(-dists, axis=1) - K.log(N) - normconst
    h = -K.mean(lprobs)
    return h
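The dists line above relies on the expansion ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 x_i . x_j, computed for all pairs at once via K.dot(output, K.transpose(output)). A quick NumPy sanity check of that identity, on hypothetical data rather than anything from the source project:

import numpy as np

output = np.random.randn(5, 3)
x2 = np.sum(output ** 2, axis=1, keepdims=True)    # (5, 1) squared norms
dists = x2 + x2.T - 2 * output.dot(output.T)       # (5, 5) pairwise squared distances
ref = np.sum((output[:, None, :] - output[None, :, :]) ** 2, axis=2)
assert np.allclose(dists, ref)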
def preprocess(img):
    img4d = img.copy()
    img4d = img4d.astype("float64")
    if K.image_dim_ordering() == "th":
        # (H, W, C) -> (C, H, W)
        img4d = img4d.transpose((2, 0, 1))
    img4d = np.expand_dims(img4d, axis=0)
    img4d = vgg16.preprocess_input(img4d)
    return img4d
def gram_matrix(x): if K.image_dim_ordering() == "th": features = K.batch_flatten(x) else: features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1))) gram = K.dot(features, K.transpose(features)) return gram
def categorical_crossentropy_3d_SW(y_true_sw, y_predicted):
    """
    Computes categorical cross-entropy loss for a softmax distribution in a hot-encoded 3D array
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true : keras.placeholder [batches, dim0, dim1, dim2]
        Placeholder for data holding the ground-truth labels encoded in a one-hot representation
    y_predicted : keras.placeholder [batches, channels, dim0, dim1, dim2]
        Placeholder for data holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    sw = y_true_sw[:, :, :, :, K.int_shape(y_predicted)[-1]:]
    y_true = y_true_sw[:, :, :, :, :K.int_shape(y_predicted)[-1]]
    y_true_flatten = K.flatten(y_true * sw)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(y_true_flatten)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
def categorical_crossentropy_3d_masked(vectors):
    """
    Computes categorical cross-entropy loss for a softmax distribution in a hot-encoded 3D array
    with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true : keras.placeholder [batches, dim0, dim1, dim2]
        Placeholder for data holding the ground-truth labels encoded in a one-hot representation
    y_predicted : keras.placeholder [batches, channels, dim0, dim1, dim2]
        Placeholder for data holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    y_predicted, mask, y_true = vectors
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(mask)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
def categorical_crossentropy_3d_lambda(vectors):
    y_true, y_pred = vectors
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_pred)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (K.sum(y_true) + K.epsilon())
    return mean_cross_entropy
def contractive_loss(model, lam=1e-4):
    def loss(y_true, y_pred):
        ent_loss = K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

        W = K.variable(value=model.encoder.get_weights()[0])  # N x N_hidden
        W = K.transpose(W)  # N_hidden x N
        h = model.encoder.output
        dh = h * (1 - h)  # N_batch x N_hidden

        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
        contractive = lam * K.sum(dh ** 2 * K.sum(W ** 2, axis=1), axis=1)

        return ent_loss + contractive
    return loss
def call(self, x, mask=None):
    # Use tied weights
    self.kernel = K.transpose(self.tied_to.kernel)
    output = K.dot(x, self.kernel)
    if self.use_bias:
        output += self.bias
    return self.activation(output)
def deprocess_image(x):
    x = x.reshape((3, im_height, im_width))
    x = x.transpose((1, 2, 0))
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def gram_matrix(x):
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram
def gram(x):
    # Flatten each channel
    flat = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    # Compute outer products of channel features with themselves
    gram = K.dot(flat, K.transpose(flat))
    return gram

# The "style loss":
# how much do the Gram matrices of the reference and generated activations differ?
# (using the mean square difference)
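As a sketch of how that mean-square difference might be wired up around the gram helper above: the style and combination feature-map tensors and the 1 / (4 * C^2 * (H*W)^2) scaling follow the common Gatys et al. formulation and are assumptions, not code from this project.

def style_loss(style, combination, channels, size):
    # Mean square difference between the two Gram matrices,
    # scaled by channel count C and feature-map size H*W.
    S = gram(style)
    C = gram(combination)
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))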
def predict(im, pos, model, k):
    im_ary = np.array([im]).transpose((0, 2, 3, 1)) \
        if K.image_data_format() == 'channels_last' else np.array([im])
    res = model.predict([im_ary, np.array([pos])])
    action = np.argmax(res)
    reward = get_layer_output(model, 'reward', im_ary)
    value = get_layer_output(model, 'value{}'.format(k), im_ary)
    reward = np.reshape(reward, im.shape[1:])
    value = np.reshape(value, im.shape[1:])
    return res, action, reward, value
def call(self, inputs, output_shape=None):
    """
    Seen on https://github.com/tensorflow/tensorflow/issues/2169
    Replace with unpool op when/if issue merged
    Add theano backend
    """
    updates, mask = inputs[0], inputs[1]
    with K.tf.variable_scope(self.name):
        mask = K.cast(mask, 'int32')
        input_shape = K.tf.shape(updates, out_type='int32')

        # calculation new shape
        if output_shape is None:
            output_shape = (input_shape[0],
                            input_shape[1] * self.size[0],
                            input_shape[2] * self.size[1],
                            input_shape[3])
        self.output_shape1 = output_shape

        # calculation indices for batch, height, width and feature maps
        one_like_mask = K.ones_like(mask, dtype='int32')
        batch_shape = K.concatenate([[input_shape[0]], [1], [1], [1]], axis=0)
        batch_range = K.reshape(K.tf.range(output_shape[0], dtype='int32'), shape=batch_shape)
        b = one_like_mask * batch_range
        y = mask // (output_shape[2] * output_shape[3])
        x = (mask // output_shape[3]) % output_shape[2]
        feature_range = K.tf.range(output_shape[3], dtype='int32')
        f = one_like_mask * feature_range

        # transpose indices & reshape update values to one dimension
        updates_size = K.tf.size(updates)
        indices = K.transpose(K.reshape(K.stack([b, y, x, f]), [4, updates_size]))
        values = K.reshape(updates, [updates_size])
        ret = K.tf.scatter_nd(indices, values, output_shape)
        return ret
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == 'th':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def deprocess_image(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def gram_matrix(x):
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram
def gram_matrix(x):
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram

# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def deprocess_image(x):
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, img_nrows, img_ncols))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_nrows, img_ncols, 3))
    # Remove zero-center by mean pixel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # 'BGR'->'RGB'
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def attention_control(args):
    x, dense_2 = args
    find_att = K.reshape(x, (15, 15, 10))
    find_att = K.transpose(find_att[:, :, :])
    find_att = K.mean(find_att, axis=0)
    find_att = find_att / K.sum(find_att, axis=0)
    find_att = K.repeat_elements(find_att, 32, axis=0)
    find_att = K.reshape(find_att, (1, 32, 15, 15))
    return find_att
def change_shape1(x):
    x = K.reshape(K.transpose(x), (15 * 15, 32))
    return x
def get_gradcam(image, model, layer_name):
    # remove dropout/noise layers
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(model.output)
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input], [heatmap])
    return fetch_heatmap([image])[0]
def contractive_autoencoder(X, lam=1e-3):
    X = X.reshape(X.shape[0], -1)
    M, N = X.shape
    N_hidden = 64
    N_batch = 100

    inputs = Input(shape=(N,))
    encoded = Dense(N_hidden, activation='sigmoid', name='encoded')(inputs)
    outputs = Dense(N, activation='linear')(encoded)

    model = Model(input=inputs, output=outputs)

    def contractive_loss(y_pred, y_true):
        mse = K.mean(K.square(y_true - y_pred), axis=1)

        W = K.variable(value=model.get_layer('encoded').get_weights()[0])  # N x N_hidden
        W = K.transpose(W)  # N_hidden x N
        h = model.get_layer('encoded').output
        dh = h * (1 - h)  # N_batch x N_hidden

        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
        contractive = lam * K.sum(dh ** 2 * K.sum(W ** 2, axis=1), axis=1)

        return mse + contractive

    model.compile(optimizer='adam', loss=contractive_loss)
    model.fit(X, X, batch_size=N_batch, nb_epoch=3)

    return model, Model(input=inputs, output=encoded)
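A hypothetical invocation of the helper above; the array X here is a random stand-in (the source project's actual data is not shown), and the returned encoder maps inputs to the 64-dimensional latent codes:

X = np.random.rand(1000, 28, 28)  # hypothetical stand-in for e.g. MNIST images
autoencoder, encoder = contractive_autoencoder(X, lam=1e-3)
codes = encoder.predict(X.reshape(X.shape[0], -1))  # (1000, 64) latent codes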
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
    """Generative moment matching loss with RBF kernel.

    Reference: https://arxiv.org/abs/1502.02761
    """
    warnings.warn('Moment matching loss is still in development.')

    if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
        raise ValueError('RBF Moment Matching function currently only works '
                         'for outputs with shape (batch_size, num_features). '
                         'Got y_true="%s" and y_pred="%s".' %
                         (str(K.int_shape(y_pred)), str(K.int_shape(y_true))))

    sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]

    x = K.concatenate([y_pred, y_true], 0)

    # Performs dot product between all combinations of rows in X.
    xx = K.dot(x, K.transpose(x))  # (batch_size, batch_size)

    # Performs dot product of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, None)

    # Gets exponent entries of the RBF kernel (without sigmas).
    exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)

    # Applies all the sigmas.
    total_loss = None
    for sigma in sigmas:
        kernel_val = K.exp(exponent / sigma)
        loss = K.sum(kernel_val)
        total_loss = loss if total_loss is None else loss + total_loss

    return total_loss
def gram_matrix(x, norm_by_channels=False):
    '''
    Returns the Gram matrix of the tensor x.
    '''
    if K.ndim(x) == 3:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
        shape = K.shape(x)
        C, H, W = shape[0], shape[1], shape[2]
        gram = K.dot(features, K.transpose(features))
    elif K.ndim(x) == 4:
        # Swap from (H, W, C) to (B, C, H, W)
        x = K.permute_dimensions(x, (0, 3, 1, 2))
        shape = K.shape(x)
        B, C, H, W = shape[0], shape[1], shape[2], shape[3]
        # Reshape as a batch of 2D matrices with vectorized channels
        features = K.reshape(x, K.stack([B, C, H * W]))
        # This is a batch of Gram matrices (B, C, C).
        gram = K.batch_dot(features, features, axes=2)
    else:
        raise ValueError('The input tensor should be either a 3d (H, W, C) or 4d (B, H, W, C) tensor.')
    # Normalize the Gram matrix
    if norm_by_channels:
        denominator = C * H * W  # Normalization from Johnson
    else:
        denominator = H * W  # Normalization from Google
    gram = gram / K.cast(denominator, x.dtype)
    return gram
def call(self, x):
    s, x1 = x
    a = x1[:, :1]
    s_hat = x1[:, 1:2]

    # Rescale the weights, making sure we mostly scale down
    a_hat = a * K.clip(s_hat / s, self.min_decrease, self.max_increase)

    # Scale again so that the reported loss is comparable to the other ones
    t = 1
    #sT = K.transpose(s)
    #t = K.dot(sT, a) / K.dot(sT, a_hat)

    return K.stop_gradient([a_hat * t])[0]