The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.flatten() (the snippets import keras.backend as K).
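First, a minimal sketch (not taken from any of the projects below, and assuming a standard TensorFlow-backed Keras install) of what K.flatten does: it collapses a tensor of arbitrary rank into a 1-D tensor, which is why the loss and metric functions below apply it to y_true and y_pred before reducing.

import numpy as np
from keras import backend as K

x = K.variable(np.arange(24).reshape(2, 3, 4))  # tensor of shape (2, 3, 4)
flat = K.flatten(x)                             # 1-D tensor of shape (24,)
print(K.eval(flat).shape)                       # (24,)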
def categorical_crossentropy_3d(y_true, y_predicted):
    """
    Computes categorical cross-entropy loss for a softmax distribution in a
    hot-encoded 3D array with shape (num_samples, num_classes, dim1, dim2, dim3)

    Parameters
    ----------
    y_true : keras.placeholder [batches, dim0, dim1, dim2]
        Placeholder for data holding the ground-truth labels encoded in a one-hot representation
    y_predicted : keras.placeholder [batches, channels, dim0, dim1, dim2]
        Placeholder for data holding the softmax distribution over classes

    Returns
    -------
    scalar
        Categorical cross-entropy loss value
    """
    y_true_flatten = K.flatten(y_true)
    y_pred_flatten = K.flatten(y_predicted)
    y_pred_flatten_log = -K.log(y_pred_flatten + K.epsilon())
    num_total_elements = K.sum(y_true_flatten)
    # cross_entropy = K.dot(y_true_flatten, K.transpose(y_pred_flatten_log))
    cross_entropy = tf.reduce_sum(tf.multiply(y_true_flatten, y_pred_flatten_log))
    mean_cross_entropy = cross_entropy / (num_total_elements + K.epsilon())
    return mean_cross_entropy
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))
def dice(y_true, y_pred, smooth=1.0):
    r"""
    The Dice coefficient, defined as
    ::

        \frac{2 |X \cap Y|}{|X| + |Y|}

    Parameters
    ----------
    y_true, y_pred : tensors
        The ground-truth and predicted binary classifications in an image
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return ((2. * intersection + smooth) /
            (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
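A common way to put the coefficient above to work during training is to report it as a metric and minimize one minus the coefficient as the loss. The sketch below is not part of the original example; model stands for an assumed, already-built Keras segmentation model.

def dice_loss(y_true, y_pred):
    # Hypothetical wrapper: minimizing (1 - Dice) maximizes overlap.
    return 1.0 - dice(y_true, y_pred)

# `model` is an assumed, already-constructed Keras model.
model.compile(optimizer='adam', loss=dice_loss, metrics=[dice])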
def setup_output(self, x):
    """
    Setup output tensor
    """
    x_max = K.max(x, axis=1)
    x_max = K.flatten(x_max)
    z = K.dot(x_max, self.w_proj_to_z)  # + self.b_proj_to_z
    hidden = K.dot(z, self.weights[0]) + self.biases[0]
    hidden = K.reshape(hidden, shape=(self.input_channels, self.hidden_dim))
    output = K.dot(hidden, self.weights[1]) + self.biases[1]
    self.output = K.reshape(output, (self.num_filters, self.input_channels, *self.output_shape))
    return self.output
def run_length_encode(mask):
    '''
    mask: numpy array, 1 - mask, 0 - background
    Returns run length as a formatted string
    '''
    inds = mask.flatten()
    runs = np.where(inds[1:] != inds[:-1])[0] + 2
    runs[1::2] = runs[1::2] - runs[:-1:2]
    rle = ' '.join([str(r) for r in runs])
    return rle

# def dice(im1, im2, empty_score=1.0):
#     im1 = im1.astype(np.bool)
#     im2 = im2.astype(np.bool)
#
#     if im1.shape != im2.shape:
#         raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
#
#     im_sum = im1.sum() + im2.sum()
#     if im_sum == 0:
#         return empty_score
#
#     intersection = np.logical_and(im1, im2)
#     return 2. * intersection.sum() / im_sum
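As a quick sanity check (added here, not part of the original snippet), running run_length_encode on a toy mask yields the expected 1-based (start, length) pairs:

import numpy as np

mask = np.array([[0, 1, 1],
                 [0, 1, 0]])
# Foreground pixels sit at flattened 1-based positions 2-3 and 5.
print(run_length_encode(mask))  # '2 2 5 1'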
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1] + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean

# Softmax cross-entropy loss function for COCO segmentation
# and models which expect but do not apply sigmoid on each entry.
# TensorFlow only.
def dice_coef(y_true, y_pred):
    smooth = 1.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef(y_true, y_pred):
    # `smooth` is expected to be defined at module level in the source project.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_np(y_true, y_pred):
    # `DICE_LOW_LIMIT` and `smooth` are expected to be module-level constants in the source project.
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    # Snap near-certain predictions to hard 0 / 1 before computing the overlap.
    y_pred_f[y_pred_f < DICE_LOW_LIMIT] = 0.
    y_pred_f[y_pred_f > 1 - DICE_LOW_LIMIT] = 1.
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_pos_np(y_true, y_pred, pos=0):
    y_true_f = y_true[:, pos].flatten()
    y_pred_f = y_pred[:, pos].flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)