The following are 50 code examples, extracted from open-source Python projects, that illustrate how to use keras.backend.mean().
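Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what keras.backend.mean() does: it reduces a tensor over the given axis or axes, optionally keeping the reduced dimensions.

import numpy as np
from keras import backend as K

x = K.variable(np.array([[1., 2.], [3., 4.]], dtype='float32'))
print(K.eval(K.mean(x)))                         # 2.5 -- mean over all elements
print(K.eval(K.mean(x, axis=0)))                 # [2. 3.] -- mean over the batch axis
print(K.eval(K.mean(x, axis=1, keepdims=True)))  # [[1.5] [3.5]] -- keep the reduced axis
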
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped, axis=1)

    acc = correct_pixels_per_class / n_pixels_per_class
    acc_mask = tf.is_finite(acc)
    acc_masked = tf.boolean_mask(acc, acc_mask)

    return K.mean(acc_masked)

def get_initial_states(self, onto_nse_input, input_mask=None):
    input_to_read = onto_nse_input  # (batch_size, num_words, num_senses, num_hyps, output_dim + 1)
    memory_input = input_to_read[:, :, :, :, :-1]  # (bs, words, senses, hyps, output_dim)
    if input_mask is None:
        mem_0 = K.mean(memory_input, axis=(2, 3))  # (batch_size, num_words, output_dim)
    else:
        memory_mask = input_mask
        if K.ndim(onto_nse_input) != K.ndim(input_mask):
            memory_mask = K.expand_dims(input_mask)
        memory_mask = K.cast(memory_mask / (K.sum(memory_mask) + K.epsilon()), 'float32')
        mem_0 = K.sum(memory_input * memory_mask, axis=(2, 3))  # (batch_size, num_words, output_dim)
    flattened_mem_0 = K.batch_flatten(mem_0)
    initial_states = self.reader.get_initial_states(input_to_read)
    initial_states += [flattened_mem_0]
    return initial_states

def call(self, x, mask=None):
    # x: (batch_size, input_length, input_dim)
    if mask is None:
        return K.mean(x, axis=1)  # (batch_size, input_dim)
    else:
        # This is to remove padding from the computational graph.
        if K.ndim(mask) > K.ndim(x):
            # This is due to the bug in Bidirectional that is passing the input mask
            # instead of computing output mask.
            # TODO: Fix the implementation of Bidirectional.
            mask = K.any(mask, axis=(-2, -1))
        if K.ndim(mask) < K.ndim(x):
            mask = K.expand_dims(mask)
        masked_input = switch(mask, x, K.zeros_like(x))
        weights = K.cast(mask / (K.sum(mask) + K.epsilon()), 'float32')
        return K.sum(masked_input * weights, axis=1)  # (batch_size, input_dim)

def call(self, x, mask=None):
    mean = super(IntraAttention, self).call(x, mask)
    # x: (batch_size, input_length, input_dim)
    # mean: (batch_size, input_dim)
    ones = K.expand_dims(K.mean(K.ones_like(x), axis=(0, 2)), dim=0)  # (1, input_length)
    # (batch_size, input_length, input_dim)
    tiled_mean = K.permute_dimensions(K.dot(K.expand_dims(mean), ones), (0, 2, 1))
    if mask is not None:
        if K.ndim(mask) > K.ndim(x):
            # Assuming this is because of the bug in Bidirectional. Temporary fix follows.
            # TODO: Fix Bidirectional.
            mask = K.any(mask, axis=(-2, -1))
        if K.ndim(mask) < K.ndim(x):
            mask = K.expand_dims(mask)
        x = switch(mask, x, K.zeros_like(x))
    # (batch_size, input_length, proj_dim)
    projected_combination = K.tanh(K.dot(x, self.vector_projector) + K.dot(tiled_mean, self.mean_projector))
    scores = K.dot(projected_combination, self.scorer)  # (batch_size, input_length)
    weights = K.softmax(scores)  # (batch_size, input_length)
    attended_x = K.sum(K.expand_dims(weights) * x, axis=1)  # (batch_size, input_dim)
    return attended_x

def call(self, inputs):
    if self.data_format == 'channels_first':
        sq = K.mean(inputs, [2, 3])
    else:
        sq = K.mean(inputs, [1, 2])

    ex = K.dot(sq, self.kernel1)
    if self.use_bias:
        ex = K.bias_add(ex, self.bias1)
    ex = K.relu(ex)

    ex = K.dot(ex, self.kernel2)
    if self.use_bias:
        ex = K.bias_add(ex, self.bias2)
    ex = K.sigmoid(ex)

    if self.data_format == 'channels_first':
        ex = K.expand_dims(ex, -1)
        ex = K.expand_dims(ex, -1)
    else:
        ex = K.expand_dims(ex, 1)
        ex = K.expand_dims(ex, 1)

    return inputs * ex

def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,), name='lambda')([z_mean, z_log_var]))

def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_dim_ordering() == 'th':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        nb_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        nb_channels = K.shape(style_image)[-1]
    s = gram_matrix(masked_style) / K.mean(style_mask) / nb_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / nb_channels
    return K.mean(K.square(s - c))

def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c))

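Both versions above call a gram_matrix helper that is not shown in these snippets. A typical definition consistent with the 3-D (channels, height, width) tensors they pass in looks like the sketch below (an assumption about the helper, not code extracted from these projects):

def gram_matrix(x):
    # x is expected as a 3-D (channels, height, width) tensor,
    # which is what both branches of region_style_loss produce.
    assert 3 == K.ndim(x)
    features = K.batch_flatten(x)                  # (channels, height * width)
    gram = K.dot(features, K.transpose(features))  # (channels, channels)
    return gram
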
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x


# build the VGG16 network with ImageNet weights

def visualize(model, layer_name):
    print 'Model loaded.'
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    for filter_index in sample(range(0, layer_dict[layer_name].nb_filter), 10):
        layer_output = layer_dict[layer_name].output
        loss = K.mean(layer_output[:, filter_index, :, :])
        grads = K.gradients(loss, model.layers[0].input)[0]
        grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
        iterate = K.function([model.layers[0].input, K.learning_phase()], [loss, grads])

        input_img_data = np.asarray([read_image('visimage.jpg')])

        for _ in xrange(100):
            loss_value, grads_value = iterate([input_img_data, 0])
            input_img_data += grads_value * 3

        img = deprocess_image(input_img_data[0])
        write_image(img, '../activations/out{}.jpg'.format(filter_index))

def get_gradcam(image, model, layer_name, mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")

    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])  # spatial global avg pool

    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image, 0])[0]

def cnn(height, width):
    question_input = Input(shape=(height, width, 1), name='question_input')
    conv1_Q = Conv2D(512, (2, 320), activation='sigmoid', padding='valid',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(question_input)
    Max1_Q = MaxPooling2D((29, 1), strides=(1, 1), padding='valid')(conv1_Q)
    F1_Q = Flatten()(Max1_Q)
    Drop1_Q = Dropout(0.25)(F1_Q)
    predictQ = Dense(32, activation='relu',
                     kernel_regularizer=regularizers.l2(0.01),
                     kernel_initializer=initializers.random_normal(mean=0.0, stddev=0.02))(Drop1_Q)

    prediction2 = Dropout(0.25)(predictQ)
    predictions = Dense(1, activation='relu')(prediction2)

    model = Model(inputs=[question_input], outputs=predictions)
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0))
    # model.compile(loss='mean_squared_error',
    #               optimizer='nadam')
    return model

def get_total_loss(content_losses, style_losses, total_var_loss,
                   content_weights, style_weights, tv_weights, class_targets):
    total_loss = K.variable(0.)
    # accumulate the individual weighted losses so they can be returned alongside the total
    weighted_content_losses = []
    weighted_style_losses = []

    # Compute content losses
    for loss in content_losses:
        weighted_loss = K.mean(K.gather(content_weights, class_targets) * loss)
        weighted_content_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute style losses
    for loss in style_losses:
        weighted_loss = K.mean(K.gather(style_weights, class_targets) * loss)
        weighted_style_losses.append(weighted_loss)
        total_loss += weighted_loss

    # Compute tv loss
    weighted_tv_loss = K.mean(K.gather(tv_weights, class_targets) * total_var_loss)
    total_loss += weighted_tv_loss

    return (total_loss, weighted_content_losses,
            weighted_style_losses, weighted_tv_loss)

def append_metrics(self):
    # Last two weight layers in model
    output_weights, output_biases = self.model.get_weights()[-2:]
    a_weights_mean, b_weights_mean = output_weights.mean(0)
    a_weights_min, b_weights_min = output_weights.min(0)
    a_weights_max, b_weights_max = output_weights.max(0)
    a_bias, b_bias = output_biases

    self.a_weights_mean.append(a_weights_mean)
    self.b_weights_mean.append(b_weights_mean)
    self.a_weights_min.append(a_weights_min)
    self.b_weights_min.append(b_weights_min)
    self.a_weights_max.append(a_weights_max)
    self.b_weights_max.append(b_weights_max)
    self.a_bias.append(a_bias)
    self.b_bias.append(b_bias)

def call(self, x, mask=None):
    if mask is None:
        return K.mean(x, axis=1)

    mask = K.cast(mask, "float32")
    expanded_mask = K.expand_dims(mask)
    # zero embedded vectors which come from masked characters
    x_masked = x * expanded_mask
    # how many non-masked characters are in each row?
    mask_counts = K.sum(mask, axis=-1)
    # add up the vector representations along the time dimension
    # the result should have dimension (n_samples, n_embedding_dims)
    x_sums = K.sum(x_masked, axis=1)
    # cast the number of non-zero elements to float32 and
    # give it an extra dimension so it can broadcast properly in
    # an elementwise division
    counts_cast = K.expand_dims(mask_counts)
    return x_sums / counts_cast

def prep_model(inputs, N, s0pad, s1pad, c, granlevels=1):
    # LSTM
    lstm = LSTM(N, return_sequences=True, implementation=2,
                kernel_regularizer=l2(c['l2reg']), recurrent_regularizer=l2(c['l2reg']),
                bias_regularizer=l2(c['l2reg']))

    x1 = inputs[0]
    x2 = inputs[1]
    h1 = lstm(x1)
    h2 = lstm(x2)

    W_x = Dense(N, kernel_initializer='glorot_uniform', use_bias=True,
                kernel_regularizer=l2(c['l2reg']))
    W_h = Dense(N, kernel_initializer='orthogonal', use_bias=True,
                kernel_regularizer=l2(c['l2reg']))
    sigmoid = Activation('sigmoid')
    a1 = multiply([x1, sigmoid(add([W_x(x1), W_h(h1)]))])
    a2 = multiply([x2, sigmoid(add([W_x(x2), W_h(h2)]))])

    # Averaging
    avg = Lambda(function=lambda x: K.mean(x, axis=1),
                 output_shape=lambda shape: (shape[0], ) + shape[2:])
    gran1 = avg(a1)
    gran2 = avg(a2)

    return [gran1, gran2], N

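The `avg` Lambda above is a reusable pattern for temporal mean pooling with K.mean. A standalone sketch of the same idea, with made-up shapes rather than this project's configuration:

from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

seq = Input(shape=(20, 64))                            # (timesteps, features)
avg = Lambda(lambda x: K.mean(x, axis=1),              # average over the time axis
             output_shape=lambda s: (s[0],) + s[2:])(seq)
pool_model = Model(inputs=seq, outputs=avg)            # output shape: (None, 64)
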
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped, axis=1) + K.sum(y_pred_reshaped, axis=1)

    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)
    iou_masked = tf.boolean_mask(iou, iou_mask)

    return K.mean(iou_masked)

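mean_acc and mean_IoU are plain (y_true, y_pred) functions, so they can be passed directly as Keras metrics. A hedged usage sketch with a toy per-pixel classifier (the model below is illustrative, not from the original project, and assumes a Keras/TensorFlow version compatible with the nb_classes keyword and tf.is_finite used above):

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(64, 64, 3))
out = Conv2D(5, (1, 1), activation='softmax')(inp)   # 5 classes, per-pixel softmax
seg_model = Model(inp, out)
seg_model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=[mean_acc, mean_IoU])       # custom metrics defined above
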
def risk_estimation(y_true, y_pred):
    return -100. * K.mean((y_true - 0.0002) * y_pred)

######################
# my custom buy_hold_sell activation function
#####################
# from keras_step_function import tf_stepy
#
# def buy_hold_sell(x):
#     return tf_stepy(x)
#
# get_custom_objects().update({'custom_activation': Activation(buy_hold_sell)})

#######################
# classification style
# to work with y_pred as [buy, half, sell]
#######################
# def risk_estimation_bhs(y_true, y_pred):
#     # return -100 * K.mean((y_true - 0.0002) * K.constant([1.0, 0.75, 0.5, 0.25, 0.0]) * y_pred)  # -0.0002 is removed from original
#     return -100 * K.mean((y_true - 0.0002) * K.constant([1.0, 0.5, 0.0]) * y_pred)  # -0.0002 is removed from original

def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_dim_ordering() == 'th':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x


# load model

def SP_pixelwise_loss(y_true, y_pred):
    y_true_label = y_true[:, :class_number, :, :]
    y_true_SP_weight = y_true[:, class_number:, :, :]

    y_pred = K.clip(y_pred, -50., 50.)  # prevent overflow
    sample_num_per_class = K.sum(y_true_label, axis=[2, 3], keepdims=True)
    class_ind = K.cast(K.greater(sample_num_per_class, 0.), 'float32')
    avg_sample_num_per_class = K.sum(sample_num_per_class, axis=1, keepdims=True) / K.sum(class_ind, axis=1, keepdims=True)
    sample_weight_per_class = avg_sample_num_per_class / (sample_num_per_class + 0.1)
    exp_pred = K.exp(y_pred - K.max(y_pred, axis=1, keepdims=True))
    y_pred_softmax = exp_pred / K.sum(exp_pred, axis=1, keepdims=True)
    pixel_wise_loss = -K.log(y_pred_softmax) * y_true_label
    pixel_wise_loss = pixel_wise_loss * sample_weight_per_class
    weighter_pixel_wise_loss = K.sum(pixel_wise_loss, axis=1, keepdims=True)

    return K.mean(weighter_pixel_wise_loss * y_true_SP_weight)

# label distribution loss

def layout_loss_hard(y_true, y_pred):
    y_pred = K.clip(y_pred, -50., 50.)  # prevent overflow
    exp_pred = K.exp(y_pred - K.max(y_pred, axis=1, keepdims=True))
    y_pred_softmax = exp_pred / K.sum(exp_pred, axis=1, keepdims=True)

    max_pred_softmax = K.max(y_pred_softmax, axis=1, keepdims=True)
    bin_pred_softmax_a = y_pred_softmax / max_pred_softmax
    bin_pred_softmax = bin_pred_softmax_a ** 6.

    final_pred = K.mean(bin_pred_softmax, axis=[2, 3])
    final_pred = final_pred / (K.sum(final_pred, axis=1, keepdims=True) + K.epsilon())

    y_true_s = K.squeeze(y_true, axis=3)
    y_true_s = K.squeeze(y_true_s, axis=2)
    tier_wise_loss_v = -K.clip(K.log(final_pred), -500, 500) * y_true_s

    return K.mean(K.sum(tier_wise_loss_v, axis=1))

# compile

def iterate_softmax(model, neuron):
    # this is a placeholder tensor that will contain our generated images
    input_tensor = model.input

    # build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    print('X shape', model.output[:, neuron])
    x = model.output
    loss_weight_continuity = 0.0
    loss_weight_activity = 1.0

    loss = K.mean(x)
    # loss += loss_weight_continuity * total_variation_norm(input_tensor)

    # compute the gradient of the input picture wrt this loss
    grads = K.gradients(loss, input_tensor)[0]
    # normalization trick: we normalize the gradient
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    # this function returns the loss and grads given the input picture
    return K.function([input_tensor], [loss, grads])

def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.3

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    #print 'cur x shape is', x.shape
    x = x.transpose((1, 2, 0))
    #print 'after trnaspose x shape is', x.shape
    x = x[:, :, ::-1]
    #print 'after bgr x shape is', x.shape
    #x = np.clip(x, 0, 255).astype('uint8')
    x = x.astype('uint8')
    return x

def createBaseNetworkSmall(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkLarge(inputDim, inputLength):
    baseNetwork = Sequential()
    baseNetwork.add(Embedding(input_dim=inputDim, output_dim=inputDim, input_length=inputLength))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(Conv1D(1024, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.02),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.02)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(2048, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    baseNetwork.add(Dense(1024, activation='relu'))
    baseNetwork.add(Dropout(0.5))
    return baseNetwork

def createBaseNetworkSmall(inputLength, inputDim):
    baseNetwork = Sequential()
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           input_shape=(inputLength, inputDim),
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 7, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Conv1D(256, 3, strides=1, padding='valid', activation='relu',
                           kernel_initializer=RandomNormal(mean=0.0, stddev=0.05),
                           bias_initializer=RandomNormal(mean=0.0, stddev=0.05)))
    baseNetwork.add(MaxPooling1D(pool_size=3, strides=3))
    baseNetwork.add(Flatten())
    baseNetwork.add(Dense(128, activation='relu'))
    baseNetwork.add(Dropout(0.2))
    baseNetwork.add(Dense(128, activation='relu'))
    baseNetwork.add(Dropout(0.2))
    return baseNetwork

def discriminator_loss(y_true, y_pred):
    BATCH_SIZE = 10
    return K.mean(K.binary_crossentropy(K.flatten(y_pred),
                                        K.concatenate([K.ones_like(K.flatten(y_pred[:BATCH_SIZE, :, :, :])),
                                                       K.zeros_like(K.flatten(y_pred[:BATCH_SIZE, :, :, :]))])),
                  axis=-1)

def discriminator_on_generator_loss(y_true, y_pred):
    BATCH_SIZE = 10
    return K.mean(K.binary_crossentropy(K.flatten(y_pred),
                                        K.ones_like(K.flatten(y_pred))),
                  axis=-1)

def generator_l1_loss(y_true, y_pred):
    BATCH_SIZE = 10
    return K.mean(K.abs(K.flatten(y_pred) - K.flatten(y_true)), axis=-1)

def wasserstein_loss(y_true, y_pred):
    """
    Wasserstein distance for GAN

    The author uses:
        g_loss = mean(-fake_logit)
        c_loss = mean(fake_logit - true_logit)
    where "logit" denotes the raw (unactivated) output of the discriminator.
    """
    return K.mean(y_true * y_pred)

# Build Generative Model

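The docstring above implies a particular labelling convention for y_true. A short sketch of that convention (an inference from the docstring, not the original training code):

import numpy as np

# critic step: real samples labelled -1, fake samples labelled +1, so that
#   K.mean(y_true * y_pred) == mean(fake_logit - true_logit)
real_y = -np.ones((10, 1), dtype='float32')
fake_y = np.ones((10, 1), dtype='float32')

# generator step: the fake batch is labelled -1, giving mean(-fake_logit)
gen_y = -np.ones((10, 1), dtype='float32')
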
def pixel_softmax(y_true, y_pred):
    """
    Thanks to Alexander Movchan

    :param y_true: y_true shape: (batch_size, h, w, 1)
    :param y_pred: y_pred shape: (batch_size, h, w, num_classes)
    :return:
    """
    y_true = tf.cast(y_true[:, :, :, 0], tf.int32)
    return K.mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred))

def pixel_softmax(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, num_mask_channels))
    y_true = K.reshape(y_true, (-1, num_mask_channels))
    return K.mean(K.categorical_crossentropy(y_pred, y_true, from_logits=True))

def mean_squared_error_normalized(y_true, y_pred):
    import keras.backend as K
    return K.mean(K.square(y_pred - y_true), axis=-1)
    # return K.mean(K.square(y_pred - y_true) / y_true, axis=-1)
    # return K.mean(tf.divide(K.square(y_pred - y_true), K.var(y_true, axis=-1, keepdims=True)), axis=-1)

def accuracy(y_true, y_pred):
    return K.mean(K.equal(K.argmax(y_true, axis=4), K.argmax(y_pred, axis=4)))

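Taking argmax over axis=4 implies 5-D one-hot tensors, e.g. volumetric segmentation with shape (batch, depth, height, width, classes). A toy check under that assumption (shapes and values are illustrative only):

import numpy as np
from keras import backend as K

labels = np.random.randint(0, 3, size=(2, 4, 4, 4))               # 3 classes
yt = K.variable(np.eye(3, dtype='float32')[labels])               # one-hot: (2, 4, 4, 4, 3)
yp = K.variable(np.random.rand(2, 4, 4, 4, 3).astype('float32'))  # fake predictions
print(K.eval(accuracy(yt, yp)))   # fraction of voxels whose argmax matches
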
def call(self, x, mask=None):
    if mask is not None:
        mask = K.cast(mask, 'float32')
        if not K.any(mask):
            return K.mean(x, axis=1)
        else:
            return K.cast(x.sum(axis=1) / mask.sum(axis=1, keepdims=True), K.floatx())
    else:
        return K.mean(x, axis=1)

def accuracy(y_pred, y_true):
    return K.mean(y_true)

def wasserstein(y_true, y_pred):
    return K.mean(y_true * y_pred)

def LN(x, gamma, beta, epsilon=1e-6, axis=-1):
    m = K.mean(x, axis=axis, keepdims=True)
    std = K.sqrt(K.var(x, axis=axis, keepdims=True) + epsilon)
    x_normed = (x - m) / (std + epsilon)
    x_normed = gamma * x_normed + beta
    return x_normed

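A minimal sketch of calling LN directly on a batch of feature vectors (values and shapes here are illustrative, not from the original project):

import numpy as np
from keras import backend as K

feats = K.variable(np.random.rand(4, 8).astype('float32'))  # (batch, features)
gamma = K.ones((8,))                                         # learnable scale in a real layer
beta = K.zeros((8,))                                         # learnable shift in a real layer
normed = LN(feats, gamma, beta)                              # normalize each row over axis=-1
print(K.eval(K.mean(normed, axis=-1)))                       # approximately zero per sample
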
def sampling(args):
    z_mean, z_log_var = args
    # draw epsilon from a standard normal N(0, 1)
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_var) * epsilon

# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_var])`

def vae_loss(x, x_decoded_mean):
    # NOTE: binary_crossentropy expects a batch_size by dim
    # for x and x_decoded_mean, so we MUST flatten these!
    # Flatten
    x = K.flatten(x)
    x_decoded_mean = K.flatten(x_decoded_mean)
    xent_loss = img_rows * img_cols * objectives.binary_crossentropy(x, x_decoded_mean)
    kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
    return xent_loss + kl_loss

# input_shape: (100, 1, 28, 28)
# output_shape: (100, 1, 28, 28)

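The KL term above is the closed-form KL divergence between the approximate posterior N(z_mean, exp(z_log_var)) and a standard normal prior, averaged over the latent dimensions by K.mean. A toy check with concrete tensors (not the project's actual z_mean and z_log_var):

import numpy as np
from keras import backend as K

z_mean_toy = K.variable(np.zeros((2, 3), dtype='float32'))
z_log_var_toy = K.variable(np.zeros((2, 3), dtype='float32'))
kl = -0.5 * K.mean(1 + z_log_var_toy - K.square(z_mean_toy) - K.exp(z_log_var_toy), axis=-1)
print(K.eval(kl))   # [0. 0.] -- a standard-normal posterior has zero KL to the prior
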
def contractive_loss(model, lam=1e-4):
    def loss(y_true, y_pred):
        ent_loss = K.mean(K.binary_crossentropy(y_pred, y_true), axis=-1)

        W = K.variable(value=model.encoder.get_weights()[0])  # N x N_hidden
        W = K.transpose(W)  # N_hidden x N
        h = model.encoder.output
        dh = h * (1 - h)  # N_batch x N_hidden

        # N_batch x N_hidden * N_hidden x 1 = N_batch x 1
        contractive = lam * K.sum(dh ** 2 * K.sum(W ** 2, axis=1), axis=1)

        return ent_loss + contractive
    return loss

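A hedged sketch of wiring contractive_loss into a tiny autoencoder. The model structure and the exposed `encoder` attribute are assumptions for illustration; the original project's model is not shown here.

from keras.layers import Input, Dense
from keras.models import Model

inp = Input(shape=(64,))
encoded = Dense(16, activation='sigmoid', name='encoder')(inp)  # sigmoid: dh = h * (1 - h) holds
decoded = Dense(64, activation='sigmoid')(encoded)
cae = Model(inp, decoded)
cae.encoder = cae.get_layer('encoder')    # contractive_loss reads model.encoder
cae.compile(optimizer='adam', loss=contractive_loss(cae, lam=1e-4))
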