The following 11 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.minimum().
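For reference, K.minimum() returns the element-wise minimum of two tensors, broadcasting where shapes allow. A minimal sketch, assuming TensorFlow 2.x with the tf.keras backend:

from tensorflow.keras import backend as K

a = K.constant([1.0, 5.0, 3.0])
b = K.constant([4.0, 2.0, 3.0])

# Element-wise minimum; the result has the broadcast shape of the inputs.
print(K.eval(K.minimum(a, b)))  # [1. 2. 3.]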
def ori_acc_delta_k(y_true, y_pred, k=10, max_delta=180):
    # get ROI
    label_seg = K.sum(y_true, axis=-1)
    label_seg = K.tf.cast(K.tf.greater(label_seg, 0), K.tf.float32)
    # get pred angle
    angle = K.cast(K.argmax(ori_highest_peak(y_pred, max_delta), axis=-1),
                   dtype=K.tf.float32) * 2.0 + 1.0
    # get gt angle
    angle_t = K.cast(K.argmax(y_true, axis=-1), dtype=K.tf.float32) * 2.0 + 1.0
    # get delta
    angle_delta = K.abs(angle_t - angle)
    acc = K.tf.less_equal(K.minimum(angle_delta, max_delta - angle_delta), k)
    acc = K.cast(acc, dtype=K.tf.float32)
    # apply ROI
    acc = acc * label_seg
    acc = K.sum(acc) / (K.sum(label_seg) + K.epsilon())
    return acc
def pairwise_and(a, b):
    column = K.expand_dims(a, 2)
    row = K.expand_dims(b, 1)
    return K.minimum(column, row)
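A hypothetical usage sketch for the pairwise_and helper above (the inputs a and b are illustrative, assuming TensorFlow 2.x): expanding a along axis 2 and b along axis 1 lets K.minimum broadcast into a (batch, len_a, len_b) table of pairwise minimums, which acts as a fuzzy AND when the inputs lie in [0, 1].

from tensorflow.keras import backend as K

a = K.constant([[0.2, 0.9]])        # shape (1, 2)
b = K.constant([[0.5, 0.1, 0.7]])   # shape (1, 3)

# Broadcasting (1, 2, 1) against (1, 1, 3) gives pairwise minimums of shape (1, 2, 3).
print(K.eval(pairwise_and(a, b)).shape)  # (1, 2, 3)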
def my_logloss(act, pred):
    epsilon = 1e-15
    pred = K.maximum(epsilon, pred)
    pred = K.minimum(1 - epsilon, pred)
    ll = K.sum(act * K.log(pred) + (1 - act) * K.log(1 - pred))
    ll = ll * -1.0 / K.shape(act)[0]
    return ll
def logloss(act, pred):
    '''
    Compute the log loss between ground-truth labels and predicted probabilities.
    :param act: array of ground-truth labels
    :param pred: array of predicted probabilities
    :return: log loss value
    '''
    epsilon = 1e-15
    pred = sp.maximum(epsilon, pred)
    pred = sp.minimum(1 - epsilon, pred)
    ll = sum(act * sp.log(pred) + sp.subtract(1, act) * sp.log(sp.subtract(1, pred)))
    ll = ll * -1.0 / len(act)
    return ll
def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.initial_decay > 0:
        lr *= (1. / (1. + self.decay * self.iterations))

    shapes = [K.get_variable_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    mems = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + vs + mems

    for p, g, m, v, mem in zip(params, grads, ms, vs, mems):
        r = 1. / (1. + mem)
        m_t = (1. - r) * m + r * g
        v_t = (1. - r) * v + r * K.square(g)
        denoise = K.square(m_t) / (v_t + self.epsilon)
        p_t = p - g * K.minimum(lr, denoise) / (K.sqrt(v_t) + self.epsilon)
        mem_t = 1. + mem * (1. - denoise)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update(mem, mem_t))
        new_p = p_t

        # apply constraints
        if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
        self.updates.append(K.update(p, new_p))
    return self.updates
def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    shapes = [K.get_variable_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    mems = [K.zeros(shape) for shape in shapes]
    denoises = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + vs + mems + denoises

    for p, g, m, v, mem, denoise in zip(params, grads, ms, vs, mems, denoises):
        r = K.minimum(0.2, K.maximum(0.005, 1. / (1. + mem)))
        mem_t = 1. / r - 1.
        m_t = (1. - r) * m + r * g
        v_t = (1. - r) * v + r * K.square(g)
        denoise_t = 0.99 * denoise + 0.01 * K.square(m_t) / (v_t + self.epsilon)
        p_t = p - g * denoise_t / (K.sqrt(v_t) + self.epsilon)
        mem_t = K.maximum(0., 1. + mem_t * (1. - denoise_t))

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        self.updates.append(K.update(mem, mem_t))
        self.updates.append(K.update(denoise, denoise_t))
        new_p = p_t

        # apply constraints
        if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
        self.updates.append(K.update(p, new_p))
    return self.updates
def clip_relu(x):
    y = K.maximum(x, 0)
    return K.minimum(y, 1)
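A hypothetical usage sketch (assuming TensorFlow 2.x): clip_relu clamps its input to [0, 1], so it can be passed as a custom activation, e.g. Dense(..., activation=clip_relu).

from tensorflow.keras import backend as K

x = K.constant([-2.0, 0.3, 1.7])
# Values below 0 become 0, values above 1 become 1.
print(K.eval(clip_relu(x)))  # approximately [0.0, 0.3, 1.0]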
def cross_entropy(self, y_true, y_pred):
    y_pred /= tf.reduce_sum(y_pred, axis=-1, keep_dims=True)
    y_pred = K.maximum(K.minimum(y_pred, 1 - 1e-15), 1e-15)
    cross_entropy_loss = -K.sum(y_true * K.log(y_pred), axis=-1)
    return cross_entropy_loss
def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [K.update_add(self.iterations, 1)]

    lr = self.lr
    if self.inital_decay > 0:
        lr *= (1. / (1. + self.decay * self.iterations))

    t = self.iterations + 1
    lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

    shapes = [K.get_variable_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    f = K.variable(0)
    d = K.variable(1)
    self.weights = [self.iterations] + ms + vs + [f, d]

    cond = K.greater(t, K.variable(1))
    small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1, 1. / (self.big_K + 1))
    big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1, 1. / (self.small_k + 1))

    c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)), big_delta_t)
    f_t = c_t * f
    r_t = K.abs(f_t - f) / (K.minimum(f_t, f))
    d_t = self.beta_3 * d + (1 - self.beta_3) * r_t

    f_t = K.switch(cond, f_t, loss)
    d_t = K.switch(cond, d_t, K.variable(1.))

    self.updates.append(K.update(f, f_t))
    self.updates.append(K.update(d, d_t))

    for p, g, m, v in zip(params, grads, ms, vs):
        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
        p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)

        self.updates.append(K.update(m, m_t))
        self.updates.append(K.update(v, v_t))
        new_p = p_t

        # apply constraints
        if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
        self.updates.append(K.update(p, new_p))
    return self.updates
def compute_loss(self, y_true, y_pred):
    class_loss = self.cross_entropy(y_true[:, :, 4:], y_pred[:, :, 4:])
    """
    class_loss = K.categorical_crossentropy(y_true[:, :, 4:],
                                            y_pred[:, :, 4:])
    """
    # return K.concatenate([class_loss, class_loss_old], axis=0)
    local_loss = self.smooth_l1(y_true[:, :, :4], y_pred[:, :, :4])
    negative_mask = y_true[:, :, 4 + self.background_id]
    positive_mask = 1 - negative_mask

    # calculating the positive loss
    positive_local_losses = local_loss * positive_mask
    positive_class_losses = class_loss * positive_mask
    positive_class_loss = K.sum(positive_class_losses, axis=-1)
    positive_local_loss = K.sum(positive_local_losses, axis=-1)

    # obtaining the number of negatives in the batch
    num_positives_per_sample = K.cast(K.sum(positive_mask, -1), 'int32')
    num_negatives_per_sample = K.cast(K.sum(negative_mask, -1), 'int32')
    num_negatives_in_batch = K.sum(num_negatives_per_sample)
    num_hard_negatives = self.neg_pos_ratio * num_positives_per_sample
    num_negatives = K.minimum(num_hard_negatives, num_negatives_in_batch)

    # hard negative mining: keep only the top-k negative class losses per sample
    all_negative_class_losses = class_loss * negative_mask
    negative_class_loss = []
    for batch_arg in range(self.batch_size):
        sample_num_negatives = num_negatives[batch_arg]
        all_negative_sample_loss = all_negative_class_losses[batch_arg]
        negative_sample_losses = tf.nn.top_k(all_negative_sample_loss,
                                             k=sample_num_negatives,
                                             sorted=True)[0]
        negative_sample_loss = K.sum(negative_sample_losses)
        negative_sample_loss = K.expand_dims(negative_sample_loss, -1)
        negative_class_loss.append(negative_sample_loss)
    negative_class_loss = K.concatenate(negative_class_loss)
    # return negative_class_loss  # early return here would make the code below unreachable

    class_loss = positive_class_loss + negative_class_loss
    total_loss = class_loss + (self.alpha * positive_local_loss)
    batch_mask = K.not_equal(num_positives_per_sample, 0)
    total_loss = tf.where(batch_mask, total_loss, K.zeros_like(total_loss))
    num_positives_per_sample = tf.where(
        batch_mask, num_positives_per_sample,
        K.ones_like(num_positives_per_sample))
    num_positives_per_sample = K.cast(num_positives_per_sample, 'float32')
    total_loss = total_loss / num_positives_per_sample
    return total_loss