The following 28 code examples, extracted from open-source Python projects, illustrate how to use keras.backend.one_hot().
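Before the extracted examples, a minimal sketch of what K.one_hot does may help: it takes an integer index tensor and appends one axis of size num_classes holding the one-hot encoding. This sketch assumes Keras 2 with the TensorFlow backend (in Keras 1 the second argument was named nb_classes, which is why both spellings appear in the examples below):

from keras import backend as K

indices = K.constant([[0, 2, 1]], dtype='int32')  # shape (1, 3)
one_hot = K.one_hot(indices, 3)                   # shape (1, 3, 3)
print(K.eval(one_hot))
# [[[1. 0. 0.]
#   [0. 0. 1.]
#   [0. 1. 0.]]]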
def mean_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    correct_pixels_per_class = K.sum(equal_entries, axis=1)
    n_pixels_per_class = K.sum(y_true_reshaped, axis=1)

    acc = correct_pixels_per_class / n_pixels_per_class
    acc_mask = tf.is_finite(acc)
    acc_masked = tf.boolean_mask(acc, acc_mask)

    return K.mean(acc_masked)
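The tf.is_finite mask above exists because classes that never occur in the batch produce a 0/0 division, i.e. NaN, which would otherwise poison the mean. A tiny TF 1.x-style sketch of that masking step (the values are made up):

import tensorflow as tf

acc = tf.constant([1.0, float('nan'), 0.5])  # per-class accuracy, one class absent
masked = tf.boolean_mask(acc, tf.is_finite(acc))
with tf.Session() as sess:
    print(sess.run(masked))  # [1.  0.5]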
def generate_gpu(configs, **kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size

    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base, 1])
        states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
        states = K.squeeze(states, -1)
        return Model(configs, wrap(configs, states))

    return preprocess(batch_swirl(build().predict(configs, **kwargs)))
def generate_gpu2(configs, **kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size

    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base, 1])
        states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
        states = K.squeeze(states, -1)
        states = tensor_swirl(states, radius=dim + 2 * pad * relative_swirl_radius, **swirl_args)
        return Model(configs, wrap(configs, states))

    return preprocess(build().predict(configs, **kwargs))
def generate_gpu(configs, **kwargs):
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size

    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base])
        return Model(configs, wrap(configs, states))

    return build().predict(np.array(configs), **kwargs)
def make_model(state_shape, n_actions):
    in_t = Input(shape=(HISTORY_STEPS,) + state_shape, name='input')
    action_t = Input(shape=(1,), dtype='int32', name='action')
    advantage_t = Input(shape=(1,), name='advantage')

    fl_t = Flatten(name='flat')(in_t)
    l1_t = Dense(SIMPLE_L1_SIZE, activation='relu', name='l1')(fl_t)
    l2_t = Dense(SIMPLE_L2_SIZE, activation='relu', name='l2')(l1_t)
    policy_t = Dense(n_actions, name='policy', activation='softmax')(l2_t)

    def loss_func(args):
        p_t, act_t, adv_t = args
        oh_t = K.one_hot(act_t, n_actions)
        oh_t = K.squeeze(oh_t, 1)
        p_oh_t = K.log(1e-6 + K.sum(oh_t * p_t, axis=-1, keepdims=True))
        res_t = adv_t * p_oh_t
        return -res_t

    loss_t = Lambda(loss_func, output_shape=(1,), name='loss')([policy_t, action_t, advantage_t])
    return Model(input=[in_t, action_t, advantage_t], output=[policy_t, loss_t])
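The loss_func above uses a pattern that recurs in several examples on this page: one-hot encode the taken action, then sum against the policy output to select a single probability per sample. A standalone sketch of just that selection step (the names and numbers are illustrative, not from the example's project):

from keras import backend as K

probs = K.constant([[0.1, 0.7, 0.2]])       # policy output, shape (1, 3)
action = K.constant([[1]], dtype='int32')   # taken action, shape (1, 1)
oh = K.squeeze(K.one_hot(action, 3), 1)     # (1, 1, 3) -> (1, 3)
picked = K.sum(oh * probs, axis=-1, keepdims=True)
print(K.eval(picked))                       # [[0.7]]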
def mean_IoU(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    equal_entries = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32') * y_true_reshaped

    intersection = K.sum(equal_entries, axis=1)
    union_per_class = K.sum(y_true_reshaped, axis=1) + K.sum(y_pred_reshaped, axis=1)

    iou = intersection / (union_per_class - intersection)
    iou_mask = tf.is_finite(iou)
    iou_masked = tf.boolean_mask(iou, iou_mask)

    return K.mean(iou_masked)
def modelSigmoid(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')

    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputB)

    net = netSigmoid(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)

    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model([inputA, inputB], predictions)
    return model
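This example and the next four all wrap K.one_hot in a Lambda layer so that integer-encoded sequences are one-hot encoded inside the graph instead of in the input pipeline. A minimal, self-contained sketch of just that pattern (Keras 2 assumed; the 64/20 sizes are arbitrary):

from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

seq_len, vocab = 64, 20
ints = Input(shape=(seq_len,), dtype='int32')
ohe = Lambda(K.one_hot, arguments={'num_classes': vocab},
             output_shape=(seq_len, vocab))(ints)  # (batch, 64) -> (batch, 64, 20)
print(Model(ints, ohe).output_shape)               # (None, 64, 20)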
def modelC256P3C256P3C256P3f128_conc_f128(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')

    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputB)

    net = netC256P3C256P3C256P3f128(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)

    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)

    # Dense
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model([inputA, inputB], predictions)
    return model
def modelC256P3C256P3C256P3f128_conc(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')

    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputB)

    net = netC256P3C256P3C256P3f128(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)

    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model([inputA, inputB], predictions)
    return model
def modelC256P3C256P3f32_conc_f64(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')

    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputB)

    net = netC256P3C256P3f32(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)

    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model([inputA, inputB], predictions)
    return model
def modelC256P3C256P3f64_conc_f64(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')

    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={'num_classes': inputDim},
                       output_shape=(inputLength, inputDim))(inputB)

    net = netC256P3C256P3f64(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)

    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)

    model = Model([inputA, inputB], predictions)
    return model

# To encode questions into char indices
def loss_function(self):
    if self.learn_mode == 'join':
        def loss(y_true, y_pred):
            assert self.inbound_nodes, 'CRF has not connected to any layer.'
            assert not self.outbound_nodes, 'When learn_mode="join", CRF must be the last layer.'
            if self.sparse_target:
                y_true = K.one_hot(K.cast(y_true[:, :, 0], 'int32'), self.units)
            X = self.inbound_nodes[0].input_tensors[0]
            mask = self.inbound_nodes[0].input_masks[0]
            nloglik = self.get_negative_log_likelihood(y_true, X, mask)
            return nloglik
        return loss
    else:
        if self.sparse_target:
            return sparse_categorical_crossentropy
        else:
            return categorical_crossentropy
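With sparse_target set, the loss above converts integer tags of shape (batch, timesteps, 1) to one-hot vectors on the fly. That conversion in isolation (a sketch; 3 stands in for self.units, the number of tags):

from keras import backend as K

y_true = K.constant([[[2.], [0.], [1.]]])            # (batch, timesteps, 1)
oh = K.one_hot(K.cast(y_true[:, :, 0], 'int32'), 3)  # (batch, timesteps, units)
print(K.eval(oh))
# [[[0. 0. 1.]
#   [1. 0. 0.]
#   [0. 1. 0.]]]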
def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1] + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean

# Softmax cross-entropy loss function for coco segmentation
# and models which expect but do not apply sigmoid on each entry
# tensorflow only
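A loss with this signature plugs straight into model.compile. A hypothetical usage sketch (build_segnet stands in for whatever segmentation model is actually used; TF 1.x-era Keras is assumed, since tf.to_int32 was removed in TF 2):

model = build_segnet(num_classes=21)  # hypothetical model builder
model.compile(optimizer='adam',
              loss=softmax_sparse_crossentropy_ignoring_last_label)
# y_true is integer labels of shape (batch, H, W, 1); pixels labelled 21
# (one past the last real class) fall into the dropped channel and are ignored.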
def create_maxatt_matching_layer(self, input_dim_a, input_dim_b):
    """Create a max-attentive-matching layer of a model."""
    inp_a = Input(shape=(input_dim_a, self.hidden_dim,))
    inp_b = Input(shape=(input_dim_b, self.hidden_dim,))

    W = []
    for i in range(self.perspective_num):
        wi = K.random_uniform_variable((1, self.hidden_dim), -1.0, 1.0,
                                       seed=self.seed if self.seed is not None else 243)
        W.append(wi)

    outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(inp_a)
    outp_b = Lambda(lambda x: K.l2_normalize(x, -1))(inp_b)
    outp_b = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_b)
    alpha = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_b, outp_a])
    alpha = Lambda(lambda x: K.one_hot(K.argmax(x, 1), self.max_sequence_length))(alpha)
    hmax = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([alpha, outp_b])

    m = []
    for i in range(self.perspective_num):
        outp_a = Lambda(lambda x: x * W[i])(inp_a)
        outp_hmax = Lambda(lambda x: x * W[i])(hmax)
        outp_a = Lambda(lambda x: K.l2_normalize(x, -1))(outp_a)
        outp_hmax = Lambda(lambda x: K.l2_normalize(x, -1))(outp_hmax)
        outp_hmax = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 1)))(outp_hmax)
        outp = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=[1, 2]))([outp_hmax, outp_a])
        val = np.eye(self.max_sequence_length)
        kcon = K.constant(value=val, dtype='float32')
        outp = Lambda(lambda x: K.sum(x * kcon, -1, keepdims=True))(outp)
        m.append(outp)

    if self.perspective_num > 1:
        persp = Lambda(lambda x: K.concatenate(x, 2))(m)
    else:
        persp = m
    model = Model(inputs=[inp_a, inp_b], outputs=persp)
    return model
def generate(configs, width, height, **kwargs):
    assert width * height <= 9
    load(width, height)
    from keras.layers import Input, Reshape
    from keras.models import Model
    from keras import backend as K
    import tensorflow as tf

    def build():
        base = setting['base']
        P = len(setting['panels'])
        configs = Input(shape=(P,))
        configs_one_hot = K.one_hot(K.cast(configs, 'int32'), width * height)
        matches = K.permute_dimensions(configs_one_hot, [0, 2, 1])
        matches = K.reshape(matches, [-1, P])
        panels = K.variable(setting['panels'])
        panels = K.reshape(panels, [P, base * base])
        states = tf.matmul(matches, panels)
        states = K.reshape(states, [-1, height, width, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, height * base, width * base])
        return Model(configs, wrap(configs, states))

    model = build()
    return model.predict(configs, **kwargs)
def path_energy0(y, x, U, mask=None):
    '''Path energy without boundary potential handling.'''
    n_classes = K.shape(x)[2]
    y_one_hot = K.one_hot(y, n_classes)

    # Tag path energy
    energy = K.sum(x * y_one_hot, 2)
    energy = K.sum(energy, 1)

    # Transition energy
    y_t = y[:, :-1]
    y_tp1 = y[:, 1:]
    U_flat = K.reshape(U, [-1])
    # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat:
    flat_indices = y_t * n_classes + y_tp1
    U_y_t_tp1 = K.gather(U_flat, flat_indices)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        y_t_mask = mask[:, :-1]
        y_tp1_mask = mask[:, 1:]
        U_y_t_tp1 *= y_t_mask * y_tp1_mask

    energy += K.sum(U_y_t_tp1, axis=1)

    return energy
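The flat-index trick above is plain row-major indexing: gathering y_t * n_classes + y_tp1 from the flattened transition matrix U is equivalent to U[y_t, y_tp1]. A small numpy sketch of the equivalence (values are made up):

import numpy as np

n_classes = 3
U = np.arange(9.0).reshape(3, 3)                # transition energies U[i, j]
y_t, y_tp1 = np.array([0, 2]), np.array([1, 2])
flat = U.reshape(-1)[y_t * n_classes + y_tp1]   # row-major gather
assert np.allclose(flat, U[y_t, y_tp1])         # both give [1., 8.]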
def call(self, x, mask=None):
    y_pred = viterbi_decode(x, self.U, self.b_start, self.b_end, mask)
    nb_classes = self.input_spec[0].shape[2]
    y_pred_one_hot = K.one_hot(y_pred, nb_classes)
    return K.in_train_phase(x, y_pred_one_hot)
def create_policy_loss(policy_t, value_t, n_actions):
    """
    Policy loss
    :param policy_t: policy tensor from prediction part
    :param value_t: value tensor from prediction part
    :param n_actions: count of actions in space
    :return: action_t, reward_t, policy_loss_t
    """
    action_t = Input(batch_shape=(None, 1), name='action', dtype='int32')
    reward_t = Input(batch_shape=(None, 1), name="reward")

    def policy_loss_func(args):
        p_t, v_t, act_t, rew_t = args
        log_p_t = tf.nn.log_softmax(p_t)
        oh_t = K.one_hot(act_t, n_actions)
        oh_t = K.squeeze(oh_t, 1)
        p_oh_t = K.sum(log_p_t * oh_t, axis=-1, keepdims=True)
        adv_t = (rew_t - K.stop_gradient(v_t))
        tf.summary.scalar("advantage_mean", K.mean(adv_t))
        tf.summary.scalar("advantage_rms", K.sqrt(K.mean(K.square(adv_t))))
        res_t = -adv_t * p_oh_t
        tf.summary.scalar("loss_policy_mean", K.mean(res_t))
        tf.summary.scalar("loss_policy_rms", K.sqrt(K.mean(K.square(res_t))))
        return res_t

    loss_args = [policy_t, value_t, action_t, reward_t]
    policy_loss_t = Lambda(policy_loss_func, output_shape=(1,), name='policy_loss')(loss_args)
    tf.summary.scalar("value_mean", K.mean(value_t))
    tf.summary.scalar("reward_mean", K.mean(reward_t))
    return action_t, reward_t, policy_loss_t
def pixel_acc(y_true, y_pred):
    s = K.shape(y_true)

    # reshape such that w and h dim are multiplied together
    y_true_reshaped = K.reshape(y_true, tf.stack([-1, s[1] * s[2], s[-1]]))
    y_pred_reshaped = K.reshape(y_pred, tf.stack([-1, s[1] * s[2], s[-1]]))

    # correctly classified
    clf_pred = K.one_hot(K.argmax(y_pred_reshaped), nb_classes=s[-1])
    correct_pixels_per_class = K.cast(K.equal(clf_pred, y_true_reshaped), dtype='float32')

    return K.sum(correct_pixels_per_class) / K.cast(K.prod(s), dtype='float32')
def path_energy0(y, x, U, mask=None): """Path energy without boundary potential handling.""" n_classes = K.shape(x)[2] y_one_hot = K.one_hot(y, n_classes) # Tag path energy energy = K.sum(x * y_one_hot, 2) energy = K.sum(energy, 1) # Transition energy y_t = y[:, :-1] y_tp1 = y[:, 1:] U_flat = K.reshape(U, [-1]) # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat: flat_indices = y_t * n_classes + y_tp1 U_y_t_tp1 = K.gather(U_flat, flat_indices) if mask is not None: mask = K.cast(mask, K.floatx()) y_t_mask = mask[:, :-1] y_tp1_mask = mask[:, 1:] U_y_t_tp1 *= y_t_mask * y_tp1_mask energy += K.sum(U_y_t_tp1, axis=1) return energy
def viterbi_decoding(self, X, mask=None):
    input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
    if self.use_boundary:
        input_energy = self.add_boundary_energy(input_energy, mask,
                                                self.left_boundary, self.right_boundary)

    argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
    argmin_tables = K.cast(argmin_tables, 'int32')

    # backward to find best path, `initial_best_idx` can be any,
    # as all elements in the last argmin_table are the same
    argmin_tables = K.reverse(argmin_tables, 1)
    # matrix instead of vector is required by tf `K.rnn`
    initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]
    if K.backend() == 'theano':
        initial_best_idx = [K.T.unbroadcast(initial_best_idx[0], 1)]

    def gather_each_row(params, indices):
        n = K.shape(indices)[0]
        if K.backend() == 'theano':
            return params[K.T.arange(n), indices]
        else:
            indices = K.transpose(K.stack([K.tf.range(n), indices]))
            return K.tf.gather_nd(params, indices)

    def find_path(argmin_table, best_idx):
        next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
        next_best_idx = K.expand_dims(next_best_idx)
        if K.backend() == 'theano':
            next_best_idx = K.T.unbroadcast(next_best_idx, 1)
        return next_best_idx, [next_best_idx]

    _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx,
                             input_length=K.int_shape(X)[1], unroll=self.unroll)
    best_paths = K.reverse(best_paths, 1)
    best_paths = K.squeeze(best_paths, 2)

    return K.one_hot(best_paths, self.units)
def target_category_loss(x, category_index, nb_classes):
    return tf.multiply(x, K.one_hot([category_index], nb_classes))
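This one-liner is the class-masking step of Grad-CAM-style visualizations: multiplying the prediction vector by a one-hot of the target class zeroes out every other score. A sketch of how it is typically wired up (model is assumed to be an existing classifier; the class index is arbitrary):

from keras.layers import Lambda
from keras import backend as K

category_index, nb_classes = 281, 1000  # e.g. one ImageNet class
masked = Lambda(lambda x: target_category_loss(x, category_index, nb_classes))(model.output)
loss = K.sum(masked)                        # scalar score of the target class
grads = K.gradients(loss, model.input)[0]   # gradient w.r.t. the input image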
def sparse_accuracy_ignoring_last_label(y_true, y_pred):
    nb_classes = K.int_shape(y_pred)[-1]
    y_pred = K.reshape(y_pred, (-1, nb_classes))

    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), nb_classes + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    legal_labels = ~tf.cast(unpacked[-1], tf.bool)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    return K.sum(tf.to_float(legal_labels & K.equal(K.argmax(y_true, axis=-1),
                                                    K.argmax(y_pred, axis=-1)))) / K.sum(tf.to_float(legal_labels))

# This IOU implementation is wrong!!!
def vin_model(l_s=16, k=10, l_h=150, l_q=10, l_a=8):
    _handle_dim_ordering()

    def ext_start(inputs):
        m = inputs[0]
        s = inputs[1]
        w = K.one_hot(s[:, 0] + l_s * s[:, 1], l_s * l_s)  # (None, l_s * l_s)
        return K.transpose(K.sum(w * K.permute_dimensions(m, (1, 0, 2)), axis=2))

    map_in = Input(shape=(l_s, l_s, 2) if K.image_dim_ordering() == 'tf' else (2, l_s, l_s))
    x = Convolution2D(l_h, 3, 3, subsample=(1, 1), activation='relu', border_mode='same')(map_in)
    r = Convolution2D(1, 1, 1, subsample=(1, 1), border_mode='valid', bias=False, name='reward')(x)

    conv3 = Convolution2D(l_q, 3, 3, subsample=(1, 1), border_mode='same', bias=False)
    conv3b = Convolution2D(l_q, 3, 3, subsample=(1, 1), border_mode='same', bias=False)
    q_ini = conv3(r)
    q = q_ini
    for idx in range(k):
        v = Lambda(lambda x: K.max(x, axis=CHANNEL_AXIS, keepdims=True),
                   output_shape=(l_s, l_s, 1) if K.image_dim_ordering() == 'tf' else (1, l_s, l_s),
                   name='value{}'.format(idx + 1))(q)
        q = merge([q_ini, conv3b(v)], mode='sum')

    if K.image_dim_ordering() == 'tf':
        q = Lambda(lambda x: K.permute_dimensions(x, (0, 3, 1, 2)),
                   output_shape=(l_q, l_s, l_s))(q)
    q = Reshape(target_shape=(l_q, l_s * l_s))(q)

    s_in = Input(shape=(2,), dtype='int32')
    q_out = merge([q, s_in], mode=ext_start, output_shape=(l_q,))
    out = Dense(l_a, activation='softmax', bias=False)(q_out)

    return Model(input=[map_in, s_in], output=out)
def vin_model(l_s=16, k=10, l_h=150, l_q=10, l_a=8):
    _handle_dim_ordering()

    def ext_start(inputs):
        m = inputs[0]
        s = inputs[1]
        w = K.one_hot(s[:, 0] + l_s * s[:, 1], l_s * l_s)
        return K.transpose(K.sum(w * K.permute_dimensions(m, (1, 0, 2)), axis=2))

    map_in = Input(shape=(l_s, l_s, 2) if data_format == 'channels_last' else (2, l_s, l_s))
    x = Conv2D(l_h, (3, 3), strides=(1, 1), activation='relu', padding='same')(map_in)
    r = Conv2D(1, (1, 1), strides=(1, 1), padding='valid', use_bias=False, name='reward')(x)

    conv3 = Conv2D(l_q, (3, 3), strides=(1, 1), padding='same', use_bias=False)
    conv3b = Conv2D(l_q, (3, 3), strides=(1, 1), padding='same', use_bias=False)
    q_ini = conv3(r)
    q = q_ini
    for idx in range(k):
        v = Lambda(lambda x: K.max(x, axis=CHANNEL_AXIS, keepdims=True),
                   output_shape=(l_s, l_s, 1) if data_format == 'channels_last' else (1, l_s, l_s),
                   name='value{}'.format(idx + 1))(q)
        q = add([q_ini, conv3b(v)])

    if data_format == "channels_last":
        q = Lambda(lambda x: K.permute_dimensions(x, (0, 3, 1, 2)),
                   output_shape=(l_q, l_s, l_s))(q)
    q = Reshape(target_shape=(l_q, l_s * l_s))(q)

    s_in = Input(shape=(2,), dtype='int32')
    q_out = merge([q, s_in], mode=ext_start, output_shape=(l_q,))
    out = Dense(l_a, activation='softmax', use_bias=False)(q_out)

    return Model(inputs=[map_in, s_in], outputs=out)