The following 18 code examples, extracted from open source Python projects, illustrate how to use keras.backend.set_learning_phase().
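Before the individual examples, here is a minimal sketch of the common pattern (the path 'model.h5' is a hypothetical placeholder, not taken from any of the projects below): the learning phase is set to 0 (test/inference) before the model is built or loaded, so layers such as Dropout and BatchNormalization are wired for inference.

from keras import backend as K
from keras.models import load_model

# Set the phase to 0 (test/inference) *before* building or loading the
# model, so Dropout and BatchNormalization behave in test mode.
K.set_learning_phase(0)
model = load_model('model.h5')  # hypothetical path to a saved Keras model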
def _loadTFGraph(self, sess, graph):
    """
    Loads the Keras model into memory, then uses the passed-in session to load the
    model's inference-related ops into the passed-in TensorFlow graph.

    :return: A tuple (graph, input_name, output_name) where graph is the TF graph
             corresponding to the Keras model's inference subgraph, input_name is
             the name of the Keras model's input tensor, and output_name is the
             name of the Keras model's output tensor.
    """
    keras_backend = K.backend()
    assert keras_backend == "tensorflow", \
        "Only tensorflow-backed Keras models are supported, tried to load Keras model " \
        "with backend %s." % keras_backend
    with graph.as_default():
        K.set_learning_phase(0)  # Inference phase
        model = load_model(self.getModelFile())
        out_op_name = tfx.op_name(model.output, graph)
        stripped_graph = tfx.strip_and_freeze_until([out_op_name], graph, sess,
                                                    return_graph=True)
        return stripped_graph, model.input.name, model.output.name
def executeKerasInceptionV3(image_df, uri_col="filePath"):
    """
    Apply Keras InceptionV3 Model on input DataFrame.

    :param image_df: Dataset. contains a column (uri_col) for where the image file lives.
    :param uri_col: str. name of the column indicating where each row's image file lives.
    :return: ({str => np.array[float]}, {str => (str, str, float)}).
             image file uri to prediction probability array,
             image file uri to top K predictions (class id, class description, probability).
    """
    K.set_learning_phase(0)
    model = InceptionV3(weights="imagenet")

    values = {}
    topK = {}
    for row in image_df.select(uri_col).collect():
        raw_uri = row[uri_col]
        image = loadAndPreprocessKerasInceptionV3(raw_uri)
        values[raw_uri] = model.predict(image)
        topK[raw_uri] = decode_predictions(values[raw_uri], top=5)[0]
    return values, topK
def get_deploy_tensors(self, num_gpus=1):
    K.set_learning_phase(False)
    images_T = tf.placeholder(tf.float32, (None, self.image_size[1], self.image_size[0], 3))
    inputs_list = split_batch(images_T, num_gpus)
    output_list = list()
    for i, inputs in enumerate(inputs_list):
        with tf.device('/gpu:{}'.format(i)):
            mbox_logit, mbox_loc = self.model(inputs)
            output_list.append([tf.nn.softmax(mbox_logit, dim=2), mbox_loc])
    mbox_conf_T, mbox_loc_T = concat_batch(output_list)
    return images_T, mbox_conf_T, mbox_loc_T
def get_saliency(image, model):
    """Returns a saliency map with same shape as image."""
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss, model.input)[0])
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input], [loss, saliency])
    outputs, saliency = fetch_saliency([image])
    K.set_learning_phase(True)
    return saliency
def get_gradcam(image, model, layer_name):
    # remove dropout/noise layers
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(model.output)
    # gradients of prediction wrt the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input], [heatmap])
    return fetch_heatmap([image])[0]
def get_saliency(image, model):
    """Returns a saliency map with same shape as image."""
    K.set_learning_phase(0)
    K._LEARNING_PHASE = tf.constant(0)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    loss += K.sum(K.square(model.output))
    grads = K.abs(K.gradients(loss, model.input)[0])
    saliency = K.max(grads, axis=3)
    fetch_saliency = K.function([model.input, K.learning_phase()], [loss, saliency])
    outputs, saliency = fetch_saliency([image, 0])
    K.set_learning_phase(True)
    return saliency
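Note how this variant differs from the earlier get_saliency example: in addition to calling K.set_learning_phase(0), it passes K.learning_phase() as an explicit input to K.function and feeds 0 at call time, fixing the phase per call rather than relying only on the global setting.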
def main():
    K.set_learning_phase(1)
    model = load_model()
    layer_dict = get_layer_dict(model)
    for layer_name in layer_dict:
        plot_hidden_layer_activation(model, layer_dict[layer_name])
def getModelData(self, featurize):
    sess = tf.Session()
    with sess.as_default():
        K.set_learning_phase(0)
        inputImage = imageInputPlaceholder(nChannels=3)
        preprocessed = self.preprocess(inputImage)
        model = self.model(preprocessed, featurize)
    return dict(inputTensorName=inputImage.name,
                outputTensorName=model.output.name,
                session=sess,
                inputTensorSize=self.inputShape(),
                outputMode="vector")
def _fromKerasModelFile(cls, file_path):
    """
    Load a Keras model from a file path into a `GraphFunction`.

    :param file_path: the (HDF5) file path
    """
    assert file_path.endswith('.h5'), \
        'Keras model must be specified as HDF5 file'
    with IsolatedSession(using_keras=True) as issn:
        K.set_learning_phase(0)  # Testing phase
        model = load_model(file_path)
        gfn = issn.asGraphFunction(model.inputs, model.outputs)
    return gfn
def test_prediction_vs_tensorflow_inceptionV3(self):
    output_col = "prediction"
    image_df = image_utils.getSampleImageDF()

    # An example of how a pre-trained keras model can be used with TFImageTransformer
    with KSessionWrap() as (sess, g):
        with g.as_default():
            K.set_learning_phase(0)  # this is important but it's on the user to call it.
            # nChannels needed for input_tensor in the InceptionV3 call below
            image_string = utils.imageInputPlaceholder(nChannels=3)
            resized_images = tf.image.resize_images(image_string, InceptionV3Constants.INPUT_SHAPE)
            # keras expects array in RGB order, we get it from image schema in BGR => need to flip
            preprocessed = preprocess_input(imageIO._reverseChannels(resized_images))
            model = InceptionV3(input_tensor=preprocessed, weights="imagenet")
            graph = tfx.strip_and_freeze_until([model.output], g, sess, return_graph=True)
    transformer = TFImageTransformer(channelOrder='BGR', inputCol="image", outputCol=output_col,
                                     graph=graph, inputTensor=image_string,
                                     outputTensor=model.output, outputMode="vector")
    transformed_df = transformer.transform(image_df.limit(10))
    self.assertDfHasCols(transformed_df, [output_col])
    collected = transformed_df.collect()
    transformer_values, transformer_topK = self.transformOutputToComparables(
        collected, output_col, lambda row: row['image']['origin'])

    tf_values, tf_topK = self._executeTensorflow(graph, image_string.name,
                                                 model.output.name, image_df)
    self.compareClassSets(tf_topK, transformer_topK)
    self.compareClassOrderings(tf_topK, transformer_topK)
    self.compareArrays(tf_values, transformer_values)
def test_keras_consistency(self):
    """ Exported model in Keras should get same result as original """
    img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

    def keras_load_and_preproc(fpath):
        img = load_img(fpath, target_size=(299, 299))
        img_arr = img_to_array(img)
        img_iv3_input = iv3.preprocess_input(img_arr)
        return np.expand_dims(img_iv3_input, axis=0)

    imgs_iv3_input = np.vstack([keras_load_and_preproc(fp) for fp in img_fpaths])

    model_ref = InceptionV3(weights="imagenet")
    preds_ref = model_ref.predict(imgs_iv3_input)

    with IsolatedSession(using_keras=True) as issn:
        K.set_learning_phase(0)
        model = InceptionV3(weights="imagenet")
        gfn = issn.asGraphFunction(model.inputs, model.outputs)

    with IsolatedSession(using_keras=True) as issn:
        K.set_learning_phase(0)
        feeds, fetches = issn.importGraphFunction(gfn, prefix="InceptionV3")
        preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_iv3_input})

        self.assertTrue(np.all(preds_tgt == preds_ref))
def main(args):
    assert args.dataset in ['mnist', 'cifar', 'svhn'], \
        "Dataset parameter must be either 'mnist', 'cifar' or 'svhn'"
    assert args.attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw', 'all'], \
        "Attack parameter must be either 'fgsm', 'bim-a', 'bim-b', " \
        "'jsma' or 'cw'"
    assert os.path.isfile('../data/model_%s.h5' % args.dataset), \
        'model file not found... must first train model using train_model.py.'
    print('Dataset: %s. Attack: %s' % (args.dataset, args.attack))
    # Create TF session, set it as Keras backend
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(0)
    model = load_model('../data/model_%s.h5' % args.dataset)
    _, _, X_test, Y_test = get_data(args.dataset)
    _, acc = model.evaluate(X_test, Y_test, batch_size=args.batch_size, verbose=0)
    print("Accuracy on the test set: %0.2f%%" % (100 * acc))
    if args.attack == 'all':
        # Cycle through all attacks
        for attack in ['fgsm', 'bim-a', 'bim-b', 'jsma', 'cw']:
            craft_one_type(sess, model, X_test, Y_test, args.dataset, attack,
                           args.batch_size)
    else:
        # Craft one specific attack type
        craft_one_type(sess, model, X_test, Y_test, args.dataset, args.attack,
                       args.batch_size)
    print('Adversarial samples crafted and saved to data/ subfolder.')
    sess.close()
def __init__(self, input_shape, classes, model_save_filepath):
    self.model_save_filepath = model_save_filepath
    self.neptune_organizer = None

    self.old_session = K.get_session()
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(1)

    face_input = Input(batch_shape=(None,) + input_shape)
    pretrained_model = VGG16(input_tensor=face_input, weights='imagenet',
                             include_top=False)
    x = pretrained_model.get_layer('block4_pool').output
    x = Flatten(name='flatten')(x)
    x = Dense(256, activation='relu', name='fc1')(x)
    x = Dense(256, activation='relu', name='fc2')(x)
    output = Dense(classes, activation='softmax', name='predictions')(x)

    self.facenet = Model(face_input, output)
    self.facenet.compile(optimizer='adam', loss='categorical_crossentropy',
                         metrics=['accuracy'])
    self.facenet.summary()

    self.datagen = ImageDataGenerator(rotation_range=5, horizontal_flip=False,
                                      vertical_flip=True)
def get_train_tensors(self, optimizer, num_gpus=1):
    K.set_learning_phase(True)
    regularization_loss_list = list()
    for l in self.model.layers:
        if hasattr(l, 'losses'):
            regularization_loss_list.extend(l.losses)
    regularization_loss = tf.add_n(regularization_loss_list)
    images_T = tf.placeholder(tf.float32, (None, self.image_size[1], self.image_size[0], 3))
    mbox_conf_target_T = tf.placeholder(tf.float32, (None, len(self.prior_boxes)))
    mbox_loc_target_T = tf.placeholder(tf.float32, (None, len(self.prior_boxes), 4))
    inputs_list = split_batch([images_T, mbox_conf_target_T, mbox_loc_target_T], num_gpus)
    loss_list = list()
    grads_list = list()
    operation_list = list()
    for i, inputs in enumerate(inputs_list):
        with tf.device('/gpu:{}'.format(i)):
            mbox_logit, mbox_loc = self.model(inputs[0])
            mbox_loss = multibox_loss([mbox_logit, mbox_loc, inputs[1], inputs[2]],
                                      **self.mbox_loss_params)
            loss = (tf.reduce_mean(mbox_loss) + regularization_loss) / num_gpus
            grads = optimizer.compute_gradients(loss)
            update_ops = self.model.get_updates_for(inputs[0])
            loss_list.append(loss)
            grads_list.append(grads)
            operation_list.extend(update_ops)
    with tf.device('/cpu:0'):
        operation_list.append(optimizer.apply_gradients(sum_gradients(grads_list)))
        loss_T = tf.add_n(loss_list)
        optimize_O = tf.group(*operation_list)
    return images_T, mbox_conf_target_T, mbox_loc_target_T, loss_T, optimize_O
def make_model(x_shape, batch_size=128, num_classes=10):
    y = tf.placeholder(dtype=tf.int32, shape=(batch_size,))  # Input(dtype=tf.int32, shape=y_shape)
    K.set_learning_phase(1)
    img_input = Input(batch_shape=tuple([batch_size] + list(x_shape)))
    bn_axis = 3
    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)

    x = conv_block(x, 3, [16, 16, 16], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [16, 16, 16], stage=2, block='b')
    x = identity_block(x, 3, [16, 16, 16], stage=2, block='c')
    x = identity_block(x, 3, [16, 16, 16], stage=3, block='d')

    x = conv_block(x, 3, [32, 32, 32], stage=3, block='a', strides=(2, 2))
    x = identity_block(x, 3, [32, 32, 32], stage=3, block='b')
    x = identity_block(x, 3, [32, 32, 32], stage=3, block='c')
    x = identity_block(x, 3, [32, 32, 32], stage=3, block='d')

    x = conv_block(x, 3, [64, 64, 64], stage=3, block='a', strides=(2, 2))
    x = identity_block(x, 3, [64, 64, 64], stage=3, block='b')
    x = identity_block(x, 3, [64, 64, 64], stage=3, block='c')
    x = identity_block(x, 3, [64, 64, 64], stage=3, block='d')

    x = AveragePooling2D((8, 8), name='avg_pool')(x)
    x = Flatten()(x)
    x = Dense(num_classes, activation='softmax', name='fc1000')(x)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=x, labels=y))
    return img_input, y, loss
def visualize_features(model, problem_name):
    out_path = 'output/figures/{}/features/'
    im_size = (52, 52)
    K.set_learning_phase(0)
    if model is None:
        model = ModelLipnet4(verbose=True, compile_on_build=False, include_top=True)
        model.build_model((1, im_size[0], im_size[1]), 3)
        model.restore('/home/sergii/Documents/Thesis/lipnet/output/models/{}_model_lipnet6.h5'
                      .format(problem_name))
    vis = VisualizerFeatures()
    vis.visualize_model(model.model, out_path.format(problem_name), im_size)
def convertGraph(modelPath, outdir, numoutputs, prefix, name):
    '''
    Converts an HDF5 file to a .pb file for use with TensorFlow.

    Args:
        modelPath (str): path to the .h5 file
        outdir (str): path to the output directory
        numoutputs (int): number of output nodes to alias
        prefix (str): the prefix of the output aliasing
        name (str): name of the output .pb file
    Returns:
        None
    '''
    # NOTE: If using Python > 3.2, this could be replaced with os.makedirs(name, exist_ok=True)
    if not os.path.isdir(outdir):
        os.mkdir(outdir)

    K.set_learning_phase(0)
    net_model = load_model(modelPath)

    # Alias the outputs in the model - this sometimes makes them easier to access in TF
    pred = [None] * numoutputs
    pred_node_names = [None] * numoutputs
    for i in range(numoutputs):
        pred_node_names[i] = prefix + '_' + str(i)
        pred[i] = tf.identity(net_model.output[i], name=pred_node_names[i])
    print('Output nodes names are: ', pred_node_names)

    sess = K.get_session()

    # Write the graph in human-readable form
    f = 'graph_def_for_reference.pb.ascii'
    tf.train.write_graph(sess.graph.as_graph_def(), outdir, f, as_text=True)
    print('Saved the graph definition in ascii format at: ', osp.join(outdir, f))

    # Write the graph in binary .pb file
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(),
                                                               pred_node_names)
    graph_io.write_graph(constant_graph, outdir, name, as_text=False)
    print('Saved the constant graph (ready for inference) at: ', osp.join(outdir, name))
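A hypothetical invocation of the converter above (every argument here is illustrative, not taken from the source project): a model saved at 'model.h5' is frozen into a single-output constant graph.

# All paths and names below are made-up placeholders.
convertGraph('model.h5', './output', 1, 'k2tfout', 'constant_graph.pb')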
def load(self, data_dir):
    """Load graph and weight data.

    Args:
        data_dir (:obj:`str`): location of Keras checkpoint (`.hdf5`) files
            and model (in `.json`) structure. The default behavior is to take
            the latest of each, by OS timestamp.
    """
    # for tensorflow compatibility
    K.set_learning_phase(0)

    # find newest ckpt and graph files
    try:
        latest_ckpt = max(glob.iglob(os.path.join(data_dir, '*.h*5')),
                          key=os.path.getctime)
        latest_ckpt_name = os.path.basename(latest_ckpt)
        latest_ckpt_time = str(
            datetime.fromtimestamp(os.path.getmtime(latest_ckpt)))
    except ValueError:
        raise FileNotFoundError('No checkpoint (.hdf5 or .h5) files '
                                'available at {}'.format(data_dir))
    try:
        latest_json = max(glob.iglob(os.path.join(data_dir, '*.json')),
                          key=os.path.getctime)
        with open(latest_json, 'r') as f:
            model_json = json.loads(f.read())
            self._model = model_from_json(model_json)
        self._model.load_weights(latest_ckpt)
    except ValueError:
        try:
            self._model = load_model(latest_ckpt)
        except ValueError:
            raise FileNotFoundError('The (.hdf5 or .h5) files available at '
                                    '{} don\'t have the model'
                                    ' architecture.'
                                    .format(latest_ckpt))

    self._sess = K.get_session()
    self._tf_predict_var = self._model.outputs[0]
    self._tf_input_var = self._model.inputs[0]
    self._model_name = type(self).__name__
    self._latest_ckpt_name = latest_ckpt_name
    self._latest_ckpt_time = latest_ckpt_time