The following 13 code examples, extracted from open source Python projects, illustrate how to use keras.optimizers.rmsprop().
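Before the project examples, here is a minimal, self-contained sketch of the typical call pattern. It is not taken from the projects below; it assumes a standalone Keras release (2.3 or earlier), where the lowercase rmsprop alias for the RMSprop class is still available, and the layer sizes and hyperparameters are arbitrary.

import numpy as np
from keras import optimizers
from keras.layers import Dense
from keras.models import Sequential

# build a tiny regression model (shapes chosen only for illustration)
model = Sequential()
model.add(Dense(8, input_dim=4, activation='relu'))
model.add(Dense(1))

# optimizers.rmsprop() accepts the usual RMSprop hyperparameters (lr, rho, decay, clipvalue, ...)
model.compile(loss='mse', optimizer=optimizers.rmsprop(lr=0.001), metrics=['mae'])

x = np.random.random((32, 4))
y = np.random.random((32, 1))
model.fit(x, y, epochs=1, batch_size=8, verbose=0)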
def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = objectives.mse

    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname,
                       custom_objects={'custom_opt': custom_opt,
                                       'custom_loss': custom_loss})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
def _initialize_model(self):
    input_layer = Input(shape=self.input_shape)

    tower_1 = Convolution2D(16, 1, 1, border_mode="same", activation="elu")(input_layer)
    tower_1 = Convolution2D(16, 3, 3, border_mode="same", activation="elu")(tower_1)

    tower_2 = Convolution2D(16, 1, 1, border_mode="same", activation="elu")(input_layer)
    tower_2 = Convolution2D(16, 3, 3, border_mode="same", activation="elu")(tower_2)
    tower_2 = Convolution2D(16, 3, 3, border_mode="same", activation="elu")(tower_2)

    tower_3 = MaxPooling2D((3, 3), strides=(1, 1), border_mode="same")(input_layer)
    tower_3 = Convolution2D(16, 1, 1, border_mode="same", activation="elu")(tower_3)

    merged_layer = merge([tower_1, tower_2, tower_3], mode="concat", concat_axis=1)

    output = AveragePooling2D((7, 7), strides=(8, 8))(merged_layer)
    output = Flatten()(output)
    output = Dense(self.action_count)(output)

    model = Model(input=input_layer, output=output)
    model.compile(rmsprop(lr=self.model_learning_rate, clipvalue=1), "mse")
    return model
def get_optimizer(config):
    if(config['optimizer'] == 'rmsprop'):
        opti = optimizers.rmsprop(lr=config['learning_rate'],
                                  clipvalue=config['grad_clip'],
                                  decay=config['decay_rate'])
        return opti
    elif(config['optimizer'] == 'adadelta'):
        opti = optimizers.adadelta(lr=config['learning_rate'],
                                   clipvalue=config['grad_clip'])
        return opti
    elif(config['optimizer'] == 'sgd'):
        opti = optimizers.sgd(lr=config['learning_rate'],
                              momentum=config['momentum'],
                              decay=config['learning_rate_decay'])
        return opti
    else:
        raise StandardError('optimizer name error')
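As a usage note for the dispatcher above: a hypothetical config dict (key names taken from the snippet, values invented for illustration) that selects the rmsprop branch could look like this.

config = {
    'optimizer': 'rmsprop',   # the snippet also accepts 'adadelta' or 'sgd'
    'learning_rate': 0.001,
    'grad_clip': 5.0,
    'decay_rate': 0.0,
}
opti = get_optimizer(config)  # returns optimizers.rmsprop(lr=0.001, clipvalue=5.0, decay=0.0)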
def compile_model(self):
    self.model.compile(loss=losses.mean_squared_error,
                       optimizer=optimizers.rmsprop(),
                       metrics=['accuracy'])
def compile_model(self):
    self.model.compile(loss=losses.categorical_crossentropy,
                       optimizer=optimizers.rmsprop(),
                       metrics=['accuracy'])
    self.graph = tf.get_default_graph()
def test_loading_weights_by_name():
    """ test loading model weights by name on:
        - sequential model
    """
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = objectives.mse

    # sequential model
    model = Sequential()
    model.add(Dense(2, input_dim=3, name="rick"))
    model.add(Dense(3, name="morty"))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model
    del(model)
    model = Sequential()
    model.add(Dense(2, input_dim=3, name="rick"))
    model.add(Dense(3, name="morty"))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    model.load_weights(fname, by_name=True)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
    for i in range(len(model.layers)):
        new_weights = model.layers[i].get_weights()
        for j in range(len(new_weights)):
            assert_allclose(old_weights[i][j], new_weights[j], atol=1e-05)
def test_loading_weights_by_name_2():
    """ test loading model weights by name on:
        - both sequential and functional api models
        - different architecture with shared names
    """
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = objectives.mse

    # sequential model
    model = Sequential()
    model.add(Dense(2, input_dim=3, name="rick"))
    model.add(Dense(3, name="morty"))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    old_weights = [layer.get_weights() for layer in model.layers]
    _, fname = tempfile.mkstemp('.h5')

    model.save_weights(fname)

    # delete and recreate model using Functional API
    del(model)
    data = Input(shape=(3,))
    rick = Dense(2, name="rick")(data)
    jerry = Dense(3, name="jerry")(rick)  # add 2 layers (but maintain shapes)
    jessica = Dense(2, name="jessica")(jerry)
    morty = Dense(3, name="morty")(jessica)

    model = Model(input=[data], output=[morty])
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    # load weights from first model
    model.load_weights(fname, by_name=True)
    os.remove(fname)

    out2 = model.predict(x)
    assert np.max(np.abs(out - out2)) > 1e-05

    rick = model.layers[1].get_weights()
    jerry = model.layers[2].get_weights()
    jessica = model.layers[3].get_weights()
    morty = model.layers[4].get_weights()

    assert_allclose(old_weights[0][0], rick[0], atol=1e-05)
    assert_allclose(old_weights[0][1], rick[1], atol=1e-05)
    assert_allclose(old_weights[1][0], morty[0], atol=1e-05)
    assert_allclose(old_weights[1][1], morty[1], atol=1e-05)
    assert_allclose(np.zeros_like(jerry[1]), jerry[1])  # biases init to 0
    assert_allclose(np.zeros_like(jessica[1]), jessica[1])  # biases init to 0