The following 21 code examples, extracted from open-source Python projects, illustrate how to use keras.optimizers.adam().
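Before the extracted examples, here is a minimal, self-contained sketch of the pattern they all share: build a model, instantiate the optimizer through the lowercase adam alias (present in older Keras releases, where adam = Adam; newer versions expose only Adam), and compile. The toy model shape and learning rate are illustrative assumptions, not taken from any of the projects below.

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam  # lowercase alias for Adam in older Keras

# hypothetical toy model: 10 input features, one sigmoid output
model = Sequential()
model.add(Dense(1, activation='sigmoid', input_shape=(10,)))

# adam() takes the usual hyperparameters, e.g. a custom learning rate
model.compile(optimizer=adam(lr=1e-3), loss='binary_crossentropy')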
def add_fit_args(train):
    train.add_argument('--ngpus', default=1, type=int, help='amount of gpus')
    train.add_argument('--versn', default='rn-21', type=str, help='version of net')
    train.add_argument('--begin', default=0, type=int, help='start epoch')
    train.add_argument('--batch', default=8000, type=int, help='the batch size')
    train.add_argument('--nepoh', default=30, type=int, help='amount of epoch')
    train.add_argument('--check', default=20, type=int, help='period of check in iteration')
    train.add_argument('--lrate', default=0.001, type=float, help='start learning rate')
    train.add_argument('--optim', default='adam', type=str, help='optimizer')
    train.add_argument('--patin', default=15, type=int, help='waiting for n iteration without improvement')
    train.add_argument('--losss', default='categorical_crossentropy', type=str, help='loss function')
    train.add_argument('--mtype', default=1, type=int, help='neurons on branch audio')
    train.add_argument('--wpath', default=WPATH, type=str, help='net symbol path')
    train.add_argument('--dpath', default=FAST, type=str, help='data_path')
    train.add_argument('--split', default=200000, type=int, help='data_path')
    return train
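For context, a hedged sketch of how an argument-adder like this plugs into argparse (assuming WPATH and FAST were defined as module-level constants before add_fit_args, as the defaults above require):

import argparse

parser = argparse.ArgumentParser(description='training options')
parser = add_fit_args(parser)
args = parser.parse_args(['--optim', 'adam', '--lrate', '0.0005'])
print(args.optim, args.lrate)  # -> adam 0.0005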
def build_mod5(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
def EES_train():
    EES = model_EES16()
    EES.compile(optimizer=adam(lr=0.0003), loss='mse')
    EES.summary()
    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./val.h5")
    checkpoint = ModelCheckpoint("EES_check.h5", monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=False, mode='min')
    callbacks_list = [checkpoint]
    history_callback = EES.fit(data, label, batch_size=64,
                               validation_data=(val_data, val_label),
                               callbacks=callbacks_list, shuffle=True,
                               nb_epoch=200, verbose=1)
    pandas.DataFrame(history_callback.history).to_csv("history.csv")
    EES.save_weights("EES_final.h5")
def __init__(self, scale=3, load_set=None, build_model=None, optimizer='adam', save_dir='.'):
    self.scale = scale
    self.load_set = partial(load_set, scale=scale)
    self.build_model = partial(build_model, scale=scale)
    self.optimizer = optimizer
    self.save_dir = Path(save_dir)
    self.save_dir.mkdir(parents=True, exist_ok=True)
    self.config_file = self.save_dir / 'config.yaml'
    self.model_file = self.save_dir / 'model.hdf5'
    self.train_dir = self.save_dir / 'train'
    self.train_dir.mkdir(exist_ok=True)
    self.history_file = self.train_dir / 'history.csv'
    self.weights_dir = self.train_dir / 'weights'
    self.weights_dir.mkdir(exist_ok=True)
    self.test_dir = self.save_dir / 'test'
    self.test_dir.mkdir(exist_ok=True)
def build_mod2(opt=adam()):
    n = 2 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    # x1 = fc_block1(x1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    # x2 = fc_block1(x2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    # x = fc_block1(x, n)
    x = fc_identity(x, n)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod3(opt=adam()):
    n = 2 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod7(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    # x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    # x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    # x = fc_identity(x, n)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod8(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    # x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    # x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, 4000)
    # x = fc_identity(x, n)
    # x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod4(opt=adam()):
    n = 1500
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)
    x1 = fc_identity(x1, n)
    x1 = fc_identity(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)
    x2 = fc_identity(x2, n)
    x2 = fc_identity(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_identity(x, n)
    x = fc_block1(x, 2 * n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
def build_mod9(opt=adam()):
    n = int(2.2 * 1024)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n, d=0.3)
    x1 = fc_identity(x1, n, d=0.3)
    x1 = fc_identity(x1, n, d=0.3)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n, d=0.3)
    x2 = fc_identity(x2, n, d=0.3)
    x2 = fc_identity(x2, n, d=0.3)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n, d=0.3)
    x = fc_identity(x, n, d=0.3)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
def build_mod10(opt=adam()):
    n = int(1800)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_inception(x1, n)
    x1 = fc_inception(x1, n)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_inception(x2, n)
    x2 = fc_inception(x2, n)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_inception(x, n)
    x = fc_inception(x, n)
    x = fc_block1(x, 2000)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    # model.summary()
    plot(model=model, show_shapes=True)
    return model
def build_mod12(opt=adam()):
    n = int(2 * 1024)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n, d=0.2)
    x = fc_identity(x, n, d=0.2)
    x = fc_identity(x, n, d=0.2)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    model.summary()
    # plot(model=model, show_shapes=True)
    return model
def build_mod13(opt=adam()):
    n = int(2 * 1024)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x = merge([x1, x2], mode='concat', concat_axis=1)
    x = fc_identity(x, n, d=0.2)
    x = fc_identity(x, n, d=0.2)
    x = fc_block1(x, n)
    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    model.summary()
    # plot(model=model, show_shapes=True)
    return model
def train(self, x, y, learning_rate=0.01, epochs=200):
    optimizer = optimizers.adam(lr=learning_rate, decay=1e-6)
    self._model.compile(loss="mean_squared_error", optimizer=optimizer)
    self._model.fit(x, y, batch_size=32, validation_split=0.05, epochs=epochs, verbose=1)
def model_EES(input_col, input_row):
    _input = Input(shape=(input_col, input_row, 1), name='input')
    EES = Conv2D(nb_filter=8, nb_row=3, nb_col=3, init='he_normal',
                 activation='relu', border_mode='same', bias=True)(_input)
    EES = Deconvolution2D(nb_filter=16, nb_row=14, nb_col=14,
                          output_shape=(None, input_col * 2, input_row * 2, 16),
                          subsample=(2, 2), border_mode='same',
                          init='glorot_uniform', activation='relu')(EES)
    out = Conv2D(nb_filter=1, nb_row=5, nb_col=5, init='glorot_uniform',
                 activation='relu', border_mode='same')(EES)
    model = Model(input=_input, output=out)
    # sgd = SGD(lr=0.0001, decay=0.005, momentum=0.9, nesterov=True)
    Adam = adam(lr=0.001)
    model.compile(optimizer=Adam, loss='mean_squared_error', metrics=['mean_squared_error'])
    return model
def model_EEDS(input_col, input_row):
    _input = Input(shape=(input_col, input_row, 1), name='input')
    EES = model_EES(input_col, input_row)(_input)
    EED = model_EED(input_col, input_row)(_input)
    _EEDS = merge(inputs=[EED, EES], mode='sum')
    model = Model(input=_input, output=_EEDS)
    Adam = adam(lr=0.001)
    model.compile(optimizer=Adam, loss='mean_squared_error', metrics=['mean_squared_error'])
    return model
def model_EEDS():
    _input = Input(shape=(None, None, 1), name='input')
    _EES = EES.model_EES()(_input)
    _EED = EED.model_EED()(_input)
    _EEDS = add(inputs=[_EED, _EES])
    model = Model(input=_input, output=_EEDS)
    Adam = adam(lr=0.0003)
    model.compile(optimizer=Adam, loss='mse')
    return model
def get_mod(ags):
    dst = os.path.join(ags.wpath, ags.versn)
    b_scr = -1
    if ags.optim == 'adam':
        opt = adam(ags.lrate)
    elif ags.optim == 'sgd':
        opt = sgd(ags.lrate)
    else:
        opt = adam()
    # build only the requested model with the chosen optimizer
    if ags.mtype == 0:
        model = build_mod2(opt)
        logging.info('start with model 2')
    elif ags.mtype == 1:
        model = build_mod3(opt)
        logging.info('start with model 3')
    elif ags.mtype == 2:
        model = build_mod7(opt)
        logging.info('start with model 7')
    elif ags.mtype == 3:
        model = build_mod9(opt)
        logging.info('start with model 9')
    elif ags.mtype == 4:
        model = build_mod11(opt)
        logging.info('start with model 11')
    elif ags.mtype == 5:
        model = build_mod12(opt)
        logging.info('start with model 12')
    elif ags.mtype == 6:
        model = build_mod13(opt)
        logging.info('start with model 13')
    if ags.begin == -1:
        # resume from the newest checkpoint; the best score is encoded in the file name
        fls = sorted(glob.glob(dst + '/*h5'))
        if len(fls) > 0:
            logging.info('load weights: %s' % fls[-1])
            model.load_weights(fls[-1])
            b_scr = float(os.path.basename(fls[-1]).split('_')[0])
    return model, b_scr
def __init__(self, image_shape, num_actions, frame_history_len=4, replay_buffer_size=1000000,
             training_freq=4, training_starts=5000, training_batch_size=32,
             target_update_freq=1000, reward_decay=0.99,
             exploration=LinearSchedule(5000, 0.1), log_dir="logs/"):
    """
    Double Deep Q Network

    params:
        image_shape: (height, width, n_values)
        num_actions: how many different actions we can choose
        frame_history_len: feed this number of frames as input to the deep-q network
        replay_buffer_size: size limit of the replay buffer
        training_freq: train the base q network once per training_freq steps
        training_starts: only train the q network after this number of steps
        training_batch_size: batch size for training the base q network with gradient descent
        target_update_freq: copy the base network's weights into the target network
            every this many steps
        reward_decay: decay factor (called gamma in the paper) for rewards that happen in the future
        exploration: used to generate an exploration factor (see 'epsilon-greedy' in the paper);
            when rand(0, 1) < epsilon, take a random action, otherwise take the greedy action
        log_dir: path to write tensorboard logs
    """
    super().__init__()
    self.num_actions = num_actions
    self.training_freq = training_freq
    self.training_starts = training_starts
    self.training_batch_size = training_batch_size
    self.target_update_freq = target_update_freq
    self.reward_decay = reward_decay
    self.exploration = exploration
    # use multiple frames as input to the q network
    input_shape = image_shape[:-1] + (image_shape[-1] * frame_history_len,)
    # used to choose actions
    self.base_model = q_model(input_shape, num_actions)
    self.base_model.compile(optimizer=optimizers.adam(clipnorm=10, lr=1e-4, decay=1e-6, epsilon=1e-4),
                            loss='mse')
    # used to estimate q values
    self.target_model = q_model(input_shape, num_actions)
    self.replay_buffer = ReplayBuffer(size=replay_buffer_size, frame_history_len=frame_history_len)
    # current replay buffer offset
    self.replay_buffer_idx = 0
    self.tensorboard_callback = TensorBoard(log_dir=log_dir)
    self.latest_losses = deque(maxlen=100)
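A usage sketch for the constructor above; the class name DoubleDQN and the Atari-style frame shape are illustrative assumptions, not taken from the source project.

# hypothetical instantiation; the enclosing class name is assumed
agent = DoubleDQN(image_shape=(84, 84, 1),  # one grayscale frame
                  num_actions=4,            # e.g. a 4-action game
                  frame_history_len=4)      # network input becomes (84, 84, 4) stacked frames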
def model_EED(input_col, input_row):
    _input = Input(shape=(input_col, input_row, 1), name='input')
    Feature = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                     activation='relu', border_mode='same', bias=True)(_input)
    Feature = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                     activation='relu', border_mode='same', bias=True)(Feature)
    Feature3 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                      activation='relu', border_mode='same', bias=True)(Feature)
    Feature_out = merge(inputs=[Feature, Feature3], mode='sum')
    # Upsampling
    Upsampling1 = Conv2D(nb_filter=8, nb_row=1, nb_col=1, init='glorot_uniform',
                         activation='relu', border_mode='same', bias=True)(Feature_out)
    Upsampling2 = Deconvolution2D(nb_filter=8, nb_row=14, nb_col=14,
                                  output_shape=(None, input_col * 2, input_row * 2, 8),
                                  subsample=(2, 2), border_mode='same',
                                  init='glorot_uniform', activation='relu')(Upsampling1)
    Upsampling3 = Conv2D(nb_filter=64, nb_row=1, nb_col=1, init='glorot_uniform',
                         activation='relu', border_mode='same', bias=True)(Upsampling2)
    # Multi-scale Reconstruction
    Reslayer1 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                       activation='relu', border_mode='same', bias=True)(Upsampling3)
    Reslayer2 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                       activation='relu', border_mode='same', bias=True)(Reslayer1)
    Block1 = merge(inputs=[Reslayer1, Reslayer2], mode='sum')
    Reslayer3 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                       activation='relu', border_mode='same', bias=True)(Block1)
    Reslayer4 = Conv2D(nb_filter=64, nb_row=3, nb_col=3, init='glorot_uniform',
                       activation='relu', border_mode='same', bias=True)(Reslayer3)
    Block2 = merge(inputs=[Reslayer3, Reslayer4], mode='sum')
    # parallel branches at four kernel sizes (1x1, 3x3, 5x5, 7x7)
    Multi_scale1 = Conv2D(nb_filter=16, nb_row=1, nb_col=1, init='glorot_uniform',
                          activation='relu', border_mode='same', bias=True)(Block2)
    Multi_scale2a = Conv2D(nb_filter=16, nb_row=1, nb_col=1, init='glorot_uniform',
                           activation='relu', border_mode='same', bias=True)(Multi_scale1)
    Multi_scale2b = Conv2D(nb_filter=16, nb_row=3, nb_col=3, init='glorot_uniform',
                           activation='relu', border_mode='same', bias=True)(Multi_scale1)
    Multi_scale2c = Conv2D(nb_filter=16, nb_row=5, nb_col=5, init='glorot_uniform',
                           activation='relu', border_mode='same', bias=True)(Multi_scale1)
    Multi_scale2d = Conv2D(nb_filter=16, nb_row=7, nb_col=7, init='glorot_uniform',
                           activation='relu', border_mode='same', bias=True)(Multi_scale1)
    Multi_scale2 = merge(inputs=[Multi_scale2a, Multi_scale2b, Multi_scale2c, Multi_scale2d],
                         mode='concat')
    out = Conv2D(nb_filter=1, nb_row=1, nb_col=1, init='glorot_uniform',
                 activation='relu', border_mode='same', bias=True)(Multi_scale2)
    model = Model(input=_input, output=out)
    Adam = adam(lr=0.001)
    model.compile(optimizer=Adam, loss='mean_squared_error', metrics=['mean_squared_error'])
    return model