The following are 50 code examples, drawn from open-source Python projects, that show how to use keras.layers.advanced_activations.LeakyReLU().
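Before the examples, a minimal usage sketch (assuming the keras.layers.advanced_activations import path from the title; the layer sizes are arbitrary illustration): LeakyReLU is added as its own layer after a linear transformation, with alpha setting the slope used for negative inputs.

from keras.models import Sequential
from keras.layers import Dense
from keras.layers.advanced_activations import LeakyReLU

# Toy model: LeakyReLU is a separate layer, not an activation='...' string.
model = Sequential()
model.add(Dense(64, input_dim=20))        # linear projection (sizes are arbitrary)
model.add(LeakyReLU(alpha=0.1))           # f(x) = x if x > 0 else alpha * x
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy')
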
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a
    "seed", and outputs images of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure
    # its output also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model

def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x)  # shape is 100 x 10

    # Conv Layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 250
    x = UpSampling1D(size=2)(x)  # output shape is 200 x 250

    # Conv Layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200 x 100
    x = UpSampling1D(size=2)(x)  # output shape is 400 x 100

    # Conv Layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)  # final output shape is 400 x 1

    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)
    return generator_model

def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a
    single value, representing whether the input is real or generated. Unlike
    normal GANs, the output is not sigmoid and does not represent a probability!
    Instead, the output should be as large and negative as possible for generated
    inputs and as large and positive as possible for real inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not
    be used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same',
                                input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same',
                                input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal',
                            strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal',
                            padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model

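Since this critic emits an unbounded score rather than a probability, it is normally trained with a Wasserstein loss instead of binary cross-entropy. A minimal sketch of that companion loss in the usual improved-WGAN formulation (an assumption here, not code taken from this example's project):

import keras.backend as K

def wasserstein_loss(y_true, y_pred):
    # With labels +1 for real and -1 for generated samples, minimizing the mean
    # product pushes real scores positive and generated scores negative,
    # matching the docstring above. Used as: model.compile(loss=wasserstein_loss, ...)
    return K.mean(y_true * y_pred)
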
def build_model():
    """Build the stacked-LSTM regression model."""
    model = Sequential()
    model.add(LSTM(units=Conf.LAYERS[1],
                   input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]),
                   return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)
    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model

def test_keras_export(self):
    tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                              'keras_export_test.json'), 'r')
    response = json.load(tests)
    tests.close()
    net = yaml.safe_load(json.dumps(response['net']))
    net = {'l0': net['Input'], 'l1': net['ReLU']}
    # Test 1
    net['l0']['connection']['output'].append('l1')
    inp = data(net['l0'], '', 'l0')['l0']
    temp = activation(net['l1'], [inp], 'l1')
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'Activation')
    # Test 2
    net['l1']['params']['negative_slope'] = 1
    net['l0']['connection']['output'].append('l1')
    inp = data(net['l0'], '', 'l0')['l0']
    temp = activation(net['l1'], [inp], 'l1')
    model = Model(inp, temp['l1'])
    self.assertEqual(model.layers[1].__class__.__name__, 'LeakyReLU')

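The test above expects the exporter to emit a plain Activation for a standard ReLU and to switch to LeakyReLU once negative_slope is non-zero. A hedged sketch of that mapping (export_relu is a hypothetical helper for illustration, not the project's actual activation() implementation):

from keras.layers import Activation
from keras.layers.advanced_activations import LeakyReLU

def export_relu(params, prev_tensor):
    # Hypothetical mapping: a Caffe-style ReLU with a non-zero negative_slope
    # becomes LeakyReLU(alpha=slope); otherwise a plain Activation('relu').
    slope = params.get('negative_slope', 0)
    if slope:
        return LeakyReLU(alpha=slope)(prev_tensor)
    return Activation('relu')(prev_tensor)
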
def make_dcgan_discriminator(Xk_d):
    x = Convolution2D(nb_filter=64, nb_row=5, nb_col=5, subsample=(2, 2),
                      activation=None, border_mode='same', init='glorot_uniform',
                      dim_ordering='th')(Xk_d)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)

    x = Convolution2D(nb_filter=128, nb_row=5, nb_col=5, subsample=(2, 2),
                      activation=None, border_mode='same', init='glorot_uniform',
                      dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)

    x = Flatten()(x)
    x = Dense(1024)(x)
    x = BatchNormalization(mode=2)(x)
    x = LeakyReLU(0.2)(x)

    d = Dense(1, activation=None)(x)
    return d

def make_dcgan_discriminator(Xk_d):
    x = Convolution2D(nb_filter=64, nb_row=4, nb_col=4, subsample=(2, 2),
                      activation=None, border_mode='same', init=conv2D_init,
                      dim_ordering='th')(Xk_d)
    # x = BatchNormalization(mode=2, axis=1)(x)  # <- makes things much worse!
    x = LeakyReLU(0.2)(x)

    x = Convolution2D(nb_filter=128, nb_row=4, nb_col=4, subsample=(2, 2),
                      activation=None, border_mode='same', init=conv2D_init,
                      dim_ordering='th')(x)
    x = BatchNormalization(mode=2, axis=1)(x)
    x = LeakyReLU(0.2)(x)

    x = Flatten()(x)
    x = Dense(1024, init=conv2D_init)(x)
    x = BatchNormalization(mode=2)(x)
    x = LeakyReLU(0.2)(x)

    d = Dense(1, activation=None)(x)
    return d

def build_discriminator(self):
    model = Sequential()

    model.add(Conv2D(64, kernel_size=3, strides=2,
                     input_shape=self.missing_shape, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Conv2D(256, kernel_size=3, padding="same"))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=self.missing_shape)
    validity = model(img)

    return Model(img, validity)

def build_discriminator(self):
    img_shape = (self.img_rows, self.img_cols, self.channels)

    model = Sequential()
    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=img_shape)
    validity = model(img)

    return Model(img, validity)

def build_discriminator(self):
    model = Sequential()
    model.add(Dense(512, input_dim=self.encoded_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1, activation="sigmoid"))
    model.summary()

    encoded_repr = Input(shape=(self.encoded_dim, ))
    validity = model(encoded_repr)

    return Model(encoded_repr, validity)

def build_encoder(self):
    model = Sequential()
    model.add(Flatten(input_shape=self.img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(self.latent_dim))
    model.summary()

    img = Input(shape=self.img_shape)
    z = model(img)

    return Model(img, z)

def build_discriminator(self):
    z = Input(shape=(self.latent_dim, ))
    img = Input(shape=self.img_shape)
    d_in = concatenate([z, Flatten()(img)])

    model = Dense(1024)(d_in)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    model = Dense(1024)(model)
    model = LeakyReLU(alpha=0.2)(model)
    model = Dropout(0.5)(model)
    validity = Dense(1, activation="sigmoid")(model)

    return Model([z, img], validity)

def _adversary():
    model = Sequential()
    model.add(Convolution2D(64, 5, 5, border_mode='same',
                            input_shape=(3, 32, 32), subsample=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(Convolution2D(128, 5, 5, subsample=(2, 2)))
    model.add(BatchNormalization(mode=2))
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU())
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model

def transform_model(weight_loss_pix=5e-4):
    inputs = Input(shape=(128, 128, 3))
    x1 = Convolution2D(64, 5, 5, border_mode='same')(inputs)
    x2 = LeakyReLU(alpha=0.3, name='wkcw')(x1)
    x3 = BatchNormalization()(x2)
    x4 = Convolution2D(128, 4, 4, border_mode='same', subsample=(2, 2))(x3)
    x5 = LeakyReLU(alpha=0.3)(x4)
    x6 = BatchNormalization()(x5)
    x7 = Convolution2D(256, 4, 4, border_mode='same', subsample=(2, 2))(x6)
    x8 = LeakyReLU(alpha=0.3)(x7)
    x9 = BatchNormalization()(x8)
    x10 = Deconvolution2D(128, 3, 3, output_shape=(None, 64, 64, 128),
                          border_mode='same', subsample=(2, 2))(x9)
    x11 = BatchNormalization()(x10)
    x12 = Deconvolution2D(64, 3, 3, output_shape=(None, 128, 128, 64),
                          border_mode='same', subsample=(2, 2))(x11)
    x13 = BatchNormalization()(x12)
    x14 = Deconvolution2D(3, 4, 4, output_shape=(None, 128, 128, 3),
                          border_mode='same',
                          activity_regularizer=activity_l1(weight_loss_pix))(x13)
    output = merge([inputs, x14], mode='sum')
    model = Model(input=inputs, output=output)
    return model

def seqCNN_BN(n_flow=4, seq_len=3, map_height=32, map_width=32):
    model = Sequential()
    model.add(Convolution2D(64, 3, 3,
                            input_shape=(n_flow * seq_len, map_height, map_width),
                            border_mode='same'))
    model.add(LeakyReLU(0.2))
    model.add(BatchNormalization())

    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    model.add(BatchNormalization())

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    model.add(BatchNormalization())

    model.add(Convolution2D(n_flow, 3, 3, border_mode='same'))
    model.add(Activation('tanh'))
    return model

def seqCNN_LReLU(n_flow=4, seq_len=3, map_height=32, map_width=32):
    model = Sequential()
    model.add(Convolution2D(64, 3, 3,
                            input_shape=(n_flow * seq_len, map_height, map_width),
                            border_mode='same'))
    model.add(LeakyReLU(0.2))
    # model.add(BatchNormalization())

    model.add(Convolution2D(128, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    # model.add(BatchNormalization())

    model.add(Convolution2D(64, 3, 3, border_mode='same'))
    model.add(LeakyReLU(0.2))
    # model.add(BatchNormalization())

    model.add(Convolution2D(n_flow, 3, 3, border_mode='same'))
    model.add(Activation('tanh'))
    return model

def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    L_1 = Conv2D(64, (9, 9), padding='same', activation='linear',
                 kernel_initializer='glorot_uniform')(ip)
    L_1 = LeakyReLU(alpha=0.25)(L_1)
    L_2 = L_1
    for i in range(3):
        L_2 = residual_block(L_2, 64, 3)
    L_3 = Conv2D(64, (3, 3), padding='same',
                 kernel_initializer='glorot_uniform')(L_2)
    L_3 = BatchNormalization(axis=-1)(L_3)
    L_3 = add([L_1, L_3])
    L_4 = Conv2D(128, (1, 1), padding='same',
                 kernel_initializer='glorot_uniform')(L_3)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh',
                kernel_initializer='glorot_uniform')(L_4)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error',
                       metrics=[psnr, ssim])
    return deblocking

def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    x_1 = Conv2D(64, (9, 9), padding='same', activation='linear',
                 kernel_initializer='glorot_uniform')(ip)
    x_1 = LeakyReLU(alpha=0.25)(x_1)
    x = x_1
    for i in range(5):  # or 15
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same',
               kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = add([x_1, x])
    x = upscale(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh',
                kernel_initializer='glorot_uniform')(x)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error',
                       metrics=[psnr, ssim])
    return deblocking

def create_model(img_height, img_width, img_channel):
    ip = Input(shape=(img_height, img_width, img_channel))
    x = Conv2D(64, (9, 9), padding='same', activation='linear',
               kernel_initializer='glorot_uniform')(ip)
    x = BatchNormalization(axis=-1)(x)
    x = LeakyReLU(alpha=0.25)(x)
    for i in range(5):
        x = residual_block(x, 64, 3)
    x = Conv2D(64, (3, 3), padding='same',
               kernel_initializer='glorot_uniform')(x)
    x = BatchNormalization(axis=-1)(x)
    x = Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    op = Conv2D(img_channel, (9, 9), padding='same', activation='tanh',
                kernel_initializer='glorot_uniform')(x)
    deblocking = Model(inputs=ip, outputs=op)
    optimizer = optimizers.Adam(lr=1e-4)
    deblocking.compile(optimizer=optimizer, loss='mean_squared_error',
                       metrics=[psnr, ssim])
    return deblocking
    # plot_model(deblocking, to_file='model.png', show_shapes=True, show_layer_names=True)

def createModel(self, inputs, outputs, hiddenLayers, activationType):
    model = Sequential()
    if len(hiddenLayers) == 0:
        model.add(Dense(self.output_size, input_shape=(self.input_size,),
                        init='lecun_uniform'))
        model.add(Activation("linear"))
    else:
        model.add(Dense(hiddenLayers[0], input_shape=(self.input_size,),
                        init='lecun_uniform'))
        if activationType == "LeakyReLU":
            model.add(LeakyReLU(alpha=0.01))
        else:
            model.add(Activation(activationType))

        for index in range(1, len(hiddenLayers) - 1):
            layerSize = hiddenLayers[index]
            model.add(Dense(layerSize, init='lecun_uniform'))
            if activationType == "LeakyReLU":
                model.add(LeakyReLU(alpha=0.01))
            else:
                model.add(Activation(activationType))
        model.add(Dense(self.output_size, init='lecun_uniform'))
        model.add(Activation("linear"))
    optimizer = optimizers.RMSprop(lr=1, rho=0.9, epsilon=1e-06)
    model.compile(loss="mse", optimizer=optimizer)
    return model

def get_model():
    model = Sequential()
    model.add(Dense(1024, init='normal', input_dim=460))
    model.add(LeakyReLU(0.3))
    model.add(Dropout(0.5))

    model.add(Dense(1024, init='normal'))
    model.add(LeakyReLU(0.3))
    model.add(Dropout(0.5))

    model.add(Dense(512, init='normal'))
    model.add(LeakyReLU(0.3))
    model.add(Dropout(0.5))

    model.add(Dense(1, init='normal'))
    return model

def initAgent(neurons=512, layers=1, lr=1e-3, moment=0.9, width=19, alpha=0.1):
    """Initialize agent: specify num of neurons and hidden layers"""
    model = Sequential()
    model.add(Dense(2 * width**2, init='lecun_uniform',
                    input_shape=(2 * width**2,)))
    model.add(LeakyReLU(alpha=alpha))

    for i in range(layers):
        model.add(Dense(neurons, init='lecun_uniform'))
        model.add(LeakyReLU(alpha=alpha))
        model.add(Dropout(0.2))

    model.add(Dense(width**2, init='lecun_uniform'))
    # use linear output layer to generate real-valued outputs
    model.add(Activation('linear'))

    # opt = RMSprop(lr=lr)
    opt = SGD(lr=lr, momentum=moment, decay=1e-18, nesterov=False)
    model.compile(loss='mse', optimizer=opt)
    return model

def __initial_conv_block_imagenet(input, weight_decay=5e-4):
    '''Adds an initial conv block, with batch norm and leaky relu, for the
    inception resnext.

    Args:
        input: input tensor
        weight_decay: weight decay factor

    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = Conv2D(64, (7, 7), padding='same', use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay), strides=(2, 2))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    return x

def conv_block(x0, scale):
    x = Conv2D(int(64 * scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(256 * scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x1 = Conv2D(int(256 * scale), (1, 1))(x0)
    x1 = InstanceNormalization()(x1)

    x = Add()([x, x1])
    x = LeakyReLU()(x)
    return x

def mnist_generator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)

    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale * 2, num_id=3)

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(int(1024 * scale), (1, 1))(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(1, (1, 1), activation='sigmoid')(x)

    return Model(x0, x)

def mnist_discriminator(input_shape=(28, 28, 1), scale=1/4):
    x0 = Input(input_shape)

    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(64 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = residual_block(x, scale, num_id=2)
    x = residual_block(x, scale * 2, num_id=3)

    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv2D(int(128 * scale), (3, 3), strides=(2, 2), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(1, (3, 3), strides=(2, 2), padding='same')(x)
    x = GlobalAveragePooling2D()(x)  # Flatten
    x = Activation('sigmoid')(x)

    return Model(x0, x)

def create_network(**kwargs):
    defaults = {"timesteps": 128, "data_dim": 15}
    params = defaults
    params.update(**kwargs)

    network = Sequential()
    network.add(LSTM(output_dim=16, activation='sigmoid',
                     inner_activation='hard_sigmoid',
                     input_shape=(params['timesteps'], params['data_dim'])))
    network.add(Dropout(0.15))
    network.add(Dense(1))
    # network.add(LeakyReLU(alpha=0.5))
    network.add(Activation('relu'))
    network.compile(optimizer='rmsprop', loss='binary_crossentropy',
                    metrics=['accuracy'])
    return network

def build_model(self):
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=120, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=120, output_dim=280, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=280, output_dim=100, init='uniform',
                    activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(input_dim=100, output_dim=2, init='uniform',
                    activation='softmax'))
    # model.add(Activation('softmax'))

    sgd = SGD(lr=0.015, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)

def build_model(self):
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=112, init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=112, output_dim=128, init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=128, output_dim=68, init='he_normal'))
    model.add(LeakyReLU(alpha=.00003))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))

    model.add(Dense(input_dim=68, output_dim=2, init='he_normal'))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.01, decay=1e-10, momentum=0.99, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)

def build_model(self):
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=310, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(BatchNormalization())
    model.add(Dropout(0.6))

    model.add(Dense(input_dim=310, output_dim=252, init='he_normal'))
    model.add(PReLU(init='zero'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=252, output_dim=128, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(input_dim=128, output_dim=2, init='he_normal',
                    activation='softmax'))
    # model.add(Activation('softmax'))

    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)

def build_model(self):
    model = Sequential()
    model.add(Dropout(0.1, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=62, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(Dropout(0.3))

    model.add(Dense(input_dim=62, output_dim=158, init='he_normal'))
    model.add(LeakyReLU(alpha=.001))
    model.add(Dropout(0.25))

    model.add(Dense(input_dim=158, output_dim=20, init='he_normal'))
    model.add(PReLU(init='zero'))
    model.add(Dropout(0.2))

    model.add(Dense(input_dim=20, output_dim=2, init='he_normal',
                    activation='softmax'))
    # model.add(Activation('softmax'))

    sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)

def build_model(self):
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(nn_input_dim_NN,)))
    model.add(Dense(input_dim=nn_input_dim_NN, output_dim=140, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.6))

    model.add(Dense(input_dim=140, output_dim=250, init='uniform'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=250, output_dim=90, init='uniform',
                    activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    model.add(Dense(input_dim=90, output_dim=2, init='uniform',
                    activation='softmax'))
    # model.add(Activation('softmax'))

    sgd = SGD(lr=0.013, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy', class_mode='binary')
    return KerasClassifier(nn=model, **self.params)

def build_model(self):
    model = Sequential()
    model.add(Dense(64, input_shape=nn_input_dim_NN, init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(Dropout(0.5))
    model.add(Dense(2, init='he_normal'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.1, decay=1e-5, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='binary_crossentropy',
                  metrics=["accuracy"])
    return KerasClassifier(nn=model, **self.params)

# ----- END first stage stacking model -----
# ----- Second stage stacking model -----

def build_model(self):
    model = Sequential()
    model.add(Dense(64, input_shape=nn_input_dim_NN, init='he_normal'))
    model.add(LeakyReLU(alpha=.00001))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim, init='he_normal'))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.1, decay=1e-5, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',
                  metrics=["accuracy"])
    return KerasClassifier(nn=model, **self.params)

# ----- END first stage stacking model -----
# ----- Second stage stacking model -----

def deep_mlp(self):
    """Deep Multilayer Perceptron."""
    if self._config.num_mlp_layers == 0:
        self.add(Dropout(0.5))
    else:
        for j in xrange(self._config.num_mlp_layers):
            self.add(Dense(self._config.mlp_hidden_dim))
            if self._config.mlp_activation == 'elu':
                self.add(ELU())
            elif self._config.mlp_activation == 'leaky_relu':
                self.add(LeakyReLU())
            elif self._config.mlp_activation == 'prelu':
                self.add(PReLU())
            else:
                self.add(Activation(self._config.mlp_activation))
            self.add(Dropout(0.5))

def Discriminator(image_size=64):
    L = int(image_size)

    images = Input(shape=(L, L, 3))
    x = Conv2D(64, (4, 4), strides=(2, 2), kernel_initializer=init,
               padding='same')(images)  # shape(L/2, L/2, 32)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(128, (4, 4), strides=(2, 2), kernel_initializer=init,
               padding='same')(x)  # shape(L/4, L/4, 64)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(256, (4, 4), strides=(2, 2), kernel_initializer=init,
               padding='same')(x)  # shape(L/8, L/8, 128)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(512, (4, 4), strides=(2, 2), kernel_initializer=init,
               padding='same')(x)  # shape(L/16, L/16, 256)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    outputs = Dense(1)(x)

    model = Model(inputs=images, outputs=outputs)
    model.summary()
    return model

def fc_block1(x, n=1000, d=0.5):
    x = Dense(n)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Dropout(d)(x)
    return x

def fc_identity(input_tensor, n=1000, d=0.5):
    x = fc_block1(input_tensor, n, d)
    x = Dense(int(input_tensor.shape[1]))(x)
    x = merge([x, input_tensor], mode='sum', concat_axis=1)
    x = LeakyReLU()(x)
    return x

def fc_inception(input_tensor, n=3000, d=0.5):
    br1 = Dense(n)(input_tensor)
    br1 = LeakyReLU()(br1)
    br1 = BatchNormalization()(br1)
    br1 = Dropout(d)(br1)
    br1 = Dense(int(n / 3.0))(br1)

    br2 = Dense(n)(input_tensor)
    br2 = BatchNormalization()(br2)
    br2 = ELU()(br2)
    br2 = Dropout(d)(br2)
    br2 = Dense(int(n / 3.0))(br2)

    br3 = Dense(int(n / 3.0))(input_tensor)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n / 3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)
    br3 = Dense(int(n / 3.0))(br3)
    br3 = BatchNormalization()(br3)
    br3 = PReLU()(br3)
    br3 = Dropout(d)(br3)

    x = merge([br1, br2, br3], mode='concat', concat_axis=1)
    return x

def create_critic_network(self, state_size, action_dim):
    print("Now we build the model")
    S = Input(shape=[state_size])
    A = Input(shape=[action_dim], name='action2')

    # Original Version
    w1 = Dense(HIDDEN1_UNITS)(S)
    w1 = LeakyReLU()(w1)
    h1 = Dense(HIDDEN2_UNITS)(w1)
    h1 = LeakyReLU()(h1)
    a1 = Dense(HIDDEN2_UNITS)(A)
    a1 = LeakyReLU()(a1)
    h2 = layers.add([h1, a1])
    h3 = Dense(HIDDEN2_UNITS)(h2)
    h3 = LeakyReLU()(h3)
    h3 = Dense(HIDDEN2_UNITS)(h3)
    h3 = LeakyReLU()(h3)
    h3 = Dense(HIDDEN1_UNITS)(h3)
    h3 = LeakyReLU()(h3)
    V = Dense(action_dim, activation='linear')(h3)

    model = Model(inputs=[S, A], outputs=V)
    adam = Adam(lr=self.LEARNING_RATE)
    model.compile(loss='mse', optimizer=adam)
    return model, A, S

def build_discriminator():
    # build a relatively standard conv net, with LeakyReLUs as suggested in
    # the reference paper
    cnn = Sequential()

    cnn.add(Conv2D(32, 3, padding='same', strides=2, input_shape=(1, 28, 28)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Conv2D(64, 3, padding='same', strides=1))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Conv2D(128, 3, padding='same', strides=2))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Conv2D(256, 3, padding='same', strides=1))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Flatten())

    image = Input(shape=(1, 28, 28))
    features = cnn(image)

    # first output (name=generation) is whether or not the discriminator
    # thinks the image that is being shown is fake, and the second output
    # (name=auxiliary) is the class that the discriminator thinks the image
    # belongs to.
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(10, activation='softmax', name='auxiliary')(features)

    return Model(image, [fake, aux])

def build_discriminator():
    # build a relatively standard conv net, with LeakyReLUs as suggested in
    # the reference paper
    cnn = Sequential()

    cnn.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(2, 2),
                          input_shape=(1, 28, 28)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Convolution2D(128, 3, 3, border_mode='same', subsample=(2, 2)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Convolution2D(256, 3, 3, border_mode='same', subsample=(1, 1)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Flatten())

    image = Input(shape=(1, 28, 28))
    features = cnn(image)

    # first output (name=generation) is whether or not the discriminator
    # thinks the image that is being shown is fake, and the second output
    # (name=auxiliary) is the class that the discriminator thinks the image
    # belongs to.
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(10, activation='softmax', name='auxiliary')(features)

    return Model(input=image, output=[fake, aux])

def test_leaky_relu():
    from keras.layers.advanced_activations import LeakyReLU
    for alpha in [0., .5, -1.]:
        layer_test(LeakyReLU, kwargs={'alpha': alpha},
                   input_shape=(2, 3, 4))

def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxiliary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = merge([gen_input, aux_input], mode="concat", concat_axis=-1)

    # Dense Layer 1
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 2
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 3
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 4
    x = Dense(400)(x)
    x = BatchNormalization()(x)
    x = Activation("tanh")(x)

    generator_model = Model(input=[gen_input, aux_input], output=[x],
                            name=model_name)
    return generator_model

def discriminator_model(model_name="discriminator"):
    # Merge discriminator and auxiliary inputs
    disc_input = Input(shape=(400,), name="discriminator_input")
    aux_input = Input(shape=(47,), name="auxilary_input")
    x = merge([disc_input, aux_input], mode="concat", concat_axis=-1)

    # Dense Layer 1
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 2
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 3
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 4
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 5
    x = Dense(1)(x)
    x = Activation("sigmoid")(x)

    discriminator_model = Model(input=[disc_input, aux_input], output=[x],
                                name=model_name)
    return discriminator_model

def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    gen_input = Input(shape=(noise_dim,), name="noise_input")

    # Dense Layer 1
    x = Dense(1024)(gen_input)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 2
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 3
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 4
    x = Dense(400)(x)
    x = BatchNormalization()(x)
    x = Activation("tanh")(x)

    generator_model = Model(input=gen_input, output=[x], name=model_name)
    return generator_model

def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400,), name="discriminator_input")

    # Dense Layer 1
    x = Dense(1024)(disc_input)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 2
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 3
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 4
    x = Dense(1024)(x)
    x = LeakyReLU(0.2)(x)

    # Dense Layer 5
    x = Dense(1)(x)
    x = Activation("sigmoid")(x)

    discriminator_model = Model(input=disc_input, output=[x], name=model_name)
    return discriminator_model