Python keras.layers.wrappers module: TimeDistributed() example source code

The following 39 code examples, extracted from open-source Python projects, illustrate how to use keras.layers.wrappers.TimeDistributed().
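Before the project examples, here is a minimal, self-contained sketch (written for this page, not taken from any of the projects below) of the typical pattern: TimeDistributed applies its wrapped layer independently to every timestep of a 3D (batch, timesteps, features) tensor, so wrapping a Dense layer produces one output vector per timestep of a recurrent layer's sequence output.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LSTM
from keras.layers.wrappers import TimeDistributed

# (batch, 10, 16) input -> LSTM keeps the time axis -> Dense applied per timestep
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(10, 16)))
model.add(TimeDistributed(Dense(8, activation='softmax')))
model.compile(loss='categorical_crossentropy', optimizer='adam')

print(model.predict(np.random.rand(4, 10, 16)).shape)  # -> (4, 10, 8)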

Project: wtte-rnn    Author: ragulpr    | project source | file source
def model_masking(discrete_time, init_alpha, max_beta):
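    # NOTE: mask_value, n_timesteps, n_features and lr are module-level globals
    # in the original test file; they are not defined in this snippet.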
    model = Sequential()

    model.add(Masking(mask_value=mask_value,
                      input_shape=(n_timesteps, n_features)))
    model.add(TimeDistributed(Dense(2)))
    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete', reduce_loss=False).loss_function
    else:
        loss = wtte.loss(kind='continuous', reduce_loss=False).loss_function

    model.compile(loss=loss, optimizer=RMSprop(
        lr=lr), sample_weight_mode='temporal')
    return model
Project: DeepMoji    Author: bfelbo    | project source | file source
def change_trainable(layer, trainable, verbose=False):
    """ Helper method that fixes some of Keras' issues with wrappers and
        trainability. Freezes or unfreezes a given layer.

    # Arguments:
        layer: Layer to be modified.
        trainable: Whether the layer should be frozen or unfrozen.
        verbose: Verbosity flag.
    """

    layer.trainable = trainable

    if type(layer) == Bidirectional:
        layer.backward_layer.trainable = trainable
        layer.forward_layer.trainable = trainable

    if type(layer) == TimeDistributed:
        # TimeDistributed exposes its wrapped layer via `.layer`, not `.backward_layer`
        layer.layer.trainable = trainable

    if verbose:
        action = 'Unfroze' if trainable else 'Froze'
        print("{} {}".format(action, layer.name))
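A hypothetical usage sketch (the model and the layer index are assumed here, not part of the DeepMoji source): freeze every layer except a final TimeDistributed classifier before fine-tuning.

# Hypothetical usage; `model` is any Keras model ending in a TimeDistributed layer.
for l in model.layers:
    change_trainable(l, trainable=False, verbose=True)            # freeze everything
change_trainable(model.layers[-1], trainable=True, verbose=True)  # unfreeze the head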
Project: coremltools    Author: apple    | project source | file source
def test_lstm_td(self):
        np.random.seed(1988)
        input_dim = 2
        input_length = 4
        num_channels = 3

        # Define a model
        model = Sequential()
        model.add(SimpleRNN(num_channels, return_sequences=True, 
                input_shape=(input_length, input_dim),))
        model.add(TimeDistributed(Dense(5)))

        # Set some random weights
        model.set_weights([np.random.rand(*w.shape)*0.2 - 0.1 for w in \
                model.get_weights()])

        # Test the keras model
        self._test_keras_model(model, input_blob = 'data', 
                output_blob = 'output')


    # Making sure that giant channel sizes get handled correctly
Project: coremltools    Author: apple    | project source | file source
def test_tiny_image_captioning(self):
        # use a conv layer as a image feature branch
        img_input_1 = Input(shape=(16,16,3))
        x = Convolution2D(2,3,3)(img_input_1)
        x = Flatten()(x)
        img_model = Model([img_input_1], [x])

        img_input = Input(shape=(16,16,3))
        x = img_model(img_input)
        x = Dense(8, name = 'cap_dense')(x)
        x = Reshape((1,8), name = 'cap_reshape')(x)

        sentence_input = Input(shape=(5,)) # max_length = 5
        y = Embedding(8, 8, name = 'cap_embedding')(sentence_input)
        z = merge([x,y], mode = 'concat', concat_axis = 1, name = 'cap_merge')
        z = LSTM(4, return_sequences = True, name = 'cap_lstm')(z)
        z = TimeDistributed(Dense(8), name = 'cap_timedistributed')(z)

        combined_model = Model([img_input, sentence_input], [z])
        self._test_keras_model(combined_model, one_dim_seq_flags=[False, True])
Project: onto-lstm    Author: pdasigi    | project source | file source
def train(self, S_ind, C_ind, use_onto_lstm=True, use_attention=True, num_epochs=20,  hierarchical=False, base=2):
    # Predict next word from current synsets
    X = C_ind[:,:-1] if use_onto_lstm else S_ind[:,:-1] # remove the last words' hyps in all sentences
    Y_inds = S_ind[:,1:] # remove the first words in all sentences
    if hierarchical:
      train_targets = self._factor_target_indices(Y_inds, base=base)
    else:
      train_targets = [self._make_one_hot(Y_inds, Y_inds.max() + 1)]
    length = Y_inds.shape[1]
    lstm_outdim = self.word_dim

    num_words = len(self.dp.word_index)
    num_syns = len(self.dp.synset_index)
    input = Input(shape=X.shape[1:], dtype='int32')
    embed_input_dim = num_syns if use_onto_lstm else num_words
    embed_layer = HigherOrderEmbedding(name='embedding', input_dim=embed_input_dim, output_dim=self.word_dim, input_shape=X.shape[1:], mask_zero=True)
    sent_rep = embed_layer(input)
    reg_sent_rep = Dropout(0.5)(sent_rep)
    if use_onto_lstm:
      lstm_out = OntoAttentionLSTM(name='sent_lstm', input_dim=self.word_dim, output_dim=lstm_outdim, input_length=length, num_senses=self.num_senses, num_hyps=self.num_hyps, return_sequences=True, use_attention=use_attention)(reg_sent_rep)
    else:
      lstm_out = LSTM(name='sent_lstm', input_dim=self.word_dim, output_dim=lstm_outdim, input_length=length, return_sequences=True)(reg_sent_rep)
    output_nodes = []
    # Make one node for each factored target
    for target in train_targets:
      node = TimeDistributed(Dense(input_dim=lstm_outdim, output_dim=target.shape[-1], activation='softmax'))(lstm_out)
      output_nodes.append(node)

    model = Model(input=input, output=output_nodes)
    print >>sys.stderr, model.summary()
    early_stopping = EarlyStopping()
    precompile_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    postcompile_time = time.time()
    print >>sys.stderr, "Model compilation took %d s"%(postcompile_time - precompile_time)
    model.fit(X, train_targets, nb_epoch=num_epochs, validation_split=0.1, callbacks=[early_stopping])
    posttrain_time = time.time()
    print >>sys.stderr, "Training took %d s"%(posttrain_time - postcompile_time)
    concept_reps = model.layers[1].get_weights()
    self.model = model
    return concept_reps
Project: DeepLearning-OCR    Author: xingjian-f    | project source | file source
def build_CNN_LSTM(channels, width, height, lstm_output_size, nb_classes):
    model = Sequential()
    # 1 conv
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu', 
        input_shape=(channels, height, width)))
    model.add(BatchNormalization(mode=0, axis=1))
    # 2 conv
    model.add(Convolution2D(64, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    # 3 conv
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    # 4 conv
    model.add(Convolution2D(128, 3, 3, border_mode='same', activation='relu'))
    model.add(BatchNormalization(mode=0, axis=1))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
    # flatten
    model.add(Flatten())
    # 1 dense
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # 2 dense
    model.add(Dense(512, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    # lstm
    model.add(RepeatVector(lstm_output_size))
    model.add(LSTM(512, return_sequences=True))
    model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(Dense(nb_classes, activation='softmax')))
    model.summary()
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[categorical_accuracy_per_sequence],
                  sample_weight_mode='temporal'
                  )

    return model
Project: keras-molecules    Author: maxhodak    | project source | file source
def _buildDecoder(self, z, latent_rep_size, max_length, charset_length):
        h = Dense(latent_rep_size, name='latent_input', activation = 'relu')(z)
        h = RepeatVector(max_length, name='repeat_vector')(h)
        h = GRU(501, return_sequences = True, name='gru_1')(h)
        h = GRU(501, return_sequences = True, name='gru_2')(h)
        h = GRU(501, return_sequences = True, name='gru_3')(h)
        return TimeDistributed(Dense(charset_length, activation='softmax'), name='decoded_mean')(h)
Project: Kutils    Author: ishank26    | project source | file source
def my_model(X_train, y_train, X_test, y_test):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 3
    ################### Model ################

    ######### begin model ########
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout({{choice([0.4, 0.5, 0.6, 0.7, 0.8])}}))
    # fc layer
    model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    ########################################################################
    checkpoint = ModelCheckpoint("weights/hypmodel2_maha1_noep{0}_batch{1}_seq_{2}.hdf5".format(
        no_epochs, batch, line_length), monitor='val_loss', verbose=0, save_best_only=True, save_weights_only=False, mode='min')

    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08,
                      clipvalue={{choice([0, 1, 2, 3, 4, 5, 6, 7])}})
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    history = History()
    # fit model
    model.fit(X_train, y_train, batch_size=batch, nb_epoch=no_epochs,
              validation_split=0.2, callbacks=[history, checkpoint])

    score, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Project: Kutils    Author: ishank26    | project source | file source
def my_model(dropout):
    ############ model params ################
    line_length = 248  # seq size
    train_char = 58
    hidden_neurons = 512  # hidden neurons
    batch = 64  # batch_size
    no_epochs = 5
    ################### Model ################
    model = Sequential()
    # layer 1
    model.add(LSTM(hidden_neurons, return_sequences=True,
                   input_shape=(line_length, train_char)))
    model.add(Dropout(dropout))
    # layer 2
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    # layer 3
    model.add(LSTM(hidden_neurons, return_sequences=True))
    model.add(Dropout(dropout))
    model.add(Reshape((248, 512)))
    # fc layer
    model.add(TimeDistributed(Dense(58, activation='softmax')))
    # model.load_weights("weights/model_maha1_noep50_batch64_seq_248.hdf5")
    # model.layers.pop()
    # model.layers.pop()
    # model.add(Dropout(dropout))
    #model.add(TimeDistributed(Dense(train_char, activation='softmax')))
    initlr = 0.00114
    adagrad = Adagrad(lr=initlr, epsilon=1e-08)
    model.compile(optimizer=adagrad,
                  loss='categorical_crossentropy', metrics=['accuracy'])
    ###load weights####
    return model
Project: soph    Author: Linusp    | project source | file source
def build_model(input_size, seq_len, hidden_size):
    """???? seq2seq ??"""
    model = Sequential()
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="softmax")))
    model.compile(loss="categorical_crossentropy", optimizer='adam')

    return model
Project: soph    Author: Linusp    | project source | file source
def build_model(input_size, seq_len, hidden_size):
    """???? sequence to sequence ??"""
    model = Sequential()
    model.add(GRU(input_dim=input_size, output_dim=hidden_size, return_sequences=False))
    model.add(Dense(hidden_size, activation="relu"))
    model.add(RepeatVector(seq_len))
    model.add(GRU(hidden_size, return_sequences=True))
    model.add(TimeDistributed(Dense(output_dim=input_size, activation="linear")))
    model.compile(loss="mse", optimizer='adam')

    return model
Project: autolipsync    Author: evgenijkatunov    | project source | file source
def init(self):
        self.model = Sequential()
        self.model.add(Bidirectional(LSTM(126, return_sequences=True), 'sum',
                                     input_shape=(self._max_frames, self._features_count)))
        self.model.add(Dropout(0.5))
        self.model.add(TimeDistributed(Dense(units=self._phonemes_count, activation='softmax')))
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='rmsprop',
                           metrics=[metrics.categorical_accuracy])
Project: wtte-rnn    Author: ragulpr    | project source | file source
def model_no_masking(discrete_time, init_alpha, max_beta):
    model = Sequential()
    model.add(TimeDistributed(Dense(2), input_shape=(n_timesteps, n_features)))

    model.add(Lambda(wtte.output_lambda, arguments={"init_alpha": init_alpha,
                                                    "max_beta_value": max_beta}))

    if discrete_time:
        loss = wtte.loss(kind='discrete').loss_function
    else:
        loss = wtte.loss(kind='continuous').loss_function

    model.compile(loss=loss, optimizer=RMSprop(lr=lr))

    return model
Project: DrugAI    Author: Gananath    | project source | file source
def Gen():
    #Generator model
    G = Sequential()
    G.add(TimeDistributed(Dense(x_dash.shape[2]), input_shape=(x_dash.shape[1],x_dash.shape[2])))
    G.add(LSTM(216, return_sequences=True))
    G.add(Dropout(0.3))
    G.add(LSTM(216, return_sequences=True))
    G.add(Dropout(0.3))
    G.add(LSTM(216, return_sequences=True))
    #G.add(BatchNormalization(momentum=0.9))
    G.add(TimeDistributed(Dense(y_dash.shape[2], activation='softmax')))
    G.compile(loss='categorical_crossentropy', optimizer=Adam(lr=2e-4))
    return G
Project: DrugAI    Author: Gananath    | project source | file source
def Dis():
    #Discriminator model
    D = Sequential()
    D.add(TimeDistributed(Dense(y_dash.shape[2]), input_shape=(y_dash.shape[1],y_dash.shape[2])))
    D.add(LSTM(216, return_sequences=True))
    D.add(Dropout(0.3))
    D.add(LSTM(60, return_sequences=True))
    D.add(Flatten())
    D.add(Dense(1, activation='sigmoid'))
    D.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001))
    return D
Project: coremltools    Author: apple    | project source | file source
def test_tiny_time_distrbuted(self):

        # as the first layer in a model
        model = Sequential()
        model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        self._test_keras_model(model)
Project: coremltools    Author: apple    | project source | file source
def test_dense_fused_act_in_td(self):
        np.random.seed(1988)
        x_in = Input(shape=(10,2))
        x = TimeDistributed(Dense(6, activation = 'softmax'))(x_in)
        model = Model(inputs=[x_in], outputs=[x])

        self._test_keras_model(model, input_blob = 'data', output_blob = 'output', delta=1e-4)
Project: coremltools    Author: apple    | project source | file source
def test_large_batch_gpu(self):

        batch_size = 2049
        num_channels = 4
        kernel_size = 3

        model = Sequential()
        model.add(TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size)))

        model.set_weights([(np.random.rand(*w.shape)-0.5)*0.2 for w in model.get_weights()])

        self._test_keras_model(model, input_blob='data', output_blob='output', delta=1e-2)
Project: coremltools    Author: apple    | project source | file source
def test_tiny_mcrnn_td(self):

        model = Sequential()
        model.add(Conv2D(3,(1,1), input_shape=(2,4,4), padding='same'))
        model.add(AveragePooling2D(pool_size=(2,2)))
        model.add(Reshape((2,3)))
        model.add(TimeDistributed(Dense(5)))

        self._test_keras_model(model)
Project: coremltools    Author: apple    | project source | file source
def test_tiny_time_distrbuted(self):

        # as the first layer in a model
        model = Sequential()
        model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))

        model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])

        self._test_keras_model(model)
Project: coremltools    Author: apple    | project source | file source
def test_dense_fused_act_in_td(self):
        np.random.seed(1988)
        x_in = Input(shape=(10,2))
        x = TimeDistributed(Dense(6, activation = 'softmax'))(x_in)
        model = Model(x_in, x)

        self._test_keras_model(model, input_blob = 'data', output_blob = 'output', delta=1e-2)
Project: coremltools    Author: apple    | project source | file source
def test_large_batch_gpu(self):

        batch_size = 2049
        num_channels = 4
        kernel_size = 3

        model = Sequential()
        model.add(TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size)))

        model.set_weights([(np.random.rand(*w.shape)-0.5)/5.0 for w in model.get_weights()])

        self._test_keras_model(model, input_blob='data', output_blob='output', delta=1e-2)
Project: coremltools    Author: apple    | project source | file source
def test_tiny_mcrnn_td(self):

        model = Sequential()
        model.add(Convolution2D(3,1,1, input_shape=(2,4,4), border_mode='same'))
        model.add(AveragePooling2D(pool_size=(2,2)))
        model.add(Reshape((2,3)))
        model.add(TimeDistributed(Dense(5)))

        self._test_keras_model(model)
Project: keras-customized    Author: ambrite    | project source | file source
def test_regularizers():
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2, W_regularizer='l1'), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    assert len(model.losses) == 1
Project: SarcasmDetection    Author: AniSkywalker    | project source | file source
def _build_network(self, vocab_size, maxlen, emb_weights=[], hidden_units=256, trainable=False):
        print('Build model...')
        model = Sequential()

        model.add(Embedding(vocab_size, emb_weights.shape[1], input_length=maxlen, weights=[emb_weights],
                            trainable=trainable))

        model.add(Reshape((maxlen,emb_weights.shape[1],1)))

        model.add(BatchNormalization(momentum=0.9))

        # model.add(Convolution2D(int(hidden_units/8), (5,5), kernel_initializer='he_normal', padding='valid', activation='sigmoid'))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Dropout(0.5))
        #
        # model.add(Convolution2D(int(hidden_units/4), (5,5), kernel_initializer='he_normal', padding='valid', activation='sigmoid'))
        # model.add(MaxPooling2D((2,2)))
        # model.add(Dropout(0.5))


        model.add(TimeDistributed(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5, return_sequences=True)))
        model.add(TimeDistributed(LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5)))

        model.add(Flatten())

        # model.add(Dense(int(hidden_units/2), kernel_initializer='he_normal', activation='sigmoid'))
        # model.add(Dropout(0.5))
        model.add(Dense(2,activation='softmax'))
        adam = Adam(lr=0.0001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('No of parameter:', model.count_params())

        print(model.summary())
        return model
Project: BetaStock    Author: qweraqq    | project source | file source
def build(self):
        dim_data = self.size_of_input_data_dim
        nb_time_step = self.size_of_input_timesteps
        financial_time_series_input = Input(shape=(nb_time_step, dim_data), name='x1')
        lstm_layer_1 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                            W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                            return_sequences=True, name='lstm_layer1')
        lstm_layer_21 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss1')
        lstm_layer_22 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss2')
        lstm_layer_23 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss3')

        lstm_layer_24 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss4')

        lstm_layer_25 = LSTM(output_dim=nb_hidden_units, dropout_U=dropout, dropout_W=dropout,
                             W_regularizer=l2(l2_norm_alpha), b_regularizer=l2(l2_norm_alpha), activation='tanh',
                             return_sequences=True, name='lstm_layer2_loss5')
        h1 = lstm_layer_1(financial_time_series_input)
        h21 = lstm_layer_21(h1)
        h22 = lstm_layer_22(h1)
        h23 = lstm_layer_23(h1)
        h24 = lstm_layer_24(h1)
        h25 = lstm_layer_25(h1)
        time_series_predictions1 = TimeDistributed(Dense(1), name="p1")(h21)  # custom 1
        time_series_predictions2 = TimeDistributed(Dense(1), name="p2")(h22)  # custom 2
        time_series_predictions3 = TimeDistributed(Dense(1), name="p3")(h23)  # mse
        time_series_predictions4 = TimeDistributed(Dense(1, activation='sigmoid'), name="p4")(h24)  # logloss
        time_series_predictions5 = TimeDistributed(Dense(nb_labels, activation='softmax'), name="p5")(h25)  # cross
        self.model = Model(input=financial_time_series_input,
                           output=[time_series_predictions1, time_series_predictions2,
                                   time_series_predictions3, time_series_predictions4,
                                   time_series_predictions5],
                           name="multi-task deep rnn for financial time series forecasting")
        plot(self.model, to_file='model.png')
Project: mars_express    Author: wsteitz    | project source | file source
def fit(self, x, y):
        input_dim = x.shape[1]
        output_dim = y.shape[1]
        self.x_train = x

        start = len(x) % (self.batch_size * self.sequence_length)

        x_seq = self.sliding_window(x.iloc[start:])
        y_seq = self.sliding_window(y.iloc[start:])

        model = Sequential()
        model.add(GRU(1024, batch_input_shape=(self.batch_size, self.sequence_length, input_dim), return_sequences=True, stateful=True))
        model.add(Activation("tanh"))
        model.add(GRU(1024, return_sequences=True))
        model.add(Activation("tanh"))
        model.add(GRU(512, return_sequences=True))
        model.add(Activation("tanh"))
        #model.add(Dropout(0.5))
        model.add(TimeDistributed(Dense(output_dim)))
        model.add(Activation("linear"))

        optimizer = keras.optimizers.RMSprop(lr=0.002)
        optimizer = keras.optimizers.Nadam(lr=0.002)
        model.compile(loss='mse', optimizer=optimizer)

        model.fit(x_seq, y_seq, batch_size=self.batch_size, verbose=1, nb_epoch=self.n_epochs, shuffle=False)
        self.model = model
        return self
Project: keras    Author: NVIDIA    | project source | file source
def test_regularizers():
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2, W_regularizer='l1'), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    assert len(model.losses) == 1
Project: plasma    Author: jnkh    | project source | file source
def get_model(batch_size = 32,num_layers = 2,hidden_units=100,num_output=1,dropout=0.1,timesteps = 100, featurelen=1,is_training=True):

    input_tensor = Input(batch_shape=(batch_size,timesteps,featurelen))
    recurrent_layer = LSTM(hidden_units,return_sequences=True,stateful = True)(input_tensor)
    output_tensor = TimeDistributed(Dense(num_output,activation='linear'))(recurrent_layer)

    model = Model(input=input_tensor, output=output_tensor)
    #model.compile(optimizer=SGD(lr=DUMMY_LR),loss='mse')

    return model
Project: plasma    Author: jnkh    | project source | file source
def build_model(predict,batch_size,length,featurelen):
    if predict:
        batch_size = length = 1
    model = Sequential()
    model.add(LSTM(10 ,return_sequences=True, batch_input_shape=(batch_size, length , featurelen), stateful=True))
    model.add(Dropout(0.2))
    model.add(LSTM(10 , return_sequences=True,stateful=True))
    model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense( featurelen )))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.reset_states()
    return model
Project: plasma    Author: jnkh    | project source | file source
def build_model(predict,batch_size,length,featurelen):
    if predict:
        batch_size = length = 1
    model = Sequential()
    model.add(LSTM(10 ,return_sequences=True, batch_input_shape=(batch_size, length , featurelen), stateful=True))
    model.add(Dropout(0.2))
    model.add(LSTM(10 , return_sequences=True,stateful=True))
    model.add(Dropout(0.2))
    model.add(TimeDistributed(Dense( featurelen )))
    model.add(Activation('tanh'))
    model.compile(loss='mse', optimizer='rmsprop')
    model.reset_states()
    return model
Project: Book_DeepLearning_Practice    Author: wac81    | project source | file source
def text_feature_extract_model1(embedding_size=128, hidden_size=256):
    '''
    this model uses a normal Bi-LSTM and max pooling to extract features

    examples:
example predictions (the original Chinese sample sentences were garbled in the
source; only the predicted scores remain, which fall near either 1.6e-05 or 1.0)


    :return:
    '''
    model = Sequential()
    model.add(Embedding(input_dim=max_features,
                        output_dim=embedding_size,
                        input_length=max_seq))
    model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
    model.add(TimeDistributed(Dense(embedding_size/2)))
    model.add(Activation('softplus'))
    model.add(MaxPooling1D(5))
    model.add(Flatten())
    # model.add(Dense(2048, activation='softplus'))
    # model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    plot(model, to_file="text_feature_extract_model1.png", show_shapes=True)
    return model
Project: text_classification    Author: senochow    | project source | file source
def HierarchicalRNN(embed_matrix, max_words, ans_cnt, sequence_length, embedding_dim, lstm_dim=100):
    ''' Hierarchical RNN model
        Input: (batch_size, answers, answer words)
    Args:
        embed_matrix: word embedding
        max words:    word dict size of embedding layer
        ans_cnt:      answer count
        sequence_length: answer words count
        embedding_dim: embedding dimension
        lstm_dim:
    '''
    hnn = Sequential()
    x = Input(shape=(ans_cnt, sequence_length))
    # 1. time distributed word embedding: (None, steps, words, embed_dim)
    words_embed = TimeDistributed(Embedding(max_words, embedding_dim,input_length=sequence_length,weights=[embed_matrix]))(x)
    # 2. word level lstm embedding: --> (None, steps/sentence_num, hidden/sent_words, hidden_dim)
    word_lstm = TimeDistributed(Bidirectional(MGU(lstm_dim, return_sequences=True)))(words_embed)

    # 3. average pooling : --> (None,steps,dim)
    word_avg = TimeDistributed(GlobalMaxPooling1D())(word_lstm)
    #word_avg = TimeDistributed(AttentionLayer(lstm_dim*2))(word_lstm)

    # 4.  sentence lstm:  --> (None, hidden, hidden_dim)
    sent_lstm = Bidirectional(MGU(lstm_dim, return_sequences=True))(word_avg)

    # 5. pooling:  --> (None, hidden_dim)
    sent_avg = GlobalMaxPooling1D()(sent_lstm)
    #sent_avg = AttentionLayer(lstm_dim*2)(sent_lstm)
    model = Model(input=x, output=sent_avg)
    hnn.add(model)
    return hnn


# vim: set expandtab ts=4 sw=4 sts=4 tw=100:
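A hypothetical call for illustration (vocabulary size, answer count, sequence length and the embedding matrix are all assumed; MGU is the project's custom recurrent layer):

import numpy as np
# Assumed toy values; embed_matrix would normally hold pretrained word embeddings.
vocab_size, embedding_dim = 5000, 100
embed_matrix = np.random.rand(vocab_size, embedding_dim)
hnn = HierarchicalRNN(embed_matrix, max_words=vocab_size, ans_cnt=5,
                      sequence_length=30, embedding_dim=embedding_dim, lstm_dim=100)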
Project: five-video-classification-methods    Author: harvitronix    | project source | file source
def lrcn(self):
        """Build a CNN into RNN.
        Starting version from:
            https://github.com/udacity/self-driving-car/blob/master/
                steering-models/community-models/chauffeur/models.py

        Heavily influenced by VGG-16:
            https://arxiv.org/abs/1409.1556

        Also known as an LRCN:
            https://arxiv.org/pdf/1411.4389.pdf
        """
        model = Sequential()

        model.add(TimeDistributed(Conv2D(32, (7, 7), strides=(2, 2),
            activation='relu', padding='same'), input_shape=self.input_shape))
        model.add(TimeDistributed(Conv2D(32, (3,3),
            kernel_initializer="he_normal", activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(64, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(64, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(128, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(128, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(256, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(256, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Conv2D(512, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(Conv2D(512, (3,3),
            padding='same', activation='relu')))
        model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

        model.add(TimeDistributed(Flatten()))

        model.add(Dropout(0.5))
        model.add(LSTM(256, return_sequences=False, dropout=0.5))
        model.add(Dense(self.nb_classes, activation='softmax'))

        return model
Project: DeepNews    Author: kabrapratik28    | project source | file source
def create_model(self,):
        """
        RNN model creation
        Layers include an Embedding layer, 3 stacked LSTM layers,
        Simple Context layer (manually defined),
        Time Distributed Layer
        """
        length_vocab, embedding_size = self.word2vec.shape
        print ("shape of word2vec matrix ", self.word2vec.shape)

        model = Sequential()

        # TODO: look at mask zero flag
        model.add(
                Embedding(
                        length_vocab, embedding_size,
                        input_length=max_length,
                        weights=[self.word2vec], mask_zero=True,
                        name='embedding_layer'
                )
        )

        for i in range(rnn_layers):
            lstm = LSTM(rnn_size, return_sequences=True,
                name='lstm_layer_%d' % (i + 1)
            )

            model.add(lstm)
            # No drop out added !

        model.add(Lambda(self.simple_context,
                     mask=lambda inputs, mask: mask[:, max_len_desc:],
                     output_shape=self.output_shape_simple_context_layer,
                     name='simple_context_layer'))

        vocab_size = self.word2vec.shape[0]
        model.add(TimeDistributed(Dense(vocab_size,
                                name='time_distributed_layer')))

        model.add(Activation('softmax', name='activation_layer'))

        model.compile(loss='categorical_crossentropy', optimizer='adam')
        K.set_value(model.optimizer.lr, np.float32(learning_rate))
        print (model.summary())
        return model
Project: UK_Imbalance_Price_Forecasting    Author: ADGEfficiency    | project source | file source
def make_lstm(timestep,
              input_length,
              layer_nodes,
              dropout=0.35,
              optimizer='Adam',
              loss='mse'):
    """
    Creates a Long Short Term Memory (LSTM) neural network Keras model

    args
        timestep (int) : the length of the sequence
        input_length (int) : used to define input shape
        layer_nodes (list) : number of nodes in each of the input & hidden layers
        dropout (float) : the dropout rate for the layer-to-layer connections
        optimizer (str) : reference to the Keras optimizer we want to use
        loss (str) : reference to the Keras loss function we want to use

    returns
        model (object) : the Keras LSTM neural network model
    """

    model = Sequential()

    #  first we add the input layer
    model.add(LSTM(units=layer_nodes[0],
                   input_shape=(timestep, input_length),
                   return_sequences=True))
    #  batch norm to normalize data going into the activation functions
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    #  dropout some connections into the first hidden layer
    model.add(Dropout(dropout))

    #  now add hidden layers using the same structure
    for nodes in layer_nodes[1:]:
        model.add(LSTM(units=nodes, return_sequences=True))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

    #  add the output layer with a linear activation function
    #  the output size is hard-coded to 1 because we make one prediction
    #  per time step
    model.add(TimeDistributed(Dense(1)))
    model.add(Activation('linear'))

    #  compile model using user defined loss function and optimizer
    model.compile(loss=loss, optimizer=optimizer)
    print(model.summary())

    return model
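A hypothetical call (argument values assumed purely for illustration): one prediction per timestep from 48-step windows of 10 features, with a 64-node input layer and a 32-node hidden layer.

# Hypothetical values, chosen only to show the call signature.
model = make_lstm(timestep=48, input_length=10, layer_nodes=[64, 32],
                  dropout=0.35, optimizer='Adam', loss='mse')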
Project: albemarle    Author: SeanTater    | project source | file source
def __init__(self, output_dim, hidden_dim, output_length, depth=1,bidirectional=True, dropout=0.1, **kwargs):
        if bidirectional and hidden_dim % 2 != 0:
            raise Exception ("hidden_dim for AttentionSeq2seq should be even (Because of bidirectional RNN).")
        super(AttentionSeq2seq, self).__init__()
        if type(depth) not in [list, tuple]:
            depth = (depth, depth)
        if 'batch_input_shape' in kwargs:
            shape = kwargs['batch_input_shape']
            del kwargs['batch_input_shape']
        elif 'input_shape' in kwargs:
            shape = (None,) + tuple(kwargs['input_shape'])
            del kwargs['input_shape']
        elif 'input_dim' in kwargs:
            if 'input_length' in kwargs:
                input_length = kwargs['input_length']
            else:
                input_length = None
            shape = (None, input_length, kwargs['input_dim'])
            del kwargs['input_dim']
        self.add(Layer(batch_input_shape=shape))
        if bidirectional:
            self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
        else:
            self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        for i in range(0, depth[0] - 1):
            self.add(Dropout(dropout))
            if bidirectional:
                self.add(Bidirectional(LSTMEncoder(output_dim=int(hidden_dim / 2), state_input=False, return_sequences=True, **kwargs)))
            else:
                self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        encoder = self.layers[-1]
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(hidden_dim if depth[1] > 1 else output_dim)))
        decoder = AttentionDecoder(hidden_dim=hidden_dim, output_length=output_length, state_input=False, **kwargs)
        self.add(Dropout(dropout))
        self.add(decoder)
        for i in range(0, depth[1] - 1):
            self.add(Dropout(dropout))
            self.add(LSTMEncoder(output_dim=hidden_dim, state_input=False, return_sequences=True, **kwargs))
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(output_dim, activation='softmax')))
        self.encoder = encoder
        self.decoder = decoder
Project: albemarle    Author: SeanTater    | project source | file source
def __init__(self, output_dim, hidden_dim, output_length, depth=1, broadcast_state=True, inner_broadcast_state=True, peek=False, dropout=0.1, **kwargs):
        super(Seq2seq, self).__init__()
        if type(depth) not in [list, tuple]:
            depth = (depth, depth)
        if 'batch_input_shape' in kwargs:
            shape = kwargs['batch_input_shape']
            del kwargs['batch_input_shape']
        elif 'input_shape' in kwargs:
            shape = (None,) + tuple(kwargs['input_shape'])
            del kwargs['input_shape']
        elif 'input_dim' in kwargs:
            shape = (None, None, kwargs['input_dim'])
            del kwargs['input_dim']
        lstms = []
        layer = LSTMEncoder(batch_input_shape=shape, output_dim=hidden_dim, state_input=False, return_sequences=depth[0] > 1, **kwargs)
        self.add(layer)
        lstms += [layer]
        for i in range(depth[0] - 1):
            self.add(Dropout(dropout))
            layer = LSTMEncoder(output_dim=hidden_dim, state_input=inner_broadcast_state, return_sequences=i < depth[0] - 2, **kwargs)
            self.add(layer)
            lstms += [layer]
        if inner_broadcast_state:
            for i in range(len(lstms) - 1):
                lstms[i].broadcast_state(lstms[i + 1])
        encoder = self.layers[-1]
        self.add(Dropout(dropout))
        decoder_type = LSTMDecoder2 if peek else LSTMDecoder
        decoder = decoder_type(hidden_dim=hidden_dim, output_length=output_length, state_input=broadcast_state, **kwargs)
        self.add(decoder)
        lstms = [decoder]
        for i in range(depth[1] - 1):
            self.add(Dropout(dropout))
            layer = LSTMEncoder(output_dim=hidden_dim, state_input=inner_broadcast_state, return_sequences=True, **kwargs)
            self.add(layer)
            lstms += [layer]
        if inner_broadcast_state:
                for i in range(len(lstms) - 1):
                    lstms[i].broadcast_state(lstms[i + 1])
        if broadcast_state:
            encoder.broadcast_state(decoder)
        self.add(Dropout(dropout))
        self.add(TimeDistributed(Dense(output_dim, **kwargs)))
        self.encoder = encoder
        self.decoder = decoder
Project: cocktail-party    Author: avivga    | project source | file source
def build(video_shape, audio_spectrogram_size):
        model = Sequential()

        model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero1', input_shape=video_shape))
        model.add(Convolution3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv1'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max1'))
        model.add(Dropout(0.25))

        model.add(ZeroPadding3D(padding=(1, 2, 2), name='zero2'))
        model.add(Convolution3D(64, (3, 5, 5), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv2'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max2'))
        model.add(Dropout(0.25))

        model.add(ZeroPadding3D(padding=(1, 1, 1), name='zero3'))
        model.add(Convolution3D(128, (3, 3, 3), strides=(1, 1, 1), kernel_initializer='he_normal', name='conv3'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='max3'))
        model.add(Dropout(0.25))

        model.add(TimeDistributed(Flatten(), name='time'))

        model.add(Dense(1024, kernel_initializer='he_normal', name='dense1'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Dense(1024, kernel_initializer='he_normal', name='dense2'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Flatten())

        model.add(Dense(2048, kernel_initializer='he_normal', name='dense3'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Dense(2048, kernel_initializer='he_normal', name='dense4'))
        model.add(BatchNormalization())
        model.add(LeakyReLU())
        model.add(Dropout(0.25))

        model.add(Dense(audio_spectrogram_size, name='output'))

        model.summary()

        return VideoToSpeechNet(model)