The following code examples, extracted from open-source Python projects, illustrate how to use keras.activations.get().
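All of the examples share one pattern: the constructor accepts an activation as a string name, a callable, or None, and activations.get() normalizes it to a callable that the layer later applies in its call() method. A minimal standalone sketch of that behavior (assuming a Keras 2.x-style keras.activations module):

from keras import activations

relu = activations.get('relu')    # look up an activation by name
same = activations.get(relu)      # a callable is returned unchanged
linear = activations.get(None)    # None resolves to the linear (identity) activation

assert same is relu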
def __init__(self, output_dim, init='glorot_uniform', activation='relu',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, **kwargs):
    self.W_initializer = initializers.get(init)
    self.b_initializer = initializers.get('zeros')
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.initial_weights = weights
    self.input_spec = InputSpec(ndim=2)
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(SparseFullyConnectedLayer, self).__init__(**kwargs)
def __init__(self, output_dim, freq_dim, hidden_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.freq_dim = freq_dim
    self.hidden_dim = hidden_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(ITOSFM, self).__init__(**kwargs)
def __init__(self, output_dim, memory_dim=128, memory_size=20,
             controller_output_dim=100, location_shift_range=1,
             num_read_head=1, num_write_head=1,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, R_regularizer=None,
             b_regularizer=None, W_y_regularizer=None,
             W_xi_regularizer=None, W_r_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(NTM, self).__init__(**kwargs)
def __init__(self, filters,
             centers_initializer='zeros', centers_regularizer=None,
             centers_constraint=None,
             stds_initializer='ones', stds_regularizer=None,
             stds_constraint=None,
             gauss_scale=100, **kwargs):
    self.filters = filters
    self.gauss_scale = gauss_scale
    super(GaussianReceptiveFields, self).__init__(**kwargs)
    self.centers_initializer = initializers.get(centers_initializer)
    self.stds_initializer = initializers.get(stds_initializer)
    self.centers_regularizer = regularizers.get(centers_regularizer)
    self.stds_regularizer = regularizers.get(stds_regularizer)
    self.centers_constraint = constraints.get(centers_constraint)
    self.stds_constraint = constraints.get(stds_constraint)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None,
             activity_regularizer=None, W_quad_constraint=None,
             W_lin_constraint=None, bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM, self).__init__(**kwargs)
def __init__(self, activation='linear', bias_regularizer=None,
             bias_constraint=None, bias_initializer='zeros',
             use_bias=True, input_dim=None, **kwargs):
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.bias_initializer = initializers.get(bias_initializer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=2)]
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(EminusS, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None,
             activity_regularizer=None, W_quad_constraint=None,
             W_lin_constraint=None, bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=5)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM_conv, self).__init__(**kwargs)
def __init__(self, quadratic_filters=2, init='glorot_uniform', weights=None,
             W_quad_regularizer=None, W_lin_regularizer=None,
             activity_regularizer=None, W_quad_constraint=None,
             W_lin_constraint=None, bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.quadratic_filters = quadratic_filters
    self.input_dim = input_dim
    self.W_quad_regularizer = regularizers.get(W_quad_regularizer)
    self.W_lin_regularizer = regularizers.get(W_lin_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_quad_constraint = constraints.get(W_quad_constraint)
    self.W_lin_constraint = constraints.get(W_lin_constraint)
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=5)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(GQM_4D, self).__init__(**kwargs)
def __init__(self, units,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             k_initializer='zeros', k_regularizer=None, k_constraint=None,
             tied_k=False, activity_regularizer=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(SoftMinMax, self).__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.tied_k = tied_k
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, units,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=constraints.NonNeg(),
             activity_regularizer=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(WeightedMean, self).__init__(**kwargs)
    self.units = units
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, init='glorot_uniform', activation=None,
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DenseNonNeg, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True,
             input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(Feedback, self).__init__(**kwargs)
def __init__(self, init='glorot_uniform', activation=None, weights=None,
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True,
             input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.input_dim = input_dim
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim='2+')]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DivisiveNormalization, self).__init__(**kwargs)
def __init__(self, output_dim, inner_dim, depth=2,
             init_output='uniform', activation_output='softmax',
             init_inner='identity', activation_inner='linear',
             scale_output=0.01, padding=False, **kwargs):
    if depth < 1:
        raise ValueError('Cannot use GraphFP with depth zero')
    self.init_output = initializations.get(init_output)
    self.activation_output = activations.get(activation_output)
    self.init_inner = initializations.get(init_inner)
    self.activation_inner = activations.get(activation_inner)
    self.output_dim = output_dim
    self.inner_dim = inner_dim
    self.depth = depth
    self.scale_output = scale_output
    self.padding = padding
    self.initial_weights = None
    self.input_dim = 4  # each entry is a 3D N_atom x N_atom x N_feature tensor
    if self.input_dim:
        kwargs['input_shape'] = (None, None, None,)  # 3D tensor for each input
    # self.input = K.placeholder(ndim=4)
    super(GraphFP, self).__init__(**kwargs)
def __init__(self, output_dim, mem_vec_dim, init='glorot_uniform',
             activation='linear', weights=None,
             activity_regularizer=None, input_dim=None, **kwargs):
    '''
    Params:
        output_dim: dimensionality of the layer output
        mem_vec_dim: dimensionality of the query memory vector
    '''
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim
    self.mem_vector_dim = mem_vec_dim
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.initial_weights = weights
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(MemoryNet, self).__init__(**kwargs)
def __init__(self, output_dim, L, init='glorot_uniform',
             inner_init='orthogonal', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.L = L
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(RHN, self).__init__(**kwargs)
def __init__(self, downsampling_factor=10, init='glorot_uniform',
             activation='linear', weights=None,
             W_regularizer=None, activity_regularizer=None,
             W_constraint=None, input_dim=None, **kwargs):
    self.downsampling_factor = downsampling_factor
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.initial_weights = weights
    self.input_dim = input_dim
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    self.input_spec = [InputSpec(ndim=4)]
    super(EltWiseProduct, self).__init__(**kwargs)
def __init__(self, W_regularizer=None, u_regularizer=None, b_regularizer=None,
             W_constraint=None, u_constraint=None, b_constraint=None,
             W_dropout=0., u_dropout=0., bias=True, **kwargs):
    self.supports_masking = True
    self.W_init = initializers.get('orthogonal')
    self.u_init = initializers.get('glorot_uniform')
    self.W_regularizer = regularizers.get(W_regularizer)
    self.u_regularizer = regularizers.get(u_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.u_constraint = constraints.get(u_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.W_dropout = min(1., max(0., W_dropout))
    self.u_dropout = min(1., max(0., u_dropout))
    self.bias = bias
    super(AttentionWithContext, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', beta_init='zero', gamma_init='one',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             gamma_regularizer=None, beta_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.beta_init = initializations.get(beta_init)
    self.gamma_init = initializations.get(gamma_init)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.gamma_regularizer = regularizers.get(gamma_regularizer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    self.epsilon = 1e-5
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(LN_SimpleRNN, self).__init__(**kwargs)
def __init__(self, input_dim, output_dim,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid'):
    # self.input_dim = input_dim
    self.output_dim = int(output_dim / 2)
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.input_dim = input_dim
    # self.input = K.placeholder(input_shape)
    # initial states: two all-zero tensors of shape (output_dim)
    self.forward_lstm = LSTM(input_dim=input_dim, output_dim=self.output_dim)
    self.backward_lstm = LSTM(input_dim=input_dim, output_dim=self.output_dim)
    self.params = self.forward_lstm.params + self.backward_lstm.params
    # if self.initial_weights is not None:
    #     self.set_weights(self.initial_weights)
    #     del self.initial_weights
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(DualCurrent, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             shape_key=None, dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.shape_key = shape_key or {}
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    kwargs['consume_less'] = 'gpu'
    super(RTTN, self).__init__(**kwargs)
    self.num_actions = 4
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(DecoderVaeLSTM, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W = dropout_W
    self.dropout_U = dropout_U
    self.stateful = False
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(QRNN, self).__init__(**kwargs)
def __init__(self, output_dim, window_size=3, stride=1,
             kernel_initializer='uniform', bias_initializer='zero',
             activation='linear', activity_regularizer=None,
             kernel_regularizer=None, bias_regularizer=None,
             kernel_constraint=None, bias_constraint=None,
             use_bias=True, input_dim=None, input_length=None, **kwargs):
    self.output_dim = output_dim
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(GCNN, self).__init__(**kwargs)
def __init__(self, units, window_size=2, stride=1,
             return_sequences=False, go_backwards=False,
             stateful=False, unroll=False, activation='tanh',
             kernel_initializer='uniform', bias_initializer='zero',
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, dropout=0, use_bias=True,
             input_dim=None, input_length=None, **kwargs):
    self.return_sequences = return_sequences
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.units = units
    self.window_size = window_size
    self.strides = (stride, 1)
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.dropout = dropout
    self.supports_masking = True
    self.input_spec = [InputSpec(ndim=3)]
    self.input_dim = input_dim
    self.input_length = input_length
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length, self.input_dim)
    super(QRNN, self).__init__(**kwargs)
def __init__(self, filters, kernel_size,
             kernel_initializer='glorot_uniform', activation=None, weights=None,
             padding='valid', strides=(1, 1), data_format=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, use_bias=True, **kwargs):
    if data_format is None:
        data_format = K.image_data_format()
    if padding not in {'valid', 'same', 'full'}:
        raise ValueError('Invalid border mode for CosineConvolution2D:', padding)
    self.filters = filters
    self.kernel_size = kernel_size
    self.nb_row, self.nb_col = self.kernel_size
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.activation = activations.get(activation)
    self.padding = padding
    self.strides = tuple(strides)
    self.data_format = normalize_data_format(data_format)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(CosineConvolution2D, self).__init__(**kwargs)
def __init__(self, output_dim=None, activation=None, **kwargs):
    self.output_dim = output_dim
    self.activation = activations.get(activation)
    super().__init__(**kwargs)
def search_model(experiment_label, steps, batch_size=32):
    """ This is where we put everything together. We get the dataset, build the
    Training and Experiment objects, and run the experiment. The experiment
    logs are generated in ~/minos/experiment_label.
    We use CpuEnvironment to run the experiment on the CPU; n_jobs sets the
    number of parallel processes (1 here). We could use GpuEnvironment instead
    to specify which GPUs to use and how many tasks per GPU.
    """
    batch_iterator, test_batch_iterator, nb_classes = get_reuters_dataset(batch_size, max_words)
    layout = build_layout(max_words, nb_classes)
    training = Training(
        Objective('categorical_crossentropy'),
        Optimizer(optimizer='Adam'),
        Metric('categorical_accuracy'),
        epoch_stopping_condition(),
        batch_size)
    parameters = custom_experiment_parameters()
    experiment = Experiment(
        experiment_label,
        layout,
        training,
        batch_iterator,
        test_batch_iterator,
        CpuEnvironment(n_jobs=1),
        parameters=parameters)
    run_ga_search_experiment(
        experiment,
        population_size=100,
        generations=steps,
        resume=False,
        log_level='DEBUG')
def call(self, x, mask=None):
    activation = activations.get(self.activation)
    return activation(backend.dot(x, self.W) + self.b)
def __init__(self, units, activation='linear', weights=None,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=None, bias_initializer='uniform',
             bias_regularizer=None, bias_constraint=None,
             activity_regularizer=None, bias=True, input_dim=None,
             factorization=simple_tensor_factorization(), **kwargs):
    self.activation = activations.get(activation)
    self.units = units
    self.input_dim = input_dim
    self.factorization = factorization
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_initializer = get_initializer(kernel_initializer)
    self.bias_initializer = get_initializer(bias_initializer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.bias = bias
    self.initial_weights = weights
    self.input_spec = [InputSpec(ndim=2)]
    if self.input_dim:
        kwargs['input_shape'] = (self.input_dim,)
    super(DenseTensor, self).__init__(**kwargs)
def __init__(self, inp_size, out_size, activation='tanh', **kwargs):
    super(ntn_layer, self).__init__(**kwargs)
    self.k = out_size
    self.d = inp_size
    self.activation = activations.get(activation)
    self.test_out = 0
def __init__(self, filters_simple, filters_complex, nb_row, nb_col,
             init='glorot_uniform', activation='relu', weights=None,
             padding='valid', strides=(1, 1), data_format=K.image_data_format(),
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, W_constraint=None,
             bias_constraint=None, bias=True, **kwargs):
    if padding not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2DEnergy:', padding)
    self.filters_simple = filters_simple
    self.filters_complex = filters_complex
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializers.get(init, data_format=data_format)
    self.activation = activations.get(activation)
    assert padding in {'valid', 'same'}, 'padding must be in {valid, same}'
    self.padding = padding
    self.strides = tuple(strides)
    assert data_format in {'channels_last', 'channels_first'}, \
        'data_format must be in {channels_last, channels_first}'
    self.data_format = data_format
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.UnitNormOrthogonal(filters_complex, data_format)
    self.bias_constraint = constraints.get(bias_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(Convolution2DEnergy, self).__init__(**kwargs)
def __init__(self, rank, kernel_size=3, data_format=None,
             kernel_initialization=.1, bias_initialization=1,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    super(_ConvGDN, self).__init__(**kwargs)
    self.rank = rank
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(1, rank, 'strides')
    self.padding = conv_utils.normalize_padding('same')
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(1, rank, 'dilation_rate')
    self.kernel_initializer = initializers.Constant(kernel_initialization)
    self.bias_initializer = initializers.Constant(bias_initialization)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=self.rank + 2)
def __init__(self, filters,
             kernel_initializer='glorot_uniform', kernel_regularizer=None,
             kernel_constraint=kconstraints.NonNeg(),
             k_initializer='zeros', k_regularizer=None, k_constraint=None,
             tied_k=False, activity_regularizer=None,
             strides=1, padding='valid', dilation_rate=1,
             data_format=K.image_data_format(), **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(Conv2DSoftMinMax, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.k_initializer = initializers.get(k_initializer)
    self.k_regularizer = regularizers.get(k_regularizer)
    self.k_constraint = constraints.get(k_constraint)
    self.tied_k = tied_k
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
    self.padding = conv_utils.normalize_padding(padding)
    self.input_spec = InputSpec(min_ndim=2)
    self.data_format = data_format
    self.supports_masking = True
def __init__(self, weights=None, kernel_initializer='glorot_uniform',
             alpha_initializer='ones', alpha_regularizer=None,
             alpha_constraint=None,
             beta_delta_initializer='ones', beta_delta_regularizer=None,
             beta_delta_constraint=None,
             gamma_eta_initializer='ones', gamma_eta_regularizer=None,
             gamma_eta_constraint=None,
             rho_initializer='ones', rho_regularizer=None,
             rho_constraint=None, **kwargs):
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.beta_delta_initializer = initializers.get(beta_delta_initializer)
    self.gamma_eta_initializer = initializers.get(gamma_eta_initializer)
    self.rho_initializer = initializers.get(rho_initializer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.beta_delta_constraint = constraints.get(beta_delta_constraint)
    self.gamma_eta_constraint = constraints.get(gamma_eta_constraint)
    self.rho_constraint = constraints.get(rho_constraint)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.beta_delta_regularizer = regularizers.get(beta_delta_regularizer)
    self.gamma_eta_regularizer = regularizers.get(gamma_eta_regularizer)
    self.rho_regularizer = regularizers.get(rho_regularizer)
    self.input_spec = [InputSpec(ndim=2)]
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(NakaRushton, self).__init__(**kwargs)
def __init__(self, filters, sum_axes, filter_axes, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_activation=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None, **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    super(FilterDims, self).__init__(**kwargs)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.activation = activations.get(activation)
    self.kernel_activation = activations.get(kernel_activation)
    self.filters = filters
    self.sum_axes = list(sum_axes)
    self.sum_axes.sort()
    self.filter_axes = list(filter_axes)
    self.filter_axes.sort()
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.use_bias = use_bias
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def __init__(self, output_dim, init='glorot_uniform', activation='linear',
             weights=None, W_regularizer=None, b_regularizer=None,
             activity_regularizer=None, W_constraint=None, b_constraint=None,
             input_dim=None, input_length1=None, input_length2=None, **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.constraints = [self.W_constraint, self.b_constraint]
    self.initial_weights = weights
    self.input_dim = input_dim
    self.input_length1 = input_length1
    self.input_length2 = input_length2
    if self.input_dim:
        kwargs['input_shape'] = (self.input_length1, self.input_length2, self.input_dim)
    self.input = K.placeholder(ndim=4)
    super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
def __init__(self, input_shape, context='word', init='glorot_uniform',
             activation='tanh', weights=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.context = context
    self.td1, self.td2, self.wd = input_shape
    self.initial_weights = weights
    kwargs['input_shape'] = input_shape
    super(TensorAttention, self).__init__(**kwargs)
def __init__(self, max_sentences, activation='linear', **kwargs):
    self.activation = activations.get(activation)
    self.max_sentences = max_sentences
    kwargs['input_shape'] = (self.max_sentences, 3)
    super(WeightedMean, self).__init__(**kwargs)
def get_initial_state(self, inputs):
    print('inputs shape:', inputs.get_shape())
    # apply the matrix to the first time step to get the initial state s0
    s0 = activations.tanh(K.dot(inputs[:, 0], self.W_s))
    # following keras.layers.recurrent: build an all-zero tensor of shape
    # (samples, output_dim) for the initial output y0
    y0 = K.zeros_like(inputs)  # (samples, timesteps, input_dims)
    y0 = K.sum(y0, axis=(1, 2))  # (samples,)
    y0 = K.expand_dims(y0)  # (samples, 1)
    y0 = K.tile(y0, [1, self.output_dim])  # (samples, output_dim)
    return [y0, s0]
def __init__(self, nb_filter, nb_row, nb_col, transform_bias=-1,
             init='glorot_uniform', activation='relu', weights=None,
             border_mode='same', subsample=(1, 1), dim_ordering='th',
             W_regularizer=None, b_regularizer=None, activity_regularizer=None,
             W_constraint=None, b_constraint=None, bias=True, **kwargs):
    if border_mode not in {'valid', 'same'}:
        raise Exception('Invalid border mode for Convolution2D:', border_mode)
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.transform_bias = transform_bias
    self.init = initializations.get(init, dim_ordering=dim_ordering)
    self.activation = activations.get(activation)
    assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
    self.border_mode = border_mode
    self.subsample = tuple(subsample)
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)
    self.bias = bias
    self.input_spec = [InputSpec(ndim=4)]
    self.initial_weights = weights
    super(Conv2DHighway, self).__init__(**kwargs)
def __init__(self, output_dim, att_dim, attn_activation='tanh',
             attn_inner_activation='tanh', single_attn=False, **kwargs):
    '''
    attention_vec: the external vector against which attention weights
        over the hidden states are computed
    single_attention_param: if True, a single attention weight is shared
        across the output dimensions at each time step t
    '''
    self.attn_activation = activations.get(attn_activation)
    self.attn_inner_activation = activations.get(attn_inner_activation)
    self.single_attention_param = single_attn
    self.input_spec = None
    self.att_dim = att_dim
    super(AttentionLSTM, self).__init__(output_dim, **kwargs)
def __init__(self, units, n_slots=50, m_depth=20, shift_range=3,
             controller_model=None, read_heads=1, write_heads=1,
             activation='sigmoid', batch_size=777, stateful=False, **kwargs):
    self.output_dim = units
    self.units = units
    self.n_slots = n_slots
    self.m_depth = m_depth
    self.shift_range = shift_range
    self.controller = controller_model
    self.activation = get_activations(activation)
    self.read_heads = read_heads
    self.write_heads = write_heads
    self.batch_size = batch_size
    # self.return_sequence = True
    try:
        # the controller model may be None or may not define `stateful`
        self.controller_with_state = bool(self.controller.stateful)
    except AttributeError:
        self.controller_with_state = False

    self.controller_read_head_emitting_dim = _controller_read_head_emitting_dim(m_depth, shift_range)
    self.controller_write_head_emitting_dim = _controller_write_head_emitting_dim(m_depth, shift_range)

    super(NeuralTuringMachine, self).__init__(**kwargs)
def __init__(self, nb_filter, nb_row, nb_col,
             init='glorot_uniform', inner_init='orthogonal',
             forget_bias_init='one', activation='tanh',
             inner_activation='hard_sigmoid',
             dim_ordering='tf', border_mode='valid', sub_sample=(1, 1),
             W_regularizer=None, U_regularizer=None, b_regularizer=None,
             dropout_W=0., dropout_U=0., **kwargs):
    self.nb_filter = nb_filter
    self.nb_row = nb_row
    self.nb_col = nb_col
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
    self.border_mode = border_mode
    self.subsample = sub_sample
    assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
    self.dim_ordering = dim_ordering
    kwargs['nb_filter'] = nb_filter
    kwargs['nb_row'] = nb_row
    kwargs['nb_col'] = nb_col
    kwargs['dim_ordering'] = dim_ordering
    self.W_regularizer = W_regularizer
    self.U_regularizer = U_regularizer
    self.b_regularizer = b_regularizer
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    super(LSTMConv2D, self).__init__(**kwargs)
def __init__(self, output_dim, weights=None, activation='linear',
             return_mask=True, **kwargs):
    self.supports_masking = True
    self.output_dim = output_dim
    self.init = initializations.get('glorot_uniform')
    self.activation = activations.get(activation)
    self.initial_weights = weights
    self.return_mask = return_mask
    super(Projection, self).__init__(**kwargs)
def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
             activation='tanh', W_regularizer=None, U_regularizer=None,
             b_regularizer=None, dropout_W=0.0, dropout_U=0.0,
             tau=100, dt=20, noise=.1, dale_ratio=None, **kwargs):
    self.output_dim = output_dim
    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    self.activation = activations.get(activation)
    self.W_regularizer = regularizers.get(W_regularizer)
    self.U_regularizer = regularizers.get(U_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.dropout_W, self.dropout_U = dropout_W, dropout_U
    self.tau = tau
    self.dt = dt
    self.noise = noise
    self.dale_ratio = dale_ratio
    if dale_ratio:
        # make the Dale's law sign matrix: +1 excitatory, -1 inhibitory
        dale_vec = np.ones(output_dim)
        dale_vec[int(dale_ratio * output_dim):] = -1
        dale = np.diag(dale_vec)
        self.Dale = K.variable(dale)
    if self.dropout_W or self.dropout_U:
        self.uses_learning_phase = True
    super(leak_recurrent, self).__init__(**kwargs)