我们从Python开源项目中，提取了以下7个代码示例，用于说明如何使用keras.backend.random_uniform()。
def _build(self, input_shape):
    """Wire a generator MLP in front of a frozen discriminator (GAN-style).

    Builds a noise-injecting generator network, feeds its output through
    the (untrainable) discriminator, and stores the resulting models on
    ``self`` (``self._generator``, ``self._discriminator``, ``self.net``).

    Parameters
    ----------
    input_shape : tuple
        Shape of one sample, without the batch axis.
    """
    discriminator, loss = self.parameters['discriminator']
    # The discriminator must stay frozen while the generator is trained.
    if discriminator.trainable:
        print("discriminator is set to untrainable")
        discriminator.trainable = False
    x = Input(input_shape)  # assumes zero vector
    generated = Sequential([
        # BUG FIX: the original read ``lambda x: return x + ...`` which is a
        # SyntaxError (a lambda body is an expression — no ``return``), and
        # the comma separating this Lambda from the next layer was missing.
        Lambda(lambda x: x + K.random_uniform(shape=input_shape)),
        Dense(self.parameters['layer'], activation=self.parameters['activation']),
        BN(),
        Dropout(self.parameters['dropout']),
        Dense(self.parameters['layer'], activation=self.parameters['activation']),
        BN(),
        Dropout(self.parameters['dropout']),
        Dense(self.parameters['layer'], activation=self.parameters['activation']),
        BN(),
        Dropout(self.parameters['dropout']),
        Dense(np.prod(input_shape), activation="sigmoid"),
        Reshape(input_shape),
    ])(x)
    discriminator_output = discriminator(generated)
    self._discriminator = discriminator
    self._generator = Model(x, generated)
    self.net = Model(x, discriminator_output)
    self.loss = loss
def call(self, logits):
    """Gumbel-Softmax sampling with separate train/test temperatures.

    Perturbs ``logits`` with Gumbel(0, 1) noise and applies a
    temperature-scaled softmax: ``self.tau`` during training,
    ``self.min`` at inference time.
    """
    eps = 1e-20
    noise = K.random_uniform(K.shape(logits), 0, 1)
    gumbel = -K.log(-K.log(noise + eps) + eps)
    perturbed = logits + gumbel
    return K.in_train_phase(
        K.softmax(perturbed / self.tau),
        K.softmax(perturbed / self.min),
    )
def _merge_function(self, inputs):
    """Return a random convex combination of the two input tensors.

    Draws one uniform weight per sample (broadcast over the remaining
    axes) and linearly interpolates between ``inputs[0]`` and
    ``inputs[1]``.
    """
    alpha = K.random_uniform((BATCH_SIZE, 1, 1, 1))
    first, second = inputs[0], inputs[1]
    return alpha * first + (1 - alpha) * second
def call(self, x, mask=None):
    """Compute pairwise-similarity features against shuffled batch mates.

    For each (count, similarity) pair configured on the layer, draws
    ``count`` random permutations of the batch, scores each sample
    against its randomly chosen partner, and concatenates all scores
    along the last axis.
    """
    outputs = []
    # Invariants of the loop: hoisted once up front.
    batch_size = K.shape(x)[0]
    extra_dims = K.ndim(x) - 1
    for count, similarity in zip(self.n, self.similarities):
        for _ in range(count):
            # Pair each sample with a randomly drawn sample from the batch.
            perm = K.random_uniform((batch_size,), low=0, high=batch_size,
                                    dtype='int32')
            partners = K.gather(x, perm)
            score = similarity(x, partners)
            # Restore rank so the score broadcasts/concatenates with x.
            for _ in range(extra_dims):
                score = K.expand_dims(score, dim=1)  # NOTE(review): old-Keras `dim` kwarg
            outputs.append(score)
    return K.concatenate(outputs, axis=-1)
def random_laplace(shape, mu=0., b=1.):
    """Draw samples from a Laplace(mu, b) distribution.

    Applies the inverse-CDF transform to a Uniform(-0.5, 0.5) variate.
    See:
    https://en.wikipedia.org/wiki/Laplace_distribution#Generating_random_variables_according_to_the_Laplace_distribution
    """
    uniform = K.random_uniform(shape, -0.5, 0.5)
    return mu - b * K.sign(uniform) * K.log(1 - 2 * K.abs(uniform))
def my_init(shape, dtype=None):
    """Uniform weight initializer on [-limit, limit], limit = sqrt(3/30)."""
    limit = np.sqrt(3.0 / 30)
    return K.random_uniform(shape, minval=-limit, maxval=limit, dtype=dtype)
def sampling_gumbel(shape, eps=1e-8):
    """Sample standard Gumbel(0, 1) noise of the given shape.

    Uses the double-log transform of a uniform variate; ``eps`` guards
    both logarithms against zero arguments.
    """
    uniform = K.random_uniform(shape)
    return -K.log(eps - K.log(uniform + eps))