Python keras.objectives module: get() example source code
The following 18 code examples, extracted from open-source Python projects, illustrate how to use keras.objectives.get().
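Before the project snippets, here is a minimal, hedged sketch of what objectives.get() itself does: it resolves a loss name to the corresponding loss callable. This assumes an older Keras release in which keras.objectives is still importable (later versions renamed it keras.losses); the tensor values are illustrative only.

import numpy as np
import keras.backend as K
import keras.objectives as objectives

# Resolve the loss by name; the result is the plain loss function.
loss_fn = objectives.get('categorical_crossentropy')

y_true = K.variable(np.array([[0., 1.], [1., 0.]]))
y_pred = K.variable(np.array([[0.1, 0.9], [0.8, 0.2]]))

# The resolved callable has the usual (y_true, y_pred) signature and
# returns a per-sample loss tensor.
print(K.eval(loss_fn(y_true, y_pred)))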
def keras_wrap(model, target, output, loss):
""" Convenience function for wrapping a Keras loss function.
"""
# pylint: disable=import-error
import keras.objectives as O
import keras.backend as K
# pylint: enable=import-error
if isinstance(loss, str):
loss = O.get(loss)
shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
ins = [
(target, K.placeholder(
ndim=len(shape),
dtype=K.dtype(model.outputs[target].value),
name=target
))
]
out = loss(ins[0][1], output)
return ins, out
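keras_wrap resolves a loss by name and pairs it with a backend placeholder standing in for the ground truth. The following is a hedged, standalone sketch of the same idea against a plain Keras Model; the toy architecture and the names `inp`, `out`, and `target` are illustrative and not part of the original framework.

import keras.backend as K
import keras.objectives as O
from keras.layers import Dense, Input
from keras.models import Model

# A toy model standing in for the framework's `model` object.
inp = Input(shape=(4,))
out = Dense(2, activation='softmax')(inp)
model = Model(inp, out)

# Resolve the loss by name, build a placeholder for the ground truth with the
# same rank and dtype as the model output, and apply the loss symbolically.
loss_fn = O.get('categorical_crossentropy')
target = K.placeholder(ndim=K.ndim(model.output),
                       dtype=K.dtype(model.output),
                       name='target')
loss_tensor = loss_fn(target, model.output)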
###############################################################################
def keras_wrap(model, target, output, loss):
""" Convenience function for wrapping a Keras loss function.
"""
# pylint: disable=import-error
import keras.objectives as O
import keras.backend as K
# pylint: enable=import-error
if isinstance(loss, str):
loss = O.get(loss)
shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
ins = [
(target, K.placeholder(
ndim=len(shape),
dtype=K.dtype(model.outputs[target].value),
name=target
))
]
out = loss(ins[0][1], output)
return ins, out
###############################################################################
def test_loss_masking_time(self):
theano.config.mode = 'FAST_COMPILE'
weighted_loss = weighted_objective(objectives.get('categorical_crossentropy'))
shape = (3, 4, 2)
X = np.arange(24).reshape(shape)
Y = 2 * X
weights = np.ones((3, 4, 1)) # Normally the trailing 1 is added by standardize_weights
weights[0, 0] = 0
mask = np.ones((3, 4))
mask[1, 0] = 0
out = weighted_loss(X, Y, weights, mask).eval()
weights[0, 0] = 1e-9 # so that nonzero() doesn't remove this weight
out2 = weighted_loss(X, Y, weights, mask).eval()
print(out)
print(out2)
assert abs(out - out2) < 1e-8
def test_loss_masking():
weighted_loss = weighted_objective(objectives.get('mae'))
shape = (3, 4, 2)
X = np.arange(24).reshape(shape)
Y = 2 * X
# Normally the trailing 1 is added by standardize_weights
weights = np.ones((3,))
mask = np.ones((3, 4))
mask[1, 0] = 0
out = K.eval(weighted_loss(K.variable(X),
K.variable(Y),
K.variable(weights),
K.variable(mask)))
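The loss-masking tests above rely on Keras' internal weighted_objective wrapper, whose import path has moved between Keras releases. Below is a hedged, backend-only sketch of the behavior being checked: the per-timestep loss is multiplied by the mask, so a masked timestep contributes nothing to the result.

import numpy as np
import keras.backend as K
import keras.objectives as objectives

loss_fn = objectives.get('mae')

X = np.arange(24).reshape((3, 4, 2)).astype('float32')
Y = 2 * X
mask = np.ones((3, 4), dtype='float32')
mask[1, 0] = 0  # mask out one timestep of one sample

per_step = loss_fn(K.variable(X), K.variable(Y))  # per-timestep loss, shape (3, 4)
masked = per_step * K.variable(mask)              # masked timestep drops out
print(K.eval(K.mean(masked)))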
def __init__(self, loss, **kwargs):
self.supports_masking = True
self.loss = objectives.get(loss)
super(LossLayer, self).__init__(**kwargs)
def __init__(self, parameter_list, loss, fast=False, **kwargs):
self.supports_masking = True
self.parameter_list = parameter_list
self.loss = objectives.get(loss)
self.fast = fast
super(GradientNormLayer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
# We get two inputs
assert len(input_shape) == 2
return (input_shape[0][0], 1)
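The __init__ and compute_output_shape fragments above come from custom layers that store a loss resolved via objectives.get(). For reference, here is a minimal, self-contained LossLayer in the same style, assuming Keras 2-style custom layers; the call() body is an assumption (it does not appear in the extracted snippets) and simply applies the stored loss to a (y_true, y_pred) input pair.

import keras.backend as K
import keras.objectives as objectives
from keras.layers import Layer

class LossLayer(Layer):
    def __init__(self, loss, **kwargs):
        self.supports_masking = True
        self.loss = objectives.get(loss)
        super(LossLayer, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        # Assumed body: apply the stored loss to the (y_true, y_pred) pair and
        # keep a trailing axis so the shape matches compute_output_shape.
        y_true, y_pred = inputs
        return K.expand_dims(self.loss(y_true, y_pred))

    def compute_output_shape(self, input_shape):
        # We get two inputs
        assert len(input_shape) == 2
        return (input_shape[0][0], 1)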
def __init__(self, loss, **kwargs):
self.supports_masking = True
self.loss = objectives.get(loss)
super(LossLayer, self).__init__(**kwargs)
def __init__(self, parameter_list, loss, fast=False, **kwargs):
self.supports_masking = True
self.parameter_list = parameter_list
self.loss = objectives.get(loss)
self.fast = fast
super(GradientNormLayer, self).__init__(**kwargs)
def __init__(self, loss, **kwargs):
self.supports_masking = True
self.loss = objectives.get(loss)
super(LossLayer, self).__init__(**kwargs)
def __init__(self, parameter_list, loss, fast=False, **kwargs):
self.supports_masking = True
self.parameter_list = parameter_list
self.loss = objectives.get(loss)
self.fast = fast
super(GradientNormLayer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
# We get two inputs
assert len(input_shape) == 2
return (input_shape[0][0], 1)
def __init__(self, loss, **kwargs):
self.supports_masking = True
self.loss = objectives.get(loss)
super(LossLayer, self).__init__(**kwargs)
def __init__(self, parameter_list, loss, fast=False, **kwargs):
self.supports_masking = True
self.parameter_list = parameter_list
self.loss = objectives.get(loss)
self.fast = fast
super(GradientNormLayer, self).__init__(**kwargs)
def test_loss_masking():
weighted_loss = weighted_objective(objectives.get('mae'))
shape = (3, 4, 2)
X = np.arange(24).reshape(shape)
Y = 2 * X
# Normally the trailing 1 is added by standardize_weights
weights = np.ones((3,))
mask = np.ones((3, 4))
mask[1, 0] = 0
out = K.eval(weighted_loss(K.variable(X),
K.variable(Y),
K.variable(weights),
K.variable(mask)))
def compile(self, state_dim_values, lr=0.2, policy_rule="maxrand", init_value=None):
"""Build and initialize table with all possible state values.
state_dim_values consists of a tuple of arrays or lists - each array
gives every possible value for the corresponding dimension.
"""
self.policy_rule = policies.get(policy_rule)
if init_value is None:
self.init_value = np.zeros(self.num_actions)
else:
self.init_value = init_value
self.table = {key: np.array(self.init_value) for key in list(itertools.product(*state_dim_values))}
self.lr = lr
def values(self, observation):
if observation.ndim == 1:
vals = self.table[tuple(observation)]
else:
obs_tuple = tuple(map(tuple, observation)) # convert to tuple of tuples
vals = list(map(self.table.__getitem__, obs_tuple)) # get values from dict as a list of arrays (list() needed on Python 3)
vals = np.asarray(vals) # convert list of arrays to matrix (2-d array)
return vals
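compile() and values() above belong to a tabular agent: the table maps every combination of discretized state values to a Q-value vector of length num_actions, and lookups index it with the observation converted to a tuple. Below is a hedged, standalone sketch of that behavior; all names and values are illustrative, and the policy lookup is omitted.

import itertools
import numpy as np

# Every possible value for each state dimension (illustrative).
state_dim_values = ([0, 1, 2], [-1.0, 0.0, 1.0])
num_actions = 4
init_value = np.zeros(num_actions)

# One table entry per combination of state values, as compile() builds it.
table = {key: np.array(init_value)
         for key in itertools.product(*state_dim_values)}

print(len(table))       # 3 * 3 = 9 discrete states
print(table[(1, 0.0)])  # Q-values for a single state, all zeros initially

# Batch lookup, as values() does for a 2-d observation array.
observations = np.array([[0, -1.0], [2, 1.0]])
obs_tuples = tuple(map(tuple, observations))
print(np.asarray([table[t] for t in obs_tuples]))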
def test_loss_masking():
weighted_loss = weighted_objective(objectives.get('mae'))
shape = (3, 4, 2)
X = np.arange(24).reshape(shape)
Y = 2 * X
# Normally the trailing 1 is added by standardize_weights
weights = np.ones((3,))
mask = np.ones((3, 4))
mask[1, 0] = 0
out = K.eval(weighted_loss(K.variable(X),
K.variable(Y),
K.variable(weights),
K.variable(mask)))