Python keras.backend 模块,cast_to_floatx() 实例源码
我们从Python开源项目中,提取了以下38个代码示例,用于说明如何使用keras.backend.cast_to_floatx()。
def get_constants(self, inputs, training=None):
    """Collect the wrapped recurrent layer's constants, then append the
    dropout mask used by the dense output projection."""
    constants = self.recurrent_layer.get_constants(inputs=inputs,
                                                   training=training)
    if not 0 < self.dense_dropout < 1:
        # No dropout configured: a scalar 1.0 mask is a no-op multiplier.
        constants.append([K.cast_to_floatx(1.)])
        return constants
    # Build a (batch, units) mask of ones from one timestep/feature slice.
    base = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
    base = K.tile(base, (1, self.recurrent_layer.units))
    def dropped():
        return K.dropout(base, self.dense_dropout)
    constants.append([K.in_train_phase(dropped, base, training=training)])
    return constants
def get_constants(self, inputs, training=None):
    """Return per-gate dropout masks for step().

    Dropout is effectively disabled here: the original mask-building code
    was commented out (and referenced an undefined name ``x``), so this
    always returns a single list of three scalar 1.0 masks — multiplying
    by them is a no-op. The dead commented-out block has been removed.
    """
    constants = []
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def get_constants(self, inputs, training=None):
    """Return per-gate dropout masks for step().

    Dropout is effectively disabled here: the original mask-building code
    was commented out (and referenced an undefined name ``x``), so this
    always returns a single list of three scalar 1.0 masks — multiplying
    by them is a no-op. The dead commented-out block has been removed.
    """
    constants = []
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def get_constants(self, inputs, training=None):
    """Return per-gate dropout masks for step().

    Dropout is effectively disabled here: the original mask-building code
    was commented out (and referenced an undefined name ``x``), so this
    always returns a single list of three scalar 1.0 masks — multiplying
    by them is a no-op. The dead commented-out block has been removed.
    """
    constants = []
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
def get_constants(self, x):
    """Build recurrent (B_U) and input (B_W) dropout masks, three per kind."""
    def gate_masks(width, rate):
        # Ones of shape (batch, width); each gate gets its own dropped copy.
        mask = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.tile(mask, (1, width))
        return [K.in_train_phase(K.dropout(mask, rate), mask)
                for _ in range(3)]
    constants = []
    if 0 < self.dropout_U < 1:
        constants.append(gate_masks(self.output_dim, self.dropout_U))
    else:
        constants.append([K.cast_to_floatx(1.)] * 3)
    if 0 < self.dropout_W < 1:
        constants.append(gate_masks(int(K.int_shape(x)[-1]), self.dropout_W))
    else:
        constants.append([K.cast_to_floatx(1.)] * 3)
    return constants
def get_initial_states(self, x):
    """Create the zero-valued initial state tensors for the recurrent loop.

    The zero ``reducer_*`` matrices exist only to project a zeroed,
    time-summed copy of ``x`` to the required widths, so every state
    starts at zero while inheriting the symbolic batch dimension of ``x``.
    Returned order matches step(): [p, h, S_re, S_im, time].
    """
    init_state_h = K.zeros_like(x)
    init_state_h = K.sum(init_state_h, axis = 1)
    reducer_s = K.zeros((self.input_dim, self.hidden_dim))
    reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
    reducer_p = K.zeros((self.hidden_dim, self.output_dim))
    init_state_h = K.dot(init_state_h, reducer_s)
    init_state_p = K.dot(init_state_h, reducer_p)
    init_state = K.zeros_like(init_state_h)
    init_freq = K.dot(init_state_h, reducer_f)
    # Outer product of zeros: (batch, hidden, 1) * (batch, 1, freq).
    init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
    init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
    init_state_S_re = init_state * init_freq
    init_state_S_im = init_state * init_freq
    # Scalar time counter, incremented once per step().
    init_state_time = K.cast_to_floatx(0.)
    initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
    return initial_states
def get_initial_states(self, x):
    """Create the zero-valued initial state tensors for the recurrent loop.

    The zero ``reducer_*`` matrices exist only to project a zeroed,
    time-summed copy of ``x`` to the required widths, so every state
    starts at zero while inheriting the symbolic batch dimension of ``x``.
    Returned order matches step(): [p, h, S_re, S_im, time].
    """
    init_state_h = K.zeros_like(x)
    init_state_h = K.sum(init_state_h, axis = 1)
    reducer_s = K.zeros((self.input_dim, self.hidden_dim))
    reducer_f = K.zeros((self.hidden_dim, self.freq_dim))
    reducer_p = K.zeros((self.hidden_dim, self.output_dim))
    init_state_h = K.dot(init_state_h, reducer_s)
    init_state_p = K.dot(init_state_h, reducer_p)
    init_state = K.zeros_like(init_state_h)
    init_freq = K.dot(init_state_h, reducer_f)
    # Outer product of zeros: (batch, hidden, 1) * (batch, 1, freq).
    init_state = K.reshape(init_state, (-1, self.hidden_dim, 1))
    init_freq = K.reshape(init_freq, (-1, 1, self.freq_dim))
    init_state_S_re = init_state * init_freq
    init_state_S_im = init_state * init_freq
    # Scalar time counter, incremented once per step().
    init_state_time = K.cast_to_floatx(0.)
    initial_states = [init_state_p, init_state_h, init_state_S_re, init_state_S_im, init_state_time]
    return initial_states
def get_constants(self, x):
    """Dropout masks for the four LSTM gates: recurrent (B_U) then input (B_W)."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append([K.in_train_phase(K.dropout(m, self.dropout_U), m)
                          for _ in range(4)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    if 0 < self.dropout_W < 1:
        # Input width comes from the declared input spec.
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, int(input_dim)))
        constants.append([K.in_train_phase(K.dropout(m, self.dropout_W), m)
                          for _ in range(4)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    return constants
def get_constants(self, x):
    """Single (not per-gate) dropout masks; B_W only in 'cpu' mode."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_U), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, input_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_W), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    """Per-gate (4) dropout masks, built via concatenate rather than tile."""
    constants = []
    if 0 < self.dropout_U < 1:
        col = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.concatenate([col] * self.output_dim, 1)
        constants.append([K.in_train_phase(K.dropout(mask, self.dropout_U), mask)
                          for _ in range(4)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    if 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        col = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.concatenate([col] * input_dim, 1)
        constants.append([K.in_train_phase(K.dropout(mask, self.dropout_W), mask)
                          for _ in range(4)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    return constants
def get_constants(self, x):
    """Per-gate (3) dropout masks for recurrent and input connections."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append([K.in_train_phase(K.dropout(m, self.dropout_U), m)
                          for _ in range(3)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 3)
    if 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, input_dim))
        constants.append([K.in_train_phase(K.dropout(m, self.dropout_W), m)
                          for _ in range(3)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 3)
    return constants
def get_constants(self, x):
    """Single dropout masks; the input mask only applies in 'cpu' mode."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_U), m))
    else:
        constants.append(K.cast_to_floatx(1.0))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, input_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_W), m))
    else:
        constants.append(K.cast_to_floatx(1.0))
    return constants
def get_constants(self, x):
    """Single dropout masks; the input mask only applies in 'cpu' mode."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_U), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, input_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_W), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    """Single dropout masks; B_W only when consume_less == 'cpu'."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_U), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, int(input_dim)))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_W), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    """Single dropout masks; B_W only when consume_less == 'cpu'."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.output_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_U), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, int(input_dim)))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_W), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    """Per-gate (4) dropout masks, built via concatenate rather than tile."""
    constants = []
    if 0 < self.dropout_U < 1:
        col = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.concatenate([col] * self.output_dim, 1)
        constants.append([K.in_train_phase(K.dropout(mask, self.dropout_U), mask)
                          for _ in range(4)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    if 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        col = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.concatenate([col] * input_dim, 1)
        constants.append([K.in_train_phase(K.dropout(mask, self.dropout_W), mask)
                          for _ in range(4)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    return constants
def get_constants(self, x):
    """Per-gate (3) dropout masks, built via concatenate rather than tile."""
    constants = []
    if 0 < self.dropout_U < 1:
        col = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.concatenate([col] * self.output_dim, 1)
        constants.append([K.in_train_phase(K.dropout(mask, self.dropout_U), mask)
                          for _ in range(3)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 3)
    if 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        col = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        mask = K.concatenate([col] * input_dim, 1)
        constants.append([K.in_train_phase(K.dropout(mask, self.dropout_W), mask)
                          for _ in range(3)])
    else:
        constants.append([K.cast_to_floatx(1.)] * 3)
    return constants
def get_constants(self, x):
    """Two dropout masks per kind (GRU-style pair of gates)."""
    def gate_masks(width, rate):
        base = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        return [K.in_train_phase(K.dropout(base, rate), base)
                for _ in range(2)]
    constants = []
    if 0 < self.dropout_U < 1:
        constants.append(gate_masks(self.output_dim, self.dropout_U))
    else:
        constants.append([K.cast_to_floatx(1.)] * 2)
    if 0 < self.dropout_W < 1:
        constants.append(gate_masks(int(K.int_shape(x)[-1]), self.dropout_W))
    else:
        constants.append([K.cast_to_floatx(1.)] * 2)
    return constants
def get_constants(self, x):
    """Single dropout masks over the hidden recurrent width; B_W in 'cpu' mode only."""
    constants = []
    if 0 < self.dropout_U < 1:
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, self.hidden_recurrent_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_U), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_dim = self.input_spec[0].shape[-1]
        m = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        m = K.tile(m, (1, input_dim))
        constants.append(K.in_train_phase(K.dropout(m, self.dropout_W), m))
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
def get_constants(self, x):
    """Four dropout masks per kind (recurrent then input).

    NOTE(review): the recurrent mask is tiled to self.input_dim rather
    than a units/output dimension — preserved as-is; verify against the
    widths used in step().
    """
    def gate_masks(width, rate):
        base = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        return [K.in_train_phase(K.dropout(base, rate), base)
                for _ in range(4)]
    constants = []
    if 0 < self.dropout_U < 1:
        constants.append(gate_masks(self.input_dim, self.dropout_U))
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    if 0 < self.dropout_W < 1:
        constants.append(gate_masks(int(K.int_shape(x)[-1]), self.dropout_W))
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    return constants
def get_constants(self, inputs, training=None):
    """Dropout is not applied in this layer: return three no-op 1.0 masks."""
    return [[K.cast_to_floatx(1.) for _ in range(3)]]
def get_constants(self, inputs, training=None):
    """Input- and recurrent-dropout masks (one mask each, not per gate)."""
    def make_mask(width, rate):
        base = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        def dropped():
            return K.dropout(base, rate)
        return K.in_train_phase(dropped, base, training=training)
    constants = []
    if 0 < self.dropout < 1:
        constants.append(make_mask(int(K.int_shape(inputs)[-1]), self.dropout))
    else:
        constants.append(K.cast_to_floatx(1.))
    if 0 < self.recurrent_dropout < 1:
        constants.append(make_mask(self.units, self.recurrent_dropout))
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
# Aliases
def get_constants(self, inputs, training=None):
    """Five-way input/recurrent dropout masks for this cell.

    Input masks are skipped for implementation 0, which handles input
    dropout elsewhere.
    """
    def gate_masks(width, rate):
        base = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        def dropped():
            return K.dropout(base, rate)
        return [K.in_train_phase(dropped, base, training=training)
                for _ in range(5)]
    constants = []
    if self.implementation != 0 and 0 < self.dropout < 1:
        constants.append(gate_masks(int(K.int_shape(inputs)[-1]), self.dropout))
    else:
        constants.append([K.cast_to_floatx(1.)] * 5)
    if 0 < self.recurrent_dropout < 1:
        constants.append(gate_masks(self.units, self.recurrent_dropout))
    else:
        constants.append([K.cast_to_floatx(1.)] * 5)
    return constants
def __init__(self, l1=0., l2=0., **kwargs):
    """One-dimensional activity regularizer: l1/l2 become floatx scalars."""
    self.l1, self.l2 = K.cast_to_floatx(l1), K.cast_to_floatx(l2)
    self.uses_learning_phase = True
    super(ActivityRegularizerOneDim, self).__init__(**kwargs)
def __init__(self, l1=0., l2=0., **kwargs):
    """One-dimensional activity regularizer: l1/l2 become floatx scalars."""
    self.l1, self.l2 = K.cast_to_floatx(l1), K.cast_to_floatx(l2)
    self.uses_learning_phase = True
    super(ActivityRegularizerOneDim, self).__init__(**kwargs)
def get_constants(self, x):
    """No-op dropout masks plus the fixed per-bin frequency vector."""
    constants = [
        [K.cast_to_floatx(1.) for _ in range(6)],
        [K.cast_to_floatx(1.) for _ in range(7)],
    ]
    # Evenly spaced frequencies ii / freq_dim for ii in [0, freq_dim).
    freqs = np.array([float(ii) / self.freq_dim for ii in range(self.freq_dim)])
    constants.append([K.cast_to_floatx(freqs)])
    return constants
def test_clip():
    """constraints.clip must bound |values| by its max_value (default 0.01)."""
    default_clip = constraints.clip()
    clipped = default_clip(K.variable(example_array))
    assert np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.01)
    wider_clip = constraints.clip(0.1)
    clipped = wider_clip(K.variable(example_array))
    assert np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.1)
def get_constants(self, x):
    """Dropout masks for the four controller gates: recurrent (B_U) then input (B_W).

    Cleanup: removed the leftover debug ``print`` statements and the dead
    commented-out B_R (read-vector dropout) branch; re-add the latter if
    ``dropout_R`` support is ever implemented.
    """
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.controller_output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        # Input width comes from the declared input spec.
        input_dim = self.input_spec[0].shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
def __init__(self, gamma=0., axis=1, division_idx=None):
    """Regularizer config: gamma as a floatx scalar; axis stored as a one-item list."""
    self.gamma = K.cast_to_floatx(gamma)
    self.axis = [axis]
    self.division_idx = division_idx
def __init__(self, gamma=1., lam=10., axis='last'):
    """Store gamma and lam as backend floatx scalars; axis kept as given."""
    self.gamma, self.lam = K.cast_to_floatx(gamma), K.cast_to_floatx(lam)
    self.axis = axis
def __init__(self, l1=0., l2=0., axis=0):
    """L1/L2 regularization factors (floatx scalars) applied along `axis`."""
    self.l1, self.l2 = map(K.cast_to_floatx, (l1, l2))
    self.axis = axis
def __init__(self, l1=0., l2=0., axis=0):
    """L1/L2 factors as floatx scalars; axis wrapped in a one-item list."""
    self.l1, self.l2 = map(K.cast_to_floatx, (l1, l2))
    self.axis = [axis]
def __init__(self, TV=0., TV2=0., axes=[0, 1]):
    """Total-variation (TV) and second-order TV (TV2) weights; axes copied to a new list."""
    self.TV, self.TV2 = map(K.cast_to_floatx, (TV, TV2))
    self.axes = list(axes)
def __init__(self, l1=0., l2=0., **kwargs):
    """One-dimensional activity regularizer: l1/l2 become floatx scalars."""
    self.l1, self.l2 = K.cast_to_floatx(l1), K.cast_to_floatx(l2)
    self.uses_learning_phase = True
    super(ActivityRegularizerOneDim, self).__init__(**kwargs)
def iou(x_true,y_true,w_true,h_true,x_pred,y_pred,w_pred,h_pred,t):
    """Average IoU and recall between predicted and ground-truth boxes.

    ``t`` is a boolean mask selecting grid cells that contain an object;
    ``side`` and ``overlap`` are module-level (grid size and a 1-D
    overlap helper) — assumed from usage, confirm against the module.
    NOTE(review): uses the long-removed ``tf.select`` — this targets an
    old TensorFlow; on TF >= 1.0 the equivalent is ``tf.where``.
    """
    # Per-cell grid offsets: x varies within a row, y within a column.
    xoffset = K.cast_to_floatx((np.tile(np.arange(side),side)))
    yoffset = K.cast_to_floatx((np.repeat(np.arange(side),side)))
    # Squash predictions with sigmoid and zero them in no-object cells.
    x = tf.select(t, K.sigmoid(x_pred), K.zeros_like(x_pred))
    y = tf.select(t, K.sigmoid(y_pred), K.zeros_like(y_pred))
    w = tf.select(t, K.sigmoid(w_pred), K.zeros_like(w_pred))
    h = tf.select(t, K.sigmoid(h_pred), K.zeros_like(h_pred))
    # 1-D overlaps along each axis; negatives mean no intersection.
    ow = overlap(x+xoffset, w*side, x_true+xoffset, w_true*side)
    oh = overlap(y+yoffset, h*side, y_true+yoffset, h_true*side)
    ow = tf.select(K.greater(ow,0), ow, K.zeros_like(ow))
    oh = tf.select(K.greater(oh,0), oh, K.zeros_like(oh))
    intersection = ow*oh
    union = w*h*(side**2) + w_true*h_true*(side**2) - intersection + K.epsilon() # prevent div 0
    #
    # A detection counts as recalled when its IoU exceeds 0.5.
    recall_iou = intersection / union
    recall_t = K.greater(recall_iou, 0.5)
    recall_count = K.sum(tf.select(recall_t, K.ones_like(recall_iou), K.zeros_like(recall_iou)))
    #
    iou = K.sum(intersection / union, axis=1)
    obj_count = K.sum(tf.select(t, K.ones_like(x_true), K.zeros_like(x_true)) )
    ave_iou = K.sum(iou) / (obj_count)
    recall = recall_count / (obj_count)
    return ave_iou, recall, obj_count, intersection, union,ow,oh,x,y,w,h
# shape is (gridcells*(5+classes), )
def get_constants(self, inputs, training=None):
    """Four-way input/recurrent dropout masks.

    Input masks are built only for implementation 0; other
    implementations handle input dropout elsewhere.
    """
    def gate_masks(width, rate):
        base = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        base = K.tile(base, (1, width))
        def dropped():
            return K.dropout(base, rate)
        return [K.in_train_phase(dropped, base, training=training)
                for _ in range(4)]
    constants = []
    if self.implementation == 0 and 0 < self.dropout < 1:
        constants.append(gate_masks(int(K.int_shape(inputs)[-1]), self.dropout))
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    if 0 < self.recurrent_dropout < 1:
        constants.append(gate_masks(self.units, self.recurrent_dropout))
    else:
        constants.append([K.cast_to_floatx(1.)] * 4)
    return constants
def step(self, x, states):
    """One State-Frequency LSTM step.

    ``states`` carries [p, h, S_re, S_im, time] plus the constants
    [B_U, B_W, frequency] appended by get_constants(). Returns the
    output projection ``p`` and the updated carried states.
    """
    p_tm1 = states[0]      # previous output projection (not read below)
    h_tm1 = states[1]      # previous hidden state
    S_re_tm1 = states[2]   # real part of the state-frequency memory
    S_im_tm1 = states[3]   # imaginary part of the state-frequency memory
    time_tm1 = states[4]   # step counter
    B_U = states[5]        # recurrent dropout masks
    B_W = states[6]        # input dropout masks
    frequency = states[7]  # fixed per-bin frequencies
    # Input contributions to each gate (all reuse the first mask B_W[0]).
    x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
    x_ste = K.dot(x * B_W[0], self.W_ste) + self.b_ste
    x_fre = K.dot(x * B_W[0], self.W_fre) + self.b_fre
    x_c = K.dot(x * B_W[0], self.W_c) + self.b_c
    x_o = K.dot(x * B_W[0], self.W_o) + self.b_o
    i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
    ste = self.inner_activation(x_ste + K.dot(h_tm1 * B_U[0], self.U_ste))
    fre = self.inner_activation(x_fre + K.dot(h_tm1 * B_U[0], self.U_fre))
    # Joint forget gate as an outer product: (batch, hidden, 1) * (batch, 1, freq).
    ste = K.reshape(ste, (-1, self.hidden_dim, 1))
    fre = K.reshape(fre, (-1, 1, self.freq_dim))
    f = ste * fre
    c = i * self.activation(x_c + K.dot(h_tm1 * B_U[0], self.U_c))
    time = time_tm1 + 1
    # Fourier basis evaluated at the current time step.
    omega = K.cast_to_floatx(2*np.pi)* time * frequency
    re = T.cos(omega)
    im = T.sin(omega)
    c = K.reshape(c, (-1, self.hidden_dim, 1))
    S_re = f * S_re_tm1 + c * re
    S_im = f * S_im_tm1 + c * im
    # Squared amplitude of the complex memory, flattened for the projection.
    A = K.square(S_re) + K.square(S_im)
    A = K.reshape(A, (-1, self.freq_dim))
    A_a = K.dot(A * B_U[0], self.U_a)
    A_a = K.reshape(A_a, (-1, self.hidden_dim))
    a = self.activation(A_a + self.b_a)
    o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[0], self.U_o))
    h = o * a
    p = K.dot(h, self.W_p) + self.b_p
    return p, [p, h, S_re, S_im, time]
def step(self, x, states):
    """One State-Frequency LSTM step.

    ``states`` carries [p, h, S_re, S_im, time] plus the constants
    [B_U, B_W, frequency] appended by get_constants(). Returns the
    output projection ``p`` and the updated carried states.
    """
    p_tm1 = states[0]      # previous output projection (not read below)
    h_tm1 = states[1]      # previous hidden state
    S_re_tm1 = states[2]   # real part of the state-frequency memory
    S_im_tm1 = states[3]   # imaginary part of the state-frequency memory
    time_tm1 = states[4]   # step counter
    B_U = states[5]        # recurrent dropout masks
    B_W = states[6]        # input dropout masks
    frequency = states[7]  # fixed per-bin frequencies
    # Input contributions to each gate (all reuse the first mask B_W[0]).
    x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
    x_ste = K.dot(x * B_W[0], self.W_ste) + self.b_ste
    x_fre = K.dot(x * B_W[0], self.W_fre) + self.b_fre
    x_c = K.dot(x * B_W[0], self.W_c) + self.b_c
    x_o = K.dot(x * B_W[0], self.W_o) + self.b_o
    i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
    ste = self.inner_activation(x_ste + K.dot(h_tm1 * B_U[0], self.U_ste))
    fre = self.inner_activation(x_fre + K.dot(h_tm1 * B_U[0], self.U_fre))
    # Joint forget gate as an outer product: (batch, hidden, 1) * (batch, 1, freq).
    ste = K.reshape(ste, (-1, self.hidden_dim, 1))
    fre = K.reshape(fre, (-1, 1, self.freq_dim))
    f = ste * fre
    c = i * self.activation(x_c + K.dot(h_tm1 * B_U[0], self.U_c))
    time = time_tm1 + 1
    # Fourier basis evaluated at the current time step.
    omega = K.cast_to_floatx(2*np.pi)* time * frequency
    re = T.cos(omega)
    im = T.sin(omega)
    c = K.reshape(c, (-1, self.hidden_dim, 1))
    S_re = f * S_re_tm1 + c * re
    S_im = f * S_im_tm1 + c * im
    # Squared amplitude of the complex memory, flattened for the projection.
    A = K.square(S_re) + K.square(S_im)
    A = K.reshape(A, (-1, self.freq_dim))
    A_a = K.dot(A * B_U[0], self.U_a)
    A_a = K.reshape(A_a, (-1, self.hidden_dim))
    a = self.activation(A_a + self.b_a)
    o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[0], self.U_o))
    h = o * a
    p = K.dot(h, self.W_p) + self.b_p
    return p, [p, h, S_re, S_im, time]