Python keras.backend module: spatial_2d_padding() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use keras.backend.spatial_2d_padding().
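Before the project snippets, a minimal usage sketch of the call itself. This assumes the Keras 2 backend signature, where padding is a pair of (before, after) tuples per spatial axis; several snippets below use the older Keras 1 form, a single (rows, cols) tuple:

import numpy as np
from keras import backend as K

x = K.variable(np.zeros((1, 4, 4, 3)))                 # (batch, rows, cols, channels)
y = K.spatial_2d_padding(x, padding=((1, 1), (2, 2)))  # 1 row and 2 cols of zeros per side
print(K.int_shape(y))                                  # (1, 6, 8, 3)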
from keras import backend as K
from keras.layers import Lambda


def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross-channel normalization (LRN) in the
    original AlexNet.
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        # Pad the channel axis by `half` zeros on each side: move channels
        # last, pad, then move them back (Keras 1 padding signature).
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale
    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
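A hypothetical use of the returned layer. The names are illustrative; the slicing X[:, i:i+ch, :, :] implies Theano-style (channels, rows, cols) inputs, and the padding call inside f only works under the Keras 1 API the snippet targets:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(96, 27, 27))                 # (channels, rows, cols)
lrn = crosschannelnormalization(name="lrn_1")(inp)
model = Model(inp, lrn)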
# `panels`, `pad`, `wrap`, `preprocess` and `batch_swirl` are module-level
# names in the original project.
def generate_gpu(configs, **kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size
    def build():
        P = 2
        configs = Input(shape=(size * size,))  # shadows the outer array: a fresh symbolic input
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base, 1])
        states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
        states = K.squeeze(states, -1)
        return Model(configs, wrap(configs, states))
    return preprocess(batch_swirl(build().predict(configs, **kwargs)))
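The padding step above, in isolation. `pad` is a module-level constant in the original project, so a value of 1 is assumed here purely for illustration:

import numpy as np
from keras import backend as K

pad = 1
states = K.variable(np.zeros((2, 15, 15, 1)))   # (batch, rows, cols, 1)
states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
print(K.int_shape(states))                      # (2, 17, 17, 1)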
# Identical to generate_gpu except that the swirl distortion runs on the
# GPU (tensor_swirl) instead of on the CPU (batch_swirl).
def generate_gpu2(configs, **kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size
    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base, 1])
        states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
        states = K.squeeze(states, -1)
        states = tensor_swirl(states, radius=dim + 2 * pad * relative_swirl_radius, **swirl_args)
        return Model(configs, wrap(configs, states))
    return preprocess(build().predict(configs, **kwargs))
from itertools import product


def im2col(x, r, c):  # THEANO ONLY
    # Concatenate the r*c shifted views of the zero-padded input along the
    # channel axis (integer division keeps the padding valid under Python 3).
    if r == c == 1:
        return x
    x = K.spatial_2d_padding(x, padding=(r // 2, c // 2))
    v = []
    def last(i, w):
        i -= (w - 1)
        return i if i != 0 else None
    for i, j in product(range(r), range(c)):
        v.append(x[:, :, i:last(i, r), j:last(j, c)])
    return K.concatenate(v, axis=1)
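A hypothetical call, assuming the Theano backend and the Keras 1 padding form the snippet targets: for a (batch, ch, H, W) tensor and a 3x3 window, the nine shifted views are stacked along the channel axis:

import numpy as np
from keras import backend as K

x = K.variable(np.zeros((1, 2, 5, 5)))
cols = im2col(x, 3, 3)
print(K.eval(cols).shape)   # (1, 18, 5, 5): 2 channels x 9 window positions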
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the original
    Alexnet
    """
    def f(X):
        if K.image_dim_ordering() == 'tf':
            b, r, c, ch = X.get_shape()
        else:
            b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        scale = k
        if K.image_dim_ordering() == 'th':
            extra_channels = K.spatial_2d_padding(
                K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
            extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
            for i in range(n):
                scale += alpha * extra_channels[:, i:i + ch, :, :]
        if K.image_dim_ordering() == 'tf':
            extra_channels = K.spatial_2d_padding(
                K.permute_dimensions(square, (0, 3, 1, 2)), (half, 0))
            extra_channels = K.permute_dimensions(extra_channels, (0, 2, 3, 1))
            for i in range(n):
                scale += alpha * extra_channels[:, :, :, i:i + int(ch)]
        scale = scale ** beta
        return X / scale
    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
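The channel-axis padding trick both branches rely on, shown for 'tf' ordering with the Keras 2 padding spelling and the default channels_last data format (the snippet's (half, 0) is the Keras 1 equivalent); n = 5 is assumed, so half = 2:

import numpy as np
from keras import backend as K

square = K.variable(np.zeros((1, 8, 8, 5)))             # channels last
moved = K.permute_dimensions(square, (0, 3, 1, 2))      # channels into the row slot
padded = K.spatial_2d_padding(moved, padding=((2, 2), (0, 0)))
extra = K.permute_dimensions(padded, (0, 2, 3, 1))      # channels back last
print(K.int_shape(extra))                               # (1, 8, 8, 9)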
def _deconv(self, X, lname, d_switch, feat_map=None):
    o_width, o_height = self[lname].output_shape[-2:]
    # Get filter size
    f_width = self[lname].W_shape[2]
    f_height = self[lname].W_shape[3]
    # Compute padding needed (integer division; the original Python 2 code
    # relied on / returning an int here)
    i_width, i_height = X.shape[-2:]
    pad_width = (o_width - i_width + f_width - 1) // 2
    pad_height = (o_height - i_height + f_height - 1) // 2
    assert isinstance(pad_width, int), "Pad width size issue at layer %s" % lname
    assert isinstance(pad_height, int), "Pad height size issue at layer %s" % lname
    # Set to zero based on switch values
    X[d_switch[lname]] = 0
    # Get activation function
    activation = self[lname].activation
    X = activation(X)
    if feat_map is not None:
        # Keep only the requested feature map, and within it only the
        # single maximally activated location
        feat_map = int(feat_map)
        for i in range(X.shape[1]):
            if i != feat_map:
                X[:, i, :, :] = 0
        for i in range(X.shape[0]):
            iw, ih = np.unravel_index(
                X[i, feat_map, :, :].argmax(), X[i, feat_map, :, :].shape)
            m = np.max(X[i, feat_map, :, :])
            X[i, feat_map, :, :] = 0
            X[i, feat_map, iw, ih] = m
    # Get filters. No bias for now
    W = self[lname].W
    # Transpose and flip the filter for the backward (deconv) pass
    W = W.transpose([1, 0, 2, 3])
    W = W[:, :, ::-1, ::-1]
    # CUDNN for conv2d ?
    conv_out = K.T.nnet.conv2d(input=self.x, filters=W, border_mode='valid')
    # Add padding to get correct size
    pad = K.function([self.x], K.spatial_2d_padding(
        self.x, padding=(pad_width, pad_height), dim_ordering="th"))
    X_pad = pad([X])
    # Get Deconv output
    deconv_func = K.function([self.x], conv_out)
    X_deconv = deconv_func([X_pad])
    assert X_deconv.shape[-2:] == (o_width, o_height), \
        "Deconv output at %s has wrong size" % lname
    return X_deconv
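The padding step above, in isolation, with the Keras 2 spelling (the snippet itself uses the Keras 1 dim_ordering="th" argument); pad_width = pad_height = 1 is assumed for illustration:

import numpy as np
from keras import backend as K

x = K.variable(np.zeros((1, 3, 8, 8)))          # (batch, channels, rows, cols)
y = K.spatial_2d_padding(x, padding=((1, 1), (1, 1)),
                         data_format='channels_first')
print(K.int_shape(y))                           # (1, 3, 10, 10)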