Python numpy module: clip() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.clip().
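Before the project snippets, here is a minimal, self-contained sketch of what numpy.clip() does: values below the lower bound are raised to it, values above the upper bound are lowered to it, and everything in between passes through unchanged.

import numpy as np

a = np.array([-3.0, -0.5, 0.2, 0.9, 4.0])
# limit every element to the interval [0, 1]
print(np.clip(a, 0.0, 1.0))   # -> [0.  0.  0.2 0.9 1. ]

# the optional out= argument clips in place
np.clip(a, 0.0, 1.0, out=a)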
def clipped_linscale_img(img_array,
                         cap=255.0,
                         lomult=2.0,
                         himult=2.0):
    '''
    This clips the image between the values:
    [median(img_array) - lomult*stdev(img_array),
     median(img_array) + himult*stdev(img_array)]
    and returns a linearly scaled image using the cap given.
    '''
    img_med, img_stdev = np.median(img_array), np.std(img_array)
    clipped_linear_img = np.clip(img_array,
                                 img_med - lomult*img_stdev,
                                 img_med + himult*img_stdev)
    return cap*clipped_linear_img/(img_med + himult*img_stdev)
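A hedged usage sketch (the Gaussian test image below is invented for illustration): bright outliers are clipped to median + himult*stdev before the linear rescale, so a few hot pixels no longer dominate the output range.

import numpy as np

img = np.random.normal(100.0, 10.0, size=(64, 64))
img[0, 0] = 10000.0            # a hot pixel
scaled = clipped_linscale_img(img, cap=255.0)
print(scaled.max())            # close to 255; the outlier was clipped first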
def idle(self):
    """Updates the QLearning table, retrieves an action to be performed
    and checks for dead-ends."""
    state = NavigationState(self.perception_)
    action = self.learning_model.update(state).action_

    if (all(s.imminent_collision for s in self.sensors['proximity']) or
            self.sensors['orientation'][0].is_lying_on_the_ground):
        # There's nothing left to do. Flag this as a dead-end.
        self.behavior_ = self.BEHAVIORS.stuck
    else:
        move_to = self.INSTRUCTIONS_MAP[action]

        if action < ACTIONS.left:
            # It's walking straight or backwards. Reduce the step size if
            # it's moving toward a close obstacle.
            dx = self.sensors['proximity'][action.index].distance
            move_to = np.clip(move_to, -dx, dx).tolist()

        self.motion.post.moveTo(move_to)
        self.behavior_ = self.BEHAVIORS.moving

    return self
def compHistDistance(h1, h2):
    def normalize(h):
        if np.sum(h) == 0:
            return h
        else:
            return h / np.sum(h)

    def smoothstep(x, x_min=0., x_max=1., k=2.):
        m = 1. / (x_max - x_min)
        b = -m * x_min
        x = m * x + b
        return betainc(k, k, np.clip(x, 0., 1.))

    def fn(X, Y, k):
        return 4. * (1. - smoothstep(Y, 0, (1 - Y) * X + Y + .1)) \
            * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) \
            + 2. * smoothstep(Y, 0, (1 - Y) * X + Y + .1) \
            * (1. - 2. * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) - 0.5)

    h1 = normalize(h1)
    h2 = normalize(h2)
    return max(0, np.sum(fn(h2, h1, len(h1))))
    # return np.sum(np.where(h2 != 0, h2 * np.log10(h2 / (h1 + 1e-10)), 0))  # KL divergence
def _step(self, action):
    # Clip xor Assert
    # actions = np.clip(actions, -self.joints_max_velocity, self.joints_max_velocity)
    # assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))

    # Actuate
    self._make_action(action)
    # self._make_action(action*self.joints_max_velocity)

    # Step
    self.step_simulation()

    # Observe
    self._make_observation()

    # Reward
    torso_pos_z = self.observation[0]   # up/down
    torso_lvel_x = self.observation[4]
    r_alive = 1.0
    reward = (16.0)*(r_alive) + (8.0)*(torso_lvel_x)

    # Early stop
    stand_threshold = 0.10
    done = (torso_pos_z < stand_threshold)

    return self.observation, reward, done, {}
def _build_graph(self, image_size):
    self.image_size = image_size
    self.images = tf.placeholder(tf.float32,
                                 shape=(None, image_size, image_size, 3))

    images_mini = tf.image.resize_images(self.images,
                                         size=(int(image_size/4),
                                               int(image_size/4)))
    self.images_blur = tf.image.resize_images(images_mini,
                                              size=(image_size, image_size))

    self.net = U_Net(output_ch=3, block_fn='origin')
    self.images_reconst = self.net(self.images_blur, reuse=False)
    # self.images_reconst can take values in (-inf, +inf), so clip it
    # before visualizing it as an image.
    self.loss = tf.reduce_mean((self.images_reconst - self.images)**2)
    self.opt = tf.train.AdamOptimizer()\
                       .minimize(self.loss, var_list=self.net.vars)

    self.saver = tf.train.Saver()
    self.sess.run(tf.global_variables_initializer())
def deprocess(img4d):
    img = img4d.copy()
    if K.image_dim_ordering() == "th":
        # (B, C, H, W)
        img = img.reshape((img4d.shape[1], img4d.shape[2], img4d.shape[3]))
        # (C, H, W) -> (H, W, C)
        img = img.transpose((1, 2, 0))
    else:
        # (B, H, W, C)
        img = img.reshape((img4d.shape[1], img4d.shape[2], img4d.shape[3]))
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    # BGR -> RGB
    img = img[:, :, ::-1]
    img = np.clip(img, 0, 255).astype("uint8")
    return img
########################### main ###########################
def forward(self, outputs, targets):
    """SoftmaxCategoricalCrossEntropy forward propagation.

    .. math:: L_i = - \\sum_j{t_{i,j} \\log(p_{i,j})}

    Parameters
    ----------
    outputs : numpy 2D array
        Predictions in (0, 1), such as softmax output of a neural network,
        with data points in rows and class probabilities in columns.
    targets : numpy 2D array
        Targets in [0, 1] matching the layout of `outputs`.

    Returns
    -------
    float
        The mean of the item-wise categorical cross-entropy.
    """
    outputs = np.clip(outputs, self.epsilon, 1 - self.epsilon)
    return np.mean(-np.sum(targets * np.log(outputs), axis=1))
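Why the clip matters here, as a standalone sketch (the epsilon value is chosen arbitrarily): clipping the predictions away from 0 and 1 keeps np.log from producing -inf on a confidently wrong prediction.

import numpy as np

eps = 1e-7                                  # illustrative epsilon
outputs = np.array([[1.0, 0.0]])            # overconfident softmax output
targets = np.array([[0.0, 1.0]])            # ... and it is wrong

clipped = np.clip(outputs, eps, 1 - eps)
loss = np.mean(-np.sum(targets * np.log(clipped), axis=1))
print(loss)   # a large but finite number (~16.1) instead of inf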
def backward(self, outputs, targets):
    """SoftmaxCategoricalCrossEntropy backward propagation.

    .. math:: dE = p - t

    Parameters
    ----------
    outputs : numpy 2D array
        Predictions in (0, 1), such as softmax output of a neural network,
        with data points in rows and class probabilities in columns.
    targets : numpy 2D array
        Targets in [0, 1] matching the layout of `outputs`.

    Returns
    -------
    numpy 2D array
        The gradient `outputs - targets`.
    """
    outputs = np.clip(outputs, self.epsilon, 1 - self.epsilon)
    return outputs - targets
def mouseDragEvent(self, ev):
    if self.movable and ev.button() == QtCore.Qt.LeftButton:
        if ev.isStart():
            self._moving = True
            self._cursorOffset = self._posToRel(ev.buttonDownPos())
            self._startPosition = self.orthoPos
        ev.accept()

        if not self._moving:
            return

        rel = self._posToRel(ev.pos())
        self.orthoPos = np.clip(self._startPosition + rel - self._cursorOffset, 0, 1)
        self.updatePosition()

        if ev.isFinish():
            self._moving = False
def test_rescaleData():
    # list() so the dtype collection can be iterated more than once
    dtypes = list(map(np.dtype, ('ubyte', 'uint16', 'byte', 'int16', 'int', 'float')))
    for dtype1 in dtypes:
        for dtype2 in dtypes:
            data = (np.random.random(size=10) * 2**32 - 2**31).astype(dtype1)
            for scale, offset in [(10, 0), (10., 0.), (1, -50), (0.2, 0.5), (0.001, 0)]:
                if dtype2.kind in 'iu':
                    lim = np.iinfo(dtype2)
                    lim = lim.min, lim.max
                else:
                    lim = (-np.inf, np.inf)
                s1 = np.clip(float(scale) * (data - float(offset)), *lim).astype(dtype2)
                s2 = pg.rescaleData(data, scale, offset, dtype2)
                assert s1.dtype == s2.dtype
                if dtype2.kind in 'iu':
                    assert np.all(s1 == s2)
                else:
                    assert np.allclose(s1, s2)
def map(self, data):
    data = data[self.fieldName]
    colors = np.empty((len(data), 4))
    default = np.array(fn.colorTuple(self['Default'])) / 255.
    colors[:] = default

    for v in self.param('Values'):
        mask = data == v.maskValue
        c = np.array(fn.colorTuple(v.value())) / 255.
        colors[mask] = c
    # scaled = np.clip((data-self['Min']) / (self['Max']-self['Min']), 0, 1)
    # cmap = self.value()
    # colors = cmap.map(scaled, mode='float')

    # mask = np.isnan(data) | np.isinf(data)
    # nanColor = self['NaN']
    # nanColor = (nanColor.red()/255., nanColor.green()/255., nanColor.blue()/255., nanColor.alpha()/255.)
    # colors[mask] = nanColor

    return colors
def _aperture(self):
    """
    Determine aperture automatically under a variety of conditions.
    """
    iso = self.iso
    exp = self.exposure
    light = self.lightMeter

    try:
        # shutter-priority mode
        sh = self.shutter  # this raises RuntimeError if shutter has not
                           # been specified
        ap = 4.0 * (sh / (1./60.)) * (iso / 100.) * (2 ** exp) * (2 ** light)
        ap = np.clip(ap, 2.0, 16.0)
    except RuntimeError:
        # program mode; we can select a suitable shutter
        # value at the same time.
        sh = (1./60.)
        raise
    return ap
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    https://www.kaggle.com/wiki/MultiClassLogLoss

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        true class, integers in [0, n_classes - 1]
    y_pred : array, shape = [n_samples, n_classes]

    Returns
    -------
    loss : float
    """
    predictions = np.clip(y_pred, eps, 1 - eps)

    # normalize row sums to 1
    predictions /= predictions.sum(axis=1)[:, np.newaxis]

    actual = np.zeros(y_pred.shape)
    n_samples = actual.shape[0]
    actual[np.arange(n_samples), y_true.astype(int)] = 1
    vectsum = np.sum(actual * np.log(predictions))
    loss = -1.0 / n_samples * vectsum
    return loss
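A tiny, made-up sanity check: well-calibrated predictions give a loss near zero, while one confident mistake dominates the average.

import numpy as np

y_true = np.array([0, 2, 1])
y_pred = np.array([[0.98, 0.01, 0.01],
                   [0.05, 0.05, 0.90],
                   [0.80, 0.10, 0.10]])   # third row is a confident mistake
print(multiclass_log_loss(y_true, y_pred))   # ~0.81, dominated by -log(0.10)/3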
def eval(name, clip=False, bar=0.9):
    base = pd.read_csv('../input/stage1_solution_filtered.csv')
    base['Class'] = np.argmax(base[['class%d' % i for i in range(1, 10)]].values, axis=1)
    sub = pd.read_csv(name)
    # sub = pd.merge(sub, base[['ID', 'Class']], on="ID", how='right')
    # print(sub.head())
    y = base['Class'].values
    yp = sub[['class%d' % i for i in range(1, 10)]].values
    if clip:
        yp = np.clip(yp, (1.0 - bar) / 8, bar)
        yp = yp / np.sum(yp, axis=1).reshape([yp.shape[0], 1])
    print(name, cross_entropy(y, yp), multiclass_log_loss(y, yp))
    for i in range(9):
        y1 = y[y == i]
        yp1 = yp[y == i]
        print(i, y1.shape, cross_entropy(y1, yp1), multiclass_log_loss(y1, yp1))
def random_saturation(img, label, lower=0.5, upper=1.5):
    """
    Multiplies saturation by a random constant and clips the result to [0, 1.0].

    Args:
        img: input image in float32
        label: returns label unchanged
        lower: lower val for sampling
        upper: upper val for sampling
    """
    alpha = lower + (upper - lower) * rand.rand()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # saturation should always be within [0, 1.0]
    hsv[:, :, 1] = np.clip(alpha * hsv[:, :, 1], 0.0, 1.0)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), label
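A hedged usage sketch (the random input image is invented; requires opencv-python). The image should already be float32 in [0, 1], so the HSV saturation channel lands in [0, 1] as the snippet assumes:

import cv2
import numpy as np
import numpy.random as rand   # the snippet above refers to it as `rand`

img = rand.rand(32, 32, 3).astype(np.float32)   # BGR image in [0, 1]
aug, label = random_saturation(img, label="unchanged")
print(aug.shape, aug.dtype)   # (32, 32, 3) float32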
def prior_cdf(self, u):
    """Inverse cumulative density function from Kroupa 2001b.

    output mass scaled to 0-1 interval
    min mass before scaling = 0.01
    """
    self._norm = 1. / self.kroupa_cdf(self._max_value, 1)
    if u < self.kroupa_cdf(0.08, self._norm):
        value = (u * (0.7) / self._norm * 0.08**(-0.3) +
                 0.01**0.7)**(1 / 0.7)
    elif u < self.kroupa_cdf(0.5, self._norm):
        value = (((u - (self._norm / 0.7 * 0.08**0.3 *
                        (0.08**0.7 - 0.01**0.7))) * (-0.3) / self._norm *
                  0.08**(-1.3) + 0.08**(-0.3))**(1 / -0.3))
    else:
        value = (((u - (self._norm / -0.3) * 0.08**1.3 *
                   (0.5**(-0.3) - 0.08**(-0.3)) -
                   (self._norm / 0.7 * 0.08**0.3 *
                    (0.08**0.7 - 0.01**0.7))) * -1.3 / self._norm *
                  0.5**(-2.3) * (6.25)**1.3 + 0.5**(-1.3))**(1 / -1.3))
    value = (value - self._min_value) / (self._max_value - self._min_value)
    # np.clip in case floating-point rounding in the line above
    # pushes the value slightly outside [0, 1]
    return np.clip(value, 0.0, 1.0)
def sample_crop(self, n):
    kx = np.array([len(x) for x in self.maps_with_class])
    class_hist = np.random.multinomial(n, self.class_probs * (kx != 0))
    class_ids = np.repeat(np.arange(class_hist.shape[0]), class_hist)
    X = []
    for class_id in class_ids:
        for i in range(20):
            random_image_idx = np.random.choice(self.maps_with_class[class_id])
            if random_image_idx < 25:
                break
        x = self.kde_samplers[random_image_idx][class_id].sample()[0]
        x /= self.mask_size
        x = np.clip(x, 0., 1.)
        return x, class_id, random_image_idx
        # NOTE: the early return above (present in the source) makes the
        # remaining lines unreachable.
        X.append(x)
    return X
def rel_crop(im, rel_cx, rel_cy, crop_size):
    map_size = im.shape[1]
    r = crop_size / 2
    abs_cx = rel_cx * map_size
    abs_cy = rel_cy * map_size
    na = np.floor([abs_cy - r, abs_cy + r, abs_cx - r, abs_cx + r]).astype(np.int32)
    a = np.clip(na, 0, map_size)
    px0 = a[2] - na[2]
    px1 = na[3] - a[3]
    py0 = a[0] - na[0]
    py1 = na[1] - a[1]
    crop = im[a[0]:a[1], a[2]:a[3]]
    crop = np.pad(crop, ((py0, py1), (px0, px1), (0, 0)),
                  mode='reflect')
    assert crop.shape == (crop_size, crop_size, im.shape[2])
    return crop
def deprocess_and_save(x, img_path):
    # Remove the batch dimension
    x = np.squeeze(x)

    # Restore the mean values on each channel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68

    # BGR --> RGB
    x = x[:, :, ::-1]

    # Clip unprintable colours
    x = np.clip(x, 0, 255).astype('uint8')

    # Save the image
    imsave(img_path, x)
def _load_dataset_clipping(self, dataset_dir, epsilon):
    """Helper method which loads dataset and determines clipping range.

    Args:
      dataset_dir: location of the dataset.
      epsilon: maximum allowed size of adversarial perturbation.
    """
    self.dataset_max_clip = {}
    self.dataset_min_clip = {}
    self._dataset_image_count = 0
    for fname in os.listdir(dataset_dir):
        if not fname.endswith('.png'):
            continue
        image_id = fname[:-4]
        image = np.array(
            Image.open(os.path.join(dataset_dir, fname)).convert('RGB'))
        image = image.astype('int32')
        self._dataset_image_count += 1
        self.dataset_max_clip[image_id] = np.clip(image + epsilon,
                                                  0,
                                                  255).astype('uint8')
        self.dataset_min_clip[image_id] = np.clip(image - epsilon,
                                                  0,
                                                  255).astype('uint8')
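The clipping logic in isolation, as a standalone sketch with made-up pixel values: the per-image min/max arrays bound any adversarial example to an L-infinity ball of radius epsilon, intersected with the valid [0, 255] range.

import numpy as np

image = np.array([[0, 10, 250]], dtype=np.int32)
epsilon = 8
lo = np.clip(image - epsilon, 0, 255).astype('uint8')
hi = np.clip(image + epsilon, 0, 255).astype('uint8')
print(lo, hi)   # [[  0   2 242]] [[  8  18 255]]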
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack
def ExpM(self):
    """
    Approximate a signal via element-wise exponentiation. As appears in:
    S.I. Mimilakis, K. Drossos, T. Virtanen, and G. Schuller,
    "Deep Neural Networks for Dynamic Range Compression in Mastering Applications,"
    in proc. of the 140th Audio Engineering Society Convention, Paris, 2016.

    Args:
        sTarget: (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component
    Returns:
        mask: (2D ndarray) Array that contains time-frequency gain values
    """
    print('Exponential mask')
    self._mask = np.divide(np.log(self._sTarget.clip(self._eps, np.inf)**self._alpha),
                           np.log(self._nResidual.clip(self._eps, np.inf)**self._alpha))
def puzzle_plot(p):
    p.setup()

    def name(template):
        return template.format(p.__name__)

    from itertools import islice
    configs = list(islice(p.generate_configs(9), 1000))   # be careful, islice is not immutable!!!
    import numpy.random as random
    random.shuffle(configs)
    configs = configs[:10]
    puzzles = p.generate(configs, 3, 3)
    print(puzzles.shape, "mean", puzzles.mean(), "stdev", np.std(puzzles))
    plot_image(puzzles[-1], name("{}.png"))
    plot_image(np.clip(puzzles[-1] + np.random.normal(0, 0.1, puzzles[-1].shape), 0, 1),
               name("{}+noise.png"))
    plot_image(np.round(np.clip(puzzles[-1] + np.random.normal(0, 0.1, puzzles[-1].shape), 0, 1)),
               name("{}+noise+round.png"))
    plot_grid(puzzles, name("{}s.png"))
    _transitions = p.transitions(3, 3, configs=configs)
    print(_transitions.shape)
    transitions_for_show = \
        np.einsum('ba...->ab...', _transitions) \
          .reshape((-1,) + _transitions.shape[2:])
    print(transitions_for_show.shape)
    plot_grid(transitions_for_show, name("{}_transitions.png"))
def test_simple_nonnative(self):
    # Test non native double input with scalar min/max.
    # Test native double input with non native double scalar min/max.
    a = self._generate_non_native_data(self.nr, self.nc)
    m = -0.5
    M = 0.6
    ac = self.fastclip(a, m, M)
    act = self.clip(a, m, M)
    assert_array_equal(ac, act)

    # Test native double input with non native double scalar min/max.
    a = self._generate_data(self.nr, self.nc)
    m = -0.5
    M = self._neg_byteorder(0.6)
    assert_(not M.dtype.isnative)
    ac = self.fastclip(a, m, M)
    act = self.clip(a, m, M)
    assert_array_equal(ac, act)
def test_simple_complex(self):
    # Test native complex input with native double scalar min/max.
    # Test native input with complex double scalar min/max.
    a = 3 * self._generate_data_complex(self.nr, self.nc)
    m = -0.5
    M = 1.
    ac = self.fastclip(a, m, M)
    act = self.clip(a, m, M)
    assert_array_strict_equal(ac, act)

    # Test native input with complex double scalar min/max.
    a = 3 * self._generate_data(self.nr, self.nc)
    m = -0.5 + 1.j
    M = 1. + 2.j
    ac = self.fastclip(a, m, M)
    act = self.clip(a, m, M)
    assert_array_strict_equal(ac, act)
def clip(val, minval, maxval):
    if val > HUGE_VALUE:
        val = HUGE_VALUE
    if val < EPSILON:
        val = EPSILON
    if val < minval:
        return minval
    if val > maxval:
        return maxval
    return val
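A hedged usage note for the scalar clip() helper above: HUGE_VALUE and EPSILON are module-level constants in the source; the values below are invented purely to make the sketch runnable. Note that the EPSILON floor is applied before the [minval, maxval] range.

HUGE_VALUE = 1e30     # invented for illustration
EPSILON = 1e-12       # invented for illustration

print(clip(0.0, 0.5, 2.0))   # -> 0.5 (floored to EPSILON, then raised to minval)
print(clip(5.0, 0.5, 2.0))   # -> 2.0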
def _clip(self, action):
    maxs = self.env.action_space.high
    mins = self.env.action_space.low
    if isinstance(action, np.ndarray):
        np.clip(action, mins, maxs, out=action)
    elif isinstance(action, list):
        for i in range(len(action)):
            action[i] = clip(action[i], mins[i], maxs[i])
    else:
        action = clip(action, mins[0], maxs[0])
    return action
def __init__(self, env, shape, clip=10.0, update_freq=100):
    self.env = env
    self.clip = clip
    self.update_freq = update_freq
    self.count = 0
    self.sum = 0.0
    self.sum_sqr = 0.0
    self.mean = np.zeros(shape, dtype=np.double)
    self.std = np.ones(shape, dtype=np.double)

def _update(self):
    self.mean = self.sum / self.count
    self.std = self.sum_sqr / self.count - self.mean**2
    self.std = np.clip(self.std, 1e-2, 1e9)**0.5
def normalize(self, new_state):
    # Update
    self.count += 1
    self.sum += new_state
    self.sum_sqr += new_state**2
    # NOTE: the trailing "and False" makes this branch unreachable,
    # so the running statistics are never refreshed here.
    if self.count % self.update_freq == 0 and False:
        self._update()

    # Normalize
    new_state = new_state - self.mean
    new_state = new_state / self.std
    new_state = np.clip(new_state, -self.clip, self.clip)
    return new_state
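The pattern this normalizer implements, as a standalone hedged sketch (all numbers invented): standardize by a running mean/std, then clip to a fixed band so a single outlier observation cannot blow up the downstream network input.

import numpy as np

mean = np.array([0.0, 1.0, -2.0])
std = np.array([1.0, 0.5, 2.0])
clip = 5.0

new_state = np.array([100.0, 1.2, -2.5])   # one wild outlier
z = (new_state - mean) / std
print(np.clip(z, -clip, clip))             # -> [ 5.    0.4  -0.25]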
def random_channel_shift(x, intensity, channel_axis=0):
    x = np.rollaxis(x, channel_axis, 0)
    min_x, max_x = np.min(x), np.max(x)
    channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
                      for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x
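A quick hedged check (random data invented): each channel gets its own uniform shift, and the clip keeps the result inside the original value range of the image.

import numpy as np

x = np.random.rand(3, 8, 8)   # channels-first image
shifted = random_channel_shift(x, intensity=0.2, channel_axis=0)
assert shifted.shape == x.shape
assert shifted.min() >= x.min() and shifted.max() <= x.max()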
def saveFinalPlots(self, errors_train, errors_test, sparsity_train, sparsity_test,
                   errors_train_vector, errors_test_vector, epoch=0):
    # plot errors
    plt.figure(2, figsize=(10, 7))
    plt.clf()
    plt.plot(np.arange(len(errors_train)), errors_train, label='train error')
    plt.plot(np.arange(len(errors_train)), errors_test, label='test error')
    plt.colors()
    plt.legend()
    plt.title('Reconstruction error convergence')
    plt.xlabel('t')
    plt.ylabel('Reconstruction error')
    plt.savefig('plots/Reconstruction_errors_' + str(epoch) + '.pdf')

    # plot sparsity, real and non-zero
    plt.figure(3, figsize=(10, 7))
    plt.clf()
    plt.plot(np.arange(len(sparsity_train)), sparsity_train, label='train error')
    plt.plot(np.arange(len(sparsity_test)), sparsity_test, label='test error')
    plt.colors()
    plt.legend()
    plt.title('Objective function error convergence')
    plt.xlabel('t')
    plt.ylabel('E')
    plt.savefig('plots/Sparsity_' + str(epoch) + '.pdf')

    # plot reconstruction error output progression over time
    plt.figure(12, figsize=(10, 7))
    plt.clf()
    image = plt.imshow(np.clip(np.asarray(errors_train_vector).T, 0, 1),
                       interpolation='nearest', aspect='auto', origin='lower')
    plt.xlabel('t')
    plt.ylabel('Output units \n (Rank Ordered)')
    plt.colors()
    plt.colorbar(image, label='reconstruction error')
    plt.title('Progressive reconstruction input error convergence')
    plt.savefig('plots/Reconstruction_errors_vector_' + str(epoch) + '.pdf')
def activation(self, X, out=None):
    return np.clip(X, 0, 1, out=out)

def clip(self, X, out=None):
    return np.clip(X, -1, 1, out=out)
def forward_prop(self):
    # backprop
    self.output_error = np.sum(self.errors * self.weights, axis=0).reshape(1, -1)
    self.output_error /= self.weights.shape[0]
    self.output_error *= self.derivative(self.output_raw, self.output_error)

    # clip the gradient so the update cannot push an output past zero
    self.output_error[self.output_raw > 0] = \
        np.maximum(-self.output_raw[self.output_raw > 0], self.output_error[self.output_raw > 0])
    self.output_error[self.output_raw < 0] = \
        np.minimum(-self.output_raw[self.output_raw < 0], self.output_error[self.output_raw < 0])
def update_weights_final(self):
    # clip the gradient norm
    norm = np.sqrt(np.sum(self.gradient ** 2, axis=0))
    norm_check = norm > self.norm_limit
    self.gradient[:, norm_check] = (self.gradient[:, norm_check] / norm[norm_check]) * self.norm_limit

    # update weights
    self.weights += self.gradient * self.learning_rate

    # update output average for sorting weights
    self.output_average *= 0.99999
    self.output_average += self.output.ravel() * 0.00001
def _sample_noise_precision(self):
    prior_observations = .1 * self.batch_size
    shape = prior_observations + self.batch_size / 2
    rate = prior_observations / self._noise_precision_value + np.mean(self._target_loss_ema) / 2
    scale = 1. / rate
    sample = np.clip(np.random.gamma(shape, scale), 10., 1000.)
    return sample

def _sample_weights_precision(self):
    prior_observations = .1 * self.position_size
    shape = prior_observations + self.position_size / 2
    rate = prior_observations / self._weights_precision_value + np.mean(self._weight_norm_ema) / 2
    scale = 1. / rate
    sample = np.clip(np.random.gamma(shape, scale), .1, 10.)
    return sample
def _sample_weights(self, aim_error, accuracy_error):
    """Sample weights based on the error.

    Parameters
    ----------
    aim_error : np.ndarray
        The aim errors for each sample.
    accuracy_error : np.ndarray
        The accuracy errors for each sample.

    Returns
    -------
    weights : dict of np.ndarray
        The weights for each sample, keyed by error type.

    Notes
    -----
    This weighs samples based on their standard deviations above the mean,
    with some clipping.
    """
    aim_zscore = (aim_error - aim_error.mean()) / aim_error.std()
    aim_weight = np.clip(aim_zscore, 1, 4) / 4

    accuracy_zscore = (
        accuracy_error - accuracy_error.mean()
    ) / accuracy_error.std()
    accuracy_weight = np.clip(accuracy_zscore, 1, 4) / 4

    return {
        'aim_error': aim_weight,
        'accuracy_error': accuracy_weight,
    }
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """ Randomly jitter points. Jittering is per point.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, jittered batch of point clouds
    """
    B, N, C = batch_data.shape
    assert(clip > 0)
    jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)
    jittered_data += batch_data
    return jittered_data
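A hedged check with made-up data: since the Gaussian noise is clipped to [-clip, clip] before being added, no point can move farther than clip along any axis.

import numpy as np

batch = np.zeros((2, 1024, 3))   # dummy batch of point clouds
jittered = jitter_point_cloud(batch, sigma=0.01, clip=0.05)
assert np.abs(jittered - batch).max() <= 0.05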
def add_noise(x_clean, noise_factor):
    x = x_clean.copy()
    x_shape = x.shape
    x = x + noise_factor * 255 * (np.random.normal(loc=0.0, scale=1.0, size=x_shape) + 1) / 2
    x_noisy = np.clip(x, 0., 255.)
    return x_noisy
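A small hedged example (the array is invented): because of the (z + 1) / 2 shift, the noise term averages noise_factor * 127.5 and is only occasionally negative, and the final clip keeps every pixel in the valid [0, 255] range.

import numpy as np

x_clean = np.full((4, 4), 200.0)   # dummy bright image
x_noisy = add_noise(x_clean, noise_factor=0.5)
print(x_noisy.min(), x_noisy.max())   # always within [0, 255]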
# converts image list to a normed image list (used as input for NN)
def to_32F(image):
    if image.max() > 1.0:
        image = image / 255.0
    return np.clip(np.float32(image), 0, 1)
def to_8U(image):
    if image.max() <= 1.0:
        image = image * 255.0
    # clip before the uint8 cast; casting first would let
    # out-of-range values wrap around
    return np.uint8(np.clip(image, 0, 255))
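A hedged round-trip check of the two helpers (values invented): an 8-bit image survives to_32F followed by to_8U up to rounding.

import numpy as np

img8 = np.array([[0, 128, 255]], dtype=np.uint8)
img32 = to_32F(img8)   # float32 in [0, 1]
back = to_8U(img32)    # uint8 in [0, 255]
print(img32, back)     # back equals img8 up to rounding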
def applyColorAugmentation(self, img, std=0.55, gamma=2.5):
    '''Applies random color augmentation following [1]. An additional gamma
    transformation is added.

    [1] Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton. ImageNet
        Classification with Deep Convolutional Neural Networks. NIPS 2012.
    '''
    alpha = np.clip(np.random.normal(0, std, size=3), -1.3 * std, 1.3 * std)
    perturbation = self.data_evecs.dot((alpha * np.sqrt(self.data_evals)).T)
    gamma = 1.0 - sum(perturbation) / gamma
    return np.power(np.clip(img + perturbation, 0., 1.), gamma)
    # unreachable alternative kept from the source:
    # return np.clip(img + perturbation, 0., 1.)
def applyColorAugmentation(img, std=0.5):
    '''Applies random color augmentation following [1].

    [1] Alex Krizhevsky, Ilya Sutskever, Geoffrey E. Hinton. \
        ImageNet Classification with Deep Convolutional Neural Networks. \
        NIPS 2012.'''
    alpha = np.clip(np.random.normal(0, std, size=3), -2 * std, 2. * std)
    perturbation = sld_evecs.dot((alpha * np.sqrt(sld_evals)).T)
    gamma = 1.0 - sum(perturbation) / 3.
    return np.power(np.clip(img + perturbation, 0., 1.), gamma)
    # unreachable alternative kept from the source:
    # return np.clip(img + perturbation, 0., 1.)