Python numpy module: zeros_like() code examples
The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.zeros_like().
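As a baseline, a minimal standalone sketch of what numpy.zeros_like() does: it allocates a new zero-filled array with the same shape and dtype as its argument (the dtype can be overridden).

import numpy as np

a = np.arange(6, dtype=np.float32).reshape(2, 3)
z = np.zeros_like(a)                   # shape (2, 3), dtype float32, all zeros
zi = np.zeros_like(a, dtype=np.int32)  # same shape, overridden dtype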
def roi(img,vertices):
# blank mask:
mask = np.zeros_like(img)
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, 255)
# returning the image only where mask pixels are nonzero
masked = cv2.bitwise_and(img, mask)
return masked
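A usage sketch for roi(), assuming OpenCV (cv2) is available and a single-channel uint8 image, since the mask is filled with the scalar 255:

import numpy as np
import cv2

img = np.full((100, 100), 128, dtype=np.uint8)  # hypothetical test image
vertices = [np.array([[10, 90], [50, 10], [90, 90]], dtype=np.int32)]
cropped = roi(img, vertices)  # original pixels inside the triangle, zeros outside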
def roll_zeropad(a, shift, axis=None):
a = np.asanyarray(a)
if shift == 0: return a
if axis is None:
n = a.size
reshape = True
else:
n = a.shape[axis]
reshape = False
if np.abs(shift) > n:
res = np.zeros_like(a)
elif shift < 0:
shift += n
zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
res = np.concatenate((a.take(np.arange(n-shift,n), axis), zeros), axis)
else:
zeros = np.zeros_like(a.take(np.arange(n-shift,n), axis))
res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
if reshape:
return res.reshape(a.shape)
else:
return res
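A quick check of roll_zeropad() against np.roll (outputs verified by hand): shifted-in positions are zero-filled instead of wrapping around.

a = np.array([1, 2, 3, 4, 5])
roll_zeropad(a, 2)   # array([0, 0, 1, 2, 3]); np.roll(a, 2) gives [4, 5, 1, 2, 3]
roll_zeropad(a, -2)  # array([3, 4, 5, 0, 0])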
def test_op(self):
logits = np.random.randn(self.sequence_length, self.batch_size,
self.vocab_size)
logits = logits.astype(np.float32)
sequence_length = np.array([1, 2, 3, 4])
targets = np.random.randint(0, self.vocab_size,
[self.sequence_length, self.batch_size])
losses = seq2seq_losses.cross_entropy_sequence_loss(logits, targets,
sequence_length)
with self.test_session() as sess:
losses_ = sess.run(losses)
# Make sure all losses not past the sequence length are > 0
np.testing.assert_array_less(np.zeros_like(losses_[:1, 0]), losses_[:1, 0])
np.testing.assert_array_less(np.zeros_like(losses_[:2, 1]), losses_[:2, 1])
np.testing.assert_array_less(np.zeros_like(losses_[:3, 2]), losses_[:3, 2])
# Make sure all losses past the sequence length are 0
np.testing.assert_array_equal(losses_[1:, 0], np.zeros_like(losses_[1:, 0]))
np.testing.assert_array_equal(losses_[2:, 1], np.zeros_like(losses_[2:, 1]))
np.testing.assert_array_equal(losses_[3:, 2], np.zeros_like(losses_[3:, 2]))
def __init__(self, input_shape, output_shape):
self.input_shape = input_shape
self.input = np.zeros((output_shape[0], self.input_shape[0] * self.input_shape[1] *
self.input_shape[2]),dtype=np.float32)
self.output = np.zeros(output_shape, dtype=np.float32)
self.output_raw = np.zeros_like(self.output)
self.output_error = np.zeros_like(self.output)
self.output_average = np.zeros(self.output.shape[1], dtype=np.float32)
self.weights = np.random.normal(0, np.sqrt(2.0 / (self.output.shape[1] + self.input.shape[1])),
size=(self.input.shape[1], self.output.shape[1])).astype(np.float32)
self.gradient = np.zeros_like(self.weights)
self.reconstruction = np.zeros_like(self.weights)
self.errors = np.zeros_like(self.weights)
self.output_ranks = np.zeros(self.output.shape[1], dtype=np.int32)
self.learning_rate = 1
self.norm_limit = 0.1
def gen_batches(data, n_seqs, n_steps):
"""Create a generator that returns batches of size n_seqs x n_steps."""
characters_per_batch = n_seqs * n_steps
n_batches = len(data) // characters_per_batch
# Keep only enough characters to make full batches
data = data[:n_batches*characters_per_batch]
data = data.reshape([n_seqs, -1])
for n in range(0, data.shape[1], n_steps):
x = data[:, n:n+n_steps]
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y
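A small driver for gen_batches(), assuming data is a 1-D NumPy array (it must support .reshape). Note that y is x shifted left by one step, wrapping within the batch.

data = np.arange(20)
for x, y in gen_batches(data, n_seqs=2, n_steps=5):
    print(x.shape, y.shape)  # (2, 5) (2, 5), two batches in total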
#-------------------------------------------------------------------------------
# Parse commandline
#-------------------------------------------------------------------------------
def filter(self, p):
"""
Parameters
----------
p: NDArray
Filtering input which is 2D or 3D with format
HW or HWC
Returns
-------
ret: NDArray
        Filtering output with the same shape as the input
"""
p = to_32F(p)
if len(p.shape) == 2:
return self._Filter.filter(p)
elif len(p.shape) == 3:
channels = p.shape[2]
ret = np.zeros_like(p, dtype=np.float32)
for c in range(channels):
ret[:, :, c] = self._Filter.filter(p[:, :, c])
return ret
def _process_label(self, fn):
"""
TODO: Fix one-indexing to zero-index;
retained one-index due to uint8 constraint
"""
mat = loadmat(fn, squeeze_me=True)
_labels = mat['seglabel'].astype(np.uint8)
# _labels -= 1 # (move to zero-index)
labels = np.zeros_like(_labels)
for (idx, name) in enumerate(mat['names']):
        try:
            value = SUNRGBDDataset.target_hash[name]
        except KeyError:
            value = 0
mask = _labels == idx+1
labels[mask] = value
return self._pad_image(labels)
def recall_from_IoU(IoU, samples=500):
"""
    Compute recall as a function of IoU threshold (the recall-vs-IoU curve).
"""
if not (isinstance(IoU, list) or IoU.ndim == 1):
raise ValueError('IoU needs to be a list or 1-D')
iou = np.float32(IoU)
# Plot intersection over union
IoU_thresholds = np.linspace(0.0, 1.0, samples)
recall = np.zeros_like(IoU_thresholds)
for idx, IoU_th in enumerate(IoU_thresholds):
tp, relevant = 0, 0
inds, = np.where(iou >= IoU_th)
recall[idx] = len(inds) * 1.0 / len(IoU)
return recall, IoU_thresholds
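A worked call on four hypothetical detections (values verified by hand):

iou = np.array([0.9, 0.6, 0.4, 0.0])
recall, thresholds = recall_from_IoU(iou, samples=5)
# thresholds: [0.0, 0.25, 0.5, 0.75, 1.0]
# recall:     [1.0, 0.75, 0.5, 0.25, 0.0]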
# =====================================================================
# Generic utility functions for object recognition
# ---------------------------------------------------------------------
def reset_index(self):
"""Reset index to range based
"""
dfs = self.to_delayed()
sizes = np.asarray(compute(*map(delayed(len), dfs)))
prefixes = np.zeros_like(sizes)
prefixes[1:] = np.cumsum(sizes[:-1])
@delayed
def fix_index(df, startpos):
return df.set_index(np.arange(start=startpos,
stop=startpos + len(df),
dtype=np.intp))
outdfs = [fix_index(df, startpos)
for df, startpos in zip(dfs, prefixes)]
return from_delayed(outdfs)
def recoded_features(self, inputs, layer=-1, inverse_fn=ielu):
hidden = self.get_hidden_values(inputs, store=True, layer=layer).eval()
bench = self.get_reconstructed_input(np.zeros_like(hidden),
layer=layer).eval().squeeze()
if inverse_fn: ibench = inverse_fn(bench)
results = []
for h in range(hidden.shape[-1]):
hidden_h = np.zeros_like(hidden)
hidden_h[..., h] = hidden[..., h]
feature = self.get_reconstructed_input(hidden_h, layer=layer).eval().squeeze()
if inverse_fn:
iresult = inverse_fn(feature) - ibench
results.append(self.coders[0].coding(iresult).eval())
else:
results.append(feature - bench)
return np.array(results), bench
def zscore(x):
"""Computes the Z-score of a vector x. Removes the mean and divides by the
    standard deviation. Falls back to returning all zeros if the std is 0.
Parameters
----------
x: list of int
Input time-series
Returns
-------
z: list of float
Z-score normalized time-series
"""
mean = np.mean(x)
sd = np.std(x)
if sd == 0:
z = np.zeros_like(x)
else:
z = (x - mean)/sd
return z
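Quick sanity checks (np.std uses the population standard deviation, ddof=0):

zscore([1.0, 2.0, 3.0])  # array([-1.22474487, 0., 1.22474487])
zscore([5.0, 5.0, 5.0])  # std is 0, so the zeros_like fallback: array([0., 0., 0.])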
def _encode_observation(self, idx):
end_idx = idx + 1 # make noninclusive
start_idx = end_idx - self.frame_history_len
# this checks if we are using low-dimensional observations, such as RAM
# state, in which case we just directly return the latest RAM.
if len(self.obs.shape) == 2:
return self.obs[end_idx-1]
# if there weren't enough frames ever in the buffer for context
if start_idx < 0 and self.num_in_buffer != self.size:
start_idx = 0
for idx in range(start_idx, end_idx - 1):
if self.done[idx % self.size]:
start_idx = idx + 1
missing_context = self.frame_history_len - (end_idx - start_idx)
# if zero padding is needed for missing context
    # or we are on the boundary of the buffer
if start_idx < 0 or missing_context > 0:
frames = [np.zeros_like(self.obs[0]) for _ in range(missing_context)]
for idx in range(start_idx, end_idx):
frames.append(self.obs[idx % self.size])
return np.concatenate(frames, 2)
else:
        # this optimization has the potential to save about 30% compute time \o/
img_h, img_w = self.obs.shape[1], self.obs.shape[2]
return self.obs[start_idx:end_idx].transpose(1, 2, 0, 3).reshape(img_h, img_w, -1)
def do_batch_sampling(pp_unl, batch_size):
"""
    return batch_size items, sampled by uncertainty,
    each with a label sampled from the predicted probabilities
"""
n = pp_unl.shape[0]
uncertain = np.abs(pp_unl[:,0] - 0.5)
if n < batch_size:
batch_size = n
sam_weight = np.exp(20 * (1-uncertain))
items = np.random.choice(n, batch_size, replace = False, p = sam_weight*1.0 / np.sum(sam_weight) )
labels = np.zeros_like(items)
for (i,item) in enumerate(items):
if np.random.random() < pp_unl[item, 1]:
l = 1
else:
l = 0
labels[i] = l
return (items, labels)
def viterbi_decode(score, transition_params):
""" Adapted from Tensorflow implementation.
Decode the highest scoring sequence of tags outside of TensorFlow.
This should only be used at test time.
Args:
score: A [seq_len, num_tags] matrix of unary potentials.
transition_params: A [num_tags, num_tags] matrix of binary potentials.
Returns:
    viterbi: A [seq_len] list of integers containing the highest scoring tag
        indices.
viterbi_score: A float containing the score for the Viterbi sequence.
"""
trellis = numpy.zeros_like(score)
backpointers = numpy.zeros_like(score, dtype=numpy.int32)
trellis[0] = score[0]
for t in range(1, score.shape[0]):
v = numpy.expand_dims(trellis[t - 1], 1) + transition_params
trellis[t] = score[t] + numpy.max(v, 0)
backpointers[t] = numpy.argmax(v, 0)
viterbi = [numpy.argmax(trellis[-1])]
for bp in reversed(backpointers[1:]):
viterbi.append(bp[viterbi[-1]])
viterbi.reverse()
viterbi_score = numpy.max(trellis[-1])
return viterbi, viterbi_score
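A worked 3-step, 2-tag decode (numbers verified by hand); here staying in the same tag earns a transition bonus of 2.

import numpy
score = numpy.array([[4., 1.], [1., 3.], [0., 2.]])
transition_params = numpy.array([[2., 0.], [0., 2.]])
path, best = viterbi_decode(score, transition_params)
# path == [0, 1, 1], best == 11.0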
def __init__(self, syn0, syn1):
# Hyperparameters
self.__b1 = 0.9
self.__b2 = 0.999
        self.__learning_rate = 0.001
self.__epsilon = 1e-8
# initialize momentum
self.__m_syn0 = np.zeros_like(syn0)
self.__m_syn1 = np.zeros_like(syn1)
self.__v_syn0 = np.zeros_like(syn0)
self.__v_syn1 = np.zeros_like(syn1)
# get a copy of weight
self.__syn0 = copy.deepcopy(syn0)
self.__syn1 = copy.deepcopy(syn1)
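The constructor above only allocates the Adam state; a minimal sketch of the update step those buffers support (standard Adam with the same hyperparameters; adam_step and its argument names are hypothetical, not part of the original class):

def adam_step(w, grad, m, v, t, b1=0.9, b2=0.999, lr=0.001, eps=1e-8):
    m = b1 * m + (1 - b1) * grad     # first-moment (momentum) estimate
    v = b2 * v + (1 - b2) * grad**2  # second-moment estimate
    m_hat = m / (1 - b1**t)          # bias correction; t is the 1-based step count
    v_hat = v / (1 - b2**t)
    w = w - lr * m_hat / (np.sqrt(v_hat) + eps)
    return w, m, v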
def __init__(self, image_a, image_b, window_size=32, search_size=32, distance=16):
"""
Initialization of the class.
:param image_a: first image to be evaluated
:param image_b: second image to be evaluated
:param int window_size: size of the interrogation window on first image
:param int search_size: size of the search window on second image
        :param int distance: distance between the beginning of the first interrogation window and the second
"""
image_a, image_b = self._check_images(image_a, image_b)
self.grid_spec = GridSpec(image_a.shape, image_a.strides,
window_size, search_size, distance)
self._correlator = FFTCorrelator(window_size, search_size)
self._set_images(image_a, image_b)
self.u = np.zeros(self.grid_spec.get_grid_shape())
self.v = np.zeros_like(self.u)
self._grid_creator()
def segmentation(_image, typeOfFruit):
    #_denoisedImage = rank.median(_image, disk(2))
    #_elevationMap = sobel(_denoisedImage)
    _elevationMap = sobel(_image)
    _marker = np.zeros_like(_image)
    if typeOfFruit == 'Counting':
        _marker[_image < 1998] = 1
        _marker[_image > 61541] = 2
    elif typeOfFruit == 'Temperature':
        _marker[_image < 30] = 1
        _marker[_image > 150] = 2
    #_marker = rank.gradient(_denoisedImage, disk(5)) < 10
    #_marker = ndi.label(_marker)[0]
    #_elevationMap = rank.gradient(_denoisedImage, disk(2))
    _segmentation = watershed(_elevationMap, _marker)
    return _segmentation
def dbFun(_x,_original_vals, f):
db = DBSCAN(eps=0.3, min_samples=20).fit(_x)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
#print(labels)
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
#gettingCharacteristics(_x, core_samples_mask, labels, n_clusters_,
#_original_vals)
    print("Plotting clusters...")
plotCluster(_x, labels, core_samples_mask, n_clusters_, f)
return
##############################################################################################
# Plotting the cluster after the result of DBSCAN
def overlap_ratio(boxes1, boxes2):
# find intersection bbox
x_int_bot = np.maximum(boxes1[:, 0], boxes2[0])
x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[0] + boxes2[2])
y_int_bot = np.maximum(boxes1[:, 1], boxes2[1])
y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[1] + boxes2[3])
# find intersection area
dx = x_int_top - x_int_bot
dy = y_int_top - y_int_bot
area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))
# find union
area_union = boxes1[:,2] * boxes1[:,3] + boxes2[2] * boxes2[3] - area_int
# find overlap ratio
ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
return ratio
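A quick check with [x, y, w, h] boxes; the first candidate overlaps the query box, the second does not:

boxes1 = np.array([[0., 0., 10., 10.], [20., 20., 5., 5.]])
box2 = np.array([5., 5., 10., 10.])
overlap_ratio(boxes1, box2)  # array([0.14285714, 0.]): 25 / (100 + 100 - 25), then 0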
###########################################################################
# overlap_ratio of two bboxes #
###########################################################################
def overlap_ratio_pair(boxes1, boxes2):
# find intersection bbox
x_int_bot = np.maximum(boxes1[:, 0], boxes2[:, 0])
x_int_top = np.minimum(boxes1[:, 0] + boxes1[:, 2], boxes2[:, 0] + boxes2[:, 2])
y_int_bot = np.maximum(boxes1[:, 1], boxes2[:, 1])
y_int_top = np.minimum(boxes1[:, 1] + boxes1[:, 3], boxes2[:, 1] + boxes2[:, 3])
# find intersection area
dx = x_int_top - x_int_bot
dy = y_int_top - y_int_bot
area_int = np.where(np.logical_and(dx>0, dy>0), dx * dy, np.zeros_like(dx))
# find union
area_union = boxes1[:,2] * boxes1[:,3] + boxes2[:, 2] * boxes2[:, 3] - area_int
# find overlap ratio
ratio = np.where(area_union > 0, area_int/area_union, np.zeros_like(area_int))
return ratio
def get_Conductivity(XYZ, sig0, sig1, R):
"""
Define the conductivity for each point of the space
"""
x, y, z = XYZ[:, 0], XYZ[:, 1], XYZ[:, 2]
r_view = r(x, y, z)
ind0 = (r_view > R)
ind1 = (r_view <= R)
    assert (ind0 + ind1).all(), 'Some indices not included'
Sigma = np.zeros_like(x)
Sigma[ind0] = sig0
Sigma[ind1] = sig1
return Sigma
def E_field_from_SheetCurruent(XYZ, srcLoc, sig, t, E0=1., orientation='X', kappa=0., epsr=1.):
"""
Computing Analytic Electric fields from Plane wave in a Wholespace
TODO:
Add description of parameters
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (t.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single time can be specified.")
mu = mu_0*(1+kappa)
if orientation == "X":
z = XYZ[:, 2]
        bunja = -E0*(mu*sig)**0.5 * z * np.exp(-(mu*sig*z**2) / (4*t))  # numerator ("bunja")
        bunmo = 2 * np.pi**0.5 * t**1.5  # denominator ("bunmo")
        Ex = bunja / bunmo
Ey = np.zeros_like(z)
Ez = np.zeros_like(z)
return Ex, Ey, Ez
else:
raise NotImplementedError()
def H_field_from_SheetCurruent(XYZ, srcLoc, sig, t, E0=1., orientation='X', kappa=0., epsr=1.):
"""
Plane wave propagating downward (negative z (depth))
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (t.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single time can be specified.")
mu = mu_0*(1+kappa)
if orientation == "X":
z = XYZ[:, 2]
Hx = np.zeros_like(z)
Hy = E0 * np.sqrt(sig / (np.pi*mu*t))*np.exp(-(mu*sig*z**2) / (4*t))
Hz = np.zeros_like(z)
return Hx, Hy, Hz
else:
raise NotImplementedError()
def appres(F, H, sig, chg, taux, c, mu, eps, n):
Res = np.zeros_like(F)
Phase = np.zeros_like(F)
App_ImpZ= np.zeros_like(F, dtype='complex_')
for i in range(0, len(F)):
UD, EH, Z , K = Propagate(F[i], H, sig, chg, taux, c, mu, eps, n)
App_ImpZ[i] = EH[0, 1]/EH[1, 1]
Res[i] = np.abs(App_ImpZ[i])**2./(mu_0*omega(F[i]))
Phase[i] = np.angle(App_ImpZ[i], deg = True)
return Res, Phase
# Evaluate Up, Down components, E and H field, for a frequency range,
# a discretized depth range and a time range (use to calculate envelope)
def run(n, plotIt=True):
# something to make a plot
F = frange(-5., 5., 20)
H = thick(50., 100., n)
sign = sig(-5., 0., n)
mun = mu(1., 2., n)
epsn = eps(1., 9., n)
chg = np.zeros_like(sign)
taux = np.zeros_like(sign)
c = np.zeros_like(sign)
Res, Phase = appres(F, H, sign, chg, taux, c, mun, epsn, n)
if plotIt:
PlotAppRes(F, H, sign, chg, taux, c, mun, epsn, n, fenvelope=1000., PlotEnvelope=True)
return Res, Phase
def J_field_from_SheetCurruent(XYZ, srcLoc, sig, f, E0=1., orientation='X', kappa=0., epsr=1., t=0.):
"""
Plane wave propagating downward (negative z (depth))
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (f.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")
mu = mu_0*(1+kappa)
epsilon = epsilon_0*epsr
sig_hat = sig + 1j*omega(f)*epsilon
k = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig )
if orientation == "X":
z = XYZ[:,2]
Jx = sig*E0*np.exp(1j*(k*(z-srcLoc)+omega(f)*t))
Jy = np.zeros_like(z)
Jz = np.zeros_like(z)
return Jx, Jy, Jz
else:
raise NotImplementedError()
def H_field_from_SheetCurruent(XYZ, srcLoc, sig, f, E0=1., orientation='X', kappa=0., epsr=1., t=0.):
"""
Plane wave propagating downward (negative z (depth))
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (f.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")
mu = mu_0*(1+kappa)
epsilon = epsilon_0*epsr
sig_hat = sig + 1j*omega(f)*epsilon
k = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig )
Z = omega(f)*mu/k
if orientation == "X":
z = XYZ[:,2]
Hx = np.zeros_like(z)
Hy = E0/Z*np.exp(1j*(k*(z-srcLoc)+omega(f)*t))
Hz = np.zeros_like(z)
return Hx, Hy, Hz
else:
raise NotImplementedError()
def B_field_from_SheetCurruent(XYZ, srcLoc, sig, f, E0=1., orientation='X', kappa=0., epsr=1., t=0.):
"""
Plane wave propagating downward (negative z (depth))
"""
XYZ = Utils.asArray_N_x_Dim(XYZ, 3)
# Check
    if (XYZ.shape[0] > 1) and (f.shape[0] > 1):
        raise Exception("I/O type error: For multiple field locations only a single frequency can be specified.")
mu = mu_0*(1+kappa)
epsilon = epsilon_0*epsr
sig_hat = sig + 1j*omega(f)*epsilon
k = np.sqrt( omega(f)**2. *mu*epsilon -1j*omega(f)*mu*sig )
Z = omega(f)*mu/k
if orientation == "X":
z = XYZ[:,2]
Bx = mu*np.zeros_like(z)
By = mu*E0/Z*np.exp(1j*(k*(z-srcLoc)+omega(f)*t))
Bz = mu*np.zeros_like(z)
return Bx, By, Bz
else:
raise NotImplementedError()
def stup():
x0 = x[0]
init_cond = np.zeros_like(X)
for i in range(0, x_nods_quantity):
if 0 <= x0 < 0.3:
init_cond[i] = 0
x0 += h
elif(0.3 <= x0 <= 0.7):
init_cond[i] = 1
x0 += h
else:
init_cond[i] = 0
x0 += h
return init_cond
# step function ("stupenka") end
#different initial conditions end
# different transfer velocity
def iou_loss_val(p, t):
tp, tt = p.reshape((p.shape[0], 2, 2)), t.reshape((t.shape[0], 2, 2))
overlaps = np.zeros_like(tp, dtype=np.float32)
overlaps[:, 0, :] = np.maximum(tp[:, 0, :], tt[:, 0, :])
overlaps[:, 1, :] = np.minimum(tp[:, 1, :], tt[:, 1, :])
intersection = overlaps[:, 1, :] - overlaps[:, 0, :]
bool_overlap = np.min(intersection, axis=1) > 0
intersection = intersection[:, 0] * intersection[:, 1]
intersection = np.maximum(intersection, 0.)
# print "bool", bool_overlap
# print "Int", intersection
dims_p = tp[:, 1, :] - tp[:, 0, :]
areas_p = dims_p[:, 0] * dims_p[:, 1]
dims_t = tt[:, 1, :] - tt[:, 0, :]
areas_t = dims_t[:, 0] * dims_t[:, 1]
union = areas_p + areas_t - intersection
# print "un", union
loss = 1. - np.minimum(
np.exp(np.log(np.abs(intersection)) - np.log(np.abs(union) + 1e-5)),
1.
)
# print loss
return np.mean(loss)
def test_constant_network_with_tags_dry_run(self):
shape1 = loom.TypeShape('int64', (3,), 'alpha')
shape2 = loom.TypeShape('int64', (3,), 'beta')
value1 = np.array([1, 2, 3], dtype='int64')
value2 = np.array([4, 5, 6], dtype='int64')
ops = {'add1': BinaryLoomOp(shape1, tf.add),
'add2': BinaryLoomOp(shape2, tf.add)}
the_loom = loom.Loom(named_ops=ops, dry_run=True)
output_tensor1 = the_loom.output_tensor(shape1)
output_tensor2 = the_loom.output_tensor(shape2)
with self.test_session():
weaver = the_loom.make_weaver()
c1 = weaver(value1, tag='alpha')
c2 = weaver(value2, tag='beta')
result1 = output_tensor1.eval(
feed_dict=weaver.build_feed_dict([c2, c1]))
result2 = output_tensor2.eval(
feed_dict=weaver.build_feed_dict([c2, c1]))
zero_vec = np.zeros_like(value1)
self.assertTrue((result1[0] == zero_vec).all())
self.assertTrue((result2[0] == zero_vec).all())
def optimise_f2_thresholds(y, p, verbose=True, resolution=100):
def mf(x):
p2 = np.zeros_like(p)
for i in range(17):
            p2[:, i] = (p[:, i] > x[i]).astype(int)
score = fbeta_score(y, p2, beta=2, average='samples')
return score
x = [0.2] * 17
for i in range(17):
best_i2 = 0
best_score = 0
for i2 in range(resolution):
i2 /= resolution
x[i] = i2
score = mf(x)
if score > best_score:
best_i2 = i2
best_score = score
x[i] = best_i2
if verbose:
print(i, best_i2, best_score)
return x
def buildFock(self):
"""Routine to build the AO basis Fock matrix"""
if self.direct:
if self.incFockRst: # restart incremental fock build?
self.G = formPT(self.P,np.zeros_like(self.P),self.bfs,
self.nbasis,self.screen,self.scrTol)
self.G = 0.5*(self.G + self.G.T)
self.F = self.Core.astype('complex') + self.G
else:
self.G = formPT(self.P,self.P_old,self.bfs,self.nbasis,
self.screen,self.scrTol)
self.G = 0.5*(self.G + self.G.T)
self.F = self.F_old + self.G
else:
self.J = np.einsum('pqrs,sr->pq', self.TwoE.astype('complex'),self.P)
self.K = np.einsum('psqr,sr->pq', self.TwoE.astype('complex'),self.P)
self.G = 2.*self.J - self.K
self.F = self.Core.astype('complex') + self.G
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
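A hand-checked call: only class 2 of the first sample violates the margin, so the loss is 0.5 / N = 0.25.

x = np.array([[3.0, 1.5, 2.5], [1.0, 2.0, 3.0]])
y = np.array([0, 2])
loss, dx = svm_loss(x, y)
# loss == 0.25; dx == [[-0.5, 0.0, 0.5], [0.0, 0.0, 0.0]]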
def sgd_momentum(w, dw, config=None):
"""
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a moving
average of the gradients.
"""
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w))
next_v = config['momentum'] * v - config['learning_rate'] * dw
next_w = w + next_v
config['velocity'] = next_v
return next_w, config
def hls_select(image, thresh=(90, 255)):
    # 1) Convert to HLS color space
    hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    H = hls[:, :, 0]
    L = hls[:, :, 1]
    S = hls[:, :, 2]
    # 2) Apply a threshold to the S channel
    binary = np.zeros_like(S)
    binary[(S > thresh[0]) & (S <= thresh[1])] = 1
# 3) Return a binary image of threshold result
return binary
# Define a function that applies Sobel x and y,
# then computes the direction of the gradient
# and applies a threshold.
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Take the absolute value of the x and y gradients
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
# 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
absgraddir = np.arctan2(abs_sobely, abs_sobelx)
# 5) Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the gradient in x and y separately
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# 3) Calculate the magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# 5) Create a binary mask where mag thresholds are met
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
# 6) Return this mask as your binary_output image
return binary_output
# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=5, thresh_max=100
# should produce output like the example image shown above this quiz.
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
# Apply the following steps to img
# 1) Convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# 2) Take the derivative in x or y given orient = 'x' or 'y'
if orient == 'x':
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
if orient == 'y':
sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# 3) Take the absolute value of the derivative or gradient
abs_sobel = np.absolute(sobel)
# 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# 5) Create a mask of 1's where the scaled gradient magnitude
# is > thresh_min and < thresh_max
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# 6) Return this mask as your binary_output image
return binary_output
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def update_grad_input(self, x, grad_output, scale=1):
x_cols = self.x_cols
dout = grad_output
N, C, H, W = self.x_shape
pool_height, pool_width = self.kW, self.kH
stride = self.dW
pool_dim = pool_height * pool_width
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[:, np.arange(dx_cols.shape[1])] = 1. / pool_dim * dout_reshaped
dx = col2im_cython(dx_cols, N * C, 1, H, W, pool_height, pool_width,
padding=0, stride=stride)
self.grad_input = dx.reshape(self.x_shape)
return self.grad_input
def update_grad_input(self, x, grad_output, scale=1):
x_cols = self.x_cols
x_cols_argmax = self.x_cols_argmax
dout = grad_output
N, C, H, W = x.shape
pool_height, pool_width = self.kW, self.kH
stride = self.dW
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
dx = col2im_cython(dx_cols, N * C, 1, H, W, pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(self.x_shape)
self.grad_input = dx
return self.grad_input
def eval_numerical_gradient_array(f, x, df, h=1e-5):
'''
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
'''
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
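A typical use is checking an analytic gradient numerically. For f(x) = x**2 the backpropagated gradient of sum(f(x) * df) is 2 * x * df, so the two should agree to roughly 1e-8:

x = np.random.randn(4, 3)
df = np.random.randn(4, 3)
grad = eval_numerical_gradient_array(lambda z: z ** 2, x, df)
print(np.max(np.abs(grad - 2 * x * df)))  # ~1e-9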
def nesterov(w, dw, config=None):
'''
Performs stochastic gradient descent with nesterov momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a moving
average of the gradients.
'''
if config is None:
config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w, dtype=np.float64))
next_w = None
prev_v = v
v = config['momentum'] * v - config['learning_rate'] * dw
next_w = w - config['momentum'] * prev_v + (1 + config['momentum']) * v
config['velocity'] = v
return next_w, config
def sgd_momentum(w, dw, config=None):
'''
Performs stochastic gradient descent with momentum.
config format:
- learning_rate: Scalar learning rate.
- momentum: Scalar between 0 and 1 giving the momentum value.
Setting momentum = 0 reduces to sgd.
- velocity: A numpy array of the same shape as w and dw used to store a moving
average of the gradients.
'''
if config is None:
config = {}
config.setdefault('learning_rate', 1e-2)
config.setdefault('momentum', 0.9)
v = config.get('velocity', np.zeros_like(w))
next_w = None
v = config['momentum'] * v + config['learning_rate'] * dw
next_w = w - v
config['velocity'] = v
return next_w, config
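Two hand-checked steps with a constant gradient and the defaults above; the velocity accumulates, so the second step is larger than the first:

w = np.ones(3)
dw = np.array([1.0, 0.0, -1.0])
config = None
for _ in range(2):
    w, config = sgd_momentum(w, dw, config)
# w == [0.971, 1.0, 1.029]; config['velocity'] == [0.019, 0.0, -0.019]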
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_basic(self):
    dts = [np.bool_, np.int16, np.int32, np.int64, np.double, np.complex128,
           np.longdouble, np.clongdouble]
for dt in dts:
        c = np.ones(53, dtype=np.bool_)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])