Python numpy module: add() code examples
The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.add().
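Before the project snippets, a minimal refresher on what numpy.add does: it is the ufunc behind the + operator for arrays, it broadcasts its inputs, and it accepts an out= argument for writing results in place.

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([10, 20])

print(np.add(a, b))        # broadcasts b across rows: [[11 22] [13 24]]

out = np.empty_like(a)
np.add(a, b, out=out)      # writes the result into a preallocated array
print(np.add.reduce(a))    # the ufunc also provides reduce/accumulate: [4 6]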
def add_row(self, row_id):
    logger.debug('TreeCatTrainer.add_row %d', row_id)
    assert row_id not in self._added_rows, row_id
    self._added_rows.add(row_id)
    # These are used for scratch work, so we create them each step.
    np.add(self._vert_ss, self._vert_prior, out=self._vert_probs)
    np.add(self._feat_ss, self._feat_prior, out=self._feat_probs)
    np.add(self._meas_ss, self._meas_prior, out=self._meas_probs)
    treecat_add_row(
        self._table.feature_types,
        self._table.ragged_index,
        self._table.data[row_id, :],
        self._tree.tree_grid,
        self._program,
        self._assignments[row_id, :],
        self._vert_ss,
        self._edge_ss,
        self._feat_ss,
        self._meas_ss,
        self._vert_probs,
        self._edge_probs,
        self._feat_probs,
        self._meas_probs, )
def test_out_parameter(self):
    """ Test that the kwarg ``out`` is correctly passed to the reduction function """
    with self.subTest('axis = -1'):
        not_out = last(ireduce_ufunc(self.source, np.add, axis = -1))
        out = np.empty_like(self.source[0])
        last(ireduce_ufunc(self.source, ufunc = np.add, out = out))
        self.assertTrue(np.allclose(not_out, out))

    with self.subTest('axis != -1'):
        not_out = last(ireduce_ufunc(self.source, np.add, axis = 2))
        out = np.empty_like(self.source[0])
        from_out = last(ireduce_ufunc(self.source, ufunc = np.add, out = out, axis = 2))
        self.assertTrue(np.allclose(not_out, from_out))
def proc(csv_na, con):
    dicts = []
    for i in range(0, len(con)):
        dicts.append(dict())
    sum = 0
    f = csv.DictReader(open(csv_na))
    for rec in f:
        rec['single'] = '1'
        # print(csv_na, rec['clickTime'])
        label = int(rec['label'])
        for i in range(0, len(con)):
            k = rec[con[i][0]] + '#' + rec[con[i][1]]
            if dicts[i].__contains__(k):
                dicts[i][k] = np.add(dicts[i][k], [label, 1])
            else:
                dicts[i][k] = [label, 1]
        sum += 1
    return dicts, sum
def train(self, training_data_array):
    for data in training_data_array:
        # Forward propagation through the network
        y1 = np.dot(np.mat(self.theta1), np.mat(data.y0).T)
        sum1 = y1 + np.mat(self.input_layer_bias)
        y1 = self.sigmoid(sum1)
        y2 = np.dot(np.array(self.theta2), y1)
        y2 = np.add(y2, self.hidden_layer_bias)
        y2 = self.sigmoid(y2)
        # Back propagation: compute output and hidden-layer errors
        actual_vals = [0] * 10
        actual_vals[data.label] = 1
        output_errors = np.mat(actual_vals).T - np.mat(y2)
        hidden_errors = np.multiply(np.dot(np.mat(self.theta2).T, output_errors), self.sigmoid_prime(sum1))
        # Update the weights and biases with the gradients
        self.theta1 += self.LEARNING_RATE * np.dot(np.mat(hidden_errors), np.mat(data.y0))
        self.theta2 += self.LEARNING_RATE * np.dot(np.mat(output_errors), np.mat(y1).T)
        self.hidden_layer_bias += self.LEARNING_RATE * output_errors
        self.input_layer_bias += self.LEARNING_RATE * hidden_errors
def getComplexRepr(sequenza, K):
    """
    Builds the representation obtained by summing the individual
    representations into a single matrix. K is the list of k-mer sizes.
    """
    m_init = getMatrice(sequenza, K[0])
    dim = 2**K[-1]
    # out = np.zeros((dim, dim))
    for ki in K[1:]:
        m_init = espandiMatrice(m_init)
        # divide the entries by two to reduce the impact of shorter,
        # and therefore more common, terms
        m_init /= 2.0
        temp = getMatrice(sequenza, ki)
        m_init = np.add(m_init, temp)
    return m_init
def test_ufunc_override_normalize_signature(self):
    # 2016-01-29: NUMPY_UFUNC_DISABLED
    return

    # gh-5674
    class SomeClass(object):
        def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
            return kw

    a = SomeClass()
    kw = np.add(a, [1])
    assert_('sig' not in kw and 'signature' not in kw)
    kw = np.add(a, [1], sig='ii->i')
    assert_('sig' not in kw and 'signature' in kw)
    assert_equal(kw['signature'], 'ii->i')
    kw = np.add(a, [1], signature='ii->i')
    assert_('sig' not in kw and 'signature' in kw)
    assert_equal(kw['signature'], 'ii->i')
def test_forced_sig(self):
    a = 0.5*np.arange(3, dtype='f8')
    assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
    assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
    assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
    assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
    assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
                        casting='unsafe'), [0, 0, 1])

    b = np.zeros((3,), dtype='f8')
    np.add(a, 0.5, out=b)
    assert_equal(b, [0.5, 1, 1.5])
    b[:] = 0
    np.add(a, 0.5, sig='i', out=b, casting='unsafe')
    assert_equal(b, [0, 0, 1])
    b[:] = 0
    np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
    assert_equal(b, [0, 0, 1])
    b[:] = 0
    np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
    assert_equal(b, [0, 0, 1])
    b[:] = 0
    np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
    assert_equal(b, [0, 0, 1])
def test_safe_casting(self):
    # In old versions of numpy, in-place operations used the 'unsafe'
    # casting rules. In versions >= 1.10, 'same_kind' is the default
    # and an exception is raised instead of a warning when 'same_kind'
    # is not satisfied.
    a = np.array([1, 2, 3], dtype=int)
    # Non-in-place addition is fine
    assert_array_equal(assert_no_warnings(np.add, a, 1.1),
                       [2.1, 3.1, 4.1])
    assert_raises(TypeError, np.add, a, 1.1, out=a)

    def add_inplace(a, b):
        a += b

    assert_raises(TypeError, add_inplace, a, 1.1)
    # Make sure that explicitly overriding the exception is allowed:
    assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
    assert_array_equal(a, [2, 3, 4])
def _test_type_repr(self, t):
    finfo = np.finfo(t)
    last_fraction_bit_idx = finfo.nexp + finfo.nmant
    last_exponent_bit_idx = finfo.nexp
    storage_bytes = np.dtype(t).itemsize*8
    # could add some more types to the list below
    for which in ['small denorm', 'small norm']:
        # Values from http://en.wikipedia.org/wiki/IEEE_754
        constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
        if which == 'small denorm':
            byte = last_fraction_bit_idx // 8
            bytebit = 7-(last_fraction_bit_idx % 8)
            constr[byte] = 1 << bytebit
        elif which == 'small norm':
            byte = last_exponent_bit_idx // 8
            bytebit = 7-(last_exponent_bit_idx % 8)
            constr[byte] = 1 << bytebit
        else:
            raise ValueError('hmm')
        val = constr.view(t)[0]
        val_repr = repr(val)
        val2 = t(eval(val_repr))
        if not (val2 == 0 and val < 1e-100):
            assert_equal(val, val2)
def test_addsumprod(self):
    # Tests add, sum, product.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
    assert_equal(np.add.reduce(x), add.reduce(x))
    assert_equal(np.add.accumulate(x), add.accumulate(x))
    assert_equal(4, sum(array(4), axis=0))
    assert_equal(4, sum(array(4), axis=0))
    assert_equal(np.sum(x, axis=0), sum(x, axis=0))
    assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
    assert_equal(np.sum(x, 0), sum(x, 0))
    assert_equal(np.product(x, axis=0), product(x, axis=0))
    assert_equal(np.product(x, 0), product(x, 0))
    assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
    s = (3, 4)
    x.shape = y.shape = xm.shape = ym.shape = s
    if len(s) > 1:
        assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
        assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
        assert_equal(np.sum(x, 1), sum(x, 1))
        assert_equal(np.product(x, 1), product(x, 1))
def test_datafriendly_add(self):
    # Test keeping data w/ (inplace) addition
    x = array([1, 2, 3], mask=[0, 0, 1])
    # Test add w/ scalar
    xx = x + 1
    assert_equal(xx.data, [2, 3, 3])
    assert_equal(xx.mask, [0, 0, 1])
    # Test iadd w/ scalar
    x += 1
    assert_equal(x.data, [2, 3, 3])
    assert_equal(x.mask, [0, 0, 1])
    # Test add w/ array
    x = array([1, 2, 3], mask=[0, 0, 1])
    xx = x + array([1, 2, 3], mask=[1, 0, 0])
    assert_equal(xx.data, [1, 4, 3])
    assert_equal(xx.mask, [1, 0, 1])
    # Test iadd w/ array
    x = array([1, 2, 3], mask=[0, 0, 1])
    x += array([1, 2, 3], mask=[1, 0, 0])
    assert_equal(x.data, [1, 4, 3])
    assert_equal(x.mask, [1, 0, 1])
def test_testAddSumProd(self):
    # Test add, sum, product.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
    self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
    self.assertTrue(eq(4, sum(array(4), axis=0)))
    self.assertTrue(eq(4, sum(array(4), axis=0)))
    self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
    self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
    self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
    self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
    self.assertTrue(eq(np.product(x, 0), product(x, 0)))
    self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                       product(xm, axis=0)))
    if len(s) > 1:
        self.assertTrue(eq(np.concatenate((x, y), 1),
                           concatenate((xm, ym), 1)))
        self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
        self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
        self.assertTrue(eq(np.product(x, 1), product(x, 1)))
def _as_spline(curve, geometry):
    """
    Add a spline into a blender curve
    @curve : blender curve
    """
    if hasattr(geometry, 'exterior'):
        # Polygon
        Io._add_spline(curve, geometry.exterior)
        for geom in geometry.interiors:
            Io._add_spline(curve, geom)
    elif hasattr(geometry, 'geoms'):
        # Multi and Collections
        for geom in geometry.geoms:
            Io._as_spline(curve, geom)
    else:
        # LinearRing, LineString and Shape
        Io._add_spline(curve, geometry)
def test_cputensor_fusion():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_d)
def test_4d_elementwise(transformer_factory, input_axes):
    # Limit the maximum absolute value of tensor elements to 7.9.
    # See the description in test_exit_condition above.
    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)

    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    out = ng.add(x, y)

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.add(x_val, y_val)
    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
def discrete_uniform(self, low, high, quantum, axes, dtype=None):
    """
    Returns a tensor initialized with a discrete uniform distribution.

    Arguments:
        low: The lower limit of the values.
        high: The upper limit of the values.
        quantum: Distance between values.
        axes: The axes of the tensor.

    Returns:
        The tensor.
    """
    if dtype is None:
        dtype = self.dtype
    n = math.floor((high - low) / quantum)
    result = np.array(self.rng.random_integers(
        0, n, ng.make_axes(axes).lengths), dtype=dtype)
    np.multiply(result, quantum, result)
    np.add(result, low, result)
    return result
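The same draw-scale-shift pattern works outside this class. Here is a minimal self-contained sketch of the idea; the snippet's rng/axes machinery is replaced by a plain shape and numpy.random.default_rng, both assumptions of this example:

import math
import numpy as np

def discrete_uniform_sketch(low, high, quantum, shape, rng=None):
    # Draw integers k in [0, n], then map k -> low + k*quantum in place.
    rng = rng or np.random.default_rng()
    n = math.floor((high - low) / quantum)
    result = rng.integers(0, n, size=shape, endpoint=True).astype(np.float64)
    np.multiply(result, quantum, out=result)  # scale onto the quantum grid
    np.add(result, low, out=result)           # shift so values start at `low`
    return result

# e.g. values drawn from {0.0, 0.25, 0.5, ..., 2.0}
print(discrete_uniform_sketch(0.0, 2.0, 0.25, shape=(2, 3)))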
def get_centroid_idf(text, emb, idf, stopwords, D):
    # Compute term frequencies
    tf = defaultdict(int)
    tokens = bioclean(text)
    for word in tokens:
        if word in emb and word not in stopwords:
            tf[word] += 1

    # Compute the tf-idf weighted centroid
    centroid = np.zeros((1, D))
    div = 0
    for word in tf:
        if word in idf:
            p = tf[word] * idf[word]
            centroid = np.add(centroid, emb[word]*p)
            div += p
    if div != 0:
        centroid = np.divide(centroid, div)
    return centroid
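To make the accumulation above concrete, here is a toy run of the same tf-idf weighted mean of word vectors; the embeddings, idf values, and counts are made up for the example:

import numpy as np

emb = {'gene': np.array([1.0, 0.0]), 'cell': np.array([0.0, 1.0])}
idf = {'gene': 2.0, 'cell': 1.0}
tf = {'gene': 3, 'cell': 1}

centroid, div = np.zeros((1, 2)), 0.0
for word, count in tf.items():
    p = count * idf[word]
    centroid = np.add(centroid, emb[word] * p)  # accumulate weighted vectors
    div += p
centroid = np.divide(centroid, div)  # -> [[6/7, 1/7]]
print(centroid)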
def apply_cmvn(utt, mean, variance, reverse=False):
    """Apply mean and variance normalisation based on previously computed statistics.

    Args:
        utt: The utterance feature numpy matrix.
        mean: The mean statistics of the features.
        variance: The variance statistics of the features.
        reverse: If True, undo a previous normalisation.

    Returns:
        A numpy array containing the mean and variance normalised features.
    """
    if not reverse:
        # return the mean and variance normalised utterance
        return np.divide(np.subtract(utt, mean), np.sqrt(variance))
    else:
        # reverse the normalisation
        return np.add(np.multiply(utt, np.sqrt(variance)), mean)
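A quick round-trip check of apply_cmvn as defined above, on toy data: normalising and then reversing with the same statistics should recover the input exactly.

import numpy as np

utt = np.array([[1.0, 2.0], [3.0, 6.0]])      # 2 frames, 2 features
mean, variance = utt.mean(axis=0), utt.var(axis=0)

normed = apply_cmvn(utt, mean, variance)
restored = apply_cmvn(normed, mean, variance, reverse=True)

assert np.allclose(normed.mean(axis=0), 0.0)  # zero mean per feature
assert np.allclose(normed.var(axis=0), 1.0)   # unit variance per feature
assert np.allclose(restored, utt)             # reverse undoes the transform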
def sumNormalizedFeatures(features, levels=9, startSize=(1983*8, 1088*8)):
    """
    Normalizes the feature maps in argument features and combines them into one.

    Arguments:
        features  : list of feature maps (images)
        levels    : the number of levels of the Gaussian pyramid used to
                    calculate the feature maps.
        startSize : the base size of the Gaussian pyramid used to
                    calculate the feature maps.

    Returns:
        a combined feature map.
    """
    commonWidth = startSize[0] / 2**(levels/2 - 1)
    commonHeight = startSize[1] / 2**(levels/2 - 1)
    commonSize = commonWidth, commonHeight
    logger.info("Size of conspicuity map: %s", commonSize)
    consp = N(cv2.resize(features[0][1], commonSize))
    for f in features[1:]:
        resized = N(cv2.resize(f[1], commonSize))
        consp = cv2.add(consp, resized)
    return consp
def __getitem__(self, idx):
    img_name = self.img_names[idx]
    ensembled = np.zeros(const.img_size)
    for i, pred_dir in enumerate(self.pred_dirs):
        pred_path = os.path.join(const.OUTPUT_DIR, pred_dir, const.PROBS_DIR_NAME, img_name + '.npy')
        img_prob = np.load(pred_path)
        weighted_img_prob = np.multiply(img_prob, self.weights[i])
        ensembled = np.add(ensembled, weighted_img_prob)
    # save into new output/ folder
    submit.save_ensembled_prob_map(self.ensemble_dir, img_name, ensembled)
    # plt.imshow(ensembled)
    # plt.show()
    return img_name, ensembled
def __getitem__(self, idx):
    img_name = self.img_names[idx]
    ensembled_mask = np.zeros((const.img_size[0], const.img_size[1]))
    for i, submission in enumerate(self.submissions):
        rle = submission[img_name]
        mask = run_length.decode(rle)
        weighted_mask = np.multiply(mask, self.weights[i])
        ensembled_mask = np.add(ensembled_mask, weighted_mask)
    ensembled_mask[ensembled_mask > 0.5] = 1
    ensembled_mask[ensembled_mask <= 0.5] = 0
    # plt.imshow(ensembled_mask)
    # plt.show()
    ensembled_rle = run_length.encode(ensembled_mask)
    return img_name, ensembled_rle
def compute_num_adds(self, cell, composition_space, random):
    """
    Computes the number of atoms (or stoichiometries' worth of atoms) to add
    or remove. Returns a non-zero integer.

    Args:
        cell: the Cell of the parent organism
        composition_space: the CompositionSpace of the search
        random: a copy of Python's built-in PRNG
    """
    num_adds = int(round(random.gauss(self.mu_num_adds,
                                      self.sigma_num_adds)))
    # keep trying until we get a valid number
    while num_adds == 0 or \
            (composition_space.objective_function == 'epa' and num_adds*-1 >=
             cell.num_sites/composition_space.endpoints[0].num_atoms) or \
            (composition_space.objective_function == 'pd' and
             num_adds*-1 >= cell.num_sites):
        num_adds = int(round(random.gauss(self.mu_num_adds,
                                          self.sigma_num_adds)))
    return num_adds
def _get_boll(cls, df):
    """ Get Bollinger bands.

    boll_ub means the upper band of the Bollinger bands
    boll_lb means the lower band of the Bollinger bands
    boll_ub = MA + K * σ
    boll_lb = MA - K * σ
    where σ is the M-period moving standard deviation of the close,
    M = BOLL_PERIOD
    K = BOLL_STD_TIMES
    :param df: data
    :return: None
    """
    moving_avg = df['close_{}_sma'.format(cls.BOLL_PERIOD)]
    moving_std = df['close_{}_mstd'.format(cls.BOLL_PERIOD)]
    df['boll'] = moving_avg
    moving_avg = list(map(np.float64, moving_avg))
    moving_std = list(map(np.float64, moving_std))
    # noinspection PyTypeChecker
    df['boll_ub'] = np.add(moving_avg,
                           np.multiply(cls.BOLL_STD_TIMES, moving_std))
    # noinspection PyTypeChecker
    df['boll_lb'] = np.subtract(moving_avg,
                                np.multiply(cls.BOLL_STD_TIMES,
                                            moving_std))
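A small numeric illustration of those two formulas. The moving average and standard deviation are computed by hand over a window of 3 to keep the example self-contained, and K = 2 mirrors the snippet's BOLL_STD_TIMES convention only as an assumption:

import numpy as np

close = np.array([10.0, 11.0, 12.0, 13.0, 14.0])
window = 3
ma = np.array([close[i-window+1:i+1].mean() for i in range(window-1, len(close))])
std = np.array([close[i-window+1:i+1].std() for i in range(window-1, len(close))])

K = 2
boll_ub = np.add(ma, np.multiply(K, std))       # MA + K * sigma
boll_lb = np.subtract(ma, np.multiply(K, std))  # MA - K * sigma
print(boll_ub, boll_lb)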
def __add__(self, other):
    """Implicitly broadcast lesser operand to a higher conformable dimension"""
    if type(self) in self._types or type(other) in self._types:
        return super().__add__(other)

    # Stimuli become vectorized, but bias units remain 1D. To add wx + b, must broadcast
    if self.ndim == 2 and other.ndim == 1:
        return Array(np.add(self, np.tile(other[..., np.newaxis], self.shape[1])))
    if self.ndim == 1 and other.ndim == 2:
        return Array(np.add(np.tile(self[..., np.newaxis], other.shape[1]), other))
    if self.ndim == 3 and other.ndim == 2:
        return Array(np.add(self, np.tile(other[..., np.newaxis], self.shape[2])))
    if self.ndim == 2 and other.ndim == 3:
        return Array(np.add(np.tile(self[..., np.newaxis], other.shape[2]), other))
    return np.add(self, other)
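For reference, plain numpy broadcasting already handles the common 2D + 1D case that the tiling above makes explicit; a minimal check:

import numpy as np

wx = np.array([[1.0, 2.0, 3.0],
               [4.0, 5.0, 6.0]])   # (2, 3): two rows of unit activations
b = np.array([10.0, 20.0])         # (2,): one bias per row

# Tiling the bias across columns, as in __add__ above ...
tiled = np.add(wx, np.tile(b[..., np.newaxis], wx.shape[1]))
# ... is equivalent to letting numpy broadcast a column vector.
broadcast = np.add(wx, b[:, np.newaxis])
assert np.array_equal(tiled, broadcast)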
def logprob_dc(counts, prior, axis=None):
    """Non-normalized log probability of a Dirichlet-Categorical distribution.

    See https://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution
    """
    # Note that this excludes the factorial(counts) term, since we explicitly
    # track permutations in assignments.
    return gammaln(np.add(counts, prior, dtype=np.float32)).sum(axis)
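A quick call of the same expression, assuming the `gammaln` used above is `scipy.special.gammaln` (the snippet's own imports are not shown):

import numpy as np
from scipy.special import gammaln

counts = np.array([3, 0, 1])   # observed category counts
prior = 0.5                    # symmetric Dirichlet concentration

# Unnormalized log probability: sum of log-gamma over posterior counts.
logp = gammaln(np.add(counts, prior, dtype=np.float32)).sum()
print(logp)  # a scalar, comparable across assignments with equal totals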
def set_edges(self, edges):
    TreeTrainer.set_edges(self, edges)
    V, E, K, M = self._VEKM
    assignments = self._assignments[sorted(self._added_rows), :]
    for e, v1, v2 in self._tree.tree_grid.T:
        self._edge_ss[e, :, :] = count_pairs(assignments, v1, v2, M)
    np.add(self._edge_ss, self._edge_prior, out=self._edge_probs)
def add_row(self, row_id):
    logger.debug('TreeGaussTrainer.add_row %d', row_id)
    assert row_id not in self._added_rows, row_id
    self._added_rows.add(row_id)
    treegauss_add_row(
        self._data[row_id, :],
        self._tree.tree_grid,
        self._program,
        self._latent[row_id, :, :],
        self._vert_ss,
        self._edge_ss,
        self._feat_ss, )
def isum(arrays, axis = -1, dtype = None, ignore_nan = False):
    """
    Streaming sum of array elements.

    Parameters
    ----------
    arrays : iterable
        Arrays to be summed.
    axis : int or None, optional
        Reduction axis. Default is to sum the arrays in the stream as if
        they had been stacked along a new axis, then sum along this new axis.
        If None, arrays are flattened before summing. If `axis` is an int larger
        than the number of dimensions in the arrays of the stream, arrays are
        summed along the new axis.
    dtype : numpy.dtype, optional
        The type of the yielded array and of the accumulator in which the elements
        are summed. The dtype of the input arrays is used by default unless they
        have an integer dtype of less precision than the default platform integer.
        In that case, a signed dtype is promoted to the platform integer, and an
        unsigned dtype to an unsigned integer of the same precision as the
        platform integer.
    ignore_nan : bool, optional
        If True, NaNs are ignored. Default is propagation of NaNs.

    Yields
    ------
    online_sum : ndarray
    """
    yield from ireduce_ufunc(arrays, ufunc = np.add, axis = axis, ignore_nan = ignore_nan, dtype = dtype)
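To show the idea behind the streaming reduction without depending on ireduce_ufunc, here is a minimal sketch in the spirit of isum: only one accumulator is held in memory, and a partial sum is yielded after each array. running_sum is hypothetical, not part of the module above.

import numpy as np

def running_sum(arrays):
    # Yield the partial elementwise sum after each array in the stream.
    arrays = iter(arrays)
    total = np.array(next(arrays), copy=True)
    yield total
    for arr in arrays:
        np.add(total, arr, out=total)  # in-place accumulation
        yield total

stream = (np.full((2, 2), i, dtype=float) for i in range(1, 4))
for partial in running_sum(stream):
    pass
print(partial)  # [[6. 6.] [6. 6.]]  == 1 + 2 + 3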
def test_no_side_effects(self):
    """ Test that no arrays in the stream are modified """
    for arr in self.source:
        arr.setflags(write = False)
    out = last(ireduce_ufunc(self.source, np.add))
def test_single_array(self):
    """ Test ireduce_ufunc on a single array, not a sequence """
    source = np.ones((16, 16), dtype = int)
    out = last(ireduce_ufunc(source, np.add, axis = -1))
    self.assertTrue(np.allclose(source, out))
def test_output_shape(self):
    """ Test output shape """
    for axis in (0, 1, 2, 3, None):
        with self.subTest('axis = {}'.format(axis)):
            from_numpy = np.add.reduce(self.stack, axis = axis)
            out = last(ireduce_ufunc(self.source, np.add, axis = axis))
            self.assertSequenceEqual(from_numpy.shape, out.shape)
            self.assertTrue(np.allclose(out, from_numpy))
def test_ignore_nan(self):
    """ Test that ignore_nan is working """
    for axis in (0, 1, 2, 3, None):
        with self.subTest('axis = {}'.format(axis)):
            out = last(ireduce_ufunc(self.source, np.add, axis = axis, ignore_nan = True))
            self.assertFalse(np.any(np.isnan(out)))

# Dynamic generation of tests on binary ufuncs
def con_two_dict(dic1, dic2):
    dic = []
    for item in dic1:
        dic.append(item.copy())
    for i in range(0, len(dic)):
        for k in dic2[i].keys():
            if dic[i].__contains__(k):
                dic[i][k] = np.add(dic[i][k], dic2[i][k])
            else:
                dic[i][k] = dic2[i][k]
    return dic
def compute_conf_intervals(data, cumulative=False):
    '''
    Args:
        data (list): A 3D matrix, [algorithm][instance][episode]
        cumulative (bool) *opt
    '''
    confidence_intervals_each_alg = []  # [alg][conf_inv_for_episode]
    for i, all_instances in enumerate(data):
        num_instances = len(data[i])
        num_episodes = len(data[i][0])

        all_instances = np.array(all_instances)
        alg_i_ci = []
        total_so_far = np.zeros(num_instances)
        for j in xrange(num_episodes):
            # Compute datum for confidence interval.
            episode_j_all_instances = all_instances[:, j]
            if cumulative:
                # Cumulative.
                summed_vector = np.add(episode_j_all_instances, total_so_far)
                total_so_far = np.add(episode_j_all_instances, total_so_far)
                episode_j_all_instances = summed_vector
            # Compute the interval and add it to list.
            conf_interv = compute_single_conf_interval(episode_j_all_instances)
            alg_i_ci.append(conf_interv)
        confidence_intervals_each_alg.append(alg_i_ci)
    return confidence_intervals_each_alg
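The cumulative branch above is just an elementwise running total across episodes; a compact check with toy numbers (two instances, three episodes):

import numpy as np

per_episode = np.array([[1.0, 2.0, 3.0],   # instance 0
                        [4.0, 5.0, 6.0]])  # instance 1

total_so_far = np.zeros(per_episode.shape[0])
cumulative = []
for j in range(per_episode.shape[1]):
    total_so_far = np.add(per_episode[:, j], total_so_far)
    cumulative.append(total_so_far.copy())

# Matches numpy's built-in cumulative sum along the episode axis.
assert np.array_equal(np.column_stack(cumulative), np.cumsum(per_episode, axis=1))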
def __call__(self, sample):
    # keep track of the absolute value of each incoming sample
    self.diff = np.add(self.diff,
                       np.absolute(np.asarray(sample.channel_data)))
    self.sample_count = self.sample_count + 1

    elapsed_time = timeit.default_timer() - self.last_report
    if elapsed_time > self.polling_interval:
        channel_noise_power = np.divide(self.diff, self.sample_count)
        print(channel_noise_power)
        self.diff = np.zeros(self.eeg_channels)
        self.last_report = timeit.default_timer()

# Instantiate "monitor" thread
def predict(self, test):
    print('predict')
    y1 = np.dot(np.mat(self.theta1), np.mat(test).T)
    y1 = y1 + np.mat(self.input_layer_bias)  # Add the bias
    y1 = self.sigmoid(y1)

    y2 = np.dot(np.array(self.theta2), y1)
    y2 = np.add(y2, self.hidden_layer_bias)  # Add the bias
    y2 = self.sigmoid(y2)

    results = y2.T.tolist()[0]
    return results.index(max(results))
def sample(gt, n, im_size, scale_factor, transfer_range, scale_range, valid, verbose=False):
    samp = np.array([gt[0]+gt[2]/2.0, gt[1]+gt[3]/2.0, gt[2], gt[3]])
    samples = np.repeat(np.reshape(samp, [1, -1]), n, axis=0)
    h, w = im_size

    if verbose:
        print(w, h)
        print(gt)
        print(samp)
        print(transfer_range)
        print(scale_range)

    samples[:, 0] = np.add(samples[:, 0], transfer_range*samp[2]*(np.random.rand(n)*2-1))
    samples[:, 1] = np.add(samples[:, 1], transfer_range*samp[3]*(np.random.rand(n)*2-1))
    samples[:, 2:] = np.multiply(samples[:, 2:], np.power(scale_factor, scale_range*np.repeat(np.random.rand(n, 1)*2-1, 2, axis=1)))
    samples[:, 2] = np.maximum(0, np.minimum(w-5, samples[:, 2]))
    samples[:, 3] = np.maximum(0, np.minimum(h-5, samples[:, 3]))

    if verbose:
        print(samples[0])
    samples = np.c_[samples[:, 0]-samples[:, 2]/2, samples[:, 1]-samples[:, 3]/2, samples[:, 2], samples[:, 3]]
    if verbose:
        print(samples[0])

    if valid:
        samples[:, 0] = np.maximum(0, np.minimum(w-samples[:, 2], samples[:, 0]))
        samples[:, 1] = np.maximum(0, np.minimum(h-samples[:, 3], samples[:, 1]))
    else:
        samples[:, 0] = np.maximum(0-samples[:, 2]/2, np.minimum(w-samples[:, 2]/2, samples[:, 0]))
        samples[:, 1] = np.maximum(0-samples[:, 3]/2, np.minimum(h-samples[:, 3]/2, samples[:, 1]))
    if verbose:
        print(samples[0])
    return samples


###########################################################################
# overlap_ratio                                                           #
###########################################################################
def set_attributes(self, task):
    """Set key replacement dictionary."""
    Utility.set_attributes(self, task)
    self._operands = task.get('operands', [])
    if not self._operands:
        raise ValueError('`Operator` must have at least one operand.')
    self._result = task.get('result', 'result')
    self._op = self.ops.get(task.get('operator', '+'), np.add)
def evaluate_model(self, x, w):
    if not self.regularization or self.lambd == 0:
        edge_weight = x.dot(w)
        edge_weight = np.multiply(edge_weight, self.skipped)
    else:
        edge_weight = np.zeros((1, self.num_edges))
        for idx, value in izip(x.indices, x.data):
            # Soft-thresholding of w; vectorized equivalent:
            # edge_weight = np.add(edge_weight, np.multiply(value, np.multiply(np.maximum(np.subtract(np.abs(w[idx, :]), self.lambd), 0), np.sign(w[idx, :]))))
            for edge in xrange(self.num_edges):
                if w[idx, edge] > self.lambd:
                    edge_weight[0, edge] += value * (w[idx, edge] - self.lambd)
                elif w[idx, edge] < -self.lambd:
                    edge_weight[0, edge] += value * (w[idx, edge] + self.lambd)
    return edge_weight
def get_unseen_labels(self, y):
    unseen = set(y).difference(self.classes_seen)
    for c in unseen:
        self.classes_seen.add(c)
    return unseen
def pathl(cop_dat):
    # Calculate the COP (center of pressure) path length: the sum of the
    # Euclidean distances between consecutive samples.
    delt = np.diff(cop_dat[:, (0, 1)], axis=0)
    sqs = np.square(delt)
    sum_s = np.add(sqs[:, 0], sqs[:, 1])
    lgths = np.sqrt(sum_s)
    return np.sum(lgths)
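A sanity check of pathl as defined above, on toy data: three sides of a unit square give a path length of 3.0, and the result matches summing np.hypot over the stepwise differences.

import numpy as np

# Four corners of a unit square, traversed in order: path length 3.0.
cop = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
assert np.isclose(pathl(cop), 3.0)

# Same result via np.hypot on the per-step differences.
d = np.diff(cop, axis=0)
assert np.isclose(np.sum(np.hypot(d[:, 0], d[:, 1])), 3.0)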
def test_reduce(self, level=rlevel):
    # Ticket #40
    assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)