Python numpy module: copyto() example source code
The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.copyto().
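numpy.copyto(dst, src, casting='same_kind', where=True) copies values from src into the existing array dst in place, optionally restricted to the positions where a boolean mask is True. As a quick orientation before the project examples, here is a minimal usage sketch (not drawn from any of the projects below):

import numpy as np

dst = np.zeros((2, 3))
src = np.arange(6).reshape(2, 3)
np.copyto(dst, src)                      # full copy; dst is modified in place
np.copyto(dst, -1, where=src % 2 == 1)   # masked copy; only odd positions receive -1
# dst is now [[0., -1., 2.], [-1., 4., -1.]]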
def test_copyto_fromscalar():
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Simple copy
    np.copyto(a, 1.5)
    assert_equal(a, 1.5)
    np.copyto(a.T, 2.5)
    assert_equal(a, 2.5)
    # Where-masked copy
    mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(a, 3.5, where=mask)
    assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(a.T, 4.5, where=mask)
    assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
def __ipow__(self, other):
    """
    Raise self to the power other, in place.
    """
    other_data = getdata(other)
    other_mask = getmask(other)
    with np.errstate(divide='ignore', invalid='ignore'):
        self._data.__ipow__(np.where(self._mask, self.dtype.type(1),
                                     other_data))
    invalid = np.logical_not(np.isfinite(self._data))
    if invalid.any():
        if self._mask is not nomask:
            self._mask |= invalid
        else:
            self._mask = invalid
        np.copyto(self._data, self.fill_value, where=invalid)
    new_mask = mask_or(other_mask, invalid)
    self._mask = mask_or(self._mask, new_mask)
    return self
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
    """Draws keypoints on an image (numpy array).

    Args:
      image: a numpy array with shape [height, width, 3].
      keypoints: a numpy array with shape [num_keypoints, 2].
      color: color to draw the keypoints with. Default is red.
      radius: keypoint radius. Default value is 2.
      use_normalized_coordinates: if True (default), treat keypoint values as
        relative to the image. Otherwise treat them as absolute.
    """
    image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_keypoints_on_image(image_pil, keypoints, color, radius,
                            use_normalized_coordinates)
    np.copyto(image, np.array(image_pil))
def quantize_net(net, codebook):
    layers = codebook.keys()
    codes_W = {}
    print "================Perform quantization=============="
    for layer in layers:
        print "Quantize layer:", layer
        W = net.params[layer][0].data
        # Map each weight to the index of its nearest codebook centroid
        codes, _ = scv.vq(W.flatten(), codebook[layer])
        # codes = stochasitc_quantize2(W.flatten(), codebook[layer])  # stochastic alternative
        codes = np.reshape(codes, W.shape)
        codes_W[layer] = np.array(codes, dtype=np.uint32)
        # Reconstruct the quantized weights from the codebook and write them back
        W_q = np.reshape(codebook[layer][codes], W.shape)
        np.copyto(net.params[layer][0].data, W_q)
    return codes_W
def upload_indices(self, context):
    '''
    Upload indices to graphic card

    *Parameters:*

    - `context`: `VulkContext`

    **Note: Mesh must be indexed**
    '''
    if not self.has_indices:
        raise Exception('No index in this mesh')
    if not self.dirty_indices:
        return
    self.dirty_indices = False
    with self.indices_buffer.bind(context) as b:
        np.copyto(np.array(b, copy=False),
                  self.indices_array.view(dtype=np.uint8),
                  casting='no')
def upload_vertices(self, context):
    '''
    Upload vertices to graphic card

    *Parameters:*

    - `context`: `VulkContext`
    '''
    if not self.dirty_vertices:
        return
    self.dirty_vertices = False
    with self.vertices_buffer.bind(context) as b:
        np.copyto(np.array(b, copy=False),
                  self.vertices_array.view(dtype=np.uint8),
                  casting='no')
def copy_parameters_from(self, params):
    """Copies parameters from another source without reallocation.

    Args:
        params (Iterable): Iterable of parameter arrays.
    """
    for dst, src in zip(self.parameters, params):
        if isinstance(dst, numpy.ndarray):
            if isinstance(src, numpy.ndarray):
                numpy.copyto(dst, src)
            else:
                dst[:] = src.get()
        elif isinstance(src, numpy.ndarray):
            dst.set(src)
        else:
            cuda.copy(src, out=dst)
def outer(self, a, b):
    """
    Return the function applied to the outer product of a and b.
    """
    (da, db) = (getdata(a), getdata(b))
    d = self.f.outer(da, db)
    ma = getmask(a)
    mb = getmask(b)
    if ma is nomask and mb is nomask:
        m = nomask
    else:
        ma = getmaskarray(a)
        mb = getmaskarray(b)
        m = umath.logical_or.outer(ma, mb)
    if (not m.ndim) and m:
        return masked
    if m is not nomask:
        np.copyto(d, da, where=m)
    if not d.shape:
        return d
    masked_d = d.view(get_masked_subclass(a, b))
    masked_d._mask = m
    masked_d._update_from(d)
    return masked_d
def _sample_words(model, c, maxlen, V_C, K=20):
    def predict(samples):
        context = np.array([c] * len(samples))
        prev_chars = np.zeros((len(samples), maxlen), dtype=np.int32)
        probs = np.zeros((len(samples), V_C.size), dtype=np.float32)
        for i, prev in enumerate(samples):
            for j, ch in enumerate(prev):
                prev_chars[i, j + 1] = ch + 1
        preds = model.predict_chars(context, prev_chars)
        for i, prev in enumerate(samples):
            np.copyto(probs[i], preds[i, len(prev)])
        return probs

    eow = V_C.get_index(EOW)
    best_chars, losses = beamsearch(predict, eow, k=K, maxsample=maxlen)
    best_words = []
    for word_chars in best_chars:
        word = ""
        for ch in word_chars:
            if ch == eow:
                break
            word += V_C.get_token(ch)
        best_words.append(word)
    probs = 1. / np.exp(np.array(losses))
    return best_words, probs
def outer(self, a, b):
    """
    Return the function applied to the outer product of a and b.
    """
    (da, db) = (getdata(a), getdata(b))
    d = self.f.outer(da, db)
    ma = getmask(a)
    mb = getmask(b)
    if ma is nomask and mb is nomask:
        m = nomask
    else:
        ma = getmaskarray(a)
        mb = getmaskarray(b)
        m = umath.logical_or.outer(ma, mb)
    if (not m.ndim) and m:
        return masked
    if m is not nomask:
        np.copyto(d, da, where=m)
    if not d.shape:
        return d
    masked_d = d.view(get_masked_subclass(a, b))
    masked_d._mask = m
    return masked_d
def fake_blackbox_optimizer(self):
    true_grads, losses, l2s, loss1, loss2, scores, nimgs = self.sess.run(
        [self.grad_op, self.loss, self.l2dist, self.loss1, self.loss2, self.output, self.newimg],
        feed_dict={self.modifier: self.real_modifier})
    # ADAM update
    grad = true_grads[0].reshape(-1)
    # print(true_grads[0])
    epoch = self.adam_epoch[0]
    mt = self.beta1 * self.mt + (1 - self.beta1) * grad
    vt = self.beta2 * self.vt + (1 - self.beta2) * np.square(grad)
    corr = (math.sqrt(1 - self.beta2 ** epoch)) / (1 - self.beta1 ** epoch)
    # print(grad.shape, mt.shape, vt.shape, self.real_modifier.shape)
    # m is a *view* of self.real_modifier
    m = self.real_modifier.reshape(-1)
    # this is in-place
    m -= self.LEARNING_RATE * corr * (mt / (np.sqrt(vt) + 1e-8))
    self.mt = mt
    self.vt = vt
    # m -= self.LEARNING_RATE * grad
    if not self.use_tanh:
        m_proj = np.maximum(np.minimum(m, self.modifier_up), self.modifier_down)
        np.copyto(m, m_proj)
    self.adam_epoch[0] = epoch + 1
    return losses[0], l2s[0], loss1[0], loss2[0], scores[0], nimgs[0]
def add_data(self, input_cube, index):
    """Add data to a larger cube (this instance) from a smaller cube (input_cube)

    Assumes all time samples are present in the smaller cube

    Args:
        input_cube (spdb.cube.Cube): Input Cube instance from which to merge data
        index: relative morton ID indicating where to insert the data

    Returns:
        None
    """
    x_offset = index[0] * input_cube.x_dim
    y_offset = index[1] * input_cube.y_dim
    z_offset = index[2] * input_cube.z_dim

    np.copyto(self.data[input_cube.time_range[0] - self.time_range[0]:input_cube.time_range[1] - self.time_range[0],
                        z_offset:z_offset + input_cube.z_dim,
                        y_offset:y_offset + input_cube.y_dim,
                        x_offset:x_offset + input_cube.x_dim], input_cube.data[:, :, :, :])
def lb(self, lb):
    if self.equality.any():
        raise ValueError(
            "The lb array can not be set "
            "when there are indices of the "
            "equality array that are True")
    if lb is None:
        lb = -numpy.inf
    if isinstance(lb, numpy.ndarray):
        numpy.copyto(self._lb, lb)
    elif isinstance(lb, NumericValue):
        raise ValueError("lb must be set to "
                         "a simple numeric type "
                         "or a numpy array")
    else:
        self._lb.fill(lb)
def ub(self, ub):
    if self.equality.any():
        raise ValueError(
            "The ub array can not be set "
            "when there are indices of the "
            "equality array that are True")
    if ub is None:
        ub = numpy.inf
    if isinstance(ub, numpy.ndarray):
        numpy.copyto(self._ub, ub)
    elif isinstance(ub, NumericValue):
        raise ValueError("ub must be set to "
                         "a simple numeric type "
                         "or a numpy array")
    else:
        self._ub.fill(ub)
def rhs(self, rhs):
    if rhs is None:
        # None has a different meaning depending on the
        # context (lb or ub), so there is no way to
        # interpret this
        raise ValueError(
            "Constraint right-hand side can not "
            "be assigned a value of None.")
    elif isinstance(rhs, NumericValue):
        raise ValueError("rhs must be set to "
                         "a simple numeric type "
                         "or a numpy array")
    elif isinstance(rhs, numpy.ndarray):
        numpy.copyto(self._lb, rhs)
        numpy.copyto(self._ub, rhs)
    else:
        self._lb.fill(rhs)
        self._ub.fill(rhs)
    self._equality.fill(True)
def inject_noise_sample(self, data, noise_path, noise_level):
    noise_src = load_audio(noise_path)
    noise_offset_fraction = np.random.rand()
    noise_dst = np.zeros_like(data)

    src_offset = int(len(noise_src) * noise_offset_fraction)
    src_left = len(noise_src) - src_offset

    dst_offset = 0
    dst_left = len(data)

    while dst_left > 0:
        copy_size = min(dst_left, src_left)
        np.copyto(noise_dst[dst_offset:dst_offset + copy_size],
                  noise_src[src_offset:src_offset + copy_size])
        if src_left > dst_left:
            dst_left = 0
        else:
            dst_left -= copy_size
            dst_offset += copy_size
            src_left = len(noise_src)
            src_offset = 0

    data += noise_level * noise_dst
    return data
def normalise_data(self, timestamp, data):
    """ Convert the data if needed """
    if self._passthrough:
        return

    i = 0
    for datum in data:
        if self.needsfixup[i] is None:
            i += 1
            continue

        if len(datum) == 0:
            # Ignore entries with no data - this typically occurs when the
            # plugin requests multiple metrics and the metrics do not all appear
            # at every timestep
            i += 1
            continue

        if self.accumulator[i] is None:
            self.accumulator[i] = numpy.array(datum)
            self.last[i] = numpy.array(datum)
        else:
            self.accumulator[i] += (datum - self.last[i]) % numpy.uint64(1L << self.needsfixup[i]['range'])
            numpy.copyto(self.last[i], datum)
            numpy.copyto(datum, self.accumulator[i])

        i += 1
def soften_targets(array, low=0.1, high=0.9):
    assert list(set(np.unique(array)) ^ {0, 1}) == [], 'Targets must be binary'
    array_new = np.empty_like(array)
    # np.copyto fills array_new in place and returns None, so its result must
    # not be assigned back to array_new
    np.copyto(array_new, array)
    array_new[array == 0] = low
    array_new[array == 1] = high
    return array_new

# misc
def softassign(self):
    """
    Run the softassign algorithm until convergence.
    """
    # TODO add possibility of slack
    for i, indices in enumerate(self.element_type_subset_indices):
        M = self.match_matrix[indices]
        old_M = M.copy()
        for it in xrange(self.max_softassign_iterations):
            # normalize across rows (except slack)
            M /= np.sum(M, axis=1)[:, None]
            # normalize across columns (except slack)
            M /= np.sum(M, axis=0)

            max_row_normalization_error = np.max(abs(np.sum(M, axis=1) - 1))

            # break if converged
            if max_row_normalization_error < self.softassign_convergence_threshold:
                oprint(5, "Softassign algorithm for subset %d converged in iteration %d" % (i, it + 1))
                break

            mean_squared_difference = np.max(abs(old_M - M))
            if mean_squared_difference < self.softassign_convergence_threshold2:
                oprint(5, "Softassign algorithm for subset %d converged in iteration %d" % (i, it + 1))
                break

            if it == (self.max_softassign_iterations - 1):
                eprint(3, "WARNING: Softassign algorithm for subset %d did not converge to %.2g (reached %.2g) in %d iterations" % (i, self.softassign_convergence_threshold, max_row_normalization_error, self.max_softassign_iterations))

            np.copyto(old_M, M)

        # M is NOT a view, but a copy
        self.match_matrix[indices] = M
def backup_match_matrix(self):
    np.copyto(self.old_old_match_matrix, self.old_match_matrix)
    np.copyto(self.old_match_matrix, self.match_matrix)
def test_object_array_self_copy(self):
    # An object array being copied into itself DECREF'ed before INCREF'ing
    # causing segmentation faults (gh-3787)
    a = np.array(object(), dtype=object)
    np.copyto(a, a)
    assert_equal(sys.getrefcount(a[()]), 2)
    a[()].__class__  # will segfault if object was deleted
def test_copyto():
    a = np.arange(6, dtype='i4').reshape(2, 3)
    # Simple copy
    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
    assert_equal(a, [[3, 1, 5], [6, 2, 1]])
    # Overlapping copy should work
    np.copyto(a[:, :2], a[::-1, 1::-1])
    assert_equal(a, [[2, 6, 5], [1, 3, 1]])
    # Defaults to 'same_kind' casting
    assert_raises(TypeError, np.copyto, a, 1.5)
    # Force a copy with 'unsafe' casting, truncating 1.5 to 1
    np.copyto(a, 1.5, casting='unsafe')
    assert_equal(a, 1)
    # Copying with a mask
    np.copyto(a, 3, where=[True, False, True])
    assert_equal(a, [[3, 1, 3], [3, 1, 3]])
    # Casting rule still applies with a mask
    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
    # Lists of integer 0's and 1's is ok too
    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(a, [[3, 4, 4], [4, 1, 3]])
    # Overlapping copy with mask should work
    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(a, [[3, 4, 4], [4, 3, 3]])
    # 'dst' must be an array
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
def _replace_nan(a, val):
    """
    If `a` is of inexact type, make a copy of `a`, replace NaNs with
    the `val` value, and return the copy together with a boolean mask
    marking the locations where NaNs were present. If `a` is not of
    inexact type, do nothing and return `a` together with a mask of None.

    Note that scalars will end up as array scalars, which is important
    for using the result as the value of the out argument in some
    operations.

    Parameters
    ----------
    a : array-like
        Input array.
    val : float
        NaN values are set to val before doing the operation.

    Returns
    -------
    y : ndarray
        If `a` is of inexact type, return a copy of `a` with the NaNs
        replaced by the fill value, otherwise return `a`.
    mask: {bool, None}
        If `a` is of inexact type, return a boolean mask marking locations of
        NaNs, otherwise return None.
    """
    is_new = not isinstance(a, np.ndarray)
    if is_new:
        a = np.array(a)
    if not issubclass(a.dtype.type, np.inexact):
        return a, None
    if not is_new:
        # need copy
        a = np.array(a, subok=True)

    mask = np.isnan(a)
    np.copyto(a, val, where=mask)
    return a, mask
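The docstring above describes the copy-and-fill pattern used by the nan-aggregation helpers. As a rough, self-contained illustration of the same idea using only public NumPy calls (the array and fill value here are made up):

import numpy as np

a = np.array([1.0, np.nan, 3.0])
mask = np.isnan(a)               # records where the NaNs were
np.copyto(a, 0.0, where=mask)    # a is now [1., 0., 3.]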
def place(arr, mask, vals):
    """
    Change elements of an array based on conditional and input values.

    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
    `place` uses the first N elements of `vals`, where N is the number of
    True values in `mask`, while `copyto` uses the elements where `mask`
    is True.

    Note that `extract` does the exact opposite of `place`.

    Parameters
    ----------
    arr : ndarray
        Array to put data into.
    mask : array_like
        Boolean mask array. Must have the same size as `a`.
    vals : 1-D sequence
        Values to put into `a`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N it will be repeated.

    See Also
    --------
    copyto, put, take, extract

    Examples
    --------
    >>> arr = np.arange(6).reshape(2, 3)
    >>> np.place(arr, arr>2, [44, 55])
    >>> arr
    array([[ 0,  1,  2],
           [44, 55, 44]])
    """
    if not isinstance(arr, np.ndarray):
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(arr).__name__))

    return _insert(arr, mask, vals)
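To make the contrast drawn in the docstring concrete, here is a small sketch (array values chosen arbitrarily) comparing np.place, which cycles through vals, with np.copyto, which broadcasts the source over the masked positions:

import numpy as np

arr = np.zeros((2, 3), dtype=int)
np.place(arr, arr == 0, [1, 2])        # [1, 2] is repeated across all six True positions
# arr is now [[1, 2, 1], [2, 1, 2]]
np.copyto(arr, 9, where=(arr == 1))    # the scalar 9 is broadcast to every True position
# arr is now [[9, 2, 9], [2, 9, 2]]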
def __call__(self, a, b, *args, **kwargs):
    "Execute the call behavior."
    # Get the data
    (da, db) = (getdata(a), getdata(b))
    # Get the result
    with np.errstate(divide='ignore', invalid='ignore'):
        result = self.f(da, db, *args, **kwargs)
    # Get the mask as a combination of the source masks and invalid
    m = ~umath.isfinite(result)
    m |= getmask(a)
    m |= getmask(b)
    # Apply the domain
    domain = ufunc_domain.get(self.f, None)
    if domain is not None:
        m |= filled(domain(da, db), True)
    # Take care of the scalar case first
    if (not m.ndim):
        if m:
            return masked
        else:
            return result
    # When the mask is True, put back da if possible
    # any errors, just abort; impossible to guarantee masked values
    try:
        np.copyto(result, 0, casting='unsafe', where=m)
        # avoid using "*" since this may be overlaid
        masked_da = umath.multiply(m, da)
        # only add back if it can be cast safely
        if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
            result += masked_da
    except:
        pass
    # Transforms to a (subclass of) MaskedArray
    masked_result = result.view(get_masked_subclass(a, b))
    masked_result._mask = m
    if isinstance(a, MaskedArray):
        masked_result._update_from(a)
    elif isinstance(b, MaskedArray):
        masked_result._update_from(b)
    return masked_result
def _recursive_printoption(result, mask, printopt):
    """
    Puts printoptions in result where mask is True.

    Private function allowing for recursion
    """
    names = result.dtype.names
    for name in names:
        (curdata, curmask) = (result[name], mask[name])
        if curdata.dtype.names:
            _recursive_printoption(curdata, curmask, printopt)
        else:
            np.copyto(curdata, printopt, where=curmask)
    return
def _recursive_filled(a, mask, fill_value):
    """
    Recursively fill `a` with `fill_value`.
    """
    names = a.dtype.names
    for name in names:
        current = a[name]
        if current.dtype.names:
            _recursive_filled(current, mask[name], fill_value[name])
        else:
            np.copyto(current, fill_value[name], where=mask[name])
def putmask(a, mask, values):  # , mode='raise'):
    """
    Changes elements of an array based on conditional and input values.

    This is the masked array version of `numpy.putmask`, for details see
    `numpy.putmask`.

    See Also
    --------
    numpy.putmask

    Notes
    -----
    Using a masked array as `values` will **not** transform a `ndarray` into
    a `MaskedArray`.
    """
    # We can't use 'frommethod', the order of arguments is different
    if not isinstance(a, MaskedArray):
        a = a.view(MaskedArray)
    (valdata, valmask) = (getdata(values), getmask(values))
    if getmask(a) is nomask:
        if valmask is not nomask:
            a._sharedmask = True
            a._mask = make_mask_none(a.shape, a.dtype)
            np.copyto(a._mask, valmask, where=mask)
    elif a._hardmask:
        if valmask is not nomask:
            m = a._mask.copy()
            np.copyto(m, valmask, where=mask)
            a.mask |= m
    else:
        if valmask is nomask:
            valmask = getmaskarray(values)
        np.copyto(a._mask, valmask, where=mask)
    np.copyto(a._data, valdata, where=mask)
    return
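For reference, the plain numpy.putmask that this masked-array wrapper mirrors repeats values when it is shorter than the target, selecting by flat position rather than by count of True entries; a tiny sketch with arbitrary values:

import numpy as np

a = np.arange(6)
np.putmask(a, a > 2, [-1, -2])   # values are cycled by flat index over the masked positions
# a is now [0, 1, 2, -2, -1, -2]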
def __getitem__(self, i):
    self.check_index(i)
    tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
    a = np.empty(tensor_size, dtype=self.dtype)
    np.copyto(a, self.buffer[self.data_offsets[i]:self.data_offsets[i + 1]])
    return torch.from_numpy(a)
def test_copyto(self, xp, dtype):
    a = testing.shaped_arange((2, 3, 4), xp, dtype)
    b = xp.empty((2, 3, 4), dtype=dtype)
    xp.copyto(b, a)
    return b

def test_copyto_dtype(self, xp, dtype):
    a = testing.shaped_arange((2, 3, 4), xp, dtype='?')
    b = xp.empty((2, 3, 4), dtype=dtype)
    xp.copyto(b, a)
    return b

def test_copyto_broadcast(self, xp, dtype):
    a = testing.shaped_arange((3, 1), xp, dtype)
    b = xp.empty((2, 3, 4), dtype=dtype)
    xp.copyto(b, a)
    return b

def test_copyto_where(self, xp, dtype):
    a = testing.shaped_arange((2, 3, 4), xp, dtype)
    b = testing.shaped_reverse_arange((2, 3, 4), xp, dtype)
    c = testing.shaped_arange((2, 3, 4), xp, '?')
    xp.copyto(a, b, where=c)
    return a

def test_copyto_multigpu(self, xp, dtype):
    with cuda.Device(0):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
    with cuda.Device(1):
        b = xp.empty((2, 3, 4), dtype=dtype)
    xp.copyto(b, a)
    return b

def test_copyto_multigpu_noncontinguous(self, dtype):
    with cuda.Device(0):
        src = testing.shaped_arange((2, 3, 4), cupy, dtype)
        src = src.swapaxes(0, 1)
    with cuda.Device(1):
        dst = cupy.empty_like(src)
        cupy.copyto(dst, src)
    expected = testing.shaped_arange((2, 3, 4), numpy, dtype)
    expected = expected.swapaxes(0, 1)
    testing.assert_array_equal(expected, src.get())
    testing.assert_array_equal(expected, dst.get())

def test_copyto(self, xp, dtype):
    dst = xp.ones(self.dst_shape, dtype=dtype)
    xp.copyto(dst, self.src)
    return dst
def combine_constraints(self, constraints):
    if constraints is not None:  # [hack]
        # print('combine strokes')
        [im_c, mask_c, im_e, mask_e] = constraints
        if self.prev_im_c is None:
            mask_c_f = mask_c
        else:
            mask_c_f = np.maximum(self.prev_mask_c, mask_c)

        if self.prev_im_e is None:
            mask_e_f = mask_e
        else:
            mask_e_f = np.maximum(self.prev_mask_e, mask_e)

        if self.prev_im_c is None:
            im_c_f = im_c
        else:
            im_c_f = self.prev_im_c.copy()
            mask_c3 = np.tile(mask_c, [1, 1, im_c.shape[2]])
            np.copyto(im_c_f, im_c, where=mask_c3.astype(np.bool))  # [hack]

        if self.prev_im_e is None:
            im_e_f = im_e
        else:
            im_e_f = self.prev_im_e.copy()
            mask_e3 = np.tile(mask_e, [1, 1, im_e.shape[2]])
            np.copyto(im_e_f, im_e, where=mask_e3.astype(np.bool))

        return [im_c_f, mask_c_f, im_e_f, mask_e_f]
    else:
        return [self.prev_im_c, self.prev_mask_c, self.prev_im_e, self.prev_mask_e]
def align_fill_down(l, u, long_indexed_df, long_array):
    '''Data align current values to all future months
    (short array segment aligned to long array)

    This function is used to set the values from the last standalone month as
    the initial data for integrated dataset computation when a delayed
    implementation exists.

    uses pandas df auto align - relatively slow
    TODO (for developer) - consider an all numpy solution

    inputs
        l, u (integers)
            current month slice indexes (from long df)
        long_indexed_df (dataframe)
            empty long dataframe with empkey indexes
        long_array (array)
            long array of multiple month data
            (orig_job, fur_codes, etc)

    declare long indexed df outside of function (input).
    grab current month slice for array insertion (copy).
    chop long df to begin with current month (copy).
    assign array to short df.
    data align short df to long df (chopped to current month and future).
    copy chopped df column as array to long_array
    return long_array
    '''
    short_df = long_indexed_df[l:u].copy()
    short_df['x'] = long_array[l:u]
    # chopped_df begins with a defined index (row), normally the beginning of
    # a delayed implementation month
    chopped_df = long_indexed_df[l:].copy()
    # data align short_df to chopped_df
    chopped_df['x'] = short_df['x']
    result_array = chopped_df.x.values
    result_size = result_array.size
    np.copyto(long_array[-result_size:], result_array)

    return long_array

# ALIGN NEXT (month)