Python numpy module: int8() example source code
From open-source Python projects, we have extracted the following 50 code examples to illustrate how to use numpy.int8().
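Before the project examples, a minimal sketch of the basics (our own illustration, not drawn from any of the projects below): np.int8 is NumPy's signed 8-bit integer type, covering the range [-128, 127]. It serves both as a dtype and as a scalar constructor, and casts from wider integer types keep only the low byte.
import numpy as np

a = np.array([1, 2, 3], dtype=np.int8)  # one byte per element
assert a.dtype == np.int8 and a.itemsize == 1
s = np.int8(42)                         # also usable as a scalar constructor
# Casting from a wider type wraps values into [-128, 127]:
wrapped = np.array([130, -200], dtype=np.int16).astype(np.int8)
print(wrapped)  # [-126   56]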
def run_test_matmul_aa_correlator_kernel(self, ntime, nstand, nchan, misalign=0):
    x_shape = (ntime, nchan, nstand * 2)
    perm = [1, 0, 2]
    x8 = ((np.random.random(size=x_shape + (2,)) * 2 - 1) * 127).astype(np.int8)
    x = x8.astype(np.float32).view(np.complex64).reshape(x_shape)
    x = x.transpose(perm)
    x = x[..., misalign:]
    b_gold = np.matmul(H(x), x)
    triu = np.triu_indices(x.shape[-1], 1)
    b_gold[..., triu[0], triu[1]] = 0
    x = x8.view(bf.DataType.ci8).reshape(x_shape)
    x = bf.asarray(x, space='cuda')
    x = x.transpose(perm)
    x = x[..., misalign:]
    b = bf.zeros_like(b_gold, space='cuda')
    self.linalg.matmul(1, None, x, 0, b)
    b = b.copy('system')
    np.testing.assert_allclose(b, b_gold, RTOL * 10, ATOL)
def test_quantize_from_probs2(size, resolution):
    set_random_seed(make_seed(size, resolution))
    probs = np.exp(np.random.random(size)).astype(np.float32)
    probs2 = probs.reshape((1, size))
    quantized = quantize_from_probs2(probs2, resolution)
    assert quantized.shape == probs2.shape
    assert quantized.dtype == np.int8
    assert np.all(quantized.sum(axis=1) == resolution)
    # Check that quantized result is closer to target than any other value.
    quantized = quantized.reshape((size, ))
    target = resolution * probs / probs.sum()
    distance = np.abs(quantized - target).sum()
    for combo in itertools.combinations(range(size), resolution):
        other = np.zeros(size, np.int8)
        for i in combo:
            other[i] += 1
        assert other.sum() == resolution
        other_distance = np.abs(other - target).sum()
        assert distance <= other_distance
def test_server_logprob_normalized(N, V, C, M):
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)
    # The total probability of all categorical rows should be 1.
    ragged_index = model['suffstats']['ragged_index']
    factors = []
    for v in range(V):
        C = ragged_index[v + 1] - ragged_index[v]
        factors.append([one_hot(c, C) for c in range(C)])
    data = np.array(
        [np.concatenate(columns) for columns in itertools.product(*factors)],
        dtype=np.int8)
    logprobs = server.logprob(data)
    logtotal = np.logaddexp.reduce(logprobs)
    assert logtotal == pytest.approx(0.0, abs=1e-5)
def test_server_median(N, V, C, M):
    model = generate_fake_model(N, V, C, M)
    config = TINY_CONFIG.copy()
    config['model_num_clusters'] = M
    model['config'] = config
    server = TreeCatServer(model)
    # Evaluate on random data.
    counts = np.random.randint(10, size=[V], dtype=np.int8)
    table = generate_dataset(N, V, C)['table']
    median = server.median(counts, table.data)
    assert median.shape == table.data.shape
    assert median.dtype == np.int8
    for v in range(V):
        beg, end = table.ragged_index[v:v + 2]
        totals = median[:, beg:end].sum(axis=1)
        assert np.all(totals == counts[v])
def observed_perplexity(self, counts):
    """Compute perplexity = exp(entropy) of observed variables.

    Perplexity is an information theoretic measure of the number of
    clusters or latent classes. Perplexity is a real number in the range
    [1, M], where M is model_num_clusters.

    Args:
        counts: A [V]-shaped array of multinomial counts.

    Returns:
        A [V]-shaped numpy array of perplexity.
    """
    V, E, M, R = self._VEMR
    if counts is None:
        # Default to a single observation per variable.
        counts = np.ones(V, dtype=np.int8)
    assert counts.shape == (V, )
    assert counts.dtype == np.int8
    assert np.all(counts > 0)
    observed_entropy = np.empty(V, dtype=np.float32)
    for v in range(V):
        beg, end = self._ragged_index[v:v + 2]
        probs = np.dot(self._feat_cond[beg:end, :], self._vert_probs[v, :])
        observed_entropy[v] = multinomial_entropy(probs, counts[v])
    return np.exp(observed_entropy)
def quantize_from_probs2(probs, resolution):
    """Quantize multiple non-normalized probs to given resolution.

    Args:
        probs: An [N, M]-shaped numpy array of non-normalized probabilities.
        resolution: The number of quantization units to distribute per row.

    Returns:
        An [N, M]-shaped array of quantized probabilities such that
        np.all(result.sum(axis=1) == resolution).
    """
    assert len(probs.shape) == 2
    N, M = probs.shape
    probs = probs / probs.sum(axis=1, keepdims=True)
    result = np.zeros(probs.shape, np.int8)
    range_N = np.arange(N, dtype=np.int32)
    for _ in range(resolution):
        sample = probs.argmax(axis=1)
        result[range_N, sample] += 1
        probs[range_N, sample] -= 1.0 / resolution
    return result
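A quick usage sketch for quantize_from_probs2 as defined above (the input values are our own illustration): four quantization units are distributed greedily in proportion to probability mass.
probs = np.array([[0.6, 0.25, 0.15]], dtype=np.float32)
print(quantize_from_probs2(probs, resolution=4))  # [[2 1 1]], dtype int8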
def count_observations(ragged_index, data):
    """Count the observations in each cell of a ragged data array.

    Args:
        ragged_index: A [V+1]-shaped numpy array as returned by
            make_ragged_index.
        data: A [N, R]-shaped ragged array of multinomial count data, where
            N is the number of rows and R = ragged_index[-1].

    Returns:
        A [N, V]-shaped array whose entries are the number of observations
        in each cell of data.
    """
    N, R = data.shape
    assert R == ragged_index[-1]
    V = len(ragged_index) - 1
    counts = np.zeros([N, V], np.int8)
    for v in range(V):
        beg, end = ragged_index[v:v + 2]
        counts[:, v] = data[:, beg:end].sum(axis=1)
    return counts
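A toy invocation of count_observations (values are our own illustration): with V=2 ragged cells spanning columns [0:2) and [2:5), each output entry is the row sum of the corresponding slice.
ragged_index = np.array([0, 2, 5])
data = np.array([[1, 0, 2, 0, 1]], dtype=np.int8)
print(count_observations(ragged_index, data))  # [[1 3]]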
def __getitem__(self, index):
    img_name = self.files[self.split][index]
    img_path = self.root + '/' + self.split + '/' + img_name
    lbl_path = self.root + '/' + self.split + 'annot/' + img_name
    img = m.imread(img_path)
    img = np.array(img, dtype=np.uint8)
    lbl = m.imread(lbl_path)
    lbl = np.array(lbl, dtype=np.int8)
    if self.augmentations is not None:
        img, lbl = self.augmentations(img, lbl)
    if self.is_transform:
        img, lbl = self.transform(img, lbl)
    return img, lbl
def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the
    other way around.
    """
    # Define pairs
    tmp = [(np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
           (np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
           (np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
           (np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
           (np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG')]
    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__
    # Done
    return map1, map2
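For reference, the two dictionaries returned by _get_dtype_maps are keyed by numpy type name and by MetaImage type code respectively:
map1, map2 = _get_dtype_maps()
print(map1['int8'])      # 'MET_CHAR'
print(map2['MET_CHAR'])  # 'int8'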
def flip_code(code):
    if isinstance(code, (numpy.dtype, type)):
        # since several things map to complex64 we must carefully select
        # the opposite that is an exact match (ticket 1518)
        if code == numpy.int8:
            return gdalconst.GDT_Byte
        if code == numpy.complex64:
            return gdalconst.GDT_CFloat32
        for key, value in codes.items():
            if value == code:
                return key
        return None
    else:
        try:
            return codes[code]
        except KeyError:
            return None
def UnHidePdfintoPng(img, x, y):
    counterx = 0
    countery = 0
    h, w, c = img.shape
    data = []
    while counterx <= x:
        # Each pixel stores one hidden byte: 2 bits in each of its 4 channels.
        temp1 = (img[counterx][countery][0] & 0x03) << 6
        temp2 = (img[counterx][countery][1] & 0x03) << 4
        temp3 = (img[counterx][countery][2] & 0x03) << 2
        temp4 = (img[counterx][countery][3] & 0x03)
        data.append(temp1 | temp2 | temp3 | temp4)
        if counterx == x and countery == y:
            # print('EOF Found')
            break
        if countery == w - 1:
            countery = 0
            counterx += 1
        else:
            countery += 1
    data = np.int8(data)
    return data
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8, but
    not much slower on np.int16.'''
    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2
    px = x - w2
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
        return
    dst[y_min:y_max, x_min:x_max] += 1
def __read_spike_fixed(self, numpts=40):
    """
    Read a spike with a fixed waveform length (40 time bins)

    -------------------------------------------
    Returns the time, waveform and trig2 value.

    The returned objects must be converted to a SpikeTrain then
    added to the Block.

    ID: 29079
    """
    # float32 -- spike time stamp in ms since start of SpikeTrain
    time = np.fromfile(self._fsrc, dtype=np.float32, count=1)
    # int8 * 40 -- spike shape -- use numpts for spike_var
    waveform = np.fromfile(self._fsrc, dtype=np.int8,
                           count=numpts).reshape(1, 1, numpts)
    # uint8 -- point of return to noise
    trig2 = np.fromfile(self._fsrc, dtype=np.uint8, count=1)
    return time, waveform, trig2
def _check_valid_data(self, data):
    """Checks that the incoming data is a 2 x #elements ndarray of ints.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to verify.

    Raises
    ------
    ValueError
        If the data is not of the correct shape or type.
    """
    if data.dtype.type not in (np.int8, np.int16, np.int32, np.int64,
                               np.uint8, np.uint16, np.uint32, np.uint64):
        raise ValueError('Must initialize image coords with a numpy int ndarray')
    if data.shape[0] != 2:
        raise ValueError('Illegal data array passed to image coords. Must have 2 coordinates')
    if len(data.shape) > 2:
        raise ValueError('Illegal data array passed to point cloud. Must have 1 or 2 dimensions')
def _gene_signature(self, wm, size, key):
    '''Generate a binary signature from the watermark image and key.'''
    wm = cv2.resize(wm, (size, size))
    wU, _, wV = np.linalg.svd(np.mat(wm))
    sumU = np.sum(np.array(wU), axis=0)
    sumV = np.sum(np.array(wV), axis=0)
    sumU_mid = np.median(sumU)
    sumV_mid = np.median(sumV)
    sumU = np.array([1 if sumU[i] > sumU_mid else 0 for i in range(len(sumU))])
    sumV = np.array([1 if sumV[i] > sumV_mid else 0 for i in range(len(sumV))])
    uv_xor = np.logical_xor(sumU, sumV)
    np.random.seed(key)
    seq = np.random.randint(2, size=len(uv_xor))
    signature = np.logical_xor(uv_xor, seq)
    sqrts = int(np.sqrt(size))
    return np.array(signature, dtype=np.int8).reshape((sqrts, sqrts))
def _gene_signature(self, wm, key):
    '''Generate a binary signature from the watermark image and key.'''
    wm = cv2.resize(wm, (256, 256))
    wU, _, wV = np.linalg.svd(np.mat(wm))
    sumU = np.sum(np.array(wU), axis=0)
    sumV = np.sum(np.array(wV), axis=0)
    sumU_mid = np.median(sumU)
    sumV_mid = np.median(sumV)
    sumU = np.array([1 if sumU[i] > sumU_mid else 0 for i in range(len(sumU))])
    sumV = np.array([1 if sumV[i] > sumV_mid else 0 for i in range(len(sumV))])
    uv_xor = np.logical_xor(sumU, sumV)
    np.random.seed(key)
    seq = np.random.randint(2, size=len(uv_xor))
    signature = np.logical_xor(uv_xor, seq)
    return np.array(signature, dtype=np.int8)
def _gene_signature(self, wU, wV, key):
    '''Generate a binary signature from the singular vectors and key.'''
    sumU = np.sum(wU, axis=0)
    sumV = np.sum(wV, axis=0)
    sumU_mid = np.median(sumU)
    sumV_mid = np.median(sumV)
    sumU = np.array([1 if sumU[i] > sumU_mid else 0 for i in range(len(sumU))])
    sumV = np.array([1 if sumV[i] > sumV_mid else 0 for i in range(len(sumV))])
    uv_xor = np.logical_xor(sumU, sumV)
    np.random.seed(key)
    seq = np.random.randint(2, size=len(uv_xor))
    signature = np.logical_xor(uv_xor, seq)
    return np.array(signature, dtype=np.int8)
def test_bin_counts(self):
    metadata2 = {'cellType': 'int32ud-500',
                 'extent': self.extent,
                 'crs': '+proj=longlat +datum=WGS84 +no_defs ',
                 'bounds': {
                     'minKey': {'col': 0, 'row': 0},
                     'maxKey': {'col': 0, 'row': 0}},
                 'layoutDefinition': {
                     'extent': self.extent,
                     'tileLayout': {'tileCols': 4, 'tileRows': 4,
                                    'layoutCols': 1, 'layoutRows': 1}}}
    arr2 = np.int8([[[1, 1, 1, 1],
                     [3, 1, 1, 1],
                     [4, 3, 1, 1],
                     [5, 4, 3, 1]]])
    tile2 = Tile(arr2, 'INT', -500)
    rdd2 = BaseTestClass.pysc.parallelize([(self.spatial_key, tile2)])
    tiled2 = TiledRasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd2, metadata2)
    hist2 = tiled2.get_class_histogram()
    bin_counts = hist2.bin_counts()
    self.assertEqual(bin_counts, [(1, 10), (3, 3), (4, 2), (5, 1)])
def load_texture(image_file, repeat=False):
    """Load a texture from the file image_file."""
    img = Image.open(image_file)
    data = numpy.array(list(img.getdata()), numpy.int8)
    tex = glGenTextures(1)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glBindTexture(GL_TEXTURE_2D, tex)
    if repeat:
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
    else:
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, img.size[0], img.size[1], 0,
                 GL_RGB, GL_UNSIGNED_BYTE, data)
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
    return tex
def wavWrite(y, fs, nbits, audioFile):
    """ Write samples to WAV file
    Args:
        y: (ndarray / 2D ndarray) (floating point) sample vector
            mono: DIM: nSamples
            stereo: DIM: nSamples x nChannels
        fs: (int) Sample rate in Hz
        nbits: (int) Number of bits
        audioFile: (string) WAV file name to write
    """
    if nbits == 8:
        intsamples = (y + 1.0) * AudioIO.normFact['int' + str(nbits)]
        fX = np.int8(intsamples)
    elif nbits == 16:
        intsamples = y * AudioIO.normFact['int' + str(nbits)]
        fX = np.int16(intsamples)
    elif nbits > 16:
        fX = y
    write(audioFile, fs, fX)
def test_copy_from(self):
    a = DynamicArray(0, numpy.dtype([
        ('a', numpy.int8),
        ('b', numpy.float32),
        ('c', numpy.float64),
    ]))
    a.extend([
        (4, 3.932, 902.345),
        (7, 1.016, 548.229),
        (2, 0.542, 771.031),
        (8, 5.429, 858.063),
    ])
    b = DynamicArray(0, numpy.dtype([
        ('a', numpy.int8),
        ('c', numpy.float64),
    ]))
    b.copy_from(a.data)
    self.assertEqual(len(b), 4)
    self.assertEqual(b.data.tolist(),
                     [(4, 902.345), (7, 548.229), (2, 771.031), (8, 858.063)])
def test_resize(self):
    a = DynamicArray(0, numpy.dtype(numpy.int8))
    a.extend([0, 1, 4, 9])
    self.assertEqual(len(a), 4)
    self.assertEqual(len(a.data), 4)
    self.assertEqual(a.data.tolist(), [0, 1, 4, 9])
    a.resize(2)
    self.assertEqual(len(a), 2)
    self.assertEqual(len(a.data), 2)
    self.assertEqual(a.data.tolist(), [0, 1])
    request = a.capacity * 2
    a.resize(request)
    self.assertEqual(len(a), request)
    self.assertEqual(len(a.data), request)
    self.assertGreaterEqual(a.capacity, request)
    self.assertEqual(a.data.tolist()[:2], [0, 1])
def test_append(self):
    b = OrderedBuffer(3, numpy.dtype(numpy.int8))
    data = b.append(9)
    self.assertEqual(data, 9)
    self.assertIsInstance(data, numpy.int8)
    self.assertEqual(len(b), 1)
    self.assertEqual([chunk.tolist() for chunk in b.chunks], [[9]])
    data = b.append(1)
    self.assertIsInstance(data, numpy.int8)
    self.assertEqual(len(b), 2)
    self.assertEqual([chunk.tolist() for chunk in b.chunks], [[9, 1]])
    data = b.append(4)
    self.assertIsInstance(data, numpy.int8)
    self.assertEqual(len(b), 3)
    self.assertEqual([chunk.tolist() for chunk in b.chunks], [[9, 1, 4]])
    data = b.append(0)
    self.assertIsInstance(data, numpy.int8)
    self.assertEqual(len(b), 4)
    self.assertEqual([chunk.tolist() for chunk in b.chunks], [[9, 1, 4], [0]])
def test_clear(self):
    b = OrderedBuffer(3, numpy.dtype(numpy.int8))
    b.extend([9, 1])
    b.extend([1, 2, 3, 4, 5])
    b.append(4)
    self.assertEqual(len(b), 8)
    self.assertEqual([chunk.tolist() for chunk in b.chunks],
                     [[9, 1], [1, 2, 3, 4, 5], [4]])
    b.clear()
    self.assertEqual(len(b), 0)
    self.assertEqual([chunk.tolist() for chunk in b.chunks], [])
    b.append(0)
    b.extend([7, 8, 9])
    b.append(1)
    b.extend([2])
    self.assertEqual(len(b), 6)
    self.assertEqual([chunk.tolist() for chunk in b.chunks],
                     [[0], [7, 8, 9], [1, 2]])
def set_prior(self, bernoulli_prior=None, density_conditions=None):
    """
    density_conditions is the maximum number of ones in each dimension:
    [min_row, min_col, max_row, max_col].
    A zero means unrestricted.
    """
    if density_conditions is None:
        self.density_conditions = np.array([0, 0, 0, 0], dtype=np.int8)
    else:
        assert len(density_conditions) == 4
        self.density_conditions = np.array(density_conditions, dtype=np.int8)
    self.bernoulli_prior = bernoulli_prior
    if bernoulli_prior is None:
        self.logit_bernoulli_prior = 0
    else:
        self.logit_bernoulli_prior = np.log(bernoulli_prior / (1 - bernoulli_prior))
def test_float(self):
    # offset for alignment test
    for i in range(4):
        assert_array_equal(self.f[i:] > 0, self.ef[i:])
        assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
        assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
        assert_array_equal(-self.f[i:] < 0, self.ef[i:])
        assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
        r = self.f[i:] != 0
        assert_array_equal(r, self.ef[i:])
        r2 = self.f[i:] != np.zeros_like(self.f[i:])
        r3 = 0 != self.f[i:]
        assert_array_equal(r, r2)
        assert_array_equal(r, r3)
        # check bool == 0x1
        assert_array_equal(r.view(np.int8), r.astype(np.int8))
        assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
        assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
        # isnan on amd64 takes the same code path
        assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
def test_unaligned(self):
    v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
    d = v.view(np.dtype("S8"))
    # unaligned source
    x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
    x = x.view(np.dtype("S8"))
    x[...] = np.array("b" * 8, dtype="S")
    b = np.arange(d.size)
    # trivial
    assert_equal(d[b], d)
    d[b] = x
    # nontrivial
    # unaligned index array
    b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
    b = b.view(np.intp)[:d.size]
    b[...] = np.arange(d.size)
    assert_equal(d[b.astype(np.int16)], d)
    d[b.astype(np.int16)] = x
    # boolean
    d[b % 2 == 0]
    d[b % 2 == 0] = x[::2]
def test_int(self):
    for st, ut, s in [(np.int8, np.uint8, 8),
                      (np.int16, np.uint16, 16),
                      (np.int32, np.uint32, 32),
                      (np.int64, np.uint64, 64)]:
        for i in range(1, s):
            assert_equal(hash(st(-2**i)), hash(-2**i),
                         err_msg="%r: -2**%d" % (st, i))
            assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (st, i - 1))
            assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (st, i))
            i = max(i - 1, 1)
            assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (ut, i - 1))
            assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (ut, i))
def test_big_binary(self):
    """Test workarounds for 32-bit limited fwrite, fseek, and ftell
    calls in windows. These normally would hang doing something like this.
    See http://projects.scipy.org/numpy/ticket/1660"""
    if sys.platform != 'win32':
        return
    try:
        # before workarounds, only up to 2**32-1 worked
        fourgbplus = 2**32 + 2**16
        testbytes = np.arange(8, dtype=np.int8)
        n = len(testbytes)
        flike = tempfile.NamedTemporaryFile()
        f = flike.file
        np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
        flike.seek(0)
        a = np.fromfile(f, dtype=np.int8)
        flike.close()
        assert_(len(a) == fourgbplus)
        # check only start and end for speed:
        assert_((a[:n] == testbytes).all())
        assert_((a[-n:] == testbytes).all())
    except (MemoryError, ValueError):
        pass
def test_padded_struct_array(self):
    dt1 = np.dtype(
        [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
        align=True)
    x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
    self._check_roundtrip(x1)
    dt2 = np.dtype(
        [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
        align=True)
    x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
    self._check_roundtrip(x2)
    dt3 = np.dtype(
        [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
         ('e', 'b'), ('sub', np.dtype('b,i', align=True))])
    x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
    self._check_roundtrip(x3)
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                  np.uint32, np.float32, np.float64, np.complex64,
                  np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
        assert_array_equal(np.cumsum(a, axis=0), tgt)
        tgt = np.array(
            [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
        assert_array_equal(np.cumsum(a2, axis=0), tgt)
        tgt = np.array(
            [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
        assert_array_equal(np.cumsum(a2, axis=1), tgt)
def test_dtypes(self):
    c = array([11, -12, 13], dtype=np.int8)
    v = vander(c)
    expected = np.array([[121, 11, 1],
                         [144, -12, 1],
                         [169, 13, 1]])
    yield (assert_array_equal, v, expected)
    c = array([1.0+1j, 1.0-1j])
    v = vander(c, N=3)
    expected = np.array([[2j, 1+1j, 1],
                         [-2j, 1-1j, 1]])
    # The data is floating point, but the values are small integers,
    # so assert_array_equal *should* be safe here (rather than, say,
    # assert_array_almost_equal).
    yield (assert_array_equal, v, expected)
def test_shuffle(self):
    # Test lists, arrays (of various dtypes), and multidimensional versions
    # of both, c-contiguous or not:
    for conv in [lambda x: np.array([]),
                 lambda x: x,
                 lambda x: np.asarray(x).astype(np.int8),
                 lambda x: np.asarray(x).astype(np.float32),
                 lambda x: np.asarray(x).astype(np.complex64),
                 lambda x: np.asarray(x).astype(object),
                 lambda x: [(i, i) for i in x],
                 lambda x: np.asarray([[i, i] for i in x]),
                 lambda x: np.vstack([x, x]).T,
                 # gh-4270
                 lambda x: np.asarray([(i, i) for i in x],
                                      [("a", object, 1),
                                       ("b", np.int32, 1)])]:
        np.random.seed(self.seed)
        alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
        np.random.shuffle(alist)
        actual = alist
        desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
        np.testing.assert_array_equal(actual, desired)
def __init__(self, model_name, saved_model_file=None, max_training_batches=1, watch=False):
    self.saved_model_file = saved_model_file
    if saved_model_file is not None:
        print('Loading saved model from %s' % saved_model_file)
        self.model = self.load_model(saved_model_file)
    else:
        self.init_model(model_name)
    # Treat as a ring buffer
    self.current_pos = 0
    self.max_pos = 0
    self.states_t0 = np.zeros((BUFFER_SIZE, 1, BOARD_HEIGHT, BOARD_WIDTH), dtype=np.int8)
    self.actions = np.zeros([BUFFER_SIZE], dtype=np.int8)
    self.states_t1 = np.zeros((BUFFER_SIZE, 1, BOARD_HEIGHT, BOARD_WIDTH), dtype=np.int8)
    self.rewards = np.zeros([BUFFER_SIZE], dtype=np.float32)
    self.n_games = 0
    self.state_printer = WebSocketPrinter()
    self.current_game_length = 0
    self.current_episode_length = 0
    self.max_training_batches = max_training_batches
    self.n_training_batches = 0
    self.model_name = model_name
def test_load_columnar_pandas_all(self, con, all_types_table):
    pd = pytest.importorskip("pandas")
    import numpy as np
    data = pd.DataFrame({
        "boolean_": [True, False],
        "smallint_": np.array([0, 1], dtype=np.int8),
        "int_": np.array([0, 1], dtype=np.int32),
        "bigint_": np.array([0, 1], dtype=np.int64),
        "float_": np.array([0, 1], dtype=np.float32),
        "double_": np.array([0, 1], dtype=np.float64),
        "varchar_": ["a", "b"],
        "text_": ['a', 'b'],
        "time_": [datetime.time(0, 11, 59), datetime.time(13)],
        "timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
        "date_": [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)],
    }, columns=['boolean_', 'smallint_', 'int_', 'bigint_', 'float_',
                'double_', 'varchar_', 'text_', 'time_', 'timestamp_',
                'date_'])
    con.load_table_columnar(all_types_table, data, preserve_index=False)
def test_load_table_creates(self, con, not_a_table):
    pd = pytest.importorskip("pandas")
    import numpy as np
    data = pd.DataFrame({
        "boolean_": [True, False],
        "smallint_cast": np.array([0, 1], dtype=np.int8),
        "smallint_": np.array([0, 1], dtype=np.int16),
        "int_": np.array([0, 1], dtype=np.int32),
        "bigint_": np.array([0, 1], dtype=np.int64),
        "float_": np.array([0, 1], dtype=np.float32),
        "double_": np.array([0, 1], dtype=np.float64),
        "varchar_": ["a", "b"],
        "text_": ['a', 'b'],
        "time_": [datetime.time(0, 11, 59), datetime.time(13)],
        "timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
        "date_": [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)],
    }, columns=['boolean_', 'smallint_', 'int_', 'bigint_', 'float_',
                'double_', 'varchar_', 'text_', 'time_', 'timestamp_',
                'date_'])
    con.load_table(not_a_table, data, create=True)
def one_hot_comparison(hot_axes, axes, C):
    """
    TODO.

    Arguments:
        hot_axes: TODO
        axes: TODO
    """
    u = rng.random_integers(0, C.length - 1, axes, dtype=np.int8)
    u_p = ng.placeholder(axes, dtype=u.dtype)
    v = np.zeros(hot_axes.lengths, dtype=np.float32)
    udxiter = np.nditer(u, flags=['multi_index'])
    for uiter in udxiter:
        vindex = [int(uiter)]
        vindex.extend(udxiter.multi_index)
        v[tuple(vindex)] = 1
    with executor(ng.one_hot(u_p, axis=C), u_p) as ex:
        v_t = ex(u)
        ng.testing.assert_allclose(v_t, v)
def interpret_header(self):
    """redefine variables from header dictionary"""
    self.nifs = self.header['nifs']
    self.nchans = self.header['nchans']
    self.nbits = self.header['nbits']
    signed = 'signed' in self.header and self.header['signed'] is True
    if self.nbits >= 8:
        if signed:
            self.dtype = {8: np.int8,
                          16: np.int16,
                          32: np.float32,
                          64: np.float64}[self.nbits]
        else:
            self.dtype = {8: np.uint8,
                          16: np.uint16,
                          32: np.float32,
                          64: np.float64}[self.nbits]
    else:
        self.dtype = np.int8 if signed else np.uint8
def numpy2bifrost(dtype):
    if dtype == np.int8:
        return _bf.BF_DTYPE_I8
    elif dtype == np.int16:
        return _bf.BF_DTYPE_I16
    elif dtype == np.int32:
        return _bf.BF_DTYPE_I32
    elif dtype == np.uint8:
        return _bf.BF_DTYPE_U8
    elif dtype == np.uint16:
        return _bf.BF_DTYPE_U16
    elif dtype == np.uint32:
        return _bf.BF_DTYPE_U32
    elif dtype == np.float16:
        return _bf.BF_DTYPE_F16
    elif dtype == np.float32:
        return _bf.BF_DTYPE_F32
    elif dtype == np.float64:
        return _bf.BF_DTYPE_F64
    elif dtype == np.float128:
        return _bf.BF_DTYPE_F128
    elif dtype == ci8:
        return _bf.BF_DTYPE_CI8
    elif dtype == ci16:
        return _bf.BF_DTYPE_CI16
    elif dtype == ci32:
        return _bf.BF_DTYPE_CI32
    elif dtype == cf16:
        return _bf.BF_DTYPE_CF16
    elif dtype == np.complex64:
        return _bf.BF_DTYPE_CF32
    elif dtype == np.complex128:
        return _bf.BF_DTYPE_CF64
    elif dtype == np.complex256:
        return _bf.BF_DTYPE_CF128
    else:
        raise ValueError("Unsupported dtype: " + str(dtype))
def numpy2string(dtype):
    if dtype == np.int8:
        return 'i8'
    elif dtype == np.int16:
        return 'i16'
    elif dtype == np.int32:
        return 'i32'
    elif dtype == np.int64:
        return 'i64'
    elif dtype == np.uint8:
        return 'u8'
    elif dtype == np.uint16:
        return 'u16'
    elif dtype == np.uint32:
        return 'u32'
    elif dtype == np.uint64:
        return 'u64'
    elif dtype == np.float16:
        return 'f16'
    elif dtype == np.float32:
        return 'f32'
    elif dtype == np.float64:
        return 'f64'
    elif dtype == np.float128:
        return 'f128'
    elif dtype == np.complex64:
        return 'cf32'
    elif dtype == np.complex128:
        return 'cf64'
    elif dtype == np.complex256:
        return 'cf128'
    else:
        raise TypeError("Unsupported dtype: " + str(dtype))
def run_test_matmul_aa_ci8_shape(self, shape, transpose=False):
    # **TODO: This currently never triggers the transpose path in the backend
    shape_complex = shape[:-1] + (shape[-1] * 2,)
    # Note: The xGPU-like correlation kernel does not support input values of -128 (only [-127:127])
    a8 = ((np.random.random(size=shape_complex) * 2 - 1) * 127).astype(np.int8)
    a_gold = a8.astype(np.float32).view(np.complex64)
    if transpose:
        a_gold = H(a_gold)
    # Note: np.matmul seems to be slow and inaccurate when there are batch dims
    c_gold = np.matmul(a_gold, H(a_gold))
    triu = np.triu_indices(shape[-2] if not transpose else shape[-1], 1)
    c_gold[..., triu[0], triu[1]] = 0
    a = a8.view(bf.DataType.ci8)
    a = bf.asarray(a, space='cuda')
    if transpose:
        a = H(a)
    c = bf.zeros_like(c_gold, space='cuda')
    self.linalg.matmul(1, a, None, 0, c)
    c = c.copy('system')
    np.testing.assert_allclose(c, c_gold, RTOL, ATOL)
def run_benchmark_matmul_aa_correlator_kernel(self, ntime, nstand, nchan):
    x_shape = (ntime, nchan, nstand * 2)
    perm = [1, 0, 2]
    x8 = ((np.random.random(size=x_shape + (2,)) * 2 - 1) * 127).astype(np.int8)
    x = x8.astype(np.float32).view(np.complex64).reshape(x_shape)
    x = x.transpose(perm)
    b_gold = np.matmul(H(x[:, [0], :]), x[:, [0], :])
    triu = np.triu_indices(x_shape[-1], 1)
    b_gold[..., triu[0], triu[1]] = 0
    x = x8.view(bf.DataType.ci8).reshape(x_shape)
    x = bf.asarray(x, space='cuda')
    x = x.transpose(perm)
    b = bf.zeros_like(b_gold, space='cuda')
    bf.device.stream_synchronize()
    t0 = time.time()
    nrep = 200
    for _ in range(nrep):
        self.linalg.matmul(1, None, x, 0, b)
    bf.device.stream_synchronize()
    dt = time.time() - t0
    nflop = nrep * nchan * ntime * nstand * (nstand + 1) / 2 * 2 * 2 * 8
    print(nstand, '\t', nflop / dt / 1e9, 'GFLOP/s')
    print('\t\t', nrep * ntime * nchan / dt / 1e6, 'MHz')
def perform(self, node, inputs_storage, output_storage):
    """Perform the transformation from output to feature space.

    Defines the Python implementation of the op. It is in charge of doing
    the processing to go from output space (statematrix) to feature space.

    Parameters
    ----------
    node :
        Reference to an Apply node which was previously obtained via
        the Op's make_node() method.
    inputs_storage : array_like
        A list of references to data which can be operated on using
        non-symbolic statements
    output_storage : array_like
        A list of storage cells where the output is to be stored
    """
    state, time = inputs_storage
    output_storage[0][0] = np.array(
        self.d.f.note_state_single_to_input_form(state, time), dtype='int8')
def load_board(string):
    reverse_map = {
        'X': go.BLACK,
        'O': go.WHITE,
        '.': go.EMPTY,
        '#': go.FILL,
        '*': go.KO,
        '?': go.UNKNOWN
    }
    string = re.sub(r'[^XO\.#]+', '', string)
    assert len(string) == go.N ** 2, "Board to load didn't have right dimensions"
    board = np.zeros([go.N, go.N], dtype=np.int8)
    for i, char in enumerate(string):
        np.ravel(board)[i] = reverse_map[char]
    return board
def set_data_type(self, type):
    """ Sets the data type for the TRiP98 header files.

    :param numpy.type type: numpy type, e.g. np.uint16
    """
    if type is np.int8 or type is np.uint8:
        self.data_type = "integer"
        self.num_bytes = 1
    elif type is np.int16 or type is np.uint16:
        self.data_type = "integer"
        self.num_bytes = 2
    elif type is np.int32 or type is np.uint32:
        self.data_type = "integer"
        self.num_bytes = 4
    elif type is np.float32:  # np.float was removed from NumPy; float32 matches the 4-byte "float" type
        self.data_type = "float"
        self.num_bytes = 4
    elif type is np.double:
        self.data_type = "double"
        self.num_bytes = 8

# ###################### WRITING DICOM FILES #######################################
def schedule_to_array(schedule, events, slots):
    """Convert a schedule from schedule to array form

    Parameters
    ----------
    schedule : list or tuple
        of instances of :py:class:`resources.ScheduledItem`
    events : list or tuple
        of :py:class:`resources.Event` instances
    slots : list or tuple
        of :py:class:`resources.Slot` instances

    Returns
    -------
    np.array
        An E by S array (X) where E is the number of events and S the
        number of slots. Xij is 1 if event i is scheduled in slot j and
        zero otherwise
    """
    array = np.zeros((len(events), len(slots)), dtype=np.int8)
    for item in schedule:
        array[events.index(item.event), slots.index(item.slot)] = 1
    return array
def export_rows(schema, data):
    """Export multiple rows of internal data to json format.

    Args:
        schema: A schema dict as returned by load_schema().
        data: An [N, R]-shaped numpy array of ragged data, where N is the
            number of rows and R = schema['ragged_index'][-1].

    Returns:
        An N-long list of sparse dicts mapping feature names to json values,
        where N is the number of rows.
    """
    logger.debug('Exporting {:d} rows', data.shape[0])
    assert data.dtype == np.int8
    assert len(data.shape) == 2
    ragged_index = schema['ragged_index']
    assert data.shape[1] == ragged_index[-1]
    feature_names = schema['feature_names']
    feature_types = schema['feature_types']
    categorical_values = schema['categorical_values']
    ordinal_ranges = schema['ordinal_ranges']
    rows = [{} for _ in range(data.shape[0])]
    for external_row, internal_row in zip(rows, data):
        for v, name in enumerate(feature_names):
            beg, end = ragged_index[v:v + 2]
            internal_cell = internal_row[beg:end]
            if np.all(internal_cell == 0):
                continue
            typename = feature_types[name]
            if typename == CATEGORICAL:
                assert internal_cell.sum() == 1, internal_cell
                value = categorical_values[name][internal_cell.argmax()]
            elif typename == ORDINAL:
                min_max = ordinal_ranges[name]
                assert internal_cell.sum() == min_max[1] - min_max[0]
                value = internal_cell[0] + min_max[0]
            else:
                raise ValueError(typename)
            external_row[name] = value
    return rows
def validate_sample_shape(table, server):
    # Sample many different counts patterns.
    V = table.num_cols
    N = table.num_rows
    factors = [[0, 1, 2]] * V
    for counts in itertools.product(*factors):
        counts = np.array(counts, dtype=np.int8)
        for n in range(N):
            row = table.data[n, :]
            samples = server.sample(N, counts, row)
            assert samples.shape == (N, row.shape[0])
            assert samples.dtype == row.dtype
            for v in range(V):
                beg, end = table.ragged_index[v:v + 2]
                assert np.all(samples[:, beg:end].sum(axis=1) == counts[v])