Python numpy module: int16() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.int16().
def _write_binary_i_16(
        task_handle, write_array, num_samps_per_chan, auto_start, timeout,
        data_layout=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteBinaryI16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int16, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, write_array,
        ctypes.byref(samps_per_chan_written), None)
    check_for_error(error_code)

    return samps_per_chan_written.value
def _read_binary_i_16(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryI16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int16, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value
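Both wrappers rely on declaring the C function's argument types with a writable int16 ndpointer so that a preallocated NumPy array serves as the output buffer. Below is a self-contained sketch of the same pattern, with libc's memcpy standing in for the DAQmx call; it assumes wrapped_ndpointer behaves like numpy.ctypeslib.ndpointer (wrapped_ndpointer itself is not shown in this excerpt), and it targets POSIX systems where find_library('c') resolves.

import ctypes
import ctypes.util
import numpy as np

libc = ctypes.CDLL(ctypes.util.find_library('c'))  # POSIX; adjust for Windows
libc.memcpy.restype = ctypes.c_void_p
libc.memcpy.argtypes = [
    np.ctypeslib.ndpointer(dtype=np.int16, flags=('C', 'W')),  # writable output buffer
    np.ctypeslib.ndpointer(dtype=np.int16, flags='C'),         # source buffer
    ctypes.c_size_t,
]

src = np.arange(8, dtype=np.int16)
dst = np.zeros_like(src)
libc.memcpy(dst, src, src.nbytes)  # the C function fills the int16 array in place
assert (dst == src).all()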
def jit_remove_edge(grid, e2k, neighbors, components, e):
    """Remove an edge from a spanning tree."""
    k = e2k[e]
    v1, v2 = grid[1:3, k]
    jit_set_remove(neighbors[v1], v2)
    jit_set_remove(neighbors[v2], v1)
    stack = np.zeros(neighbors.shape[0], np.int16)
    jit_set_add(stack, v1)
    while stack[0]:
        v1 = jit_set_pop(stack)
        components[v1] = True
        for i in range(neighbors[v1, 0]):
            v2 = neighbors[v1, i + 1]
            if not components[v2]:
                jit_set_add(stack, v2)
    return k
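The jit_set_* helpers are not shown in this excerpt. A minimal sketch consistent with how they are used above, where slot 0 of an int16 array holds the element count (the duplicate handling a real set would need is omitted):

import numpy as np

def jit_set_add(s, v):
    # append v; s[0] stores the current element count
    s[0] += 1
    s[s[0]] = v

def jit_set_pop(s):
    # remove and return the most recently added element
    v = s[s[0]]
    s[0] -= 1
    return v

stack = np.zeros(8, np.int16)
jit_set_add(stack, 3)
jit_set_add(stack, 5)
assert jit_set_pop(stack) == 5
assert stack[0] == 1  # one element left, so `while stack[0]:` stays truthy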
def get_3d_data_slices(slices):  # get data in Hounsfield units
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from v 9
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correct cylindrical bound entries to 0
    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)
    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0
    # Convert to Hounsfield units (HU)
    ### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
    ### Changes/correction - 31.01.2017
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
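A hedged usage sketch: load one scan's slices with pydicom (dcmread is the modern name for read_file) and convert them to Hounsfield units. The directory path is a placeholder.

import os
import pydicom

scan_dir = '/path/to/dicom/scan'  # placeholder: directory holding one scan
slices = [pydicom.dcmread(os.path.join(scan_dir, f)) for f in os.listdir(scan_dir)]
slices.sort(key=lambda s: float(s.ImagePositionPatient[2]))
hu = get_pixels_hu(slices)
print(hu.dtype, hu.min(), hu.max())  # int16, roughly -1024 (air) up to ~3000 (bone)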
def get_3d_data_hu(path):  # get data in Hounsfield units
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    # slices.sort(key=lambda x: int(x.InstanceNumber))  # was x.InstanceNumber
    # slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))  # from v8 - BUGGY
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from 22.02
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correct cylindrical bound entries to 0
    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the
    other way around.
    """
    # Define pairs
    tmp = [(np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
           (np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
           (np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
           (np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
           (np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG')]

    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__

    # Done
    return map1, map2
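Usage, derived directly from the pairs above; dtype names are the forward keys and ITK/MetaImage type names are the reverse keys:

import numpy as np

map1, map2 = _get_dtype_maps()
assert map1['int16'] == 'MET_SHORT'
assert map2['MET_SHORT'] == 'int16'
itk_type = map1[np.dtype(np.int16).name]  # a dtype's .name gives the same key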
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates.
    '''
    s = self.config['projective_field_size']
    unpadded_size = self.config['output_size']
    target_size = 3 + unpadded_size + 2 * s
    countMaps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)
    for coord in coords:
        y = coord[1] - self.config['contextual_pad']
        x = coord[2] - self.config['contextual_pad']
        if y >= 0 and y < self.config['tile_size'] and \
           x >= 0 and x < self.config['tile_size']:
            self.inc_region(countMaps[coord[0]], *self.target_sizes[y, x])
    return np.moveaxis(countMaps, 0, -1).astype(np.float32)
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8, but not
    much slower on np.int16.'''
    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2
    px = x - w2
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
        return
    dst[y_min:y_max, x_min:x_max] += 1
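Since self is never used, the clipping behavior is easy to check standalone; a minimal sketch:

import numpy as np

canvas = np.zeros((6, 6), dtype=np.int16)
inc_region(None, canvas, y=1, x=1, h=4, w=4)  # 4x4 window around (1, 1), clipped at the edge
print(canvas)
# only the in-bounds part of the window (rows 0..2, cols 0..2) was incremented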
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates. It can
    count at most 256 objects within the receptive field. Beyond that it
    overflows.
    '''
    s = self.config['receptive_field_size']
    pad = s // 2
    unpadded_size = self.config['tile_size']
    target_size = 1 + unpadded_size + 2 * pad
    countMaps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)
    y_min = 0
    y_max = unpadded_size
    x_min = 0
    x_max = unpadded_size
    for coord in coords:
        if coord[1] >= y_min and coord[1] < y_max and coord[2] >= x_min and coord[2] < x_max:
            self.inc_region(countMaps[coord[0]], coord[1] + pad, coord[2] + pad, s, s)
    return np.moveaxis(countMaps, 0, -1).astype(np.float32)
def generateCountMaps(self, coords):
    '''Generates a count map for the provided list of coordinates.
    '''
    s = self.config['projective_field_size']
    target_size = 3 + self.config['output_size'] + 2 * s
    count_maps = np.zeros((self.config['cls_nb'], target_size, target_size), dtype=np.int16)
    shift = -self.config['contextual_pad']
    size = self.config['tile_size']
    for coord in coords:
        y = coord[1] + shift
        x = coord[2] + shift
        if y >= 0 and y < size and \
           x >= 0 and x < size:
            self.inc_region(count_maps[coord[0]], *self.target_sizes[y, x])
    return np.moveaxis(count_maps, 0, -1).astype(np.float32)
def __read_annotations_old(self):
    """
    Read the stimulus grid properties.

    Returns a dictionary containing the parameter names as keys and the
    parameter values as values.

    ------------------------------------------------
    The returned objects must be added to the Block.

    This reads an old version of the format that does not store parameter
    names, so placeholder names are created instead.

    ID: 29099
    """
    # int16 * 14 -- an array of parameter values
    values = np.fromfile(self._fsrc, dtype=np.int16, count=14)

    # create dummy names and combine them with the values in a dict
    # the dict will be added to the annotations
    params = ['param%s' % i for i in range(len(values))]
    annotations = dict(zip(params, values))

    return annotations
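A self-contained round trip of the np.fromfile call, with a temporary file standing in for self._fsrc:

import tempfile
import numpy as np

with tempfile.TemporaryFile() as f:
    np.arange(14, dtype=np.int16).tofile(f)  # 14 little int16 parameter values
    f.seek(0)
    values = np.fromfile(f, dtype=np.int16, count=14)

params = ['param%s' % i for i in range(len(values))]
annotations = dict(zip(params, values))
assert annotations['param3'] == 3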
def read_input_features(l, inp=sys.stdin):
    if isinstance(inp, str):
        with open(inp, 'r') as f:
            return read_input_features(l, f)  # pass l through (the original recursion dropped it)
    print("%d samples" % l, file=sys.stderr)
    xs = np.zeros((l, flen), np.int16)
    ys = np.zeros((l, n * n * classes), np.int16)
    i = 0
    for line in inp:
        xs[i, :], ys[i, :] = parse_csv_row_xy(line)
        i += 1
        if i % 10000 == 0:
            print("%d read from disk" % i, file=sys.stderr)
    return xs, ys
def slow_down_sound(sound, rate):
    """ returns a sound which is a slowed down version of the original.
        rate - at which the sound should be slowed down. eg. 0.5 would be half speed.
    """
    raise NotImplementedError()
    # NOTE: everything below the raise is unreachable; it sketches the intended
    #       approach but was never finished in the original.
    grow_rate = 1 / rate

    # make it 1/rate times longer.
    a1 = sndarray.array(sound)

    surf = pygame.surfarray.make_surface(a1)
    print(a1.shape[0] * grow_rate)
    scaled_surf = pygame.transform.scale(surf, (int(a1.shape[0] * grow_rate), a1.shape[1]))
    print(scaled_surf)
    print(surf)

    a2 = a1 * rate
    print(a1.shape)
    print(a2.shape)
    print(a2)
    sound2 = sndarray.make_sound(a2.astype(numpy.int16))
    return sound2
def _check_valid_data(self, data):
    """Checks that the incoming data is a 2 x #elements ndarray of ints.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to verify.

    Raises
    ------
    ValueError
        If the data is not of the correct shape or type.
    """
    if data.dtype.type != np.int8 and data.dtype.type != np.int16 \
            and data.dtype.type != np.int32 and data.dtype.type != np.int64 \
            and data.dtype.type != np.uint8 and data.dtype.type != np.uint16 \
            and data.dtype.type != np.uint32 and data.dtype.type != np.uint64:
        raise ValueError('Must initialize image coords with a numpy int ndarray')
    if data.shape[0] != 2:
        raise ValueError('Illegal data array passed to image coords. Must have 2 coordinates')
    if len(data.shape) > 2:
        raise ValueError('Illegal data array passed to point cloud. Must have 1 or 2 dimensions')
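The dtype whitelist above can be expressed more compactly with NumPy's scalar-type hierarchy; a minimal sketch (np.issubdtype accepts every signed and unsigned integer dtype and rejects bool, float, and complex):

import numpy as np

def _is_int_dtype(dtype):
    # True for all signed and unsigned integer dtypes
    return np.issubdtype(dtype, np.integer)

assert _is_int_dtype(np.dtype(np.int16))
assert not _is_int_dtype(np.dtype(np.float32))
assert not _is_int_dtype(np.dtype(np.bool_))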
def update(self, x):
    """Update the buffer.

    Args:
        x (numpy.ndarray): array of shape
            (n_new_samples, n_channels(, n_points))
    """
    if x.ndim != self.buffer.ndim:
        raise ValueError('x does not have the same number of dimensions '
                         'as the buffer.')
    nw = x.shape[0]

    # Determine index at which new values should be put into array.
    # (np.int16 indices cap the usable buffer length at 32767 samples.)
    ind = np.arange(self.ind, self.ind + nw, dtype=np.int16) % self.n
    self.buffer[ind, :] = x

    # Set self.ind = to the index at which new locations were put.
    # Separately defined here to allow new data to be an array rather
    # than just one row
    self.ind = (ind[-1] + 1) % self.n
    self.pts += nw
def mark_noise(self, noise, nw=None):
    """Mark noisy samples in the buffer.

    Mark the last `nw` samples in the buffer as noisy (noisy -> True;
    clean -> False).

    Args:
        noise (bool): if True, mark the last nw samples as noise

    Keyword Args:
        nw (int): number of samples to mark as noise. If None, use n
            points.
    """
    if not nw:
        nw = self.n
    ind = np.arange(self.ind - nw, self.ind, dtype=np.int16) % self.n
    self.noise[ind, :] = noise
def test_no_data_deserialization(self):
    arr = np.int16([[[-32768, -32768, -32768, -32768],
                     [-32768, -32768, -32768, -32768],
                     [-32768, -32768, -32768, -32768],
                     [-32768, -32768, -32768, -32768]]])
    epsg_code = 3857
    extent = Extent(0.0, 0.0, 10.0, 10.0)
    projected_extent = ProjectedExtent(extent, epsg_code)

    tile = Tile(arr, 'SHORT', -32768)
    rdd = BaseTestClass.pysc.parallelize([(projected_extent, tile)])
    raster_layer = RasterLayer.from_numpy_rdd(LayerType.SPATIAL, rdd)

    actual_tile = raster_layer.to_numpy_rdd().first()[1]

    self.assertEqual(actual_tile.cell_type, tile.cell_type)
    self.assertEqual(actual_tile.no_data_value, tile.no_data_value)
    self.assertTrue((actual_tile.cells == tile.cells).all())
def wavWrite(y, fs, nbits, audioFile):
    """ Write samples to WAV file
    Args:
        y: (ndarray / 2D ndarray) (floating point) sample vector
            mono: DIM: nSamples
            stereo: DIM: nSamples x nChannels
        fs: (int) Sample rate in Hz
        nbits: (int) Number of bits
        audioFile: (string) WAV file name to write
    """
    if nbits == 8:
        intsamples = (y + 1.0) * AudioIO.normFact['int' + str(nbits)]
        fX = np.int8(intsamples)
    elif nbits == 16:
        intsamples = y * AudioIO.normFact['int' + str(nbits)]
        fX = np.int16(intsamples)
    elif nbits > 16:
        fX = y

    write(audioFile, fs, fX)
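A hedged usage sketch of the 16-bit branch, assuming write is scipy.io.wavfile.write (as the signature suggests) and that AudioIO.normFact['int16'] is the usual full-scale factor 32767:

import numpy as np
from scipy.io.wavfile import write

fs = 44100
t = np.arange(fs) / fs                      # one second of samples
y = 0.5 * np.sin(2 * np.pi * 440.0 * t)     # 440 Hz tone at half amplitude
write('tone.wav', fs, np.int16(y * 32767))  # scale floats in [-1, 1] to int16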
def batch_works(k):
    if k == n_processes - 1:
        paths = all_paths[k * int(len(all_paths) / n_processes):]
    else:
        paths = all_paths[k * int(len(all_paths) / n_processes):(k + 1) * int(len(all_paths) / n_processes)]
    for path in paths:
        probs = np.load(os.path.join(input_path, path))
        pred = np.argmax(probs, axis=3)
        fg_prob = 1 - probs[..., 0]
        pred = clean_contour(fg_prob, pred)
        # build the final label map (predicted class 3 is written out as label 4)
        seg = np.zeros(pred.shape, dtype=np.int16)
        seg[pred == 1] = 1
        seg[pred == 2] = 2
        seg[pred == 3] = 4
        img = nib.Nifti1Image(seg, np.eye(4))
        nib.save(img, os.path.join(output_path, path.replace('_probs.npy', '.nii.gz')))
def test_unaligned(self):
    v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
    d = v.view(np.dtype("S8"))
    # unaligned source
    x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
    x = x.view(np.dtype("S8"))
    x[...] = np.array("b" * 8, dtype="S")
    b = np.arange(d.size)
    # trivial
    assert_equal(d[b], d)
    d[b] = x
    # nontrivial
    # unaligned index array
    b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
    b = b.view(np.intp)[:d.size]
    b[...] = np.arange(d.size)
    assert_equal(d[b.astype(np.int16)], d)
    d[b.astype(np.int16)] = x
    # boolean
    d[b % 2 == 0]
    d[b % 2 == 0] = x[::2]
def test_int(self):
    for st, ut, s in [(np.int8, np.uint8, 8),
                      (np.int16, np.uint16, 16),
                      (np.int32, np.uint32, 32),
                      (np.int64, np.uint64, 64)]:
        for i in range(1, s):
            assert_equal(hash(st(-2**i)), hash(-2**i),
                         err_msg="%r: -2**%d" % (st, i))
            assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (st, i - 1))
            assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (st, i))

            i = max(i - 1, 1)
            assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
                         err_msg="%r: 2**%d" % (ut, i - 1))
            assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
                         err_msg="%r: 2**%d - 1" % (ut, i))
def test_prod(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, a.prod)
            self.assertRaises(ArithmeticError, a2.prod, axis=1)
        else:
            assert_equal(a.prod(axis=0), 26400)
            assert_array_equal(a2.prod(axis=0),
                               np.array([50, 36, 84, 180], ctype))
            assert_array_equal(a2.prod(axis=-1),
                               np.array([24, 1890, 600], ctype))
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                  np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)

        tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
        assert_array_equal(np.cumsum(a, axis=0), tgt)

        tgt = np.array(
            [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
        assert_array_equal(np.cumsum(a2, axis=0), tgt)

        tgt = np.array(
            [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
        assert_array_equal(np.cumsum(a2, axis=1), tgt)
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, np.prod, a)
            self.assertRaises(ArithmeticError, np.prod, a2, 1)
        else:
            assert_equal(a.prod(axis=0), 26400)
            assert_array_equal(a2.prod(axis=0),
                               np.array([50, 36, 84, 180], ctype))
            assert_array_equal(a2.prod(axis=-1),
                               np.array([24, 1890, 600], ctype))
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, np.cumprod, a)
            self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
            self.assertRaises(ArithmeticError, np.cumprod, a)
        else:
            assert_array_equal(np.cumprod(a, axis=-1),
                               np.array([1, 2, 20, 220,
                                         1320, 6600, 26400], ctype))
            assert_array_equal(np.cumprod(a2, axis=0),
                               np.array([[1, 2, 3, 4],
                                         [5, 12, 21, 36],
                                         [50, 36, 84, 180]], ctype))
            assert_array_equal(np.cumprod(a2, axis=-1),
                               np.array([[1, 2, 6, 24],
                                         [5, 30, 210, 1890],
                                         [10, 30, 120, 600]], ctype))
def get_crop_ix(self, training_size):
    rescale_sizes = self.rescale_size
    crop_inds = []
    for size_pair in rescale_sizes:
        mother_w, mother_h = size_pair
        crop_ix = np.zeros([5, 4], dtype=np.int16)
        w_indices = (0, mother_w - training_size)
        h_indices = (0, mother_h - training_size)
        w_center = (mother_w - training_size) // 2  # integer division; `/` would give floats
        h_center = (mother_h - training_size) // 2
        crop_ix[4, :] = [w_center, h_center, training_size + w_center, training_size + h_center]
        cnt = 0
        for i in w_indices:
            for j in h_indices:
                crop_ix[cnt, :] = [i, j, i + training_size, j + training_size]
                cnt += 1
        crop_inds.append(crop_ix)
    return crop_inds
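A quick check of the five-crop indices (a sketch; a SimpleNamespace stands in for the real object since only rescale_size is read):

import numpy as np
from types import SimpleNamespace

obj = SimpleNamespace(rescale_size=[(256, 256)])
crops = get_crop_ix(obj, training_size=224)
print(crops[0])  # four corner crops plus the centered crop, rows of [x0, y0, x1, y1]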
def __init__(self, data, comments=list()):
    """
    Data structure for storing a sequence of amino acids.
    The latter is represented by a contiguous array of integers.
    The mapping between the amino acids and their numeric value
    is done by using the ascii table.

    Attributes
    ----------
    comments [list] : list of information about the sequence parsed from the FASTA file
        The list is constructed by splitting the comments using the ' ' delimiter
    N [int] : length of the sequence
    data [np.ndarray] : contiguous array containing the ascii values of the amino acids
    """
    self.comments = comments
    self.N = len(data)
    if isinstance(data, np.ndarray):
        self.data = data
    else:
        # If a string is passed, the latter is converted to a numpy array
        self.data = np.empty(self.N, dtype=np.int16)
        for i in range(self.N):
            self.data[i] = Sequence.charToInt(data[i])
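Assuming Sequence.charToInt is essentially ord() (the docstring says the mapping uses the ASCII table), the string branch can be vectorized; a minimal sketch:

import numpy as np

seq = 'ACDEFGHIK'
codes = np.frombuffer(seq.encode('ascii'), dtype=np.uint8).astype(np.int16)
assert codes[0] == ord('A')  # one int16 ASCII code per residue, no Python loop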
def test_load_table_columnar_arrow_all(self, con, all_types_table):
    pa = pytest.importorskip("pyarrow")
    skip_if_no_arrow_loader(con)

    names = ['boolean_', 'smallint_', 'int_', 'bigint_',
             'float_', 'double_', 'varchar_', 'text_',
             'time_', 'timestamp_', 'date_']

    columns = [pa.array([True, False, None], type=pa.bool_()),
               pa.array([1, 0, None]).cast(pa.int16()),
               pa.array([1, 0, None]).cast(pa.int32()),
               pa.array([1, 0, None]),
               pa.array([1.0, 1.1, None]).cast(pa.float32()),
               pa.array([1.0, 1.1, None]),
               # no fixed-width string
               pa.array(['a', 'b', None]),
               pa.array(['a', 'b', None]),
               (pa.array([1, 2, None]).cast(pa.int32())
                  .cast(pa.time32('s'))),
               pa.array([datetime.datetime(2016, 1, 1, 12, 12, 12),
                         datetime.datetime(2017, 1, 1), None]),
               pa.array([datetime.date(2016, 1, 1),
                         datetime.date(2017, 1, 1), None])]

    table = pa.Table.from_arrays(columns, names=names)
    con.load_table_arrow(all_types_table, table)
def test_load_table_creates(self, con, not_a_table):
    pd = pytest.importorskip("pandas")
    import numpy as np

    data = pd.DataFrame({
        "boolean_": [True, False],
        "smallint_cast": np.array([0, 1], dtype=np.int8),
        "smallint_": np.array([0, 1], dtype=np.int16),
        "int_": np.array([0, 1], dtype=np.int32),
        "bigint_": np.array([0, 1], dtype=np.int64),
        "float_": np.array([0, 1], dtype=np.float32),
        "double_": np.array([0, 1], dtype=np.float64),
        "varchar_": ["a", "b"],
        "text_": ['a', 'b'],
        "time_": [datetime.time(0, 11, 59), datetime.time(13)],
        "timestamp_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
        "date_": [datetime.date(2016, 1, 1), datetime.date(2017, 1, 1)],
    }, columns=['boolean_', 'smallint_', 'int_', 'bigint_', 'float_',
                'double_', 'varchar_', 'text_', 'time_', 'timestamp_',
                'date_'])
    con.load_table(not_a_table, data, create=True)
def interpret_header(self):
    """redefine variables from header dictionary"""
    self.nifs = self.header['nifs']
    self.nchans = self.header['nchans']
    self.nbits = self.header['nbits']
    signed = 'signed' in self.header and self.header['signed'] is True
    if self.nbits >= 8:
        if signed:
            self.dtype = {8: np.int8,
                          16: np.int16,
                          32: np.float32,
                          64: np.float64}[self.nbits]
        else:
            self.dtype = {8: np.uint8,
                          16: np.uint16,
                          32: np.float32,
                          64: np.float64}[self.nbits]
    else:
        self.dtype = np.int8 if signed else np.uint8
def numpy2bifrost(dtype):
    if   dtype == np.int8:       return _bf.BF_DTYPE_I8
    elif dtype == np.int16:      return _bf.BF_DTYPE_I16
    elif dtype == np.int32:      return _bf.BF_DTYPE_I32
    elif dtype == np.uint8:      return _bf.BF_DTYPE_U8
    elif dtype == np.uint16:     return _bf.BF_DTYPE_U16
    elif dtype == np.uint32:     return _bf.BF_DTYPE_U32
    elif dtype == np.float16:    return _bf.BF_DTYPE_F16
    elif dtype == np.float32:    return _bf.BF_DTYPE_F32
    elif dtype == np.float64:    return _bf.BF_DTYPE_F64
    elif dtype == np.float128:   return _bf.BF_DTYPE_F128
    elif dtype == ci8:           return _bf.BF_DTYPE_CI8
    elif dtype == ci16:          return _bf.BF_DTYPE_CI16
    elif dtype == ci32:          return _bf.BF_DTYPE_CI32
    elif dtype == cf16:          return _bf.BF_DTYPE_CF16
    elif dtype == np.complex64:  return _bf.BF_DTYPE_CF32
    elif dtype == np.complex128: return _bf.BF_DTYPE_CF64
    elif dtype == np.complex256: return _bf.BF_DTYPE_CF128
    else: raise ValueError("Unsupported dtype: " + str(dtype))
def numpy2string(dtype):
    if   dtype == np.int8:       return 'i8'
    elif dtype == np.int16:      return 'i16'
    elif dtype == np.int32:      return 'i32'
    elif dtype == np.int64:      return 'i64'
    elif dtype == np.uint8:      return 'u8'
    elif dtype == np.uint16:     return 'u16'
    elif dtype == np.uint32:     return 'u32'
    elif dtype == np.uint64:     return 'u64'
    elif dtype == np.float16:    return 'f16'
    elif dtype == np.float32:    return 'f32'
    elif dtype == np.float64:    return 'f64'
    elif dtype == np.float128:   return 'f128'
    elif dtype == np.complex64:  return 'cf32'
    elif dtype == np.complex128: return 'cf64'
    elif dtype == np.complex256: return 'cf128'
    else: raise TypeError("Unsupported dtype: " + str(dtype))
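Both chains are equivalent to a table lookup; below is a sketch of the same mapping as a dict, leaving out the extended-precision and custom complex-integer types for portability. Normalizing through np.dtype(...).type lets both scalar types and dtype objects hit the same key.

import numpy as np

_DTYPE_NAMES = {
    np.int8: 'i8', np.int16: 'i16', np.int32: 'i32', np.int64: 'i64',
    np.uint8: 'u8', np.uint16: 'u16', np.uint32: 'u32', np.uint64: 'u64',
    np.float16: 'f16', np.float32: 'f32', np.float64: 'f64',
    np.complex64: 'cf32', np.complex128: 'cf64',
}

def numpy2string_lookup(dtype):
    try:
        return _DTYPE_NAMES[np.dtype(dtype).type]
    except KeyError:
        raise TypeError('Unsupported dtype: %s' % dtype)

assert numpy2string_lookup(np.int16) == 'i16'
assert numpy2string_lookup(np.dtype('int16')) == 'i16'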
def smooth(tile):
    # first use this function to get the mean and save it in an array
    temp = import_all_year_data(tile)
    #### after getting the mean value for all DOY, run a BISE gapfill first
    print(temp.size)
    ## when using single processing
    # inputVI = pd.DataFrame(temp)
    # VIsmoothed = inputVI.apply(VIsmooth, axis=0)
    # VIsmoothed = VIsmoothed.as_matrix()
    # VIsmoothed = parallelize_dataframe(temp)
    ## when using multiprocessing
    VIsmoothed = dataframeapply(temp)
    VIsmoothed = VIsmoothed.reshape(VIsmoothed.size // (2400 * 2400), 2400, 2400)
    TILEdir = os.path.join(dirref, tile)
    if not os.path.exists(TILEdir):
        os.makedirs(TILEdir)
    export_array(Rasters=np.int16(VIsmoothed), directory=TILEdir,
                 prod='EVI.BISE.SG', tile=tile, index=range(1, 369, 8))
    temp = None
    inputVI = None
    VIsmoothed = None
def update_wf_library(filename, pulses, offsets):
    """
    Update an H5 waveform library in place given an iterable of (pulseName, pulse)
    tuples and offsets into the waveform library.
    """
    assert USE_PHASE_OFFSET_INSTRUCTION == False
    # open the h5 file for update ('r+'; recent h5py defaults to read-only)
    with h5py.File(filename, 'r+') as FID:
        for label, pulse in pulses.items():
            # create a new waveform
            if pulse.isTimeAmp:
                shape = np.repeat(pulse.amp * np.exp(1j * pulse.phase), 4)
            else:
                shape = pulse.amp * np.exp(1j * pulse.phase) * pulse.shape
            try:
                length = offsets[label][1]
            except KeyError:
                print("\t{} not found in offsets so skipping".format(pulse))
                continue
            for offset in offsets[label][0]:
                print("\tUpdating {} at offset {}".format(pulse, offset))
                FID['/chan_1/waveforms'][offset:offset + length] = np.int16(
                    MAX_WAVEFORM_VALUE * shape.real)
                FID['/chan_2/waveforms'][offset:offset + length] = np.int16(
                    MAX_WAVEFORM_VALUE * shape.imag)
def write_field(FID, fieldName, data, dataType):
    typeSizes = {'int16': 2, 'int32': 4, 'double': 8, 'uint128': 16}
    formatChars = {'int16': '<h', 'int32': '<i', 'double': '<d'}

    if dataType == 'char':
        dataSize = len(data) + 1
        data = data + chr(0)
    else:
        dataSize = typeSizes[dataType]

    FID.write(struct.pack('<II', len(fieldName) + 1, dataSize))
    FID.write(fieldName + chr(0))
    if dataType == 'char':
        FID.write(data)
    elif dataType == 'uint128':
        # struct doesn't support uint128 so write two 64-bit words
        # there are smarter ways but we really only need this for the fake timestamp
        FID.write(struct.pack('<QQ', 0, data))
    else:
        FID.write(struct.pack(formatChars[dataType], data))
def write_waveform(FID, WFname, WFnumber, data):
    '''
    Helper function to write a waveform
    '''
    numString = str(WFnumber)

    write_field(FID, 'WAVEFORM_NAME_' + numString, WFname, 'char')

    # Set integer format
    write_field(FID, 'WAVEFORM_TYPE_' + numString, 1, 'int16')

    write_field(FID, 'WAVEFORM_LENGTH_' + numString, data.size, 'int32')

    write_field(FID, 'WAVEFORM_TIMESTAMP_' + numString, 0, 'uint128')
    tmpString = 'WAVEFORM_DATA_' + numString + chr(0)
    dataSize = 2 * data.size
    FID.write(struct.pack('<II', len(tmpString), dataSize))
    FID.write(tmpString)
    FID.write(data.tobytes())  # tostring() is a deprecated alias for tobytes()
def read_dicom(self, dcm):
    """ Imports CT-images from Dicom object.

    :param Dicom dcm: a Dicom object
    """
    if "images" not in dcm:
        raise InputError("Data doesn't contain ct data")
    if not self.header_set:
        self._set_header_from_dicom(dcm)

    self.cube = np.zeros((self.dimz, self.dimy, self.dimx), dtype=np.int16)
    intersect = float(dcm["images"][0].RescaleIntercept)
    slope = float(dcm["images"][0].RescaleSlope)

    for i in range(len(dcm["images"])):
        data = np.array(dcm["images"][i].pixel_array) * slope + intersect
        self.cube[i][:][:] = data

    if self.slice_pos[1] < self.slice_pos[0]:
        self.slice_pos.reverse()
        self.zoffset = self.slice_pos[0]
        self.cube = self.cube[::-1]
def set_data_type(self, type):
    """ Sets the data type for the TRiP98 header files.

    :param numpy.type type: numpy type, e.g. np.uint16
    """
    if type is np.int8 or type is np.uint8:
        self.data_type = "integer"
        self.num_bytes = 1
    elif type is np.int16 or type is np.uint16:
        self.data_type = "integer"
        self.num_bytes = 2
    elif type is np.int32 or type is np.uint32:
        self.data_type = "integer"
        self.num_bytes = 4
    elif type is np.float32:  # the original tested np.float, an alias removed in NumPy 1.24
        self.data_type = "float"
        self.num_bytes = 4
    elif type is np.double:
        self.data_type = "double"
        self.num_bytes = 8

# ###################### WRITING DICOM FILES #######################################
def load_data(predictions_file, labels_file):
    '''
    Loads prediction and label data into numpy arrays

    Parameters
    ----------
    predictions_file: str
        Path to the prediction file
    labels_file: str
        Path to the label file

    Returns
    -------
    ret_val: tuple
        labels array, predictions array
    '''
    labels = io.load_nparray_from_bin_file(labels_file, np.uint8)
    predictions = io.load_nparray_from_bin_file(predictions_file, np.int16)
    return labels, predictions
def render_fonts_image(x, path, img_per_row, unit_scale=True):
    if unit_scale:
        # scale 0-1 matrix back to gray scale bitmaps
        bitmaps = (x * 255.).astype(dtype=np.int16) % 256
    else:
        bitmaps = x
    num_imgs, h, w = x.shape
    width = img_per_row * w
    height = int(np.ceil(float(num_imgs) / img_per_row)) * h
    canvas = np.zeros(shape=(height, width), dtype=np.int16)
    # the canvas starts out black (zeros); fill(0) is a no-op kept from the original
    canvas.fill(0)
    for idx, bm in enumerate(bitmaps):
        x = h * int(idx / img_per_row)
        y = w * int(idx % img_per_row)
        canvas[x: x + h, y: y + w] = bm
    # scipy.misc.toimage was removed in SciPy 1.2; imageio.imwrite(path, canvas) is a replacement
    scipy.misc.toimage(canvas).save(path)
    return path
def _typename(t):
    if t == np.float16:
        return 'float16'
    elif t == np.float32:
        return 'float32'
    elif t == np.float64:
        return 'float64'
    elif t == np.uint8:
        return 'uint8'
    elif t == np.uint16:
        return 'uint16'
    elif t == np.int16:
        return 'int16'
    elif t == np.int32:
        return 'int32'
    elif t == np.int64:
        return 'int64'
    else:
        raise TypeError('unknown type')
def default(self, obj):
    # convert dates and numpy objects to a json serializable format
    if isinstance(obj, datetime):
        return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif isinstance(obj, date):
        return obj.strftime('%Y-%m-%d')
    elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
                       np.int32, np.int64, np.uint8, np.uint16,
                       np.uint32, np.uint64):
        return int(obj)
    elif type(obj) in (np.bool_,):
        return bool(obj)
    elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
                       np.complex_, np.complex64, np.complex128):
        return float(obj)

    # Let the base class default method raise the TypeError
    return json.JSONEncoder.default(self, obj)
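A hedged usage sketch of the hook above: attach it to a json.JSONEncoder subclass (NumpyJSONEncoder is a name chosen here for illustration; note that np.float_ and np.complex_ were removed in NumPy 2.0, so the method as written assumes NumPy 1.x):

import json
from datetime import datetime, date
import numpy as np

class NumpyJSONEncoder(json.JSONEncoder):  # hypothetical wrapper class
    pass

NumpyJSONEncoder.default = default  # attach the method defined above

payload = {'count': np.int16(7), 'ok': np.bool_(True), 'when': date(2017, 1, 1)}
print(json.dumps(payload, cls=NumpyJSONEncoder))
# {"count": 7, "ok": true, "when": "2017-01-01"}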
def convert_dtype(dtype):
    if dtype == np.float32:
        return dt.DT_FLOAT
    elif dtype == np.float64:
        return dt.DT_DOUBLE
    elif dtype == np.int32:
        return dt.DT_INT32
    elif dtype == np.uint8:
        return dt.DT_UINT8
    elif dtype == np.int16:
        return dt.DT_INT16
    elif dtype == np.int8:
        return dt.DT_INT8
    elif dtype == np.dtype('S1'):
        return dt.DT_STRING
    else:
        raise ValueError('Unsupported type.')
def get_audio_from_model(model, sr, duration, seed_audio):
    print('Generating audio...')
    new_audio = np.zeros((sr * duration))
    curr_sample_idx = 0
    while curr_sample_idx < new_audio.shape[0]:
        distribution = np.array(model.predict(seed_audio.reshape(1,
                                              frame_size, 1)
                                              ), dtype=float).reshape(256)
        distribution /= distribution.sum().astype(float)
        predicted_val = np.random.choice(range(256), p=distribution)
        ampl_val_8 = ((((predicted_val) / 255.0) - 0.5) * 2.0)
        # approximate inverse mu-law expansion back to the 16-bit range
        ampl_val_16 = (np.sign(ampl_val_8) * (1/256.0) * ((1 + 256.0)**abs(
            ampl_val_8) - 1)) * 2**15
        new_audio[curr_sample_idx] = ampl_val_16
        # shift the window left, then append the new sample
        # (the original assigned seed_audio[-1] before shifting, duplicating it)
        seed_audio[:-1] = seed_audio[1:]
        seed_audio[-1] = ampl_val_16
        pc_str = str(round(100*curr_sample_idx/float(new_audio.shape[0]), 2))
        sys.stdout.write('Percent complete: ' + pc_str + '\r')
        sys.stdout.flush()
        curr_sample_idx += 1
    print('Audio generated.')
    return new_audio.astype(np.int16)
def to_volume(slices):
    """Creates ndarray volume in Hounsfield units (HU) from array of pydicom slices.
    """
    volume = np.stack([s.pixel_array for s in slices])
    volume = volume.astype(np.int16)

    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    volume[volume == -2000] = 0

    # Convert to Hounsfield units (HU)
    for n in range(len(slices)):
        intercept = slices[n].RescaleIntercept
        slope = slices[n].RescaleSlope
        if slope != 1:
            volume[n] = slope * volume[n].astype(np.float64)
            volume[n] = volume[n].astype(np.int16)
        volume[n] += np.int16(intercept)

    volume = np.array(volume, dtype=np.int16)
    spacing = tuple(map(float, ([slices[0].SliceThickness] + slices[0].PixelSpacing)))
    return volume, spacing
def test_cell_indices_in_tile(self):
    """
    Test get_cell_indices_in_tile by filling an int array for a tile,
    using the indices returned by cell_indices_in_tile for each cell
    in the tile. The array should be fully filled with 1 at the end
    """
    h, v = (20, 11)
    grid = MODISGrid()
    tile_data = np.zeros(
        (MODISGrid.MODIS_tile_height, MODISGrid.MODIS_tile_width),
        dtype=np.int16)

    cells = grid.get_cells_for_tile(h, v)
    for cell in cells:
        i_range, j_range = grid.get_cell_indices_in_tile(cell, h, v)
        tile_data[i_range[0]:i_range[1], j_range[0]:j_range[1]] += 1

    # If tile_data contains some zeros, this means the tile is not
    # fully covered by the cells. If it contains values > 1, this means
    # that more than one cell covers a given tile pixel
    assert_array_equal(tile_data, np.ones_like(tile_data))