Python numpy module: iinfo() example source code
The following code examples, collected from open-source Python projects, illustrate how to use numpy.iinfo(). Unless a snippet shows otherwise, numpy is assumed to be imported as np (import numpy as np).
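As a quick orientation before the examples: np.iinfo(dtype) returns an object whose min and max attributes give the representable range of an integer dtype (np.finfo is the floating-point counterpart). A minimal sketch:

import numpy as np

info = np.iinfo(np.int16)
print(info.min, info.max)      # -32768 32767
print(np.iinfo(np.uint8).max)  # 255
# np.iinfo(np.float32) raises ValueError; iinfo only accepts integer dtypes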
def test_rescaleData():
    # list() so the dtypes can be iterated more than once (a bare map()
    # iterator would be exhausted after the first pass under Python 3)
    dtypes = list(map(np.dtype, ('ubyte', 'uint16', 'byte', 'int16', 'int', 'float')))
    for dtype1 in dtypes:
        for dtype2 in dtypes:
            data = (np.random.random(size=10) * 2**32 - 2**31).astype(dtype1)
            for scale, offset in [(10, 0), (10., 0.), (1, -50), (0.2, 0.5), (0.001, 0)]:
                if dtype2.kind in 'iu':
                    # integer targets are clipped to the dtype's representable range
                    lim = np.iinfo(dtype2)
                    lim = lim.min, lim.max
                else:
                    lim = (-np.inf, np.inf)
                s1 = np.clip(float(scale) * (data - float(offset)), *lim).astype(dtype2)
                s2 = pg.rescaleData(data, scale, offset, dtype2)
                assert s1.dtype == s2.dtype
                if dtype2.kind in 'iu':
                    assert np.all(s1 == s2)
                else:
                    assert np.allclose(s1, s2)
def to_best_type(array):
    '''Convert an array to the smallest unsigned integer type that can hold its values.
    '''
    ui8 = np.iinfo(np.uint8).max
    ui16 = np.iinfo(np.uint16).max
    ui32 = np.iinfo(np.uint32).max
    ui64 = np.iinfo(np.uint64).max

    if array.max() <= ui64:
        new_type = np.uint64
    if array.max() <= ui32:
        new_type = np.uint32
    if array.max() <= ui16:
        new_type = np.uint16
    if array.max() <= ui8:
        new_type = np.uint8

    return array.astype(new_type)
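A usage sketch, assuming the function above and import numpy as np:

arr = np.array([0, 17, 200], dtype=np.int64)
small = to_best_type(arr)
print(small.dtype)   # uint8, since the maximum value 200 fits within np.iinfo(np.uint8).max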
def pack_samples(self, samples, dtype=None):
    """Pack samples into one integer per sample

    Store one sample in a single integer instead of a list of
    integers with length `len(self.nsoutdims)`. Example:

    >>> p = pauli_mpp(nr_sites=2, local_dim=2)
    >>> p.outdims
    (6, 6)
    >>> p.pack_samples(np.array([[0, 1], [1, 0], [1, 2], [5, 5]]))
    array([ 1,  6,  8, 35])

    """
    assert samples.ndim == 2
    assert samples.shape[1] == len(self.nsoutdims)
    samples = np.ravel_multi_index(samples.T, self.nsoutdims)
    if dtype not in (True, False, None) and issubclass(dtype, np.integer):
        info = np.iinfo(dtype)
        assert samples.min() >= info.min
        assert samples.max() <= info.max
        samples = samples.astype(dtype)
    return samples
def read(cls, filename):
    """
    Read an audio file (only wav is supported).

    Parameters
    ----------
    filename: string
        Path to the wav file.
    """
    sample_rate, samples = wavfile.read(filename)
    if samples.dtype == np.dtype('int16'):
        # normalize 16-bit PCM to floats; note that dividing by iinfo.min
        # (a negative number) also flips the waveform's sign
        samples = samples.astype(_types.float_) / np.iinfo(np.dtype('int16')).min
    if len(samples.shape) == 1:
        samples = samples.reshape((samples.shape[0], 1))
    instance = cls(samples, sample_rate)
    return instance
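For reference, a standalone sketch of what that int16 normalization does, with synthetic samples instead of file I/O:

import numpy as np
pcm = np.array([-32768, 0, 32767], dtype=np.int16)
floats = pcm.astype(np.float64) / np.iinfo(np.int16).min
print(floats)   # [ 1. -0. -0.99996948]; dividing by the negative minimum flips the sign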
def test_allclose(self):
    # Tests allclose on arrays
    a = np.random.rand(10)
    b = a + np.random.rand(10) * 1e-8
    self.assertTrue(allclose(a, b))
    # Test allclose w/ infs
    a[0] = np.inf
    self.assertTrue(not allclose(a, b))
    b[0] = np.inf
    self.assertTrue(allclose(a, b))
    # Test allclose w/ masked
    a = masked_array(a)
    a[-1] = masked
    self.assertTrue(allclose(a, b, masked_equal=True))
    self.assertTrue(not allclose(a, b, masked_equal=False))
    # Test comparison w/ scalar
    a *= 1e-8
    a[0] = 0
    self.assertTrue(allclose(a, 0, masked_equal=True))
    # Test that the function works for MIN_INT integer typed arrays
    a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
    self.assertTrue(allclose(a, a))
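The MIN_INT case matters because negating the minimum of a signed integer type wraps back to the same value, so naive abs()-based comparisons can misbehave. A short demonstration:

import numpy as np
m = np.iinfo(np.int8).min                 # -128
with np.errstate(over='ignore'):
    print(np.int8(m) == -np.int8(m))      # True: -(-128) overflows back to -128 in int8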
def test_respect_dtype_singleton(self):
    # See gh-7203
    for dt in self.itype:
        lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
        ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1

        sample = self.rfunc(lbnd, ubnd, dtype=dt)
        self.assertEqual(sample.dtype, np.dtype(dt))

    for dt in (np.bool, np.int, np.long):
        lbnd = 0 if dt is np.bool else np.iinfo(dt).min
        ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1

        # gh-7284: Ensure that we get Python data types
        sample = self.rfunc(lbnd, ubnd, dtype=dt)
        self.assertFalse(hasattr(sample, 'dtype'))
        self.assertEqual(type(sample), dt)
def test_dtype2(self, dtype):
    dtype = numpy.dtype(dtype)
    # randint does not support 64 bit integers
    if dtype in (numpy.int64, numpy.uint64):
        return
    iinfo = numpy.iinfo(dtype)
    size = (10000,)
    x = random.randint(iinfo.min, iinfo.max + 1, size, dtype)
    self.assertEqual(x.dtype, dtype)
    self.assertLessEqual(iinfo.min, min(x))
    self.assertLessEqual(max(x), iinfo.max)
    # Lower bound check
    with self.assertRaises(ValueError):
        random.randint(iinfo.min - 1, iinfo.min + 10, size, dtype)
    # Upper bound check
    with self.assertRaises(ValueError):
        random.randint(iinfo.max - 10, iinfo.max + 2, size, dtype)
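The same iinfo-driven bound pattern works with numpy's own generator; a minimal sketch:

import numpy as np
info = np.iinfo(np.int8)
x = np.random.randint(info.min, info.max + 1, size=1000, dtype=np.int8)
assert x.min() >= info.min and x.max() <= info.max
# bounds outside the dtype's range raise ValueError, e.g.:
# np.random.randint(info.min - 1, info.max, dtype=np.int8)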
def __init__(self, N=30, paired=False):
    """
    Parameters
    ----------
    N: int
        number of calls to average over.
    paired: boolean
        if paired is chosen the same series of random seeds is used for different x
    """
    self._N = int(N)
    self.paired = paired
    if self.paired:
        self.uint32max = np.iinfo(np.uint32).max
        self.seeds = list(np.random.randint(0, self.uint32max, size=int(N)))
    # cache previous iterations
    self.cache = {}
    # number of evaluations
    self.nev = 0
def _calculate(self, X, y, categorical, metafeatures, helpers):
    occurrences = helpers.get_value("ClassOccurrences")
    # start from the largest int64 so any real class count is smaller
    min_value = np.iinfo(np.int64).max
    if len(y.shape) == 2:
        for i in range(y.shape[1]):
            for num_occurrences in occurrences[i].values():
                if num_occurrences < min_value:
                    min_value = num_occurrences
    else:
        for num_occurrences in occurrences.values():
            if num_occurrences < min_value:
                min_value = num_occurrences
    return float(min_value) / float(y.shape[0])
    # aka default accuracy
def __init__(self, model, statistics_calc, backend, n_samples=1000, seed=None):
    self.model = model
    self.statistics_calc = statistics_calc
    self.backend = backend
    self.rng = np.random.RandomState(seed)
    self.model.prior.reseed(self.rng.randint(np.iinfo(np.uint32).max, dtype=np.uint32))

    # main algorithm
    seed_arr = self.rng.randint(1, n_samples * n_samples, size=n_samples, dtype=np.int32)
    seed_pds = self.backend.parallelize(seed_arr)

    sample_parameters_statistics_pds = self.backend.map(self._sample_parameter_statistics, seed_pds)
    sample_parameters_and_statistics = self.backend.collect(sample_parameters_statistics_pds)
    sample_parameters, sample_statistics = [list(t) for t in zip(*sample_parameters_and_statistics)]
    sample_parameters = np.array(sample_parameters)
    sample_statistics = np.concatenate(sample_statistics)

    self.coefficients_learnt = np.zeros(shape=(sample_parameters.shape[1], sample_statistics.shape[1]))
    regr = linear_model.LinearRegression(fit_intercept=True)
    for ind in range(sample_parameters.shape[1]):
        regr.fit(sample_statistics, sample_parameters[:, ind])
        self.coefficients_learnt[ind, :] = regr.coef_
def read_window(window, src_url, mask_url=None, scale=1):
    tile_size = 256 * scale

    with rasterio.Env(CPL_VSIL_CURL_ALLOWED_EXTENSIONS='.vrt,.tif,.ovr,.msk'):
        src = get_source(src_url)

        # TODO read the data and the mask in parallel
        if mask_url:
            data = src.read(out_shape=(3, tile_size, tile_size), window=window)
            mask = get_source(mask_url)
            mask_data = mask.read(out_shape=(1, tile_size, tile_size), window=window)
            return np.concatenate((data, mask_data))
        else:
            if src.count == 4:
                # alpha channel present
                return src.read(out_shape=(4, tile_size, tile_size), window=window)
            else:
                # no alpha channel, create one
                # TODO use src.bounds as an implicit mask
                data = src.read(out_shape=(3, tile_size, tile_size), window=window)
                alpha = np.full((1, tile_size, tile_size), np.iinfo(data.dtype).max, data.dtype)
                return np.concatenate((data, alpha))
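The fully-opaque alpha plane is built from the dtype's maximum, so the same code adapts to 8-bit or 16-bit imagery; a minimal standalone sketch:

import numpy as np
data = np.zeros((3, 4, 4), dtype=np.uint16)
alpha = np.full((1, 4, 4), np.iinfo(data.dtype).max, data.dtype)
print(alpha[0, 0, 0])                # 65535 for uint16 (would be 255 for uint8)
rgba = np.concatenate((data, alpha)) # shape (4, 4, 4)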
def run(n_seeds, n_jobs, _run, _seed):
    seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
                                                  size=n_seeds)
    exps = []
    exps += [{'method': 'sgd',
              'step_size': step_size}
             for step_size in np.logspace(-3, 3, 7)]
    exps += [{'method': 'gram',
              'reduction': reduction}
             for reduction in [1, 4, 6, 8, 12, 24]]

    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    Parallel(n_jobs=n_jobs,
             verbose=10)(delayed(single_run)(config_updates, rundir, i)
                         for i, config_updates in enumerate(exps))
def run(n_seeds, n_jobs, _run, _seed):
    seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
                                                  size=n_seeds)
    exps = []
    exps += [{'method': 'sgd',
              'step_size': step_size}
             for step_size in np.logspace(-7, -7, 1)]
    exps += [{'method': 'gram',
              'reduction': reduction}
             for reduction in [12]]

    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    Parallel(n_jobs=n_jobs,
             verbose=10)(delayed(single_run)(config_updates, rundir, i)
                         for i, config_updates in enumerate(exps))
def argmin(self, axis=None):
    """
    return a ndarray of the minimum argument indexer

    See also
    --------
    numpy.ndarray.argmin
    """
    i8 = self.asi8
    if self.hasnans:
        mask = self._isnan
        if mask.all():
            return -1
        i8 = i8.copy()
        # replace missing (NaT) slots with int64 max so they never win the argmin
        i8[mask] = np.iinfo('int64').max
    return i8.argmin()
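Masking missing slots with np.iinfo(...).max before argmin is a common sentinel trick; a standalone sketch:

import numpy as np
vals = np.array([7, -1, 3], dtype=np.int64)
missing = np.array([False, True, False])
vals = vals.copy()
vals[missing] = np.iinfo(np.int64).max  # missing entries can no longer be the minimum
print(vals.argmin())                    # 2: index of 3, since the -1 is treated as missing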
def find_bmu(self, vec):
    """Find the best matching unit (BMU) for a given vector.

    Args:
        vec (np.array): The vector to match.

    Returns:
        bmu (somNode): The best matching unit node.
    """
    minVal = np.iinfo(np.int_).max  # np.int_ replaces the removed np.int alias
    for node in self.nodeList:
        dist = node.get_distance(vec)
        if dist < minVal:
            minVal = dist
            bmu = node
    return bmu
def test_int64_uint64_corner_case(self):
    # When stored in Numpy arrays, `lbnd` is cast
    # as np.int64, and `ubnd` is cast as np.uint64.
    # Checking whether `lbnd` >= `ubnd` used to be
    # done solely via direct comparison, which is incorrect
    # because when Numpy tries to compare both numbers,
    # it casts both to np.float64 because there is
    # no integer superset of np.int64 and np.uint64. However,
    # `ubnd` is too large to be represented in np.float64,
    # causing it to be rounded down to np.iinfo(np.int64).max,
    # leading to a ValueError because `lbnd` now equals
    # the new `ubnd`.

    dt = np.int64
    tgt = np.iinfo(np.int64).max

    lbnd = np.int64(np.iinfo(np.int64).max)
    ubnd = np.uint64(np.iinfo(np.int64).max + 1)

    # None of these function calls should
    # generate a ValueError now.
    actual = mt19937.randint(lbnd, ubnd, dtype=dt)
    assert_equal(actual, tgt)
def _compute_asset_lifetimes(self):
    """
    Compute and cache a recarray of asset lifetimes.
    """
    equities_cols = self.equities.c
    buf = np.array(
        tuple(
            sa.select((
                equities_cols.sid,
                equities_cols.start_date,
                equities_cols.end_date,
            )).execute(),
        ),
        dtype='<f8',  # use doubles so we get NaNs
    )
    lifetimes = np.recarray(
        buf=buf,
        shape=(len(buf),),
        dtype=[
            ('sid', '<f8'),
            ('start', '<f8'),
            ('end', '<f8'),
        ],
    )
    start = lifetimes.start
    end = lifetimes.end
    start[np.isnan(start)] = 0  # convert missing starts to 0
    end[np.isnan(end)] = np.iinfo(int).max  # convert missing end to INTMAX
    # Cast the results back down to int.
    return lifetimes.astype([
        ('sid', '<i8'),
        ('start', '<i8'),
        ('end', '<i8'),
    ])
def append_data_column(ds, column):
    # Extend the dataset to fit the new data
    new_count = column.shape[0]
    existing_count = ds.shape[0]
    ds.resize((existing_count + new_count,))

    levels = get_levels(ds)
    if levels is not None:
        # update levels if we have new unique values
        if type(column.values) == p.Categorical:
            added_levels = set(column.values.categories) - set(levels)
        elif len(column) == 0:
            # Workaround for bug in pandas - get a crash in .unique() for an empty series
            added_levels = set([])
        else:
            added_levels = set(column.unique()) - set(levels)

        new_levels = list(levels)
        new_levels.extend(added_levels)

        # Check if the new categorical column has more levels
        # than the current bit width supports.
        # If so, rewrite the existing column data w/ more bits
        if len(new_levels) > np.iinfo(ds.dtype).max:
            new_dtype = pick_cat_dtype(len(new_levels))
            ds = widen_cat_column(ds, new_dtype)

        new_levels = np.array(new_levels, dtype=np.object)
        new_data = make_index_array(new_levels, column.values, ds.dtype)

        clear_levels(ds)
        create_levels(ds, new_levels)
    else:
        new_data = column

    # Append new data
    ds[existing_count:(existing_count + new_count)] = new_data
def compute_scale_for_cesium(coordmin, coordmax):
    '''
    Cesium quantized positions need to be in uint16.
    This function computes the best scale to apply to coordinates
    to fit the range [0, 65535].
    '''
    max_int = np.iinfo(np.uint16).max
    delta = abs(coordmax - coordmin)
    scale = 10 ** -(math.floor(math.log1p(max_int / delta) / math.log1p(10)))
    return scale
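A usage sketch, assuming the function above plus import math and import numpy as np:

# for a 100-unit-wide extent, 65535 / 100 leaves roughly two decades of headroom,
# so the function picks a scale of 10**-2
print(compute_scale_for_cesium(0.0, 100.0))   # 0.01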
def transform(self, pixels):
    data = pixels.data
    (count, height, width) = data.shape

    if not 3 <= count <= 4:
        raise Exception("Source data must be 3 or 4 bands")

    if count == 4:
        raise Exception(
            "Variable opacity (alpha channel) not yet implemented")

    # scale normalized data to the full uint8 range
    data *= np.iinfo(np.uint8).max

    rgb = np.ma.transpose(data.astype(np.uint8), [1, 2, 0])

    if data.mask.any():
        a = np.logical_and.reduce(~data.mask).astype(np.uint8) * 255
    else:
        a = np.full((rgb.shape[:-1]), 255, np.uint8)

    # Nearblack filtering for collar removal--partial, as edge values
    # will have been resampled in such a way that they don't retain
    # their crispness.
    # See https://stackoverflow.com/a/22631583 for neighborhood filtering
    # sums = np.add.reduce(data)
    # threshold = 64
    # a = np.logical_and(sums > threshold, sums <
    #     (255 * 3) - threshold).astype(np.uint8) * 255

    return PixelCollection(np.dstack((rgb, a)), pixels.bounds), 'RGBA'
def _nodata(dtype):
    if np.issubdtype(dtype, float):
        return np.finfo(dtype).min
    else:
        return np.iinfo(dtype).min
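This picks the most negative representable value as the nodata sentinel for either dtype family; a quick check:

import numpy as np
print(np.iinfo(np.int16).min)    # -32768
print(np.finfo(np.float32).min)  # -3.4028235e+38
# note: for unsigned integer dtypes iinfo(...).min is 0, so 0 becomes the nodata value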
def _create_variables(self):
    if self.input_type.ndim != 0:
        raise TypeError('Embeddings take scalar inputs.')
    dtype = tf.as_dtype(self.input_type.dtype)
    if not dtype.is_integer:
        raise TypeError('Embeddings take integer inputs.')
    if dtype not in (tf.int32, tf.int64):  # only dtypes supported by tf.gather
        if np.iinfo(dtype.as_numpy_dtype).max > 2147483647:
            # pedantic future-proofing to handle hypothetical tf.uint64
            raise TypeError('cannot gather or upcast dtype %s' % dtype)
        self._cast = True
    else:
        self._cast = False
    self._weights = tf.get_variable(
        'weights', self._weights_shape, initializer=self._initializer,
        trainable=self._trainable)
def test_signed_integer_division_overflow(self):
    # Ticket #1317.
    def test_type(t):
        min = np.array([np.iinfo(t).min])
        min //= -1

    with np.errstate(divide="ignore"):
        for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
            test_type(t)
def test_shape_invalid(self):
    # Check that the shape is valid.
    max_int = np.iinfo(np.intc).max
    max_intp = np.iinfo(np.intp).max
    # Too large values (the datatype is part of this)
    assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
    assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
    assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
    # Takes a different code path (fails earlier)
    assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
    # Negative values
    assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
    assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
def test_min_int(self):
    # Could make problems because of abs(min_int) == min_int
    min_int = np.iinfo(np.int_).min
    a = np.array([min_int], dtype=np.int_)
    assert_(np.allclose(a, a))
def test_ldexp_overflow(self):
    # silence warning emitted on overflow
    with np.errstate(over="ignore"):
        imax = np.iinfo(np.dtype('l')).max
        imin = np.iinfo(np.dtype('l')).min
        assert_equal(ncu.ldexp(2., imax), np.inf)
        assert_equal(ncu.ldexp(2., imin), 0)
def test_iter_too_large():
    # The total size of the iterator must not exceed the maximum intp due
    # to broadcasting. Dividing by 1024 will keep it small enough to
    # give a legal array.
    size = np.iinfo(np.intp).max // 1024
    arr = np.lib.stride_tricks.as_strided(np.zeros(1), (size,), (0,))
    assert_raises(ValueError, nditer, (arr, arr[:, None]))
    # test the same for multiindex. That may get more interesting when
    # removing 0 dimensional axis is allowed (since an iterator can grow then)
    assert_raises(ValueError, nditer,
                  (arr, arr[:, None]), flags=['multi_index'])
def test_export_record(self):
    dt = [('a', 'b'),
          ('b', 'h'),
          ('c', 'i'),
          ('d', 'l'),
          ('dx', 'q'),
          ('e', 'B'),
          ('f', 'H'),
          ('g', 'I'),
          ('h', 'L'),
          ('hx', 'Q'),
          ('i', np.single),
          ('j', np.double),
          ('k', np.longdouble),
          ('ix', np.csingle),
          ('jx', np.cdouble),
          ('kx', np.clongdouble),
          ('l', 'S4'),
          ('m', 'U4'),
          ('n', 'V3'),
          ('o', '?'),
          ('p', np.half),
          ]
    x = np.array(
        [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
        dtype=dt)
    y = memoryview(x)
    assert_equal(y.shape, (1,))
    assert_equal(y.ndim, 1)
    assert_equal(y.suboffsets, EMPTY)

    sz = sum([np.dtype(b).itemsize for a, b in dt])
    if np.dtype('l').itemsize == 4:
        assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
    else:
        assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
    # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
    if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
        assert_equal(y.strides, (sz,))
    assert_equal(y.itemsize, sz)
def test_basic(self):
    dts = list(zip(['i1', 'i2', 'i4', 'i8',
                    'u1', 'u2', 'u4', 'u8'],
                   [np.int8, np.int16, np.int32, np.int64,
                    np.uint8, np.uint16, np.uint32, np.uint64]))
    for dt1, dt2 in dts:
        assert_equal(iinfo(dt1).min, iinfo(dt2).min)
        assert_equal(iinfo(dt1).max, iinfo(dt2).max)

    self.assertRaises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
    types = np.sctypes['uint']
    for T in types:
        assert_equal(iinfo(T).max, T(-1))
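The test relies on modular wraparound: casting -1 to an unsigned type yields that type's maximum. A quick demonstration (using astype, since newer NumPy raises OverflowError when constructing an out-of-bounds scalar directly):

import numpy as np
x = np.array(-1).astype(np.uint16)    # wraps modulo 2**16
print(x)                              # 65535
print(x == np.iinfo(np.uint16).max)   # True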
def test_iinfo_repr(self):
    expected = "iinfo(min=-32768, max=32767, dtype=int16)"
    assert_equal(repr(np.iinfo(np.int16)), expected)
def test_int_raise_behaviour(self):
    def overflow_error_func(dtype):
        np.typeDict[dtype](np.iinfo(dtype).max + 1)

    for code in 'lLqQ':
        assert_raises(OverflowError, overflow_error_func, code)
def test_diophantine_overflow():
    # Smoke test integer overflow detection
    max_intp = np.iinfo(np.intp).max
    max_int64 = np.iinfo(np.int64).max

    if max_int64 <= max_intp:
        # Check that the algorithm works internally in 128-bit;
        # solving this problem requires large intermediate numbers
        A = (max_int64 // 2, max_int64 // 2 - 10)
        U = (max_int64 // 2, max_int64 // 2 - 10)
        b = 2 * (max_int64 // 2) - 10
        assert_equal(solve_diophantine(A, U, b), (1, 1))
def test_min_int(self):
    a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
    # Should not raise:
    assert_allclose(a, a)
def test_randint_range(self):
    # Test for ticket #1690
    lmax = np.iinfo('l').max
    lmin = np.iinfo('l').min
    try:
        random.randint(lmin, lmax)
    except:
        raise AssertionError
def test_rng_zero_and_extremes(self):
    for dt in self.itype:
        lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
        ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1

        tgt = ubnd - 1
        assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)

        tgt = lbnd
        assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)

        tgt = (lbnd + ubnd) // 2
        assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_random_integers_max_int(self):
    # Tests whether random_integers can generate the
    # maximum allowed Python int that can be converted
    # into a C long. Previous implementations of this
    # method have thrown an OverflowError when attempting
    # to generate this integer.
    actual = np.random.random_integers(np.iinfo('l').max,
                                       np.iinfo('l').max)
    desired = np.iinfo('l').max
    np.testing.assert_equal(actual, desired)
def test_random_integers_deprecated(self):
    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)

        # DeprecationWarning raised with high == None
        assert_raises(DeprecationWarning,
                      np.random.random_integers,
                      np.iinfo('l').max)

        # DeprecationWarning raised with high != None
        assert_raises(DeprecationWarning,
                      np.random.random_integers,
                      np.iinfo('l').max, np.iinfo('l').max)
def test_poisson_exceptions(self):
    lambig = np.iinfo('l').max
    lamneg = -1
    assert_raises(ValueError, np.random.poisson, lamneg)
    assert_raises(ValueError, np.random.poisson, [lamneg] * 10)
    assert_raises(ValueError, np.random.poisson, lambig)
    assert_raises(ValueError, np.random.poisson, [lambig] * 10)