Python numpy module: bool_() example source code
The code examples below, extracted from open-source Python projects, illustrate how to use numpy.bool_().
def make_ragged_mask(ragged_index, mask):
"""Convert a boolean mask from dense to ragged format.
Args:
ragged_index: A [V+1]-shaped numpy array as returned by
make_ragged_index.
mask: A [V,...]-shaped numpy array of booleans.
Returns:
A [R,...]-shaped numpy array, where R = ragged_index[-1].
"""
V = ragged_index.shape[0] - 1
R = ragged_index[-1]
assert mask.shape[0] == V
assert mask.dtype == np.bool_
ragged_mask = np.empty((R, ) + mask.shape[1:], dtype=np.bool_)
for v in range(V):
beg, end = ragged_index[v:v + 2]
ragged_mask[beg:end] = mask[v]
return ragged_mask
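As a rough illustration of the dense-to-ragged expansion performed above (the ragged_index layout below is a made-up example consistent with the docstring, not data from the original project):
import numpy as np
ragged_index = np.array([0, 2, 5, 6])   # V = 3 groups covering R = 6 ragged rows
mask = np.array([True, False, True])    # one boolean flag per group, shape [V]
print(make_ragged_mask(ragged_index, mask))
# -> [ True  True False False False  True]   (each flag repeated over its group's slice)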
def df_type_to_str(i):
'''
Convert into simple datatypes from pandas/numpy types
'''
if isinstance(i, np.bool_):
return bool(i)
if isinstance(i, np.int_):
return int(i)
if isinstance(i, float):  # np.float was an alias of the builtin float (removed in NumPy 1.24)
if np.isnan(i):
return 'NaN'
elif np.isinf(i):
return str(i)
return float(i)
if isinstance(i, np.uint):
return int(i)
if type(i) == bytes:
return i.decode('UTF-8')
if isinstance(i, (tuple, list)):
return str(i)
if i is pd.NaT: # not identified as a float null
return 'NaN'
return str(i)
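A few illustrative checks of the conversions above (np and pd are assumed to be the usual numpy/pandas imports of the original module):
assert df_type_to_str(np.bool_(True)) is True        # numpy bool -> plain Python bool
assert df_type_to_str(np.float64('nan')) == 'NaN'    # NaN floats become the string 'NaN'
assert df_type_to_str(b'bytes') == 'bytes'           # bytes are decoded to str
assert df_type_to_str(pd.NaT) == 'NaN'               # pandas NaT is mapped to 'NaN' as well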
def test_pts_in_bbs(self):
pt = np.array([1, 2])
bbs_a = np.array([1, 2, 3, 4])
assert isinstance(pts_in_bbs(pt, bbs_a), np.bool_)
assert pts_in_bbs(pt, bbs_a)
pts = np.array([
[1, 2],
[2, 3],
[3, 4]
])
bbs_b = np.array([
[1, 2, 3, 4],
[5, 6, 7, 8],
[2, 3, 4, 5]
])
assert (pts_in_bbs(pts, bbs_b) == np.array([1, 0, 1], dtype=np.bool_)).all()
def get_shapes_and_dtypes(data):
shapes = {}
dtypes = {}
for k in data.keys():
if isinstance(data[k][0], str):
shapes[k] = []
dtypes[k] = tf.string
elif isinstance(data[k][0], np.ndarray):
shapes[k] = data[k][0].shape
dtypes[k] = tf.uint8
elif isinstance(data[k][0], np.bool_):
shapes[k] = []
dtypes[k] = tf.string
else:
raise TypeError('Unknown data type', type(data[k][0]))
return shapes, dtypes
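A minimal sketch of the mapping this helper produces; the sample dict is hypothetical and tf is assumed to be the TensorFlow module imported by the original file. Note that, as written, numpy.bool_ values are declared as tf.string, just like plain Python strings.
data = {'image': [np.zeros((28, 28), dtype=np.uint8)],
        'label': ['cat'],
        'flag': [np.bool_(True)]}
shapes, dtypes = get_shapes_and_dtypes(data)
# shapes -> {'image': (28, 28), 'label': [], 'flag': []}
# dtypes -> {'image': tf.uint8, 'label': tf.string, 'flag': tf.string}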
def bool_to_str(val, **kwargs):
"""Convert input boolean to str
:param val: value to be evaluated
:returns: evaluated value
:rtype: str
"""
try:
if pd.isnull(val):
return kwargs['nan']
except BaseException:
pass
if isinstance(val, np.bool_) or isinstance(val, bool):
return str(val)
if kwargs.get('convert_inconsistent_dtypes', True):
if hasattr(val, '__str__'):
return str(val)
return kwargs['nan']
def bool_to_int(val, **kwargs):
"""Convert input boolean to int
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.int64
"""
try:
if pd.isnull(val):
return kwargs['nan']
except BaseException:
pass
if isinstance(val, np.bool_) or isinstance(val, bool):
return np.int64(val)
if kwargs.get('convert_inconsistent_dtypes', False):
try:
return np.int64(val)
except BaseException:
pass
return kwargs['nan']
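Both helpers read their fallback from kwargs['nan']; a few illustrative calls (the nan='' and nan=-1 sentinels are arbitrary choices for the example):
assert bool_to_str(np.bool_(False), nan='') == 'False'
assert bool_to_int(np.bool_(True), nan=-1) == 1      # booleans become np.int64
assert bool_to_int('not a bool', nan=-1) == -1       # non-bools fall back to the 'nan' sentinel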
def test_allany(self):
# Checks the any/all methods/functions.
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mx = masked_array(x, mask=m)
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
self.assertFalse(mxbig.all())
self.assertTrue(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
self.assertFalse(mxsmall.all())
self.assertTrue(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
assert_equal(mxsmall.any(1), [True, True, False])
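The behaviour these assertions check can be reproduced on a small standalone masked array, where masked entries are simply ignored by any() and all():
import numpy.ma as ma
a = ma.masked_array([True, False, True], mask=[False, False, True])
print(a.any())   # True  -- only the unmasked True/False entries are considered
print(a.all())   # False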
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def _recursive_make_descr(datatype, newtype=bool_):
"Private function allowing recursion in make_descr."
# Do we have some name fields ?
if datatype.names:
descr = []
for name in datatype.names:
field = datatype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recursive_make_descr(field[0], newtype)))
return descr
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
mdescr[0] = _recursive_make_descr(datatype.subdtype[0], newtype)
return tuple(mdescr)
else:
return newtype
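This is the private helper behind numpy.ma.make_mask_descr: for a structured dtype it swaps every leaf type for newtype (numpy.bool_ by default, imported as bool_ at module level). A small illustration with a made-up dtype:
dt = np.dtype([('x', np.float64), ('pos', np.float32, (2,))])
print(_recursive_make_descr(dt))
# -> [('x', numpy.bool_), ('pos', (numpy.bool_, (2,)))]   (exact repr varies by NumPy version)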
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
def for_int_dtypes(name='dtype', no_bool=False):
"""Decorator that checks the fixture with integer and optionally bool dtypes.
Args:
name(str): Argument name to which specified dtypes are passed.
no_bool(bool): If ``True``, ``numpy.bool_`` is
omitted from candidate dtypes.
dtypes to be tested are ``numpy.dtype('b')``, ``numpy.dtype('h')``,
``numpy.dtype('i')``, ``numpy.dtype('l')``, ``numpy.dtype('q')``,
``numpy.dtype('B')``, ``numpy.dtype('H')``, ``numpy.dtype('I')``,
``numpy.dtype('L')``, ``numpy.dtype('Q')``, and ``numpy.bool_`` (optional).
.. seealso:: :func:`cupy.testing.for_dtypes`,
:func:`cupy.testing.for_all_dtypes`
"""
if no_bool:
return for_dtypes(_int_dtypes, name=name)
else:
return for_dtypes(_int_bool_dtypes, name=name)
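In cupy's test suite such decorators wrap test methods, which then receive every candidate dtype through the named argument; a hedged sketch of the usual pattern (for_int_dtypes and its dtype lists come from cupy.testing):
import unittest
import numpy
class TestOnes(unittest.TestCase):
    @for_int_dtypes(name='dtype')          # iterates over the int dtypes plus numpy.bool_
    def test_ones(self, dtype):
        a = numpy.ones(3, dtype=dtype)
        self.assertEqual(a.dtype, numpy.dtype(dtype))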
def for_all_dtypes_combination(names=('dtype',),
no_float16=False, no_bool=False, full=None,
no_complex=False):
"""Decorator that checks the fixture with a product set of all dtypes.
Args:
names(list of str): Argument names to which dtypes are passed.
no_float16(bool): If ``True``, ``numpy.float16`` is
omitted from candidate dtypes.
no_bool(bool): If ``True``, ``numpy.bool_`` is
omitted from candidate dtypes.
full(bool): If ``True``, then all combinations of dtypes
will be tested.
Otherwise, the subset of combinations will be tested
(see description in :func:`cupy.testing.for_dtypes_combination`).
no_complex(bool): If, True, ``numpy.complex64`` and
``numpy.complex128`` are omitted from candidate dtypes.
.. seealso:: :func:`cupy.testing.for_dtypes_combination`
"""
types = _make_all_dtypes(no_float16, no_bool, no_complex)
return for_dtypes_combination(types, names, full)
def for_int_dtypes_combination(names=('dtype',), no_bool=False, full=None):
"""Decorator for parameterized test w.r.t. the product set of int and boolean.
Args:
names(list of str): Argument names to which dtypes are passed.
no_bool(bool): If ``True``, ``numpy.bool_`` is
omitted from candidate dtypes.
full(bool): If ``True``, then all combinations of dtypes
will be tested.
Otherwise, the subset of combinations will be tested
(see description in :func:`cupy.testing.for_dtypes_combination`).
.. seealso:: :func:`cupy.testing.for_dtypes_combination`
"""
if no_bool:
types = _int_dtypes
else:
types = _int_bool_dtypes
return for_dtypes_combination(types, names, full)
def shaped_reverse_arange(shape, xp=cupy, dtype=numpy.float32):
"""Returns an array filled with decreasing numbers.
Args:
shape(tuple of int): Shape of returned ndarray.
xp(numpy or cupy): Array module to use.
dtype(dtype): Dtype of returned ndarray.
Returns:
numpy.ndarray or cupy.ndarray:
The array filled with :math:`N, \\cdots, 1` with specified dtype
with given shape, array module.
Here, :math:`N` is the size of the returned array.
If ``dtype`` is ``numpy.bool_``, evens (resp. odds) are converted to
``True`` (resp. ``False``).
"""
dtype = numpy.dtype(dtype)
size = internal.prod(shape)
a = numpy.arange(size, 0, -1)
if dtype == '?':
a = a % 2 == 0
elif dtype.kind == 'c':
a = a + a * 1j
return xp.array(a.astype(dtype).reshape(shape))
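For a boolean dtype the decreasing range collapses to the even/odd pattern described in the docstring (numpy-only sketch; the helper's module is assumed to provide its cupy and internal imports):
print(shaped_reverse_arange((6,), xp=numpy, dtype=numpy.bool_))
# -> [ True False  True False  True False]   (6, 5, 4, 3, 2, 1 mapped to even/odd)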
def _fetch_documentation(version, base_url="https://spark.apache.org/docs"):
doc_urls = [
"{base_url}/{version}/configuration.html",
"{base_url}/{version}/sql-programming-guide.html",
"{base_url}/{version}/monitoring.html",
"{base_url}/{version}/spark-standalone.html",
"{base_url}/{version}/running-on-mesos.html",
"{base_url}/{version}/running-on-yarn.html",
]
for url in doc_urls:
doc_url = url.format(version=version, base_url=base_url)
# print(url)
print("Loading spark properties from %s", doc_url)
dfs = pd.read_html(doc_url, header=0)
desired_cols = ["Property Name", "Default", "Meaning"]
for df in dfs:
if ("Property Name" in df) and ('Default' in df):
for pn, default, desc in df[desired_cols].itertuples(index=False):
if type(default) == numpy.bool_:
default = bool(default)
yield pn, default, desc
def masked_matrix(matrix, all_zero=False):
"""
Returns masked version of HicMatrix. By default, all entries in zero-count
rows and columns are masked.
:param matrix: A numpy 2D matrix
:param all_zero: Mask ALL zero-count entries
:returns: MaskedArray with zero entries masked
"""
if all_zero:
return np.ma.MaskedArray(matrix, mask=np.isclose(matrix, 0.))
col_zero = np.isclose(np.sum(matrix, axis=0), 0.)
row_zero = np.isclose(np.sum(matrix, axis=1), 0.)
mask = np.zeros(matrix.shape, dtype=np.bool_)
mask[:, col_zero] = np.True_
mask[row_zero, :] = np.True_
return np.ma.MaskedArray(matrix, mask=mask)
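A tiny example of the default masking behaviour, where any row or column summing to zero is masked entirely:
m = np.array([[1., 0.],
              [0., 0.]])
print(masked_matrix(m))
# [[1.0 --]
#  [-- --]]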
def _fix_type(value):
"""convert possible types to str, float, and bool"""
# Because numpy floats can not be pickled to json
if isinstance(value, string_types):
return str(value)
if isinstance(value, float_):
return float(value)
if isinstance(value, bool_):
return bool(value)
if isinstance(value, set):
return list(value)
if isinstance(value, Basic):
return str(value)
if hasattr(value, 'id'):
return str(value.id)
# if value is None:
# return ''
return value
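Assuming the surrounding module imports bool_ and float_ from numpy (plus string_types and Basic from its other dependencies), the conversions behave roughly like this:
assert _fix_type(np.bool_(True)) is True             # numpy bool -> Python bool
assert _fix_type(np.float64(1.5)) == 1.5             # numpy float -> Python float
assert sorted(_fix_type({'b', 'a'})) == ['a', 'b']   # sets become lists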
def __init__(self, match_fn=TermMatch, binary=True, dtype=np.bool_,
**cv_params):
"""initializes a Matching object
:match_fn: A matching function of signature `docs, query`
-> indices of matching docs
:binary: Store only binary term occurrences.
:dtype: Data type of internal feature matrix
:cv_params: Parameter for the count vectorizer such as lowercase=True
"""
# RetrievalBase.__init__(self)
self._match_fn = match_fn
self._vect = CountVectorizer(binary=binary, dtype=dtype,
**cv_params)
def default(self, obj):
# convert dates and numpy objects in a json serializable format
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
elif type(obj) in (np.int_, np.intc, np.intp, np.int8, np.int16,
np.int32, np.int64, np.uint8, np.uint16,
np.uint32, np.uint64):
return int(obj)
elif type(obj) in (np.bool_,):
return bool(obj)
elif type(obj) in (np.float_, np.float16, np.float32, np.float64,
np.complex_, np.complex64, np.complex128):
return float(obj)
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, obj)
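A hedged sketch of how an encoder with this default() is typically wired up and used; the class name NumpyAwareEncoder and the type() wiring are invented for the example:
import json
import numpy as np
from datetime import datetime
NumpyAwareEncoder = type('NumpyAwareEncoder', (json.JSONEncoder,), {'default': default})
payload = {'flag': np.bool_(True), 'count': np.int64(3), 'when': datetime(2017, 1, 1)}
print(json.dumps(payload, cls=NumpyAwareEncoder))
# {"flag": true, "count": 3, "when": "2017-01-01T00:00:00Z"}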
def _init_discrete_filter_masks(self):
"""Create an array of passing ids for every discrete valued filter.
:rtype: dict `{filter_name: {value: [ids]}}`"""
translated = tuple(TRANSLATION.get(f,f) for f in ['variant_id']+DISCRETE_FILTER_NAMES)
cursor = connections[self.db].cursor()
cursor.execute("SELECT {} FROM variants".format(','.join(translated)))
# Create a variants mask per couple (filter, value), with 1 at indices corresponding to passing variants
variant_masks = {t:defaultdict(partial(np.zeros, self._N, dtype=np.bool_)) for t in DISCRETE_FILTER_NAMES}
enum_values = {t:set() for t in DISCRETE_FILTER_NAMES}
irange = range(1,len(translated))
for row in cursor:
vid = row[0] # variant id
for i in irange:
val = row[i]
fname = DISCRETE_FILTER_NAMES[i-1]
variant_masks[fname][val][vid-1] = 1
enum_values[fname].add(val)
# Pack and cache the result
for fname in DISCRETE_FILTER_NAMES:
for val, mask in variant_masks[fname].items():
mask = masking.pack(mask)
self.save_mask(mask, fname, val)
self.save_enum_values(enum_values)
self._masks_ready = True
def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False):
if units[0] != units[1]:
u1d = units[0].is_dimensionless
u2d = units[1].is_dimensionless
any_nonzero = [np.any(inps[0]), np.any(inps[1])]
if any_nonzero[0] == np.bool_(False):
units = (units[1], units[1])
elif any_nonzero[1] == np.bool_(False):
units = (units[0], units[0])
elif not any([u1d, u2d]):
if not units[0].same_dimensions_as(units[1]):
raise YTUnitOperationError(ufunc, *units)
else:
if raise_error:
raise YTUfuncUnitError(ufunc, *units)
inps = (inps[0], ret_class(inps[1]).to(
ret_class(inps[0]).units))
return inps, units
def apply_fast_gt(value_left, value_right, index_left, index_right):
index = index_left * index_right
if len(value_left) > len(value_right):
result = np.empty(len(value_left), dtype=np.bool_)
else:
result = np.empty(len(value_right), dtype=np.bool_)
cursor_result = 0
cursor_left = 0
cursor_right = 0
for il, ir in zip(index_left, index_right):
if il & ir:
result[cursor_result] = value_left[cursor_left] >\
value_right[cursor_right]
cursor_result += 1
cursor_left += 1
cursor_right += 1
elif il:
cursor_left += 1
elif ir:
cursor_right += 1
return result[:cursor_result], index
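A small worked example of the aligned greater-than comparison; the calling convention shown here (the index arrays mark which positions carry a value) is my reading of the code, not documentation from the original project:
value_left = np.array([3, 8, 2])
index_left = np.array([1, 1, 0, 1], dtype=np.bool_)
value_right = np.array([1, 9, 2])
index_right = np.array([1, 0, 1, 1], dtype=np.bool_)
values, index = apply_fast_gt(value_left, value_right, index_left, index_right)
# values -> [ True False]                (3 > 1 at position 0, 2 > 2 at position 3)
# index  -> [ True False False  True]    (positions present on both sides)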
def apply_fast_ge(value_left, value_right, index_left, index_right):
index = index_left * index_right
if len(value_left) > len(value_right):
result = np.empty(len(value_left), dtype=np.bool_)
else:
result = np.empty(len(value_right), dtype=np.bool_)
cursor_result = 0
cursor_left = 0
cursor_right = 0
for il, ir in zip(index_left, index_right):
if il & ir:
result[cursor_result] = value_left[cursor_left] >=\
value_right[cursor_right]
cursor_result += 1
cursor_left += 1
cursor_right += 1
elif il:
cursor_left += 1
elif ir:
cursor_right += 1
return result[:cursor_result], index
def apply_fast_lt(value_left, value_right, index_left, index_right):
index = index_left * index_right
if len(value_left) > len(value_right):
result = np.empty(len(value_left), dtype=np.bool_)
else:
result = np.empty(len(value_right), dtype=np.bool_)
cursor_result = 0
cursor_left = 0
cursor_right = 0
for il, ir in zip(index_left, index_right):
if il & ir:
result[cursor_result] = value_left[cursor_left] <\
value_right[cursor_right]
cursor_result += 1
cursor_left += 1
cursor_right += 1
elif il:
cursor_left += 1
elif ir:
cursor_right += 1
return result[:cursor_result], index
def apply_fast_le(value_left, value_right, index_left, index_right):
index = index_left * index_right
if len(value_left) > len(value_right):
result = np.empty(len(value_left), dtype=np.bool_)
else:
result = np.empty(len(value_right), dtype=np.bool_)
cursor_result = 0
cursor_left = 0
cursor_right = 0
for il, ir in zip(index_left, index_right):
if il & ir:
result[cursor_result] = value_left[cursor_left] <=\
value_right[cursor_right]
cursor_result += 1
cursor_left += 1
cursor_right += 1
elif il:
cursor_left += 1
elif ir:
cursor_right += 1
return result[:cursor_result], index
def isin_sorted(base, test):
result = np.empty(base.shape, dtype=np.bool_)
cursor_result = 0
cursor_test = 0
for elem in base:
result[cursor_result] = False
for i in range(len(test)):
if elem < test[cursor_test]:
break
elif elem == test[cursor_test]:
result[cursor_result] = True
break
else:
# array exhausted
if cursor_test == len(test) - 1:
break
# Advance test array
else:
cursor_test += 1
cursor_result += 1
return result
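For already-sorted inputs the helper behaves like a membership test over base (illustrative call):
base = np.array([1, 3, 5, 7])
test = np.array([3, 4, 7])
print(isin_sorted(base, test))
# -> [False  True False  True]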
def for_all_dtypes_combination(names=['dtype'],
no_float16=False, no_bool=False, full=None):
"""Decorator that checks the fixture with a product set of all dtypes.
Args:
names(list of str): Argument names to which dtypes are passed.
no_float16(bool): If ``True``, ``numpy.float16`` is
omitted from candidate dtypes.
no_bool(bool): If ``True``, ``numpy.bool_`` is
omitted from candidate dtypes.
full(bool): If ``True``, then all combinations of dtypes
will be tested.
Otherwise, the subset of combinations will be tested
(see description in :func:`cupy.testing.for_dtypes_combination`).
.. seealso:: :func:`cupy.testing.for_dtypes_combination`
"""
types = _make_all_dtypes(no_float16, no_bool)
return for_dtypes_combination(types, names, full)
def for_int_dtypes_combination(names=['dtype'], no_bool=False, full=None):
"""Decorator for parameterized test w.r.t. the product set of int and boolean.
Args:
names(list of str): Argument names to which dtypes are passed.
no_bool(bool): If ``True``, ``numpy.bool_`` is
omitted from candidate dtypes.
full(bool): If ``True``, then all combinations of dtypes
will be tested.
Otherwise, the subset of combinations will be tested
(see description in :func:`cupy.testing.for_dtypes_combination`).
.. seealso:: :func:`cupy.testing.for_dtypes_combination`
"""
if no_bool:
types = _int_dtypes
else:
types = _int_bool_dtypes
return for_dtypes_combination(types, names, full)
def shaped_arange(shape, xp=cupy, dtype=numpy.float32):
"""Returns an array with given shape, array module, and dtype.
Args:
shape(tuple of int): Shape of returned ndarray.
xp(numpy or cupy): Array module to use.
dtype(dtype): Dtype of returned ndarray.
Returns:
numpy.ndarray or cupy.ndarray:
The array filled with :math:`1, \cdots, N` with specified dtype
with given shape, array module. Here, :math:`N` is
the size of the returned array.
If ``dtype`` is ``numpy.bool_``, evens (resp. odds) are converted to
``True`` (resp. ``False``).
"""
a = numpy.arange(1, internal.prod(shape) + 1, 1)
if numpy.dtype(dtype).type == numpy.bool_:
return xp.array((a % 2 == 0).reshape(shape))
else:
return xp.array(a.astype(dtype).reshape(shape))
def __is_adversarial(self, image, predictions):
"""Interface to criterion.is_adverarial that calls
__new_adversarial if necessary.
Parameters
----------
predictions : :class:`numpy.ndarray`
A vector with the pre-softmax predictions for some image.
label : int
The label of the unperturbed reference image.
"""
is_adversarial = self.__criterion.is_adversarial(
predictions, self.__original_class)
if is_adversarial:
is_best, distance = self.__new_adversarial(image)
else:
is_best = False
distance = None
assert isinstance(is_adversarial, bool) or \
isinstance(is_adversarial, np.bool_)
return is_adversarial, is_best, distance
def is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
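Informally, the check accepts boolean ndarrays/Series and all-boolean lists and rejects anything else; a couple of simple cases, assuming the pandas-internal imports (ABCSeries, lib, isnull) of the original module are in place:
assert is_bool_indexer(np.array([True, False], dtype=np.bool_))
assert is_bool_indexer([True, False, True])
assert not is_bool_indexer([1, 0, 1])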
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_to_frame_mixed(self):
panel = self.panel.fillna(0)
panel['str'] = 'foo'
panel['bool'] = panel['ItemA'] > 0
lp = panel.to_frame()
wp = lp.to_panel()
self.assertEqual(wp['bool'].values.dtype, np.bool_)
# Previously, this was mutating the underlying index and changing its
# name
assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
# GH 8704
# with categorical
df = panel.to_frame()
df['category'] = df['str'].astype('category')
# to_panel
# TODO: this converts back to object
p = df.to_panel()
expected = panel.copy()
expected['category'] = 'foo'
assert_panel_equal(p, expected)
def test_select_dtypes_exclude_include(self):
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.date_range('now', periods=3).values})
exclude = np.datetime64,
include = np.bool_, 'integer'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'c', 'e']]
assert_frame_equal(r, e)
exclude = 'datetime',
include = 'bool', 'int64', 'int32'
r = df.select_dtypes(include=include, exclude=exclude)
e = df[['b', 'e']]
assert_frame_equal(r, e)
def test_where(self):
def testit():
for f in [self.frame, self.frame2, self.mixed, self.mixed2]:
for cond in [True, False]:
c = np.empty(f.shape, dtype=np.bool_)
c.fill(cond)
result = expr.where(c, f.values, f.values + 1)
expected = np.where(c, f.values, f.values + 1)
tm.assert_numpy_array_equal(result, expected)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_empty_positions(self):
"""
make sure all the empty position stats return a numeric 0
Originally this bug was due to np.dot([], []) returning
np.bool_(False)
"""
pt = perf.PositionTracker(self.env.asset_finder)
pos_stats = pt.stats()
stats = [
'net_value',
'net_exposure',
'gross_value',
'gross_exposure',
'short_value',
'short_exposure',
'shorts_count',
'long_value',
'long_exposure',
'longs_count',
]
for name in stats:
val = getattr(pos_stats, name)
self.assertEquals(val, 0)
self.assertNotIsInstance(val, (bool, np.bool_))
def eq(a, b):
"""The great missing equivalence function: Guaranteed evaluation to a single bool value."""
if a is b:
return True
try:
with warnings.catch_warnings(module=np): # ignore numpy futurewarning (numpy v. 1.10)
e = a==b
except ValueError:
return False
except AttributeError:
return False
except:
print('failed to evaluate equivalence for:')
print(" a:", str(type(a)), str(a))
print(" b:", str(type(b)), str(b))
raise
t = type(e)
if t is bool:
return e
elif t is np.bool_:
return bool(e)
elif isinstance(e, np.ndarray) or (hasattr(e, 'implements') and e.implements('MetaArray')):
try: ## disaster: if a is an empty array and b is not, then e.all() is True
if a.shape != b.shape:
return False
except:
return False
if (hasattr(e, 'implements') and e.implements('MetaArray')):
return e.asarray().all()
else:
return e.all()
else:
raise Exception("== operator returned type %s" % str(type(e)))
def only_bool(val):
""" Pass input value or array only if it is a bool
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.bool or np.ndarray
"""
if isinstance(val, np.bool_) or isinstance(val, bool):
return bool(val)  # np.bool was an alias of the builtin bool (removed in NumPy 1.24)
elif hasattr(val, '__iter__') and not isinstance(val, str):
return np.asarray(list(filter(lambda s: isinstance(s, np.bool_) or isinstance(s, bool), val)))
return None
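The helper either passes a single boolean through or filters an iterable down to its boolean elements (illustrative calls):
assert only_bool(np.bool_(False)) is False
print(only_bool([True, 1, 'x', np.bool_(False)]))
# -> [ True False]
assert only_bool('not a bool') is None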
def determine_preferred_dtype(dtype_cnt):
"""Determine preferred column data type"""
# get sorted type counts for column
type_cnts = dtype_cnt.most_common()
if not type_cnts:
return None
# determine preferred type from types with the highest count
type_order = {str: '0', np.float64: '1', np.int64: '2', np.bool_: '3'}
return sorted((cnt[0] for cnt in type_cnts if cnt[1] == type_cnts[0][1]),
key=lambda t: type_order.get(t, t.__name__))[0]
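The dtype_cnt argument is a counter of observed types for a column; when counts tie, the type_order above prefers str over floats, ints and bools (small illustration with a made-up Counter):
from collections import Counter
assert determine_preferred_dtype(Counter({np.float64: 3, str: 3, np.bool_: 1})) == str
assert determine_preferred_dtype(Counter()) is None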
def test_boolean(self):
p = Parameter('test_bool', 'boolean')
s = p.random_sample()
self.assertTrue(isinstance(s, np.bool_))