Python numpy module: spacing() code examples
The code examples below were extracted from open-source Python projects and illustrate how numpy.spacing() is used in practice.
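Before the project snippets, here is a minimal standalone sketch of what numpy.spacing() returns: the distance from its argument to the next representable floating-point number (one ULP). The snippets below generally assume numpy has already been imported as np (or numpy).

import numpy as np

print(np.spacing(1.0) == np.finfo(np.float64).eps)  # True: the ULP at 1.0 is machine epsilon
print(np.spacing(1e30) > 0)                         # the spacing grows with the magnitude of the argument
eps = np.spacing(1)                                 # common idiom below: a tiny positive constant
print(1.0 + eps > 1.0)                              # True: one ULP above 1.0 is a distinct float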
def test_spacing_nextafter(self):
    """Test np.spacing and np.nextafter"""
    # All non-negative finite #'s
    a = np.arange(0x7c00, dtype=uint16)
    hinf = np.array((np.inf,), dtype=float16)
    a_f16 = a.view(dtype=float16)
    assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
    # switch to negatives
    a |= 0x8000
    assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
    assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
    assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
    assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
    assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def validate_cov_matrix(M):
    M = (M + M.T) * 0.5
    k = 0
    I = np.eye(M.shape[0])
    while True:
        try:
            _ = np.linalg.cholesky(M)
            break
        except np.linalg.LinAlgError:
            # Find the nearest positive definite matrix for M. Modified from
            # http://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd
            # Might take several minutes
            k += 1
            w, v = np.linalg.eig(M)
            min_eig = w.min()  # smallest eigenvalue (w holds eigenvalues, v eigenvectors)
            M += (-min_eig * k * k + np.spacing(min_eig)) * I
    return M
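A hypothetical usage sketch for the helper above, assuming numpy is imported as np and validate_cov_matrix is in scope. The matrix values are made up; the point is that an indefinite symmetric matrix is nudged until Cholesky succeeds.

import numpy as np

C = np.array([[2.0, 0.0, 0.0],
              [0.0, 1.0, 0.0],
              [0.0, 0.0, -0.5]])       # symmetric but indefinite: one negative eigenvalue
C_pd = validate_cov_matrix(C.copy())
np.linalg.cholesky(C_pd)               # now succeeds: the repaired matrix is positive definite
print(np.linalg.eigvalsh(C_pd).min() > 0)  # True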
def __init__(self, data_manager, t_layer_sizes, p_layer_sizes, dropout=0):
    print('{:25}'.format("Initializing Model"), end='', flush=True)
    self.t_layer_sizes = t_layer_sizes
    self.p_layer_sizes = p_layer_sizes
    self.dropout = dropout
    self.data_manager = data_manager
    self.t_input_size = self.data_manager.f.feature_count
    self.output_size = self.data_manager.s.information_count
    self.time_model = StackedCells(self.t_input_size, celltype=LSTM, layers=t_layer_sizes)
    self.time_model.layers.append(PassthroughLayer())
    p_input_size = t_layer_sizes[-1] + self.output_size
    self.pitch_model = StackedCells(p_input_size, celltype=LSTM, layers=p_layer_sizes)
    self.pitch_model.layers.append(Layer(p_layer_sizes[-1], self.output_size, activation=T.nnet.sigmoid))
    self.conservativity = T.fscalar()
    self.srng = T.shared_randomstreams.RandomStreams(np.random.randint(0, 1024))
    self.epsilon = np.spacing(np.float32(1.0))
    print("Done")
def _logL(distr, y, y_hat):
    """The log likelihood."""
    if distr in ['softplus', 'poisson']:
        eps = np.spacing(1)
        logL = np.sum(y * np.log(y_hat + eps) - y_hat)
    elif distr == 'gaussian':
        logL = -0.5 * np.sum((y - y_hat)**2)
    elif distr == 'binomial':
        # analytical formula
        logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
        # but this prevents underflow
        # z = beta0 + np.dot(X, beta)
        # logL = np.sum(y * z - np.log(1 + np.exp(z)))
    elif distr == 'probit':
        logL = np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))
    elif distr == 'gamma':
        # see
        # https://www.statistics.ma.tum.de/fileadmin/w00bdb/www/czado/lec8.pdf
        nu = 1.  # shape parameter, exponential for now
        logL = np.sum(nu * (-y / y_hat - np.log(y_hat)))
    return logL
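A small, hypothetical call of the _logL helper above for the Poisson case (assumes numpy as np and _logL are in scope; the data values are made up). The eps = np.spacing(1) guard keeps np.log(y_hat + eps) finite even when a predicted rate is exactly zero.

import numpy as np

y = np.array([0.0, 1.0, 3.0, 2.0])
y_hat = np.array([0.0, 1.2, 2.5, 2.0])  # first prediction is exactly zero
print(_logL('poisson', y, y_hat))        # finite log likelihood, no -inf or NaN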
def unitize(v):
    """
    UNIT Unitize a vector
    :param v: given vector
    :return: a unit vector parallel to v
    Raises an error when v is non-symbolic and norm(v) is zero.
    """
    n = np.linalg.norm(v, "fro")
    # Todo ISA
    if n > np.spacing([1])[0]:
        return v / n
    else:
        raise AttributeError("Vector has zero norm")
# ---------------------------------------------------------------------------------------#
def SID(s1, s2):
    """
    Computes the spectral information divergence between two vectors.

    Parameters:
        s1: `numpy array`
            The first vector.
        s2: `numpy array`
            The second vector.

    Returns: `float`
        Spectral information divergence between s1 and s2.

    Reference:
        C.-I. Chang, "An Information-Theoretic Approach to Spectral Variability,
        Similarity, and Discrimination for Hyperspectral Image Analysis,"
        IEEE Transactions on Information Theory, vol. 46, no. 5, August 2000.
    """
    p = (s1 / np.sum(s1)) + np.spacing(1)
    q = (s2 / np.sum(s2)) + np.spacing(1)
    return np.sum(p * np.log(p / q) + q * np.log(q / p))
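A hypothetical example for the SID function above (assumes numpy as np and SID are in scope; the spectra are made up). Identical spectra give zero divergence; similar but distinct spectra give a small positive value.

import numpy as np

s1 = np.array([0.2, 0.4, 0.3, 0.1])
s2 = np.array([0.25, 0.35, 0.3, 0.1])
print(SID(s1, s1))   # 0.0
print(SID(s1, s2))   # small positive divergence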
def laplacian(W, normalized=True):
    """Return the Laplacian of the weight matrix."""
    # Degree matrix.
    d = W.sum(axis=0)
    # Laplacian matrix.
    if not normalized:
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        L = D - W
    else:
        d += np.spacing(np.array(0, W.dtype))
        d = 1 / np.sqrt(d)
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        I = scipy.sparse.identity(d.size, dtype=W.dtype)
        L = I - D * W * D
    # assert np.abs(L - L.T).mean() < 1e-9
    assert type(L) is scipy.sparse.csr.csr_matrix
    return L
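A hypothetical usage sketch for the laplacian helper above on a tiny weighted graph (assumes numpy as np, scipy.sparse, and the function are in scope, and a scipy version for which the snippet's final type assert holds). The np.spacing(...) added to the degree vector is what prevents a division by zero for isolated vertices.

import numpy as np
import scipy.sparse

W = scipy.sparse.csr_matrix(np.array([[0., 1., 0.],
                                      [1., 0., 2.],
                                      [0., 2., 0.]]))
L = laplacian(W, normalized=True)
print(L.toarray())   # normalized graph Laplacian, 3x3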
def sensitivity(Ntp, Nfn, eps=numpy.spacing(1)):
    """Sensitivity

    Wikipedia entry https://en.wikipedia.org/wiki/Sensitivity_and_specificity

    Parameters
    ----------
    Ntp : int >= 0
        Number of true positives
    Nfn : int >= 0
        Number of false negatives
    eps : float
        eps
        (Default value=numpy.spacing(1))

    Returns
    -------
    sensitivity : float
        Sensitivity
    """
    return float(Ntp / (Ntp + Nfn + eps))
def specificity(Ntn, Nfp, eps=numpy.spacing(1)):
    """Specificity

    Wikipedia entry https://en.wikipedia.org/wiki/Sensitivity_and_specificity

    Parameters
    ----------
    Ntn : int >= 0
        Number of true negatives
    Nfp : int >= 0
        Number of false positives
    eps : float
        eps
        (Default value=numpy.spacing(1))

    Returns
    -------
    specificity : float
        Specificity
    """
    return float(Ntn / (Ntn + Nfp + eps))
def substitution_rate(Nref, Nsubstitutions, eps=numpy.spacing(1)):
    """Substitution rate

    Parameters
    ----------
    Nref : int >= 0
        Number of entries in the reference
    Nsubstitutions : int >= 0
        Number of substitutions
    eps : float
        eps
        (Default value=numpy.spacing(1))

    Returns
    -------
    substitution_rate : float
        Substitution rate
    """
    return float(Nsubstitutions / (Nref + eps))
def deletion_rate(Nref, Ndeletions, eps=numpy.spacing(1)):
    """Deletion rate

    Parameters
    ----------
    Nref : int >= 0
        Number of entries in the reference
    Ndeletions : int >= 0
        Number of deletions
    eps : float
        eps
        (Default value=numpy.spacing(1))

    Returns
    -------
    deletion_rate : float
        Deletion rate
    """
    return float(Ndeletions / (Nref + eps))
def insertion_rate(Nref, Ninsertions, eps=numpy.spacing(1)):
    """Insertion rate

    Parameters
    ----------
    Nref : int >= 0
        Number of entries in the reference
    Ninsertions : int >= 0
        Number of insertions
    eps : float
        eps
        (Default value=numpy.spacing(1))

    Returns
    -------
    insertion_rate : float
        Insertion rate
    """
    return float(Ninsertions / (Nref + eps))
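A hypothetical check of the eps-guarded metrics above (assumes these helpers, and the numpy import they rely on, are in scope; the counts are made up). The eps = numpy.spacing(1) default turns would-be divisions by zero into harmless finite results.

print(sensitivity(Ntp=8, Nfn=2))              # approximately 0.8
print(specificity(Ntn=0, Nfp=0))              # 0.0 rather than a ZeroDivisionError
print(insertion_rate(Nref=0, Ninsertions=3))  # very large but finite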
def feat_eog(signals):
    """
    calculate the EOG features
    :param signals: 1D or 2D signals
    """
    if signals.ndim == 1:
        signals = np.expand_dims(signals, 0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals, axis=1)).real
    feats = np.zeros((signals.shape[0], 15), dtype='float32')
    delta = np.sum(np.abs(w[:, np.arange(0.5*nsamp/sfreq, 4*nsamp/sfreq, dtype=int)]), axis=1)
    theta = np.sum(np.abs(w[:, np.arange(4*nsamp/sfreq, 8*nsamp/sfreq, dtype=int)]), axis=1)
    alpha = np.sum(np.abs(w[:, np.arange(8*nsamp/sfreq, 13*nsamp/sfreq, dtype=int)]), axis=1)
    beta = np.sum(np.abs(w[:, np.arange(13*nsamp/sfreq, 20*nsamp/sfreq, dtype=int)]), axis=1)
    gamma = np.sum(np.abs(w[:, np.arange(20*nsamp/sfreq, 50*nsamp/sfreq, dtype=int)]), axis=1)  # only up to 50 Hz, because sfreq == 100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:, 0] = delta / sum_abs_pow
    feats[:, 1] = theta / sum_abs_pow
    feats[:, 2] = alpha / sum_abs_pow
    feats[:, 3] = beta / sum_abs_pow
    feats[:, 4] = gamma / sum_abs_pow
    feats[:, 5] = np.dot(np.array([3.5, 4, 5, 7, 30]), feats[:, 0:5].T) / (sfreq/2 - 0.5)  # smean
    feats[:, 6] = np.sqrt(np.max(signals, axis=1))  # PAV
    feats[:, 7] = np.sqrt(np.abs(np.min(signals, axis=1)))  # VAV
    feats[:, 8] = np.argmax(signals, axis=1) / nsamp  # PAP
    feats[:, 9] = np.argmin(signals, axis=1) / nsamp  # VAP
    feats[:, 10] = np.sqrt(np.sum(np.abs(signals), axis=1) / np.mean(np.sum(np.abs(signals), axis=1)))  # AUC
    feats[:, 11] = np.sum(((np.roll(np.sign(signals), 1, axis=1) - np.sign(signals)) != 0).astype(int), axis=1) / nsamp  # TVC
    feats[:, 12] = np.log10(np.std(signals, axis=1))  # STD/VAR
    feats[:, 13] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))  # kurtosis
    feats[:, 14] = np.log10(-np.sum([(x/nsamp) * (np.log((x + np.spacing(1)) / nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)], axis=1))  # histogram entropy, in one line
    if np.any(np.isnan(feats)):  # note: feats == np.nan is always False, so np.isnan is needed here
        print('NaN detected')
    return np.nan_to_num(feats)
def feat_emg(signals):
    """
    calculate the EMG features (median frequency etc.), as defined by Leangkvist (2012)
    """
    if signals.ndim == 1:
        signals = np.expand_dims(signals, 0)
    sfreq = use_sfreq
    nsamp = float(signals.shape[1])
    w = (fft(signals, axis=1)).real
    feats = np.zeros((signals.shape[0], 13), dtype='float32')
    delta = np.sum(np.abs(w[:, np.arange(0.5*nsamp/sfreq, 4*nsamp/sfreq, dtype=int)]), axis=1)
    theta = np.sum(np.abs(w[:, np.arange(4*nsamp/sfreq, 8*nsamp/sfreq, dtype=int)]), axis=1)
    alpha = np.sum(np.abs(w[:, np.arange(8*nsamp/sfreq, 13*nsamp/sfreq, dtype=int)]), axis=1)
    beta = np.sum(np.abs(w[:, np.arange(13*nsamp/sfreq, 20*nsamp/sfreq, dtype=int)]), axis=1)
    gamma = np.sum(np.abs(w[:, np.arange(20*nsamp/sfreq, 50*nsamp/sfreq, dtype=int)]), axis=1)  # only up to 50 Hz, because sfreq == 100
    sum_abs_pow = delta + theta + alpha + beta + gamma
    feats[:, 0] = delta / sum_abs_pow
    feats[:, 1] = theta / sum_abs_pow
    feats[:, 2] = alpha / sum_abs_pow
    feats[:, 3] = beta / sum_abs_pow
    feats[:, 4] = gamma / sum_abs_pow
    feats[:, 5] = np.dot(np.array([3.5, 4, 5, 7, 30]), feats[:, 0:5].T) / (sfreq/2 - 0.5)  # smean
    emg = np.sum(np.abs(w[:, np.arange(12.5*nsamp/sfreq, 32*nsamp/sfreq, dtype=int)]), axis=1)
    feats[:, 6] = emg / np.sum(np.abs(w[:, np.arange(8*nsamp/sfreq, 32*nsamp/sfreq, dtype=int)]), axis=1)  # ratio of high-frequency to total motor band power
    feats[:, 7] = np.median(np.abs(w[:, np.arange(8*nsamp/sfreq, 32*nsamp/sfreq, dtype=int)]), axis=1)  # median freq
    feats[:, 8] = np.mean(np.abs(w[:, np.arange(8*nsamp/sfreq, 32*nsamp/sfreq, dtype=int)]), axis=1)  # mean freq
    feats[:, 9] = np.std(signals, axis=1)  # std
    feats[:, 10] = np.mean(signals, axis=1)
    feats[:, 11] = np.log10(stats.kurtosis(signals, fisher=False, axis=1))
    feats[:, 12] = np.log10(-np.sum([(x/nsamp) * (np.log((x + np.spacing(1)) / nsamp)) for x in np.apply_along_axis(lambda x: np.histogram(x, bins=8)[0], 1, signals)], axis=1))  # histogram entropy, in one line
    if np.any(np.isnan(feats)):  # note: feats == np.nan is always False, so np.isnan is needed here
        print('NaN detected')
    return np.nan_to_num(feats)
def _test_spacing(t):
    one = t(1)
    eps = np.finfo(t).eps
    nan = t(np.nan)
    inf = t(np.inf)
    with np.errstate(invalid='ignore'):
        assert_(np.spacing(one) == eps)
        assert_(np.isnan(np.spacing(nan)))
        assert_(np.isnan(np.spacing(inf)))
        assert_(np.isnan(np.spacing(-inf)))
        assert_(np.spacing(t(1e30)) != 0)
def test_spacing_gfortran():
    # Reference from this fortran file, built with gfortran 4.3.3 on linux
    # 32bits:
    #       PROGRAM test_spacing
    #         INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37)
    #         INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200)
    #
    #         WRITE(*,*) spacing(0.00001_DBL)
    #         WRITE(*,*) spacing(1.0_DBL)
    #         WRITE(*,*) spacing(1000._DBL)
    #         WRITE(*,*) spacing(10500._DBL)
    #
    #         WRITE(*,*) spacing(0.00001_SGL)
    #         WRITE(*,*) spacing(1.0_SGL)
    #         WRITE(*,*) spacing(1000._SGL)
    #         WRITE(*,*) spacing(10500._SGL)
    #       END PROGRAM
    ref = {np.float64: [1.69406589450860068E-021,
                        2.22044604925031308E-016,
                        1.13686837721616030E-013,
                        1.81898940354585648E-012],
           np.float32: [9.09494702E-13,
                        1.19209290E-07,
                        6.10351563E-05,
                        9.76562500E-04]}
    for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
        x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
        assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_)
def test_nextafter_vs_spacing():
    # XXX: spacing does not handle long double yet
    for t in [np.float32, np.float64]:
        for _f in [1, 1e-5, 1000]:
            f = t(_f)
            f1 = t(_f + 1)
            assert_(np.nextafter(f, f1) - f == np.spacing(f))
def test_nans_infs(self):
    with np.errstate(all='ignore'):
        # Check some of the ufuncs
        assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
        assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
        assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
        assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
        assert_equal(np.spacing(float16(65504)), np.inf)
        # Check comparisons of all values with NaN
        nan = float16(np.nan)
        assert_(not (self.all_f16 == nan).any())
        assert_(not (nan == self.all_f16).any())
        assert_((self.all_f16 != nan).all())
        assert_((nan != self.all_f16).all())
        assert_(not (self.all_f16 < nan).any())
        assert_(not (nan < self.all_f16).any())
        assert_(not (self.all_f16 <= nan).any())
        assert_(not (nan <= self.all_f16).any())
        assert_(not (self.all_f16 > nan).any())
        assert_(not (nan > self.all_f16).any())
        assert_(not (self.all_f16 >= nan).any())
        assert_(not (nan >= self.all_f16).any())
def KL(a, b):
    """Calculate the Kullback-Leibler divergence between a and b."""
    D_KL = np.nansum(np.multiply(a, np.log(np.divide(a, b + np.spacing(1)))), axis=1)
    return D_KL
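A hypothetical call of the KL helper above on a batch of row distributions (assumes numpy as np and KL are in scope; the distributions are made up). The np.spacing(1) in the denominator keeps the ratio finite when b has zero entries, and np.nansum drops the 0*log(0) terms from a.

import numpy as np

a = np.array([[0.5, 0.5, 0.0],
              [0.1, 0.2, 0.7]])
b = np.array([[0.4, 0.6, 0.0],
              [0.3, 0.3, 0.4]])
print(KL(a, b))   # one non-negative divergence per row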
def calc_information(probTgivenXs, PYgivenTs, PXs, PYs):
    """Calculate the MI - I(X;T) and I(Y;T)"""
    PTs = np.nansum(probTgivenXs*PXs, axis=1)
    Ht = np.nansum(-np.dot(PTs, np.log2(PTs)))
    Htx = -np.nansum((np.dot(np.multiply(probTgivenXs, np.log2(probTgivenXs)), PXs)))
    Hyt = -np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs + np.spacing(1)), PTs))
    Hy = np.nansum(-PYs * np.log2(PYs + np.spacing(1)))
    IYT = Hy - Hyt
    ITX = Ht - Htx
    return ITX, IYT
def calc_information_1(probTgivenXs, PYgivenTs, PXs, PYs, PTs):
    """Calculate the MI - I(X;T) and I(Y;T)"""
    # PTs = np.nansum(probTgivenXs*PXs, axis=1)
    Ht = np.nansum(-np.dot(PTs, np.log2(PTs + np.spacing(1))))
    Htx = -np.nansum((np.dot(np.multiply(probTgivenXs, np.log2(probTgivenXs + np.spacing(1))), PXs)))
    Hyt = -np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs + np.spacing(1)), PTs))
    Hy = np.nansum(-PYs * np.log2(PYs + np.spacing(1)))
    IYT = Hy - Hyt
    ITX = Ht - Htx
    return ITX, IYT
def calc_information(probTgivenXs, PYgivenTs, PXs, PYs, PTs):
    """Calculate the MI - I(X;T) and I(Y;T)"""
    # PTs = np.nansum(probTgivenXs*PXs, axis=1)
    t_indeces = np.nonzero(PTs)
    Ht = np.nansum(-np.dot(PTs, np.log2(PTs + np.spacing(1))))
    Htx = -np.nansum((np.dot(np.multiply(probTgivenXs, np.log2(probTgivenXs)), PXs)))
    Hyt = -np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs + np.spacing(1)), PTs))
    Hy = np.nansum(-PYs * np.log2(PYs + np.spacing(1)))
    IYT = Hy - Hyt
    ITX = Ht - Htx
    return ITX, IYT
def t_calc_information(p_x_given_t, PYgivenTs, PXs, PYs, PTs):
    """Calculate the MI - I(X;T) and I(Y;T)"""
    # PTs (the marginal over T) is used below, so it is taken as an argument here
    Hx = np.nansum(-np.dot(PXs, np.log2(PXs)))
    Hxt = -np.nansum((np.dot(np.multiply(p_x_given_t, np.log2(p_x_given_t)), PXs)))
    Hyt = -np.nansum(np.dot(PYgivenTs*np.log2(PYgivenTs + np.spacing(1)), PTs))
    Hy = np.nansum(-PYs * np.log2(PYs + np.spacing(1)))
    IYT = Hy - Hyt
    ITX = Hx - Hxt
    return ITX, IYT
def barycentric_coordinates_of_projection(p, q, u, v):
    """ Given a point, gives projected coords of that point to a triangle
    in barycentric coordinates.

    See Heidrich, Computing the Barycentric Coordinates of a Projected Point, JGT 05
    at http://www.cs.ubc.ca/~heidrich/Papers/JGT.05.pdf

    Args:
        p: point to project
        q: a vertex of the triangle to project into
        u,v: edges of the triangle such that it has vertices q, q+u, q+v

    Returns:
        b: barycentric coordinates of p's projection in the triangle defined by q, u, v
            vectorized so p, q, u, v can all be 3xN
    """
    p = p.T
    q = q.T
    u = u.T
    v = v.T
    n = np.cross(u, v, axis=0)
    s = np.sum(n*n, axis=0)
    # If the triangle edges are collinear, cross-product is zero,
    # which makes "s" 0, which gives us divide by zero. So we
    # make the arbitrary choice to set s to epsv (=numpy.spacing(1)),
    # the closest thing to zero
    if np.isscalar(s):
        s = s if s else np.spacing(1)
    else:
        s[s == 0] = np.spacing(1)
    oneOver4ASquared = 1.0 / s
    w = p - q
    b2 = np.sum(np.cross(u, w, axis=0) * n, axis=0) * oneOver4ASquared
    b1 = np.sum(np.cross(w, v, axis=0) * n, axis=0) * oneOver4ASquared
    b = np.vstack((1 - b1 - b2, b1, b2))
    return b.T
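A hypothetical sanity check for the projection helper above (assumes numpy as np and the function are in scope; the triangle and point are made up). Since the code transposes its inputs, this sketch passes one point per row; a point that already lies in the triangle's plane should be recovered as q + b1*u + b2*v.

import numpy as np

q = np.array([[0.0, 0.0, 0.0]])    # a triangle vertex (row vector)
u = np.array([[1.0, 0.0, 0.0]])    # first edge
v = np.array([[0.0, 1.0, 0.0]])    # second edge
p = np.array([[0.25, 0.25, 0.0]])  # point already in the triangle's plane
b = barycentric_coordinates_of_projection(p, q, u, v)
print(b)   # approximately [[0.5, 0.25, 0.25]], i.e. p == q + 0.25*u + 0.25*v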
def log_likelihood(y, yhat):
    '''Helper function to compute the log likelihood.'''
    eps = np.spacing(1)
    return np.nansum(y * np.log(eps + yhat) - yhat)
def rel_entropy_true(p, q):
    """KL divergence (relative entropy) D(p||q) in bits

    Returns a scalar entropy when the input distributions p and q are
    vectors of probability masses, or returns in a row vector the
    columnwise relative entropies of the input probability matrices p and
    q"""
    if type(p) == list or type(p) == tuple:
        p = np.array(p)
    if type(q) == list or type(q) == tuple:
        q = np.array(q)
    if not p.shape == q.shape:
        raise Exception('p and q must be equal sizes',
                        'p: ' + str(p.shape),
                        'q: ' + str(q.shape))
    if (np.iscomplex(p).any() or not
            np.isfinite(p).all() or
            (p < 0).any() or
            (p > 1).any()):
        raise Exception('The probability elements of p must be real numbers '
                        'between 0 and 1.')
    if (np.iscomplex(q).any() or not
            np.isfinite(q).all() or
            (q < 0).any() or
            (q > 1).any()):
        raise Exception('The probability elements of q must be real numbers '
                        'between 0 and 1.')
    eps = math.sqrt(np.spacing(1))
    if (np.abs(np.sum(p, axis=0) - 1) > eps).any():
        raise Exception('Sum of the probability elements of p must equal 1.')
    if (np.abs(np.sum(q, axis=0) - 1) > eps).any():
        raise Exception('Sum of the probability elements of q must equal 1.')
    return sum(np.log2((p**p) / (q**p)))
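A hypothetical call of rel_entropy_true above (assumes numpy as np, math, and the function are in scope; the distributions are made up). Each probability vector must sum to 1 within sqrt(np.spacing(1)), the tolerance used inside the function.

p = [0.5, 0.25, 0.25]
q = [0.25, 0.5, 0.25]
print(rel_entropy_true(p, q))   # D(p||q) in bits, approximately 0.25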
def check_for_dark_states(nb, Es):
    """Check for dark states and emit a warning if any are found."""
    dark_state_indices = np.where(np.abs(np.imag(Es)) < 10 * np.spacing(1))
    if len(dark_state_indices[0]) == 0:
        return
    import warnings
    warnings.warn('The {} block contains {} dark state(s) with generalized eigenenergy(ies): {}'.format(
        nb, len(dark_state_indices[0]), Es[dark_state_indices]))
def _ulps_away(value1, value2, num_bits=1):
    r"""Determines if ``value1`` is within ``n`` ULPs of ``value2``.

    Uses ``np.spacing`` to determine the unit of least precision (ULP)
    for ``value1`` and then checks that the difference between the values
    does not exceed ``n`` ULPs.

    When ``value1 == 0`` or ``value2 == 0``, we instead check that the other
    is less than :math:`2^{-40}` (``_EPS``) in magnitude.

    .. note::

        There is also a Fortran implementation of this function, which
        will be used if it can be built.

    Args:
        value1 (float): The first value being compared.
        value2 (float): The second value being compared.
        num_bits (Optional[int]): The number of bits allowed to differ.
            Defaults to ``1``.

    Returns:
        bool: Predicate indicating if the values agree to ``n`` bits.
    """
    if value1 == 0.0:
        return abs(value2) < _EPS
    elif value2 == 0.0:
        return abs(value1) < _EPS
    else:
        local_epsilon = np.spacing(value1)  # pylint: disable=no-member
        return abs(value1 - value2) <= num_bits * abs(local_epsilon)
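A hypothetical check of the _ulps_away helper above (assumes numpy as np, the function, and its module-level _EPS constant are in scope; the inputs are made up and nonzero, so _EPS is never consulted here).

import numpy as np

x = 1.0
print(_ulps_away(x, x + np.spacing(x)))                     # True: exactly one ULP apart
print(_ulps_away(x, x + 10 * np.spacing(x)))                # False with the default num_bits=1
print(_ulps_away(x, x + 10 * np.spacing(x), num_bits=16))   # True: within 16 ULPs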
def two_by_two_det(mat):
    r"""Compute the determinant of a 2x2 matrix.

    .. note::

        This is used **only** by :func:`quadratic_jacobian_polynomial` and
        :func:`cubic_jacobian_polynomial`.

    This is "needed" because :func:`numpy.linalg.det` uses a more generic
    determinant implementation which can introduce rounding even when the
    simple :math:`a d - b c` will suffice. For example:

    .. doctest:: 2-by-2

        >>> import numpy as np
        >>> mat = np.asfortranarray([
        ...     [-1.5   , 0.1875],
        ...     [-1.6875, 0.0   ],
        ... ])
        >>> actual_det = -mat[0, 1] * mat[1, 0]
        >>> np_det = np.linalg.det(mat)
        >>> np.abs(actual_det - np_det) == np.spacing(actual_det)
        True

    Args:
        mat (numpy.ndarray): A 2x2 matrix.

    Returns:
        float: The determinant of ``mat``.
    """
    return mat[0, 0] * mat[1, 1] - mat[0, 1] * mat[1, 0]
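A hypothetical use of two_by_two_det above, mirroring its doctest (assumes numpy as np and the function are in scope). The direct a*d - b*c result is exact here, while the generic LU-based determinant may land a ULP away.

import numpy as np

mat = np.asfortranarray([[-1.5, 0.1875],
                         [-1.6875, 0.0]])
print(two_by_two_det(mat))   # 0.31640625, the exact a*d - b*c
print(np.linalg.det(mat))    # may differ from the exact value by about one ULP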
def angvec2r(theta, v):
    """
    ANGVEC2R(THETA, V) is an orthonormal rotation matrix (3x3)
    equivalent to a rotation of THETA about the vector V.

    :param theta: rotation in radians
    :param v: vector
    :return: rotation matrix

    Notes::
    - If THETA == 0 then return identity matrix.
    - If THETA ~= 0 then V must have a finite length.
    """
    if np.isscalar(theta) is False or common.isvec(v) is False:
        raise AttributeError("Arguments must be theta and vector")
    # TODO implement ISA
    elif np.linalg.norm(v) < 10 * np.spacing([1])[0]:
        if False:
            raise AttributeError("Bad arguments")
        else:
            return np.eye(3)
    sk = skew(np.matrix(unitize(v)))
    m = np.eye(3) + np.sin(theta) * sk + (1 - np.cos(theta)) * sk * sk
    return m
# ---------------------------------------------------------------------------------------#
def so2_valid(obj):
    assert type(obj) is pose.SO2
    for each in obj:
        assert each.shape == (2, 2)
        assert abs(np.linalg.det(each) - 1) < np.spacing([1])[0]
def ishomog(tr, dim, rtest=''):
    """ISHOMOG Test if SE(3) homogeneous transformation matrix.
    ISHOMOG(T) is true if the argument T is of dimension 4x4 or 4x4xN, else false.
    ISHOMOG(T, 'valid') as above, but also checks the validity of the rotation sub-matrix.

    See Also: isrot, ishomog2, isvec
    """
    assert dim == [3, 3] or dim == [4, 4]
    is_valid = None
    if rtest == 'valid':
        is_valid = lambda matrix: abs(np.linalg.det(matrix) - 1) < np.spacing([1])[0]
    flag = True
    if check_args.is_mat_list(tr):
        for matrix in tr:
            if not (matrix.shape[0] == dim[0] and matrix.shape[1] == dim[1]):
                flag = False
        # if rtest == 'valid'
        if flag and rtest == 'valid':
            flag = is_valid(tr[0])  # as in the MATLAB code, only the first matrix is checked for validity
            # TODO - should all matrices in the list be tested for a valid rotation submatrix? -- Yes
    elif isinstance(tr, np.matrix):
        if tr.shape[0] == dim[0] and tr.shape[1] == dim[1]:
            if flag and rtest == 'valid':
                flag = is_valid(tr)
        else:
            flag = False
    else:
        raise ValueError('Invalid data type passed to common.ishomog()')
    return flag
def SID_classifier(M, E, threshold):
    """
    Classify a HSI cube M using the spectral information divergence
    and a spectral library E.
    This function is part of the NormXCorr class.

    Parameters:
        M : numpy array
            a HSI cube ((m*n) x p)
        E : numpy array
            a spectral library (N x p)

    Returns : numpy array
        a class map ((m*n))
    """
    def prob_vector_array(m):
        pv_array = np.ndarray(shape=m.shape, dtype=np.float32)
        sum_m = np.sum(m, axis=1)
        pv_array[:] = (m.T / sum_m).T
        return pv_array + np.spacing(1)

    mn = M.shape[0]
    N = E.shape[0]
    p = prob_vector_array(M)
    q = prob_vector_array(E)
    sid = np.ndarray((mn, N), dtype=float)
    for i in range(mn):
        pq = q[0:, :] * np.log(q[0:, :] / p[i, :])
        pp = p[i, :] * np.log(p[i, :] / q[0:, :])
        sid[i, :] = np.sum(pp[0:, :] + pq[0:, :], axis=1)
    if isinstance(threshold, float):
        cmap = _single_value_min(sid, threshold)
    elif isinstance(threshold, list):
        cmap = _multiple_values_min(sid, threshold)
    else:
        return np.argmin(sid, axis=1), sid
    return cmap, sid
def isStochastic(matrix):
    """Check that ``matrix`` is row stochastic.

    Returns
    =======
    is_stochastic : bool
        ``True`` if ``matrix`` is row stochastic, ``False`` otherwise.
    """
    try:
        absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0])))
    except AttributeError:
        matrix = _np.array(matrix)
        absdiff = (_np.abs(matrix.sum(axis=1) - _np.ones(matrix.shape[0])))
    return (absdiff.max() <= 10*_np.spacing(_np.float64(1)))
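A hypothetical check of isStochastic above (assumes the function and the `import numpy as _np` it relies on are in scope; the matrices are made up). Row sums are compared to 1 with a tolerance of ten ULPs.

print(isStochastic([[0.5, 0.5],
                    [0.1, 0.9]]))   # True: every row sums to 1 within 10 ULPs
print(isStochastic([[0.5, 0.6],
                    [0.1, 0.9]]))   # False: the first row sums to 1.1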
def pinv(x):
    # return np.linalg.pinv(x, max(x.shape) * np.spacing(np.linalg.norm(x)))
    # return scipy.linalg.pinv(x, max(x.shape) * np.spacing(np.linalg.norm(x)))
    return scipy.linalg.pinv(x, 1e-6)
def weights(self, nlags):
    """Evenly-spaced beta weights."""
    eps = np.spacing(1)
    u = np.linspace(eps, 1.0 - eps, nlags)
    beta_vals = u ** (self.theta1 - 1) * (1 - u) ** (self.theta2 - 1)
    beta_vals = beta_vals / sum(beta_vals)
    if self.theta3 is not None:
        w = beta_vals + self.theta3
        return w / sum(w)
    return beta_vals
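A hypothetical use of the beta-lag weights method above (assumes the function and its numpy import are in scope). In the original project this is a method of a class carrying theta1, theta2, theta3; here a SimpleNamespace stands in for self. The eps = np.spacing(1) endpoints keep the grid strictly inside (0, 1) so 0 is never raised to a negative power.

from types import SimpleNamespace

params = SimpleNamespace(theta1=1.0, theta2=5.0, theta3=None)  # hypothetical parameter values
w = weights(params, nlags=12)
print(w.sum())        # approximately 1.0: the beta-shaped lag weights are normalized
print(w[0] > w[-1])   # True for theta1=1, theta2=5: weight decays with increasing lag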