Python numpy module: outer() code examples
The code examples below, extracted from open-source Python projects, illustrate how numpy.outer() is used in practice.
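Before the project excerpts, here is a minimal, self-contained illustration (written for this page, not taken from any of the projects below) of what numpy.outer() computes: for vectors a and b it returns the matrix whose (i, j) entry is a[i] * b[j].

import numpy as np

a = np.array([1, 2, 3])
b = np.array([10, 20])
M = np.outer(a, b)
print(M)
# [[10 20]
#  [20 40]
#  [30 60]]
print(M.shape)                                  # (3, 2): len(a) rows, len(b) columns
print(np.allclose(M, a[:, None] * b[None, :]))  # equivalent broadcasting form: True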
def treegauss_remove_row(
        data_row,
        tree_grid,
        latent_row,
        vert_ss,
        edge_ss,
        feat_ss, ):
    # Update sufficient statistics.
    for v in range(latent_row.shape[0]):
        z = latent_row[v, :]
        vert_ss[v, :, :] -= np.outer(z, z)
    for e in range(tree_grid.shape[1]):
        z1 = latent_row[tree_grid[1, e], :]
        z2 = latent_row[tree_grid[2, e], :]
        edge_ss[e, :, :] -= np.outer(z1, z2)
    for v, x in enumerate(data_row):
        if np.isnan(x):
            continue
        z = latent_row[v, :]
        feat_ss[v, 0] -= 1
        feat_ss[v, 1] -= x
        feat_ss[v, 2:] -= x * z  # TODO Use central covariance.
def getTrainTestKernel(self, params, Xtest):
    self.checkParams(params)
    ell = np.exp(params[0])
    p = np.exp(params[1])

    Xtest_scaled = Xtest / np.sqrt(Xtest.shape[1])
    d2 = sq_dist(self.X_scaled.T/ell, Xtest_scaled.T/ell)  # precompute squared distances

    # compute dp
    dp = np.zeros(d2.shape)
    for d in xrange(self.X_scaled.shape[1]):
        dp += (np.outer(self.X_scaled[:, d], np.ones((1, Xtest_scaled.shape[0]))) -
               np.outer(np.ones((self.X_scaled.shape[0], 1)), Xtest_scaled[:, d]))
    dp /= p

    K = np.exp(-d2 / 2.0)
    return np.cos(2*np.pi*dp)*K
def reflection_matrix(point, normal):
    """Return matrix to mirror at plane defined by point and normal vector.

    >>> v0 = numpy.random.random(4) - 0.5
    >>> v0[3] = 1.
    >>> v1 = numpy.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> numpy.allclose(2, numpy.trace(R))
    True
    >>> numpy.allclose(v0, numpy.dot(R, v0))
    True
    >>> v2 = v0.copy()
    >>> v2[:3] += v1
    >>> v3 = v0.copy()
    >>> v3[:3] -= v1
    >>> numpy.allclose(v2, numpy.dot(R, v3))
    True

    """
    normal = unit_vector(normal[:3])
    M = numpy.identity(4)
    M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
    M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
    return M
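The rank-one update `M[:3, :3] -= 2.0 * numpy.outer(normal, normal)` is the Householder construction I - 2 n nᵀ. A small standalone sketch (illustrative only, independent of the transformations module above) shows that such a matrix is its own inverse and flips the component along the normal.

import numpy as np

n = np.array([0.0, 0.0, 1.0])            # unit normal of the xy-plane
H = np.eye(3) - 2.0 * np.outer(n, n)     # Householder reflection I - 2 n n^T
print(H.dot([1.0, 2.0, 3.0]))            # [ 1.  2. -3.]: z component is mirrored
print(np.allclose(H.dot(H), np.eye(3)))  # reflecting twice restores the identity: True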
def unscentedTransform(X, Wm, Wc, f):
    Y = None
    Ymean = None
    fdim = None
    N = np.shape(X)[1]
    for j in range(0, N):
        fImage = f(X[:, j])
        if Y is None:
            fdim = np.size(fImage)
            Y = np.zeros((fdim, np.shape(X)[1]))
            Ymean = np.zeros(fdim)
        Y[:, j] = fImage
        Ymean += Wm[j] * Y[:, j]
    Ycov = np.zeros((fdim, fdim))
    for j in range(0, N):
        meanAdjustedYj = Y[:, j] - Ymean
        Ycov += np.outer(Wc[j] * meanAdjustedYj, meanAdjustedYj)
    return Y, Ymean, Ycov
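As a quick sanity check (a hypothetical usage sketch, not part of the original project), the function above can be exercised with a minimal one-dimensional sigma-point set: two points at ±1 with equal weights represent a zero-mean, unit-variance input, and pushing them through an affine map should reproduce the transformed mean and variance.

import numpy as np

# Hypothetical example: 1-D sigma points at -1 and +1 with equal weights
# (mean 0, variance 1), propagated through an affine map f(x) = 2x + 1.
X = np.array([[-1.0, 1.0]])      # shape (1, N) with N = 2 sigma points
Wm = Wc = np.array([0.5, 0.5])   # mean and covariance weights
f = lambda x: 2.0 * x + 1.0

Y, Ymean, Ycov = unscentedTransform(X, Wm, Wc, f)
print(Ymean)   # approximately [1.]
print(Ycov)    # approximately [[4.]]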
def estimate_params(self, ess):
    """
    Estimate Normal parameters,
    given expected sufficient stats
    """
    n = len(ess)
    mu = np.asarray([0, 0])
    for (m, c) in ess:
        mu = mu + m
    mu = mu * 1.0 / n
    C = np.asarray([[0, 0], [0, 0]])
    for (m, c) in ess:
        C = C + c
    C = C * 1.0 / n
    C = C - np.outer(mu, mu)
    if not self.full_cov:
        C = reduce_cov(C)
    return (mu, C)
def compute_pvalues_for_processes(self, U_matrix, chane_prob, num_bootstrapped_stats=100):
    N = U_matrix.shape[0]
    bootsraped_stats = np.zeros(num_bootstrapped_stats)
    # orsetinW = simulate(N, num_bootstrapped_stats, corr)
    for proc in range(num_bootstrapped_stats):
        # W = np.sign(orsetinW[:, proc])
        W = simulatepm(N, chane_prob)
        WW = np.outer(W, W)
        st = np.mean(U_matrix * WW)
        bootsraped_stats[proc] = N * st
    stat = N * np.mean(U_matrix)
    return float(np.sum(bootsraped_stats > stat)) / num_bootstrapped_stats
def genSphCoords():
    """Generates cartesian (x, y, z) and spherical (theta, phi) coordinates of a sphere

    Returns
    -------
    coords : named tuple
        holds cartesian (x, y, z) and spherical (theta, phi) coordinates
    """
    coords = namedtuple('coords', ['x', 'y', 'z', 'az', 'el'])
    az = _np.linspace(0, 2 * _np.pi, 360)
    el = _np.linspace(0, _np.pi, 181)
    coords.x = _np.outer(_np.cos(az), _np.sin(el))
    coords.y = _np.outer(_np.sin(az), _np.sin(el))
    coords.z = _np.outer(_np.ones(360), _np.cos(el))

    coords.el, coords.az = _np.meshgrid(_np.linspace(0, _np.pi, 181),
                                        _np.linspace(0, 2 * _np.pi, 360))
    return coords
def _eig_local_op_mps(lv, ltens, rv):
    """Local operator contribution from an MPS"""
    # MPS 1 / ltens: Interpreted as |psiXpsi| part of the operator
    # MPS 2: The current eigenvector candidate
    op = lv.T
    # op axes: 0: mps2 bond, 1: mps1 bond
    s = op.shape
    op = op.reshape((s[0], 1, s[1]))
    # op axes: 0: mps2 bond, 1: physical legs, 2: mps1 bond
    for lt in ltens:
        # op axes: 0: mps2 bond, 1: physical legs, 2: mps1 bond
        op = np.tensordot(op, lt.conj(), axes=(2, 0))
        # op axes: 0: mps2 bond, 1, 2: physical legs, 3: mps1 bond
        s = op.shape
        op = op.reshape((s[0], -1, s[3]))
        # op axes: 0: mps2 bond, 1: physical legs, 2: mps1 bond
    op = np.tensordot(op, rv, axes=(2, 0))
    # op axes: 0: mps2 bond, 1: physical legs, 2: mps2 bond
    op = np.outer(op.conj(), op)
    # op axes:
    # 0: (0a: left cc mps2 bond, 0b: physical row leg, 0c: right cc mps2 bond),
    # 1: (1a: left mps2 bond, 1b: physical column leg, 1c: right mps2 bond)
    return op
def test_mps_to_mpo(nr_sites, local_dim, rank, rgen):
    mps = factory.random_mps(nr_sites, local_dim, rank, randstate=rgen)
    # Instead of calling the two functions, we call mps_to_mpo(),
    # which does exactly that:
    #   mps_as_puri = mp.mps_as_local_purification_mps(mps)
    #   mpo = mp.pmps_to_mpo(mps_as_puri)
    mpo = mm.mps_to_mpo(mps)
    # This is also a test of mp.mps_as_local_purification_mps() in the
    # following sense: Local purifications are representations of
    # mixed states. Therefore, compare mps and mps_as_puri by
    # converting them to mixed states.
    state = mps.to_array()
    state = np.outer(state, state.conj())
    state.shape = (local_dim,) * (2 * nr_sites)
    state2 = mpo.to_array_global()
    assert_array_almost_equal(state, state2)
def vol(self):
    """Construct cell volumes of the 3D model as 1d array."""
    if getattr(self, '_vol', None) is None:
        vh = self.h
        # Compute cell volumes
        if self.dim == 1:
            self._vol = utils.mkvc(vh[0])
        elif self.dim == 2:
            # Cell sizes in each direction
            self._vol = utils.mkvc(np.outer(vh[0], vh[1]))
        elif self.dim == 3:
            # Cell sizes in each direction
            self._vol = utils.mkvc(
                np.outer(utils.mkvc(np.outer(vh[0], vh[1])), vh[2])
            )
    return self._vol
def test_minimummaximum_func(self):
    a = np.ones((2, 2))
    aminimum = minimum(a, a)
    self.assertTrue(isinstance(aminimum, MaskedArray))
    assert_equal(aminimum, np.minimum(a, a))

    aminimum = minimum.outer(a, a)
    self.assertTrue(isinstance(aminimum, MaskedArray))
    assert_equal(aminimum, np.minimum.outer(a, a))

    amaximum = maximum(a, a)
    self.assertTrue(isinstance(amaximum, MaskedArray))
    assert_equal(amaximum, np.maximum(a, a))

    amaximum = maximum.outer(a, a)
    self.assertTrue(isinstance(amaximum, MaskedArray))
    assert_equal(amaximum, np.maximum.outer(a, a))
def test_TakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
    assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
    assert_equal(np.inner(filled(x, 0), filled(y, 0)),
                 inner(x, y))
    assert_equal(np.outer(filled(x, 0), filled(y, 0)),
                 outer(x, y))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_testTakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
    assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
    assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
               inner(x, y)))
    assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
               outer(x, y)))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_4(self):
    """
    Test of take, transpose, inner, outer products.
    """
    x = self.arange(24)
    y = np.arange(24)
    x[5:6] = self.masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
    assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
    assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
                         self.inner(x, y))
    assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
                         self.outer(x, y))
    y = self.array(['abc', 1, 'def', 2, 3], object)
    y[2] = self.masked
    t = self.take(y, [0, 3, 4])
    assert t[0] == 'abc'
    assert t[1] == 2
    assert t[2] == 3
def outer(self, a, b):
    """
    Return the function applied to the outer product of a and b.
    """
    (da, db) = (getdata(a), getdata(b))
    d = self.f.outer(da, db)
    ma = getmask(a)
    mb = getmask(b)
    if ma is nomask and mb is nomask:
        m = nomask
    else:
        ma = getmaskarray(a)
        mb = getmaskarray(b)
        m = umath.logical_or.outer(ma, mb)
    if (not m.ndim) and m:
        return masked
    if m is not nomask:
        np.copyto(d, da, where=m)
    if not d.shape:
        return d
    masked_d = d.view(get_masked_subclass(a, b))
    masked_d._mask = m
    return masked_d
def update_kl_loss(p, lambdas, T, Cs):
    """
    Updates C according to the KL Loss kernel with the S Ts couplings calculated at each iteration

    Parameters
    ----------
    p : ndarray, shape (N,)
        weights in the targeted barycenter
    lambdas : list of the S spaces' weights
    T : list of S np.ndarray(ns, N)
        the S Ts couplings calculated at each iteration
    Cs : list of S ndarray, shape (ns, ns)
        Metric cost matrices

    Returns
    ----------
    C : ndarray, shape (ns, ns)
        updated C matrix
    """
    tmpsum = sum([lambdas[s] * np.dot(T[s].T, Cs[s]).dot(T[s])
                  for s in range(len(T))])
    ppt = np.outer(p, p)
    return np.exp(np.divide(tmpsum, ppt))
def __init__(self, n):
    self.degree = 2*n - 2
    a, A = numpy.polynomial.legendre.leggauss(n)
    w = numpy.outer((1 + a) * A, A)
    x = numpy.outer(1 - a, numpy.ones(n)) / 2
    y = numpy.outer(1 + a, 1 - a) / 4
    self.weights = w.reshape(-1) / 4
    self.points = numpy.stack([x.reshape(-1), y.reshape(-1)]).T
    self.bary = numpy.array([
        self.points[:, 0],
        self.points[:, 1],
        1 - numpy.sum(self.points, axis=1)
        ]).T
    return
def rotation_matrix(u, theta):
    '''Return matrix that implements the rotation around the vector :math:`u`
    by the angle :math:`\\theta`, cf.
    https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle.

    :param u: rotation vector
    :param theta: rotation angle
    '''
    # Cross-product matrix.
    cpm = numpy.array([[0.0, -u[2], u[1]],
                       [u[2], 0.0, -u[0]],
                       [-u[1], u[0], 0.0]])
    c = numpy.cos(theta)
    s = numpy.sin(theta)
    R = numpy.eye(3) * c \
        + s * cpm \
        + (1.0 - c) * numpy.outer(u, u)
    return R
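A quick check of the Rodrigues formula above (an illustrative sketch, not part of the original project): rotating the x axis by 90 degrees about the z axis should yield the y axis, assuming `u` is already a unit vector.

import numpy

R = rotation_matrix([0.0, 0.0, 1.0], numpy.pi / 2)
print(numpy.round(R.dot([1.0, 0.0, 0.0]), 6))    # expected: [0. 1. 0.]
print(numpy.allclose(R.dot(R.T), numpy.eye(3)))  # rotation matrices are orthogonal: True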
def test_basic(self):
    vecs = [[1, 2, 3],
            [2],
            np.array([1, 2, 3]).reshape(1, -1),
            np.array([1, 2, 3]).reshape(-1, 1)]
    num_ones_list = [4, 1]
    for vec in vecs:
        for num_ones in num_ones_list:
            A = OnesOuterVec(num_ones=num_ones, vec=vec)
            M = np.outer([1] * num_ones, vec)
            V, v1, v2, U, u1, u2 = get_tst_mats(M.shape)
            assert_allclose(A.dot(V), M.dot(V))
            assert_allclose(A.dot(v1), M.dot(v1))
            assert_allclose(A.dot(v2), M.dot(v2))
            assert_allclose(A.T.dot(U), M.T.dot(U))
            assert_allclose(A.T.dot(u1), M.T.dot(u1))
            assert_allclose(A.T.dot(u2), M.T.dot(u2))
def test_basic(self):
    shapes = [(50, 20), (1, 20), (50, 1)]
    # sparse
    for shape in shapes:
        mats = self.get_Xs(shape)
        m = mats[0].mean(axis=0).A1
        ones = np.ones(shape[0])
        M = mats[0].toarray() - np.outer(ones, m)
        for X in mats:
            A = col_mean_centered(X)
            V, v1, v2, U, u1, u2 = get_tst_mats(M.shape)
            assert_almost_equal(A.dot(V), M.dot(V))
            assert_almost_equal(A.dot(v1), M.dot(v1))
            assert_almost_equal(A.dot(v2), M.dot(v2))
            assert_almost_equal(A.T.dot(U), M.T.dot(U))
            assert_almost_equal(A.T.dot(u1), M.T.dot(u1))
            assert_almost_equal(A.T.dot(u2), M.T.dot(u2))
def rotation_matrix(axis, angle):
    """
    Calculate a three dimensional rotation matrix for a rotation around
    the given angle and axis.

    @type axis: (3,) numpy array
    @param angle: angle in radians
    @type angle: float

    @rtype: (3,3) numpy.array
    """
    axis = numpy.asfarray(axis) / norm(axis)
    assert axis.shape == (3,)

    c = math.cos(angle)
    s = math.sin(angle)

    r = (1.0 - c) * numpy.outer(axis, axis)
    r.flat[[0, 4, 8]] += c
    r.flat[[5, 6, 1]] += s * axis
    r.flat[[7, 2, 3]] -= s * axis

    return r
def gower_matrix(X):
    """
    Gower, J.C. (1966). Some distance properties of latent root
    and vector methods used in multivariate analysis.
    Biometrika 53: 325-338

    @param X: ensemble coordinates
    @type X: (m,n,k) numpy.array

    @return: symmetric dissimilarity matrix
    @rtype: (n,n) numpy.array
    """
    X = numpy.asarray(X)

    B = sum(numpy.dot(x, x.T) for x in X) / float(len(X))
    b = B.mean(1)
    bb = b.mean()

    return (B - numpy.add.outer(b, b)) + bb
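The `numpy.add.outer(b, b)` call above broadcasts the row means into a full matrix so the Gower centering B_ij - b_i - b_j + bb can be written in one expression. A small illustrative check (hypothetical data, not from the original project) is that the rows of the centered matrix average to zero and the result stays symmetric.

import numpy

# Hypothetical ensemble: m=5 structures, n=4 points, k=3 coordinates.
X = numpy.random.rand(5, 4, 3)
G = gower_matrix(X)

print(G.shape)                              # (4, 4)
print(numpy.allclose(G.mean(axis=1), 0.0))  # double centering: True
print(numpy.allclose(G, G.T))               # symmetric: True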
def nextfastpower(n):
    """Return the next integral power of small factors greater than the given
    number.  Specifically, return m such that
        m >= n
        m == 2**x * 3**y * 5**z
    where x, y, and z are integers.
    This is useful for ensuring fast FFT sizes.
    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
    """
    if n < 7:
        return max(n, 1)
    # x, y, and z are all bounded from above by the formula of nextpower.
    # Compute all possible combinations for powers of 3 and 5.
    # (Not too many for reasonable FFT sizes.)
    def power_series(x, base):
        nmax = ceil(log(x) / log(base))
        return np.logspace(0.0, nmax, num=nmax+1, base=base)
    n35 = np.outer(power_series(n, 3.0), power_series(n, 5.0))
    n35 = n35[n35 <= n]
    # Lump the powers of 3 and 5 together and solve for the powers of 2.
    n2 = nextpower(n / n35)
    return int(min(n2 * n35))
def ests_ll_quad(self, params):
    """
    Calculate the loglikelihood given model parameters `params`.

    This method uses Gaussian quadrature, and thus returns an *approximate*
    integral.
    """
    mu0, gamma0, err0 = np.split(params, 3)
    x = np.tile(self.z, (self.cfg.QCOUNT, 1, 1))  # (QCOUNT x nhosp x nmeas)
    loc = mu0 + np.outer(QC1, gamma0)
    loc = np.tile(loc, (self.n, 1, 1))
    loc = np.transpose(loc, (1, 0, 2))
    scale = np.tile(err0, (self.cfg.QCOUNT, self.n, 1))
    zs = lpdf_3d(x=x, loc=loc, scale=scale)

    w2 = np.tile(self.w, (self.cfg.QCOUNT, 1, 1))
    wted = np.nansum(w2 * zs, axis=2).T  # (nhosp x QCOUNT)
    qh = np.tile(QC1, (self.n, 1))  # (nhosp x QCOUNT)
    combined = wted + norm.logpdf(qh)  # (nhosp x QCOUNT)

    return logsumexp(np.nan_to_num(combined), b=QC2, axis=1)  # (nhosp)
def forward_prop_random_thru_post_mm(self, model, mx, vx, mu, Su):
    Kuu_noiseless = compute_kernel(
        2 * model.ls, 2 * model.sf, model.zu, model.zu)
    Kuu = Kuu_noiseless + np.diag(jitter * np.ones((self.M, )))
    # TODO: remove inv
    Kuuinv = np.linalg.inv(Kuu)
    A = np.dot(Kuuinv, mu)
    Smm = Su + np.outer(mu, mu)
    B_sto = np.dot(Kuuinv, np.dot(Smm, Kuuinv)) - Kuuinv
    psi0 = np.exp(2.0 * model.sf)
    psi1, psi2 = compute_psi_weave(
        2 * model.ls, 2 * model.sf, mx, vx, model.zu)
    mout = np.einsum('nm,md->nd', psi1, A)
    Bpsi2 = np.einsum('ab,nab->n', B_sto, psi2)[:, np.newaxis]
    vout = psi0 + Bpsi2 - mout**2
    return mout, vout
def score(self, y):
    groups = numpy.unique(y)
    a = len(groups)
    Ntx = len(y)
    self.a_ = a
    self.Ntx_ = Ntx
    self._SST = (self.pairs_**2).sum() / (2 * Ntx)
    pattern = numpy.zeros((Ntx, Ntx))
    for g in groups:
        pattern += numpy.outer(y == g, y == g) / \
            (numpy.float(numpy.sum(y == g)))
    self._SSW = ((self.pairs_**2) * (pattern)).sum() / 2
    self._SSA = self._SST - self._SSW
    self._F = (self._SSA / (a - 1)) / (self._SSW / (Ntx - a))
    return self._F
#######################################################################
def outer(v1, v2=None):
    """
    Construct the outer product of two vectors.

    The second vector argument is optional, if absent the projector
    of the first vector will be returned.

    Args:
        v1 (ndarray): the first vector.
        v2 (ndarray): the (optional) second vector.

    Returns:
        The matrix |v1><v2|.
    """
    if v2 is None:
        u = np.array(v1).conj()
    else:
        u = np.array(v2).conj()
    return np.outer(v1, u)
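For instance (an illustrative sketch layered on the helper above, not taken from the original project), passing a single normalized state vector returns its projector |v1><v1|:

import numpy as np

plus = np.array([1.0, 1.0]) / np.sqrt(2)    # the |+> state
P = outer(plus)                             # projector |+><+|
print(np.allclose(P, np.array([[0.5, 0.5], [0.5, 0.5]])))  # True
print(np.allclose(P.dot(P), P))             # projectors are idempotent: True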
###############################################################
# Measures.
###############################################################
def concurrence(state):
    """Calculate the concurrence.

    Args:
        state (np.array): a quantum state

    Returns:
        concurrence.
    """
    rho = np.array(state)
    if rho.ndim == 1:
        rho = outer(state)
    if len(state) != 4:
        raise Exception("Concurrence is not defined for more than two qubits")

    YY = np.fliplr(np.diag([-1, 1, 1, -1]))
    A = rho.dot(YY).dot(rho.conj()).dot(YY)
    w = la.eigh(A, eigvals_only=True)
    w = np.sqrt(np.maximum(w, 0))
    return max(0.0, w[-1] - np.sum(w[0:-1]))
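As a quick illustration (a hypothetical usage sketch assuming the outer() helper above is in scope and scipy.linalg is imported as la), the maximally entangled Bell state has concurrence 1, while a product state has concurrence 0:

import numpy as np

bell = np.array([1.0, 0.0, 0.0, 1.0]) / np.sqrt(2)   # (|00> + |11>)/sqrt(2)
product = np.array([1.0, 0.0, 0.0, 0.0])             # |00>
print(round(concurrence(bell), 6))      # 1.0
print(round(concurrence(product), 6))   # 0.0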
###############################################################
# Other.
###############################################################
def _get_Smatrices(self, X, y):
    Sb = np.zeros((X.shape[1], X.shape[1]))

    S = np.inner(X.T, X.T)
    N = len(X)
    mu = np.mean(X, axis=0)
    classLabels = np.unique(y)
    for label in classLabels:
        classIdx = np.argwhere(y == label).T[0]
        Nl = len(classIdx)
        xL = X[classIdx]
        muL = np.mean(xL, axis=0)
        muLbar = muL - mu
        Sb = Sb + Nl * np.outer(muLbar, muLbar)
    Sbar = S - N * np.outer(mu, mu)
    Sw = Sbar - Sb
    self.mean_ = mu
    return (Sw, Sb)
def FOBI(X):
    """Fourth Order Blind Identification technique is used.
    The function returns the unmixing matrix.
    X is assumed to be centered and whitened.
    The paper by J. Cardoso is in itself the best resource out there for it:
    SOURCE SEPARATION USING HIGHER ORDER MOMENTS - Jean-Francois Cardoso"""
    rows = X.shape[0]
    n = X.shape[1]
    # Initializing the weighted covariance matrix which will hold the fourth order information
    weightedCovMatrix = np.zeros([rows, rows])
    # Approximating the expectation by dividing with the number of data points
    for signal in X.T:
        norm = np.linalg.norm(signal)
        weightedCovMatrix += norm * norm * np.outer(signal, signal)
    weightedCovMatrix /= n
    # Doing the eigen value decomposition
    eigValue, eigVector = np.linalg.eigh(weightedCovMatrix)
    # print eigVector
    return eigVector
def ksvd(Y, D, X, n_cycles=1, verbose=True):
    n_atoms = D.shape[1]
    n_features, n_samples = Y.shape
    unused_atoms = []
    R = Y - fast_dot(D, X)

    for c in range(n_cycles):
        for k in range(n_atoms):
            if verbose:
                sys.stdout.write("\r" + "k-svd..." + ":%3.2f%%" % ((k / float(n_atoms)) * 100))
                sys.stdout.flush()
            # find all the datapoints that use the kth atom
            omega_k = X[k, :] != 0
            if not np.any(omega_k):
                unused_atoms.append(k)
                continue
            # the residual due to all the other atoms but k
            Rk = R[:, omega_k] + np.outer(D[:, k], X[k, omega_k])
            U, S, V = randomized_svd(Rk, n_components=1, n_iter=10, flip_sign=False)
            D[:, k] = U[:, 0]
            X[k, omega_k] = V[0, :] * S[0]
            # update the residual
            R[:, omega_k] = Rk - np.outer(D[:, k], X[k, omega_k])
    print("")
    return D, X, unused_atoms
def direct_ionization_rate(self):
    """
    Calculate direct ionization rate in cm3/s

    Needs an equation reference or explanation
    """
    xgl, wgl = np.polynomial.laguerre.laggauss(12)
    kBT = const.k_B.cgs * self.temperature
    energy = np.outer(xgl, kBT) * kBT.unit + self.ip
    cross_section = self.direct_ionization_cross_section(energy)
    if cross_section is None:
        return None
    term1 = np.sqrt(8./np.pi/const.m_e.cgs) * np.sqrt(kBT) * np.exp(-self.ip/kBT)
    term2 = ((wgl*xgl)[:, np.newaxis] * cross_section).sum(axis=0)
    term3 = (wgl[:, np.newaxis] * cross_section).sum(axis=0) * self.ip / kBT
    return term1 * (term2 + term3)
def treegauss_add_row(
        data_row,
        tree_grid,
        program,
        latent_row,
        vert_ss,
        edge_ss,
        feat_ss, ):
    # Sample latent state using dynamic programming.
    TODO('https://github.com/posterior/treecat/issues/26')

    # Update sufficient statistics.
    for v in range(latent_row.shape[0]):
        z = latent_row[v, :]
        vert_ss[v, :, :] += np.outer(z, z)
    for e in range(tree_grid.shape[1]):
        z1 = latent_row[tree_grid[1, e], :]
        z2 = latent_row[tree_grid[2, e], :]
        edge_ss[e, :, :] += np.outer(z1, z2)
    for v, x in enumerate(data_row):
        if np.isnan(x):
            continue
        z = latent_row[v, :]
        feat_ss[v, 0] += 1
        feat_ss[v, 1] += x
        feat_ss[v, 2:] += x * z  # TODO Use central covariance.
def __init__(self, X):
    Kernel.__init__(self)
    self.X_scaled = X / np.sqrt(X.shape[1])
    if (X.shape[1] >= X.shape[0] or True): self.K_sq = sq_dist(self.X_scaled.T)
    else: self.K_sq = None

    # compute dp
    self.dp = np.zeros((X.shape[0], X.shape[0]))
    for d in xrange(self.X_scaled.shape[1]):
        self.dp += (np.outer(self.X_scaled[:, d], np.ones((1, self.X_scaled.shape[0]))) -
                    np.outer(np.ones((self.X_scaled.shape[0], 1)), self.X_scaled[:, d]))
def deriveKernel(self, params, i):
    self.checkParamsI(params, i)

    # find the relevant W
    numSNPs = self.X_scaled.shape[1]
    unitNum = i / numSNPs
    weightNum = i % numSNPs

    nnX_unitNum = self.applyNN(self.X_scaled, params, unitNum) / float(self.numUnits)
    w_deriv_relu = self.X_scaled[:, weightNum].copy()
    w_deriv_relu[nnX_unitNum <= 0] = 0
    K_deriv1 = np.outer(nnX_unitNum, w_deriv_relu)
    K_deriv = K_deriv1 + K_deriv1.T
    return K_deriv
def getTrainKernel(self, params):
    self.checkParams(params)
    if (self.sameParams(params)): return self.cache['getTrainKernel']

    ell2 = np.exp(2*params[0])
    sqrt_ell2PSx = np.sqrt(ell2 + self.sx)
    K = self.S / np.outer(sqrt_ell2PSx, sqrt_ell2PSx)
    self.cache['K'] = K
    K_arcsin = np.arcsin(K)
    self.cache['getTrainKernel'] = K_arcsin
    self.saveParams(params)
    return K_arcsin
def scale_matrix(factor, origin=None, direction=None):
    """Return matrix to scale by factor around origin in direction.

    Use factor -1 for point symmetry.

    >>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
    >>> v[3] = 1.0
    >>> S = scale_matrix(-1.234)
    >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
    True
    >>> factor = random.random() * 10 - 5
    >>> origin = numpy.random.random(3) - 0.5
    >>> direct = numpy.random.random(3) - 0.5
    >>> S = scale_matrix(factor, origin)
    >>> S = scale_matrix(factor, origin, direct)

    """
    if direction is None:
        # uniform scaling
        M = numpy.array(((factor, 0.0, 0.0, 0.0),
                         (0.0, factor, 0.0, 0.0),
                         (0.0, 0.0, factor, 0.0),
                         (0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
        if origin is not None:
            M[:3, 3] = origin[:3]
            M[:3, 3] *= 1.0 - factor
    else:
        # nonuniform scaling
        direction = unit_vector(direction[:3])
        factor = 1.0 - factor
        M = numpy.identity(4)
        M[:3, :3] -= factor * numpy.outer(direction, direction)
        if origin is not None:
            M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
    return M
def shear_matrix(angle, direction, point, normal):
    """Return matrix to shear by angle along direction vector on shear plane.

    The shear plane is defined by a point and normal vector. The direction
    vector must be orthogonal to the plane's normal vector.

    A point P is transformed by the shear matrix into P" such that
    the vector P-P" is parallel to the direction vector and its extent is
    given by the angle of P-P'-P", where P' is the orthogonal projection
    of P onto the shear plane.

    >>> angle = (random.random() - 0.5) * 4*math.pi
    >>> direct = numpy.random.random(3) - 0.5
    >>> point = numpy.random.random(3) - 0.5
    >>> normal = numpy.cross(direct, numpy.random.random(3))
    >>> S = shear_matrix(angle, direct, point, normal)
    >>> numpy.allclose(1.0, numpy.linalg.det(S))
    True

    """
    normal = unit_vector(normal[:3])
    direction = unit_vector(direction[:3])
    if abs(numpy.dot(normal, direction)) > 1e-6:
        raise ValueError("direction and normal vectors are not orthogonal")
    angle = math.tan(angle)
    M = numpy.identity(4)
    M[:3, :3] += angle * numpy.outer(direction, normal)
    M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
    return M
def multiply_C(self, factor):
    """multiply ``self.C`` with ``factor`` updating internal states.

    ``factor`` can be a scalar, a vector or a matrix. The vector
    is used as outer product and multiplied element-wise, i.e.,
    ``multiply_C(diag(C)**-0.5)`` generates a correlation matrix.

    Details:
    """
    self._updateC()
    if np.isscalar(factor):
        self.C *= factor
        self.D *= factor**0.5
        try:
            self.inverse_root_C /= factor**0.5
        except AttributeError:
            pass
    elif len(np.asarray(factor).shape) == 1:
        self.C *= np.outer(factor, factor)
        self._decompose_C()
    elif len(factor.shape) == 2:
        self.C *= factor
        self._decompose_C()
    else:
        raise ValueError(str(factor))
    # raise NotImplementedError('never tested')