Python numpy module: einsum() example source code
We have collected the following code examples from open-source Python projects to illustrate how to use numpy.einsum().
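Before the project snippets, here is a minimal, self-contained sketch of the einsum subscript notation (illustrative only, not taken from any of the projects below):

import numpy as np

A = np.arange(6).reshape(2, 3)
B = np.arange(12).reshape(3, 4)

# 'ij,jk->ik': ordinary matrix product, summing over the repeated index j
assert np.allclose(np.einsum('ij,jk->ik', A, B), A @ B)

# 'ii->': trace of a square matrix (sum over the diagonal)
M = np.arange(9).reshape(3, 3)
assert np.einsum('ii->', M) == np.trace(M)

# '...ij->...ji': transpose the last two axes of a stack of matrices
T = np.arange(24).reshape(2, 3, 4)
assert np.allclose(np.einsum('...ij->...ji', T), np.swapaxes(T, -1, -2))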
def total_length_selected(ed='empty', coords='empty', ob='empty'):
'''Returns the total length of all edge segments'''
if ob == 'empty':
ob = bpy.context.object
if coords == 'empty':
coords = get_coords(ob)
if ed == 'empty':
ed = get_edge_idx(ob)
edc = coords[ed]
e1 = edc[:, 0]
e2 = edc[:, 1]
ee1 = e1 - e2
sel = get_selected_edges(ob)
ee = ee1[sel]
leng = np.einsum('ij,ij->i', ee, ee)
return np.sum(np.sqrt(leng))
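The 'ij,ij->i' contraction above computes the squared length of every selected edge in one call; a small standalone check with made-up edge vectors:

import numpy as np

ee = np.random.rand(4, 3) - 0.5           # four hypothetical edge vectors in 3D
leng = np.einsum('ij,ij->i', ee, ee)      # per-edge squared lengths
assert np.allclose(np.sqrt(leng), np.linalg.norm(ee, axis=1))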
def transitions_old(width, height, configs=None, one_per_state=False):
digit = width * height
if configs is None:
configs = generate_configs(digit)
if one_per_state:
def pickone(thing):
index = np.random.randint(0,len(thing))
return thing[index]
transitions = np.array([
generate(
[c1,pickone(successors(c1,width,height))],width,height)
for c1 in configs ])
else:
transitions = np.array([ generate([c1,c2],width,height)
for c1 in configs for c2 in successors(c1,width,height) ])
return np.einsum('ab...->ba...',transitions)
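The 'ab...->ba...' spec used for the return value performs no summation; it only swaps the first two axes. A quick equivalence check with placeholder shapes:

import numpy as np

t = np.random.rand(3, 2, 5, 5)            # shapes are illustrative
assert np.allclose(np.einsum('ab...->ba...', t), np.swapaxes(t, 0, 1))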
def puzzle_plot(p):
p.setup()
def name(template):
return template.format(p.__name__)
from itertools import islice
configs = list(islice(p.generate_configs(9), 1000)) # careful: islice returns a one-shot iterator, so materialize it with list()
import numpy.random as random
random.shuffle(configs)
configs = configs[:10]
puzzles = p.generate(configs, 3, 3)
print(puzzles.shape, "mean", puzzles.mean(), "stdev", np.std(puzzles))
plot_image(puzzles[-1], name("{}.png"))
plot_image(np.clip(puzzles[-1]+np.random.normal(0,0.1,puzzles[-1].shape),0,1),name("{}+noise.png"))
plot_image(np.round(np.clip(puzzles[-1]+np.random.normal(0,0.1,puzzles[-1].shape),0,1)),name("{}+noise+round.png"))
plot_grid(puzzles, name("{}s.png"))
_transitions = p.transitions(3,3,configs=configs)
print(_transitions.shape)
transitions_for_show = \
np.einsum('ba...->ab...',_transitions) \
.reshape((-1,)+_transitions.shape[2:])
print(transitions_for_show.shape)
plot_grid(transitions_for_show, name("{}_transitions.png"))
def run(ae,xs):
zs = ae.encode_binary(xs)
ys = ae.decode_binary(zs)
mod_ys = []
correlations = []
print(ys.shape)
print("corrlations:")
print("bit \ image {}".format(range(len(xs))))
for i in range(ae.N):
mod_zs = np.copy(zs)
# increase the latent value from 0 to 1 and check the difference
for j in range(11):
mod_zs[:,i] = j / 10.0
mod_ys.append(ae.decode_binary(mod_zs))
zero_zs,one_zs = np.copy(zs),np.copy(zs)
zero_zs[:,i] = 0.
one_zs[:,i] = 1.
correlation = np.mean(np.square(ae.decode_binary(zero_zs) - ae.decode_binary(one_zs)),
axis=(1,2))
correlations.append(correlation)
print("{:>5} {}".format(i,correlation))
plot_grid2(np.einsum("ib...->bi...",np.array(mod_ys)).reshape((-1,)+ys.shape[1:]),
w=11,path=ae.local("dump_significance.png"))
return np.einsum("ib->bi",correlations)
def buildFock(self):
"""Routine to build the AO basis Fock matrix"""
if self.direct:
if self.incFockRst: # restart incremental fock build?
self.G = formPT(self.P,np.zeros_like(self.P),self.bfs,
self.nbasis,self.screen,self.scrTol)
self.G = 0.5*(self.G + self.G.T)
self.F = self.Core.astype('complex') + self.G
else:
self.G = formPT(self.P,self.P_old,self.bfs,self.nbasis,
self.screen,self.scrTol)
self.G = 0.5*(self.G + self.G.T)
self.F = self.F_old + self.G
else:
self.J = np.einsum('pqrs,sr->pq', self.TwoE.astype('complex'),self.P)
self.K = np.einsum('psqr,sr->pq', self.TwoE.astype('complex'),self.P)
self.G = 2.*self.J - self.K
self.F = self.Core.astype('complex') + self.G
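The Coulomb contraction 'pqrs,sr->pq' above sums the two-electron tensor against the density matrix over its last two indices; a minimal check with random stand-in data (the names below are illustrative, not the class attributes):

import numpy as np

nb = 4
TwoE = np.random.rand(nb, nb, nb, nb)     # stand-in for the two-electron integrals
P = np.random.rand(nb, nb)
P = 0.5 * (P + P.T)                       # symmetric density-like matrix

J = np.einsum('pqrs,sr->pq', TwoE, P)
J_ref = np.tensordot(TwoE, P, axes=([2, 3], [1, 0]))   # same contraction via tensordot
assert np.allclose(J, J_ref)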
def pointsInRegion(regNum, vor, p, overlap=0.0):
"""
returns the subset of points p that are inside the regNum region of the voronoi object
vor. The boundaries of the region are extended by an amount given by 'overlap'.
"""
reg = vor.regions[vor.point_region[regNum]] # region associated with the point
if -1 in reg:
raise Exception('Open region associated with generator')
nVerts = len(reg) # number of vertices in the region
p0 = vor.points[regNum]
for i in range(len(reg)):
vert1, vert2 = vor.vertices[reg[i]], vor.vertices[reg[(i + 1) % len(reg)]]
dr = vert1 - vert2 # edge
dr = dr / numpy.linalg.norm(dr) # normalize
dn = numpy.array([dr[1], -dr[0]]) # normal to edge
dn = dn if numpy.dot(dn, vert2 - p0[:2]) > 0 else -dn # orient so that the normal is outwards
d1 = numpy.einsum('i,ji', dn, vert2 + dn * overlap - p[:, :2])
p = p[d1 * numpy.dot(dn, vert2 - p0[:2]) > 0]
return p
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_AssignZero
a = np.ones((1, 2))
b = np.ones((2, 2, 1))
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
np.einsum('ijklm,ijn->', a, b))
# Issue #2027, was a problem in the contiguous 3-argument
# inner loop implementation
a = np.arange(1, 3)
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
def test_einsum_all_contig_non_contig_output(self):
# Issue gh-5907, tests that the all contiguous special case
# actually checks the contiguity of the output
x = np.ones((5, 5))
out = np.ones(10)[::2]
correct_base = np.ones(10)
correct_base[::2] = 5
# Always worked (inner iteration is done with 0-stride):
np.einsum('mi,mi,mi->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 1:
out = np.ones(10)[::2]
np.einsum('im,im,im->m', x, x, x, out=out)
assert_array_equal(out.base, correct_base)
# Example 2, buffering causes x to be contiguous but
# special cases do not catch the operation before:
out = np.ones((2, 2, 2))[..., 0]
correct_base = np.ones((2, 2, 2))
correct_base[..., 0] = 2
x = np.ones((2, 2), np.float32)
np.einsum('ij,jk->ik', x, x, out=out)
assert_array_equal(out.base, correct_base)
def dihedral_transform_batch(x):
g = np.random.randint(low=0, high=8, size=x.shape[0])
h, w = x.shape[-2:]
hh = (h - 1) / 2.
hw = (w - 1) / 2.
I, J = np.meshgrid(np.linspace(-hh, hh, x.shape[-2]), np.linspace(-hw, hw, x.shape[-1]))
C = np.r_[[I, J]]
D4C = np.einsum('...ij,jkl->...ikl', D4, C)
D4C[:, 0] += hh
D4C[:, 1] += hw
D4C = D4C.astype(int)
x_out = np.empty_like(x)
for i in range(x.shape[0]):
I, J = D4C[g[i]]
x_out[i, :] = x[i][:, J, I]
return x_out
def get_vol(simplex):
# Compute the volume via the Cayley-Menger determinant
# <http://mathworld.wolfram.com/Cayley-MengerDeterminant.html>. One
# advantage is that it can compute the volume of the simplex independently
# of the dimension of the space where it's embedded.
# compute all edge lengths
edges = numpy.subtract(simplex[:, None], simplex[None, :])
ei_dot_ej = numpy.einsum('...k,...k->...', edges, edges)
j = simplex.shape[0] - 1
a = numpy.empty((j+2, j+2) + ei_dot_ej.shape[2:])
a[1:, 1:] = ei_dot_ej
a[0, 1:] = 1.0
a[1:, 0] = 1.0
a[0, 0] = 0.0
a = numpy.moveaxis(a, (0, 1), (-2, -1))
det = numpy.linalg.det(a)
vol = numpy.sqrt((-1.0)**(j+1) / 2**j / math.factorial(j)**2 * det)
return vol
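As a sanity check on the Cayley-Menger construction above, the unit right triangle gives a determinant of -4 and the expected area of 0.5 (a standalone re-derivation, not part of the original project):

import math
import numpy

simplex = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # unit right triangle
edges = numpy.subtract(simplex[:, None], simplex[None, :])
ei_dot_ej = numpy.einsum('...k,...k->...', edges, edges)      # pairwise squared edge lengths

j = simplex.shape[0] - 1
a = numpy.empty((j + 2, j + 2))
a[1:, 1:] = ei_dot_ej
a[0, 1:] = 1.0
a[1:, 0] = 1.0
a[0, 0] = 0.0

det = numpy.linalg.det(a)                                     # approximately -4
vol = math.sqrt((-1.0) ** (j + 1) / 2 ** j / math.factorial(j) ** 2 * det)
assert abs(vol - 0.5) < 1e-12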
def scalar_product_interval(anchors, indizes_1, indizes_2):
q = (anchors[1][0]-anchors[0][0])
vector_1 = np.vstack([
anchors[0][1][indizes_1], # a_1
anchors[0][2][indizes_1] * q, # b_1
anchors[1][1][indizes_1], # c_1
anchors[1][2][indizes_1] * q, # d_1
])
vector_2 = np.vstack([
anchors[0][1][indizes_2], # a_2
anchors[0][2][indizes_2] * q, # b_2
anchors[1][1][indizes_2], # c_2
anchors[1][2][indizes_2] * q, # d_2
])
return np.einsum(
vector_1, [0,2],
sp_matrix, [0,1],
vector_2, [1,2]
)*q
def scalar_product_partial(anchors, indizes_1, indizes_2, start):
q = (anchors[1][0]-anchors[0][0])
z = (start-anchors[1][0]) / q
vector_1 = np.vstack([
anchors[0][1][indizes_1], # a_1
anchors[0][2][indizes_1] * q, # b_1
anchors[1][1][indizes_1], # c_1
anchors[1][2][indizes_1] * q, # d_1
])
vector_2 = np.vstack([
anchors[0][1][indizes_2], # a_2
anchors[0][2][indizes_2] * q, # b_2
anchors[1][1][indizes_2], # c_2
anchors[1][2][indizes_2] * q, # d_2
])
return np.einsum(
vector_1, [0,2],
partial_sp_matrix(z), [0,1],
vector_2, [1,2]
)*q
def mvl(pha, amp, optimize):
"""Mean Vector Length (Canolty, 2006).
Parameters
----------
pha : array_like
Array of phases of shapes (npha, ..., npts)
amp : array_like
Array of amplitudes of shapes (namp, ..., npts)
Returns
-------
pac : array_like
PAC of shape (namp, npha, ...)
"""
# Number of time points :
npts = pha.shape[-1]
return np.abs(np.einsum('i...j, k...j->ik...', amp, np.exp(1j * pha),
optimize=optimize)) / npts
def ps(pha, amp, optimize):
"""Phase Synchrony (Penny, 2008; Cohen, 2008).
Parameters
----------
pha : array_like
Array of phases of shapes (npha, ..., npts)
amp : array_like
Array of amplitudes of shapes (namp, ..., npts)
Returns
-------
pac : array_like
PAC of shape (namp, npha, ...)
"""
# Number of time points :
npts = pha.shape[-1]
pac = np.einsum('i...j, k...j->ik...', np.exp(-1j * amp), np.exp(1j * pha),
optimize=optimize)
return np.abs(pac) / npts
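In both mvl and ps, the 'i...j,k...j->ik...' pattern sums over the time axis and pairs every amplitude row with every phase row; a shape-only sketch with random data (the sizes are made up):

import numpy as np

npha, namp, npts = 2, 3, 1000
pha = np.random.uniform(-np.pi, np.pi, (npha, npts))
amp = np.random.rand(namp, npts)

pac = np.abs(np.einsum('i...j,k...j->ik...', amp, np.exp(1j * pha))) / npts
print(pac.shape)   # (3, 2): one coupling value per (amplitude, phase) pair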
def half_space(self):
"""Return the half space polytope respresentation of the infinite
beam."""
# add half beam width along the normal direction to each of the points
half = self.normal * self.size / 2
edges = [Line(self.p1 + half, self.p2 + half),
Line(self.p1 - half, self.p2 - half)]
A = np.ndarray((len(edges), self.dim))
B = np.ndarray(len(edges))
for i in range(0, 2):
A[i, :], B[i] = edges[i].standard
# test for positive or negative side of line
if np.einsum('i, i', self.p1._x, A[i, :]) > B[i]:
A[i, :] = -A[i, :]
B[i] = -B[i]
p = pt.Polytope(A, B)
return p
def forward_prop_random_thru_post_mm(self, model, mx, vx, mu, Su):
Kuu_noiseless = compute_kernel(
2 * model.ls, 2 * model.sf, model.zu, model.zu)
Kuu = Kuu_noiseless + np.diag(jitter * np.ones((self.M, )))
# TODO: remove inv
Kuuinv = np.linalg.inv(Kuu)
A = np.dot(Kuuinv, mu)
Smm = Su + np.outer(mu, mu)
B_sto = np.dot(Kuuinv, np.dot(Smm, Kuuinv)) - Kuuinv
psi0 = np.exp(2.0 * model.sf)
psi1, psi2 = compute_psi_weave(
2 * model.ls, 2 * model.sf, mx, vx, model.zu)
mout = np.einsum('nm,md->nd', psi1, A)
Bpsi2 = np.einsum('ab,nab->n', B_sto, psi2)[:, np.newaxis]
vout = psi0 + Bpsi2 - mout**2
return mout, vout
def _forward_prop_deterministic_thru_post(self, x, return_info=False):
"""Propagate deterministic inputs thru posterior
Args:
x (float): input values, size K x Din
return_info (bool, optional): Description
Returns:
float, size K x Dout: output means
float, size K x Dout: output variances
"""
psi0 = np.exp(2 * self.sf)
psi1 = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,dm->nd', psi1, self.A)
Bpsi2 = np.einsum('dab,na,nb->nd', self.B_det, psi1, psi1)
vout = psi0 + Bpsi2
if return_info:
return mout, vout, psi1
else:
return mout, vout
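The two contractions above are a plain matrix product and a per-point quadratic form; a minimal equivalence check with placeholder shapes (the names mirror the snippet but the data is random):

import numpy as np

N, M, Dout = 6, 4, 3
psi1 = np.random.rand(N, M)
A = np.random.rand(Dout, M)
B_det = np.random.rand(Dout, M, M)

# 'nm,dm->nd' is psi1 times the transposed weight matrix
assert np.allclose(np.einsum('nm,dm->nd', psi1, A), psi1 @ A.T)

# 'dab,na,nb->nd' evaluates psi1[n] . B_det[d] . psi1[n] for every n and d
quad = np.einsum('dab,na,nb->nd', B_det, psi1, psi1)
quad_ref = np.stack([np.sum((psi1 @ B_det[d]) * psi1, axis=1) for d in range(Dout)], axis=1)
assert np.allclose(quad, quad_ref)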
def _forward_prop_random_thru_post_mm(self, mx, vx, return_info=False):
"""Propagate uncertain inputs thru posterior, using Moment Matching
Args:
mx (float): input means, size K x Din
vx (TYPE): input variances, size K x Din
return_info (bool, optional): Description
Returns:
float, size K x Dout: output means
float, size K x Dout: output variances
"""
psi0 = np.exp(2.0 * self.sf)
psi1, psi2 = compute_psi_weave(
2 * self.ls, 2 * self.sf, mx, vx, self.zu)
mout = np.einsum('nm,dm->nd', psi1, self.A)
Bpsi2 = np.einsum('dab,nab->nd', self.B_sto, psi2)
vout = psi0 + Bpsi2 - mout**2
if return_info:
return mout, vout, psi1, psi2
else:
return mout, vout
def sample(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Su = self.Su
mu = self.mu
Lu = np.linalg.cholesky(Su)
epsilon = np.random.randn(self.Dout, self.M)
u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)
kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)
kff += np.diag(JITTER * np.ones(x.shape[0]))
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
qfu = np.dot(kfu, self.Kuuinv)
mf = np.einsum('nm,dm->nd', qfu, u_sample)
vf = kff - np.dot(qfu, kfu.T)
Lf = np.linalg.cholesky(vf)
epsilon = np.random.randn(x.shape[0], self.Dout)
f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)
return f_sample
def _forward_prop_deterministic_thru_cav(self, x):
"""Propagate deterministic inputs thru cavity
Args:
x (float): input values, size K x Din
Returns:
float, size K x Dout: output means
float, size K x Dout: output variances
float, size K x M: cross covariance matrix
"""
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,dm->nd', kfu, self.Ahat)
Bkfukuf = np.einsum('dab,na,nb->nd', self.Bhat_det, kfu, kfu)
vout = kff + Bkfukuf
return mout, vout, kfu
def _forward_prop_random_thru_cav_mm(self, mx, vx):
"""Propagate uncertain inputs thru cavity, using simple Moment Matching
Args:
mx (float): input means, size K x Din
vx (TYPE): input variances, size K x Din
Returns:
output means and variances, and intermediate info for backprop
"""
psi0 = np.exp(2 * self.sf)
psi1, psi2 = compute_psi_weave(
2 * self.ls, 2 * self.sf, mx, vx, self.zu)
mout = np.einsum('nm,dm->nd', psi1, self.Ahat)
Bhatpsi2 = np.einsum('dab,nab->nd', self.Bhat_sto, psi2)
vout = psi0 + Bhatpsi2 - mout**2
return mout, vout, psi1, psi2
def psi1compDer(dL_dpsi1, _psi1, variance, lengthscale, Z, mu, S):
# here are the "statistics" for psi1
# Produced intermediate results: dL_dparams w.r.t. psi1
# _dL_dvariance 1
# _dL_dlengthscale Q
# _dL_dZ MxQ
# _dL_dgamma NxQ
# _dL_dmu NxQ
# _dL_dS NxQ
lengthscale2 = np.square(lengthscale)
Lpsi1 = dL_dpsi1 * _psi1
Zmu = Z[None, :, :] - mu[:, None, :] # NxMxQ
denom = 1. / (S + lengthscale2)
Zmu2_denom = np.square(Zmu) * denom[:, None, :] # NxMxQ
_dL_dvar = Lpsi1.sum() / variance
_dL_dmu = np.einsum('nm,nmq,nq->nq', Lpsi1, Zmu, denom)
_dL_dS = np.einsum('nm,nmq,nq->nq', Lpsi1, (Zmu2_denom - 1.), denom) / 2.
_dL_dZ = -np.einsum('nm,nmq,nq->mq', Lpsi1, Zmu, denom)
_dL_dl = np.einsum('nm,nmq,nq->q', Lpsi1, (Zmu2_denom +
(S / lengthscale2)[:, None, :]), denom * lengthscale)
return _dL_dvar, _dL_dl, _dL_dZ, _dL_dmu, _dL_dS
def kfucompDer(dL_dkfu, kfu, variance, lengthscale, Z, mu, grad_x):
# here are the "statistics" for psi1
# Produced intermediate results: dL_dparams w.r.t. psi1
# _dL_dvariance 1
# _dL_dlengthscale Q
# _dL_dZ MxQ
lengthscale2 = np.square(lengthscale)
Lpsi1 = dL_dkfu * kfu
Zmu = Z[None, :, :] - mu[:, None, :] # NxMxQ
_dL_dvar = Lpsi1.sum() / variance
_dL_dZ = -np.einsum('nm,nmq->mq', Lpsi1, Zmu / lengthscale2)
_dL_dl = np.einsum('nm,nmq->q', Lpsi1, np.square(Zmu) / lengthscale**3)
if grad_x:
_dL_dx = np.einsum('nm,nmq->nq', Lpsi1, Zmu / lengthscale2)
return _dL_dvar, _dL_dl, _dL_dZ, _dL_dx
else:
return _dL_dvar, _dL_dl, _dL_dZ
def _forward_prop_deterministic_thru_cav(self, n, x, alpha):
"""Summary
Args:
n (TYPE): Description
x (TYPE): Description
alpha (TYPE): Description
Returns:
TYPE: Description
"""
muhat, Suhat, SuinvMuhat, Suinvhat = self.compute_cavity(n, alpha)
Kuuinv = self.Kuuinv
Ahat = np.einsum('ab,ndb->nda', Kuuinv, muhat)
Bhat = np.einsum(
'ab,ndbc->ndac',
Kuuinv, np.einsum('ndab,bc->ndac', Suhat, Kuuinv)) - Kuuinv
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,ndm->nd', kfu, Ahat)
Bkfukuf = np.einsum('ndab,na,nb->nd', Bhat, kfu, kfu)
vout = kff + Bkfukuf
extra_res = [muhat, Suhat, SuinvMuhat, Suinvhat, kfu, Ahat, Bhat]
return mout, vout, extra_res
def _forward_prop_deterministic_thru_post(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Kuuinv = self.Kuuinv
A = np.einsum('ab,db->da', Kuuinv, self.mu)
B = np.einsum(
'ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,dm->nd', kfu, A)
Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)
vout = kff + Bpsi2
return mout, vout
# TODO
def _forward_prop_random_thru_post_mm(self, mx, vx):
"""Summary
Args:
mx (TYPE): Description
vx (TYPE): Description
Returns:
TYPE: Description
"""
Kuuinv = self.Kuuinv
A = np.einsum('ab,db->da', Kuuinv, self.mu)
Smm = self.Su + np.einsum('da,db->dab', self.mu, self.mu)
B = np.einsum(
'ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', Smm, Kuuinv)) - Kuuinv
psi0 = np.exp(2.0 * self.sf)
psi1, psi2 = compute_psi_weave(
2 * self.ls, 2 * self.sf, mx, vx, self.zu)
mout = np.einsum('nm,dm->nd', psi1, A)
Bpsi2 = np.einsum('dab,nab->nd', B, psi2)
vout = psi0 + Bpsi2 - mout**2
return mout, vout
def sample(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Su = self.Su
mu = self.mu
Lu = np.linalg.cholesky(Su)
epsilon = np.random.randn(self.Dout, self.M)
u_sample = mu + np.einsum('dab,db->da', Lu, epsilon)
kff = compute_kernel(2 * self.ls, 2 * self.sf, x, x)
kff += np.diag(JITTER * np.ones(x.shape[0]))
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
qfu = np.dot(kfu, self.Kuuinv)
mf = np.einsum('nm,dm->nd', qfu, u_sample)
vf = kff - np.dot(qfu, kfu.T)
Lf = np.linalg.cholesky(vf)
epsilon = np.random.randn(x.shape[0], self.Dout)
f_sample = mf + np.einsum('ab,bd->ad', Lf, epsilon)
return f_sample
def compute_cavity(self, n, alpha=1.0):
"""Summary
Args:
n (TYPE): Description
alpha (float, optional): Description
Returns:
TYPE: Description
"""
# compute the leave one out moments
t1n = self.t1[n, :, :]
t2n = self.t2[n, :, :, :]
Suinvhat = self.Suinv - alpha * t2n
SuinvMuhat = self.SuinvMu - alpha * t1n
Suhat = np.linalg.inv(Suinvhat)
muhat = np.einsum('ndab,ndb->nda', Suhat, SuinvMuhat)
return muhat, Suhat, SuinvMuhat, Suinvhat
def forward_prop_thru_post(self, x):
"""Summary
Args:
x (TYPE): Description
Returns:
TYPE: Description
"""
Kuuinv = self.Kuuinv
A = np.einsum('ab,db->da', Kuuinv, self.mu)
B = np.einsum(
'ab,dbc->dac',
Kuuinv, np.einsum('dab,bc->dac', self.Su, Kuuinv)) - Kuuinv
kff = np.exp(2 * self.sf)
kfu = compute_kernel(2 * self.ls, 2 * self.sf, x, self.zu)
mout = np.einsum('nm,dm->nd', kfu, A)
Bpsi2 = np.einsum('dab,na,nb->nd', B, kfu, kfu)
vout = kff + Bpsi2
return mout, vout
def update_posterior(self, x_train=None, new_hypers=False):
"""Summary
Returns:
TYPE: Description
"""
# compute the posterior approximation
if new_hypers and x_train is not None:
Kfu = compute_kernel(2*self.ls, 2*self.sf, x_train, self.zu)
KuuinvKuf = np.dot(self.Kuuinv, Kfu.T)
self.Kfu = Kfu
self.KuuinvKuf = KuuinvKuf
self.Kff_diag = compute_kernel_diag(2*self.ls, 2*self.sf, x_train)
KuuinvKuf_div_var = np.einsum('an,nd->dan', self.KuuinvKuf, 1.0 / self.variances)
T2u = np.einsum('dan,bn->dab', KuuinvKuf_div_var, self.KuuinvKuf)
T1u = np.einsum('bn,nd->db', self.KuuinvKuf, self.means / self.variances)
Vinv = self.Kuuinv + T2u
self.Suinv = Vinv
self.Su = np.linalg.inv(Vinv)
self.mu = np.einsum('dab,db->da', self.Su, T1u)
self.gamma = np.einsum('ab,db->da', self.Kuuinv, self.mu)
self.beta = self.Kuuinv - np.einsum('ab,dbc->dac',
self.Kuuinv,
np.einsum('dab,bc->dac', self.Su, self.Kuuinv))
def compute_cavity(self, idxs, alpha):
# deletion
p_i = self.KuuinvKuf[:, idxs].T[:, np.newaxis, :]
k_i = self.Kfu[idxs, :]
k_ii = self.Kff_diag[idxs][:, np.newaxis]
gamma = self.gamma
beta = self.beta
h_si = p_i - np.einsum('dab,nb->nda', beta, k_i)
variance_i = self.variances[idxs, :]
mean_i = self.means[idxs, :]
dlogZd_dmi2 = 1.0 / (variance_i/alpha -
np.sum(k_i[:, np.newaxis, :] * h_si, axis=2))
dlogZd_dmi = -dlogZd_dmi2 * (mean_i -
np.sum(k_i[:, np.newaxis, :] * gamma, axis=2))
hd1 = h_si * dlogZd_dmi[:, :, np.newaxis]
hd2h = np.einsum('nda,ndb->ndab', h_si, h_si) * dlogZd_dmi2[:, :, np.newaxis, np.newaxis]
gamma_si = gamma + hd1
beta_si = beta - hd2h
# projection
h = p_i - np.einsum('ndab,nb->nda', beta_si, k_i)
m_si_i = np.einsum('na,nda->nd', k_i, gamma_si)
v_si_ii = k_ii - np.einsum('na,ndab,nb->nd', k_i, beta_si, k_i)
return m_si_i, v_si_ii, [h, beta_si, gamma_si]
def project3Dto2D(self, Li, idxs):
"""
Project 3D point to 2D
:param Li: joints in normalized 3D
:param idxs: frames specified by subset
:return: 2D points, in normalized 2D coordinates
"""
if not isinstance(idxs, numpy.ndarray):
idxs = numpy.asarray([idxs])
# 3D -> 2D projection also shift by M to cropped window
Li_glob3D = (numpy.reshape(Li, (len(idxs), self.numJoints, 3))*self.Di_scale[idxs][:, None, None]+self.Di_off3D[idxs][:, None, :]).reshape((len(idxs)*self.numJoints, 3))
Li_glob3D_hom = numpy.concatenate([Li_glob3D, numpy.ones((len(idxs)*self.numJoints, 1), dtype='float32')], axis=1)
Li_glob2D_hom = numpy.dot(Li_glob3D_hom, self.cam_proj.T)
Li_glob2D = (Li_glob2D_hom[:, 0:3] / Li_glob2D_hom[:, 3][:, None]).reshape((len(idxs), self.numJoints, 3))
Li_img2D_hom = numpy.einsum('ijk,ikl->ijl', Li_glob2D, self.Di_trans2D[idxs])
Li_img2D = (Li_img2D_hom[:, :, 0:2] / Li_img2D_hom[:, :, 2][:, :, None]).reshape((len(idxs), self.numJoints*2))
Li_img2Dcrop = (Li_img2D - (self.Di.shape[3]/2.)) / (self.Di.shape[3]/2.)
return Li_img2Dcrop
def compute_dr_wrt(self, wrt):
if wrt is not self.v:
return None
v = self.v.r.reshape(-1, 3)
blocks = -np.einsum('ij,ik->ijk', v, v) * (self.ss**(-3./2.)).reshape((-1, 1, 1))
for i in range(3):
blocks[:, i, i] += self.s_inv
if True: # pylint: disable=using-constant-test
data = blocks.ravel()
indptr = np.arange(0, (self.v.r.size+1)*3, 3)
indices = col(np.arange(0, self.v.r.size))
indices = np.hstack([indices, indices, indices])
indices = indices.reshape((-1, 3, 3))
indices = indices.transpose((0, 2, 1)).ravel()
result = sp.csc_matrix((data, indices, indptr), shape=(self.v.r.size, self.v.r.size))
return result
else:
matvec = lambda x: np.einsum('ijk,ik->ij', blocks, x.reshape((blocks.shape[0], 3))).ravel()
return sp.linalg.LinearOperator((self.v.r.size, self.v.r.size), matvec=matvec)
def test_exKxz_pairwise(self):
covall = np.array([self.Xcov, self.Xcovc])
for k in self.kernels:
with self.test_context():
if isinstance(k, ekernels.Linear):
continue
k.compile()
exKxz = k.compute_exKxz_pairwise(self.Z, self.Xmu, covall)
Kxz = k.compute_K(self.Xmu[:-1, :], self.Z) # NxM
xKxz = np.einsum('nm,nd->nmd', Kxz, self.Xmu[1:, :])
self.assertTrue(np.allclose(xKxz, exKxz))
# def test_exKxz(self):
# for k in self.kernels:
# with self.test_session():
# if isinstance(k, ekernels.Linear):
# continue
# k.compile()
# exKxz = k.compute_exKxz(self.Z, self.Xmu, self.Xcov)
# Kxz = k.compute_K(self.Xmu, self.Z) # NxM
# xKxz = np.einsum('nm,nd->nmd', Kxz, self.Xmu)
# self.assertTrue(np.allclose(xKxz, exKxz))
def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
posteriors, fwdlattice, bwdlattice):
super(GaussianHMM, self)._accumulate_sufficient_statistics(
stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)
if 'm' in self.params or 'c' in self.params:
stats['post'] += posteriors.sum(axis=0)
stats['obs'] += np.dot(posteriors.T, obs)
if 'c' in self.params:
if self.covariance_type in ('spherical', 'diag'):
stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
elif self.covariance_type in ('tied', 'full'):
# posteriors: (nt, nc); obs: (nt, nf); obs: (nt, nf)
# -> (nc, nf, nf)
stats['obs*obs.T'] += np.einsum(
'ij,ik,il->jkl', posteriors, obs, obs)
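The 'ij,ik,il->jkl' contraction accumulates, for every state, the posterior-weighted outer products of the observations; a small check against an explicit loop (toy sizes):

import numpy as np

nt, nc, nf = 8, 2, 3                      # time steps, states, features (toy sizes)
posteriors = np.random.rand(nt, nc)
obs = np.random.rand(nt, nf)

stats = np.einsum('ij,ik,il->jkl', posteriors, obs, obs)

ref = np.zeros((nc, nf, nf))
for t in range(nt):
    for j in range(nc):
        ref[j] += posteriors[t, j] * np.outer(obs[t], obs[t])
assert np.allclose(stats, ref)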
def __init__(self, matrix, w):
W = np.sum(w)
self.w = w
self.X = matrix
self.left = None
self.right = None
self.mu = np.einsum('ij,i->j', self.X, w)/W
diff = self.X - np.tile(self.mu, [np.shape(self.X)[0], 1])
t = np.einsum('ij,i->ij', diff, np.sqrt(w))
self.cov = (t.T @ t)/W + 1e-5*np.eye(3)
self.N = self.X.shape[0]
V, D = np.linalg.eig(self.cov)
self.lmbda = np.max(np.abs(V))
self.e = D[:, np.argmax(np.abs(V))]  # eigenvectors are the columns returned by np.linalg.eig
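The two einsum calls in this constructor are a weighted mean ('ij,i->j') and a square-root-weighted scaling of the centred rows ('ij,i->ij'); a standalone check of both with made-up data:

import numpy as np

X = np.random.rand(10, 3)
w = np.random.rand(10)
W = w.sum()

mu = np.einsum('ij,i->j', X, w) / W                  # weighted mean of the rows
assert np.allclose(mu, (w[:, None] * X).sum(axis=0) / W)

diff = X - mu
t = np.einsum('ij,i->ij', diff, np.sqrt(w))          # scale row i by sqrt(w[i])
assert np.allclose((t.T @ t) / W, (diff.T * w) @ diff / W)   # weighted covariance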
# S is measurements vector - dim nxd
# w is weights vector - dim n
def _solve_2D2(self, X, Z):
"""Solves :math:`Z^T N^{-1}X`, where :math:`X`
and :math:`Z` are 2-d arrays.
"""
ZNX = np.dot(Z.T / self._nvec, X)
for slc, jv in zip(self._slices, self._jvec):
if slc.stop - slc.start > 1:
Zblock = Z[slc, :]
Xblock = X[slc, :]
niblock = 1 / self._nvec[slc]
beta = 1.0 / (np.einsum('i->', niblock)+1.0/jv)
zn = np.dot(niblock, Zblock)
xn = np.dot(niblock, Xblock)
ZNX -= beta * np.outer(zn.T, xn)
return ZNX
def ss_framerotate(mjd, planet, x, y, z, dz,
offset=None, equatorial=False):
"""
Rotate planet trajectory given as (n,3) tensor,
by ecliptic Euler angles x, y, z, and by z rate
dz. The rate has units of rad/year, and is referred
to offset 2010/1/1. dates must be given in MJD.
"""
if equatorial:
planet = eq2ecl_vec(planet)
E = euler_vec(z + dz * (mjd - t_offset) / 365.25, y, x,
planet.shape[0])
planet = np.einsum('ijk,ik->ij', E, planet)
if offset is not None:
planet = np.array(offset) + planet
if equatorial:
planet = ecl2eq_vec(planet)
return planet
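The 'ijk,ik->ij' contraction applies a different rotation matrix to each row of the trajectory; an equivalence check against an explicit per-row matrix-vector product (random stand-in data):

import numpy as np

n = 5
E = np.random.rand(n, 3, 3)               # one 3x3 matrix per epoch
planet = np.random.rand(n, 3)             # one 3-vector per epoch

rotated = np.einsum('ijk,ik->ij', E, planet)
ref = np.array([E[i] @ planet[i] for i in range(n)])
assert np.allclose(rotated, ref)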
def _log_prod_students_t(self, i, mu, log_prod_var, inv_var, v):
"""
Return the value of the log of the product of the univariate Student's
t PDFs at `X[i]`.
"""
delta = self.X[i, :] - mu
return (
self.D * (
self._cached_gammaln_by_2[v + 1] - self._cached_gammaln_by_2[v]
- 0.5*self._cached_log_v[v] - 0.5*self._cached_log_pi
)
- 0.5*log_prod_var
- (v + 1.)/2. * (np.log(1. + 1./v * np.square(delta) * inv_var)).sum()
)
#-----------------------------------------------------------------------------#
# UTILITY FUNCTIONS #
#-----------------------------------------------------------------------------#
# Below is slightly faster than np.sum, see http://stackoverflow.com/questions/
# 18365073/why-is-numpys-einsum-faster-than-numpys-built-in-functions
def setUp(self):
self.f = links.Bilinear(
self.in_shape[0], self.in_shape[1], self.out_size)
self.f.W.data[...] = _uniform(*self.f.W.data.shape)
self.f.V1.data[...] = _uniform(*self.f.V1.data.shape)
self.f.V2.data[...] = _uniform(*self.f.V2.data.shape)
self.f.b.data[...] = _uniform(*self.f.b.data.shape)
self.f.zerograds()
self.W = self.f.W.data.copy()
self.V1 = self.f.V1.data.copy()
self.V2 = self.f.V2.data.copy()
self.b = self.f.b.data.copy()
self.e1 = _uniform(self.batch_size, self.in_shape[0])
self.e2 = _uniform(self.batch_size, self.in_shape[1])
self.gy = _uniform(self.batch_size, self.out_size)
self.y = (
numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W) +
self.e1.dot(self.V1) + self.e2.dot(self.V2) + self.b)
def inside_triangle(point,triangles):
v0 = triangles[:,2]-triangles[:,0]
v1 = triangles[:,1]-triangles[:,0]
v2 = point-triangles[:,0]
dot00 = np.einsum('ij,ij->i',v0,v0)
dot01 = np.einsum('ij,ij->i',v0,v1)
dot02 = np.einsum('ij,ij->i',v0,v2)
dot11 = np.einsum('ij,ij->i',v1,v1)
dot12 = np.einsum('ij,ij->i',v1,v2)
invDenom = 1./(dot00 * dot11-dot01*dot01)
u = np.float16((dot11 * dot02 - dot01 * dot12)*invDenom)
v = np.float16((dot00 * dot12 - dot01 * dot02)*invDenom)
return (u>=0) & (v>=0) & (u+v<=1)
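A quick usage sketch for the barycentric test above (this assumes the inside_triangle function and import numpy as np from the snippet are in scope; the coordinates are made up):

triangles = np.array([
    [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0]],   # contains the query point
    [[2.0, 2.0], [2.0, 3.0], [3.0, 2.0]],   # does not
])
print(inside_triangle(np.array([0.25, 0.25]), triangles))   # [ True False]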
def omgp_model_bound(omgp):
''' Calculate the part of the omgp bound which does not depend
on the response variable.
'''
GP_bound = 0.0
LBs = []
# Precalculate the bound minus data fit,
# and LB matrices used for data fit term.
for i, kern in enumerate(omgp.kern):
K = kern.K(omgp.X)
B_inv = np.diag(1. / ((omgp.phi[:, i] + 1e-6) / omgp.variance))
Bi, LB, LBi, Blogdet = pdinv(K + B_inv)
LBs.append(LB)
# Penalty
GP_bound -= 0.5 * Blogdet
# Constant
GP_bound -= 0.5 * omgp.D * np.einsum('j,j->', omgp.phi[:, i], np.log(2 * np.pi * omgp.variance))
model_bound = GP_bound + omgp.mixing_prop_bound() + omgp.H
return model_bound, LBs
def _predict_output_derivatives(self, x):
n = x.shape[0]
nt = self.nt
ny = self.training_points[None][0][1].shape[1]
num = self.num
dy_dstates = np.empty(n * num['dof'])
self.rbfc.compute_jac(n, x.flatten(), dy_dstates)
dy_dstates = dy_dstates.reshape((n, num['dof']))
dstates_dytl = np.linalg.inv(self.mtx)
ones = np.ones(self.nt)
arange = np.arange(self.nt)
dytl_dyt = csc_matrix((ones, (arange, arange)), shape=(num['dof'], self.nt))
dy_dyt = (dytl_dyt.T.dot(dstates_dytl.T).dot(dy_dstates.T)).T
dy_dyt = np.einsum('ij,k->ijk', dy_dyt, np.ones(ny))
return {None: dy_dyt}
def get_power_spectral_density_matrix(observation, mask=None):
"""
Calculates the weighted power spectral density matrix.
This does not yet work with more than one target mask.
:param observation: Complex observations with shape (bins, sensors, frames)
:param mask: Masks with shape (bins, frames) or (bins, 1, frames)
:return: PSD matrix with shape (bins, sensors, sensors)
"""
bins, sensors, frames = observation.shape
if mask is None:
mask = np.ones((bins, frames))
if mask.ndim == 2:
mask = mask[:, np.newaxis, :]
normalization = np.maximum(np.sum(mask, axis=-1, keepdims=True), 1e-6)
psd = np.einsum('...dt,...et->...de', mask * observation,
observation.conj())
psd /= normalization
return psd
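The '...dt,...et->...de' contraction forms, per frequency bin, the outer product of the sensor signals summed over frames, i.e. X times its conjugate transpose; a per-bin equivalence check with random complex data (shapes are illustrative):

import numpy as np

bins, sensors, frames = 4, 3, 50
X = np.random.randn(bins, sensors, frames) + 1j * np.random.randn(bins, sensors, frames)

psd = np.einsum('...dt,...et->...de', X, X.conj())
ref = np.array([X[b] @ X[b].conj().T for b in range(bins)])
assert np.allclose(psd, ref)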
def get_mvdr_vector(atf_vector, noise_psd_matrix):
"""
Returns the MVDR beamforming vector.
:param atf_vector: Acoustic transfer function vector
with shape (..., bins, sensors)
:param noise_psd_matrix: Noise PSD matrix
with shape (bins, sensors, sensors)
:return: Set of beamforming vectors with shape (..., bins, sensors)
"""
while atf_vector.ndim > noise_psd_matrix.ndim - 1:
noise_psd_matrix = np.expand_dims(noise_psd_matrix, axis=0)
# Make sure matrix is hermitian
noise_psd_matrix = 0.5 * (
noise_psd_matrix + np.conj(noise_psd_matrix.swapaxes(-1, -2)))
numerator = solve(noise_psd_matrix, atf_vector)
denominator = np.einsum('...d,...d->...', atf_vector.conj(), numerator)
beamforming_vector = numerator / np.expand_dims(denominator, axis=-1)
return beamforming_vector
def apply_sdw_mwf(mix, target_psd_matrix, noise_psd_matrix, mu=1, corr=None):
"""
Apply speech distortion weighted MWF: h = Tpsd * e1 / (Tpsd + mu*Npsd)
:param mix: the signal complex FFT
:param target_psd_matrix (bins, sensors, sensors)
:param noise_psd_matrix
:param mu: the lagrange factor
:return
"""
bins, sensors, frames = mix.shape
ref_vector = np.zeros((sensors, 1), dtype=float)
if corr is None:
ref_ch = 0
else: # choose the channel with highest correlation with the others
corr=corr.tolist()
while len(corr) > sensors:
corr.remove(np.min(corr))
ref_ch=np.argmax(corr)
ref_vector[ref_ch,0]=1
mwf_vector = solve(target_psd_matrix + mu*noise_psd_matrix, target_psd_matrix[:,:,ref_ch])
return np.einsum('...a,...at->...t', mwf_vector.conj(), mix)
def test_against_numpy_einsum(self):
""" Test against numpy.einsum """
a = np.arange(60.).reshape(3,4,5)
b = np.arange(24.).reshape(4,3,2)
stream = [a, b]
from_numpy = np.einsum('ijk,jil->kl', a, b)
from_stream = last(ieinsum(stream, 'ijk,jil->kl'))
self.assertSequenceEqual(from_numpy.shape, from_stream.shape)
self.assertTrue(np.allclose(from_numpy, from_stream))