Python numpy module: bmat() code examples
The following code examples were extracted from open-source Python projects and illustrate how to use numpy.bmat().
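Before the project examples, here is a minimal, self-contained sketch of how numpy.bmat() assembles a block matrix, either from a nested list of blocks or from a string of variable names (the string form looks the names up in the caller's frame):

import numpy as np

A = np.array([[1, 2], [3, 4]])
B = np.zeros((2, 2))

# Nested-list form: each inner list is one block row.
M1 = np.bmat([[A, B], [B, A]])

# String form: "," separates blocks within a row, ";" separates block rows.
M2 = np.bmat("A, B; B, A")

print(np.allclose(M1, M2))  # True
print(M1.shape)             # (4, 4); note that bmat returns np.matrix, not ndarray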
def circumcenter(self, tri):
"""Compute circumcenter and circumradius of a triangle in 2D.
Uses an extension of the method described here:
http://www.ics.uci.edu/~eppstein/junkyard/circumcenter.html
"""
pts = np.asarray([self.coords[v] for v in tri])
pts2 = np.dot(pts, pts.T)
A = np.bmat([[2 * pts2, [[1],
[1],
[1]]],
[[[1, 1, 1, 0]]]])
b = np.hstack((np.sum(pts * pts, axis=1), [1]))
x = np.linalg.solve(A, b)
bary_coords = x[:-1]
center = np.dot(bary_coords, pts)
# radius = np.linalg.norm(pts[0] - center) # euclidean distance
radius = np.sum(np.square(pts[0] - center)) # squared distance
return (center, radius)
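A standalone sketch of the same block-matrix system, with a hypothetical triangle given directly as coordinates (no class state): for the right triangle (0,0), (1,0), (0,1) the circumcenter is (0.5, 0.5) and the squared circumradius is 0.5.

import numpy as np

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])  # triangle vertices
pts2 = np.dot(pts, pts.T)
A = np.bmat([[2 * pts2, [[1], [1], [1]]],
             [[[1, 1, 1, 0]]]])
b = np.hstack((np.sum(pts * pts, axis=1), [1]))
x = np.linalg.solve(A, b)
center = np.dot(x[:-1], pts)                  # barycentric coordinates times vertices
radius2 = np.sum(np.square(pts[0] - center))  # squared circumradius
print(center, radius2)                        # [0.5 0.5] 0.5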
def test_basic(self):
A = np.array([[1, 2], [3, 4]])
mA = matrix(A)
assert_(np.all(mA.A == A))
B = bmat("A,A;A,A")
C = bmat([[A, A], [A, A]])
D = np.array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
assert_(np.all(B.A == D))
assert_(np.all(C.A == D))
E = np.array([[5, 6], [7, 8]])
AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]])
assert_(np.all(bmat([A, E]) == AEresult))
vec = np.arange(5)
mvec = matrix(vec)
assert_(mvec.shape == (1, 5))
def test_bmat_nondefault_str(self):
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])
Aresult = np.array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
mixresult = np.array([[1, 2, 5, 6],
[3, 4, 7, 8],
[5, 6, 1, 2],
[7, 8, 3, 4]])
assert_(np.all(bmat("A,A;A,A") == Aresult))
assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult))
assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B})
assert_(
np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult))
b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A})
assert_(np.all(b2 == mixresult))
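A small sketch of the lookup rules this test exercises: with the string form, a lone ldict is ignored (the caller's frame is used unless gdict is also given), and when both dictionaries are supplied, ldict is searched before gdict.

import numpy as np

A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8]])

# No dictionaries: names are resolved from the caller's frame.
M0 = np.bmat("A, B; B, A")

# Both dictionaries: 'X' is found in ldict first, 'Y' falls back to gdict.
M1 = np.bmat("X, Y; Y, X", ldict={'X': A}, gdict={'X': B, 'Y': B})
print(np.allclose(M1[:2, :2], A))  # True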
def test_np():
npr.seed(0)
nx, nineq, neq = 4, 6, 7
Q = npr.randn(nx, nx)
G = npr.randn(nineq, nx)
A = npr.randn(neq, nx)
D = np.diag(npr.rand(nineq))
K_ = np.bmat((
(Q, np.zeros((nx, nineq)), G.T, A.T),
(np.zeros((nineq, nx)), D, np.eye(nineq), np.zeros((nineq, neq))),
(G, np.eye(nineq), np.zeros((nineq, nineq + neq))),
(A, np.zeros((neq, nineq + nineq + neq)))
))
K = block((
(Q, 0, G.T, A.T),
(0, D, 'I', 0),
(G, 'I', 0, 0),
(A, 0, 0, 0)
))
assert np.allclose(K_, K)
def I(n, transposed=False):
"""Get the index matrix with side of length ``n``.
Will only work if ``n`` is a power of 2.
Reference: http://caca.zoy.org/study/part2.html
:param int n: Power of 2 side length of matrix.
:param bool transposed:
:return: The index matrix.
"""
if n == 2:
if transposed:
return np.array([[0, 3], [2, 1]], 'int')
else:
return np.array([[0, 2], [3, 1]], 'int')
else:
smaller_I = I(n >> 1, transposed)
if transposed:
return np.bmat([[4 * smaller_I, 4 * smaller_I + 3],
[4 * smaller_I + 2, 4 * smaller_I + 1]])
else:
return np.bmat([[4 * smaller_I, 4 * smaller_I + 2],
[4 * smaller_I + 3, 4 * smaller_I + 1]])
def mat(self):
return np.bmat([self * col([1,0,0]), self * col([0,1,0]), self * col([0,0,1])])
def dexp(v):
r = v[0:3]
dexpr = quat.dexp(r)
MT = mt(v)
return np.bmat([[dexpr,MT],[np.zeros((3,3)),dexpr]])
def dlog(v):
r = v[0:3]
dlogr = quat.dlog(r)
MT = mt(v)
return np.bmat([[dlogr, -dlogr*MT*dlogr],[np.zeros((3,3)),dlogr]])
def getValuesFromPose(self, P):
'''return the virtual values of the pots corresponding to the pose P'''
vals = []
grads = []
for i, r, l, placement, attach_p in zip(range(3), self.rs, self.ls, self.placements, self.attach_ps):
#first pot axis
a = placement.rot * col([1, 0, 0])
#second pot axis
b = placement.rot * col([0, 1, 0])
#string axis
c = placement.rot * col([0, 0, 1])
#attach point on the joystick
p_joystick = P * attach_p
v = p_joystick - placement.trans
va = v - dot(v, a)*a
vb = v - dot(v, b)*b
#angles of the pots
alpha = math.atan2(dot(vb, a), dot(vb, c))
beta = math.atan2(dot(va, b), dot(va, c))
vals.append(alpha)
vals.append(beta)
#calculation of the derivatives
dv = np.bmat([-P.rot.mat() * quat.skew(attach_p), P.rot.mat()])
dva = (np.eye(3) - a*a.T) * dv
dvb = (np.eye(3) - b*b.T) * dv
dalpha = (1/dot(vb,vb)) * (dot(vb,c) * a.T - dot(vb,a) * c.T) * dvb
dbeta = (1/dot(va,va)) * (dot(va,c) * b.T - dot(va,b) * c.T) * dva
grads.append(dalpha)
grads.append(dbeta)
return (col(vals), np.bmat([[grads]]))
def getNumericalGradient(self, P, h = 1e-5):
'''just to check the calculations...'''
grad = []
for i in range(6):
dv = [0, 0, 0, 0, 0, 0]
dv[i] = h
gi = (1./h) * (self.getValuesFromPose(P * pose.exp(col(dv))) - self.getValuesFromPose(P))
grad.append(gi)
return np.bmat(grad)
def _batch_incremental_pca(x, G, S):
r = G.shape[1]
b = x.shape[0]
xh = G.T.dot(x)
H = x - G.dot(xh)
J, W = scipy.linalg.qr(H, overwrite_a=True, mode='full', check_finite=False)
Q = np.bmat( [[np.diag(S), xh], [np.zeros((b,r), dtype=np.float32), W]] )
G_new, St_new, Vtoss = scipy.linalg.svd(Q, full_matrices=False, check_finite=False)
St_new=St_new[:r]
G_new= np.asarray(np.bmat([G, J]).dot( G_new[:,:r] ))
return G_new, St_new
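A hedged usage sketch (the data, shapes, and rank below are assumptions, not from the original project): G holds the current left singular vectors as columns, S the current singular values, and the new batch is passed as columns of x.

import numpy as np
import scipy.linalg

rng = np.random.RandomState(0)
data = rng.randn(20, 30).astype(np.float32)  # 20 features, 30 column samples

# Rank-5 basis from an initial chunk of 20 samples.
G, S, _ = scipy.linalg.svd(data[:, :20], full_matrices=False)
G, S = G[:, :5], S[:5]

# Fold in the remaining 10 samples.
G, S = _batch_incremental_pca(data[:, 20:], G, S)
print(G.shape, S.shape)  # (20, 5) (5,)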
def pad(mat, padrow, padcol):
"""
Add additional rows/columns to a numpy.matrix `mat`. The new rows/columns
will be initialized with zeros.
"""
if padrow < 0:
padrow = 0
if padcol < 0:
padcol = 0
rows, cols = mat.shape
return numpy.bmat([[mat, numpy.matrix(numpy.zeros((rows, padcol)))],
[numpy.matrix(numpy.zeros((padrow, cols + padcol)))]])
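A quick sketch of what pad() produces for a 2x2 input with one extra row and one extra column of zeros:

import numpy as np

m = np.matrix([[1, 2], [3, 4]])
print(pad(m, padrow=1, padcol=1))
# [[1. 2. 0.]
#  [3. 4. 0.]
#  [0. 0. 0.]]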
def setUp(self):
super(ROIPoolingTest, self).setUp()
# Setup
self.im_shape = (10, 10)
self.config = EasyDict({
'pooling_mode': 'crop',
'pooled_width': 2,
'pooled_height': 2,
'padding': 'VALID',
})
# Construct the pretrained map with four matrix.
self.multiplier_a = 1
self.multiplier_b = 2
self.multiplier_c = 3
self.multiplier_d = 4
mat_a = np.ones((5, 5)) * self.multiplier_a
mat_b = np.ones((5, 5)) * self.multiplier_b
mat_c = np.ones((5, 5)) * self.multiplier_c
mat_d = np.ones((5, 5)) * self.multiplier_d
self.pretrained = np.bmat([[mat_a, mat_b], [mat_c, mat_d]])
# Expand the dimensions to be compatible with ROIPoolingLayer.
self.pretrained = np.expand_dims(self.pretrained, axis=0)
self.pretrained = np.expand_dims(self.pretrained, axis=3)
# pretrained:
# mat_a | mat_b
# -------------
# mat_c | mat_d
tf.reset_default_graph()
def update(self):
"""Set up the Gram matrix and compute its LU decomposition to make the GP
ready for inference (calls to ``gp.mu(t)``, ``gp.V(t)``, etc.).
Call this method after you have manipulated the GP by
- resetting it via ``gp.reset()``,
- adding observations with ``gp.add(t, f, df)``, or
- adjusting the sigmas via ``gp.update_sigmas()``,
and want to perform inference next."""
if self.ready:
return
# Set up the kernel matrices.
self.K = np.matrix(np.zeros([self.N, self.N]))
self.Kd = np.matrix(np.zeros([self.N, self.N]))
self.dKd = np.matrix(np.zeros([self.N, self.N]))
for i in range(self.N):
for j in range(self.N):
self.K[i, j] = self.k(self.ts[i], self.ts[j])
self.Kd[i, j] = self.kd(self.ts[i], self.ts[j])
self.dKd[i, j] = self.dkd(self.ts[i], self.ts[j])
# Put together the Gram matrix
S_f = np.matrix(np.diag(self.fvars))
S_df = np.matrix(np.diag(self.dfvars))
self.G = np.bmat([[self.K + S_f, self.Kd],
[self.Kd.T, self.dKd + S_df]])
# Compute the LU decomposition of G and store it
self.LU, self.LU_piv = linalg.lu_factor(self.G, check_finite=True)
# Set ready switch to True
self.ready = True
# Pre-compute the regression weights used in mu
self.w = self.solve_G(np.array(self.fs + self.dfs))
def compute_joint_svd(self):
# SVD on joint scores matrx
joint_scores_matrix = np.bmat([self.blocks[k].signal_basis for k in range(self.K)])
self.joint_scores, self.joint_sv, self.joint_loadings = svd_wrapper(joint_scores_matrix)
def controlled(m):
"""
Make a one-qubit-controlled version of a matrix.
:param m: (numpy.ndarray) A matrix.
:return: A controlled version of that matrix.
"""
rows, cols = m.shape
assert rows == cols
n = rows
I = np.eye(n)
Z = np.zeros((n, n))
controlled_m = np.bmat([[I, Z],
[Z, m]])
return controlled_m
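A short sketch: controlling the one-qubit Pauli-X matrix this way yields the familiar CNOT matrix.

import numpy as np

X = np.array([[0, 1],
              [1, 0]])
print(controlled(X))
# [[1. 0. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 0. 1.]
#  [0. 0. 1. 0.]]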
def __init__(self, nb_steps, duration, omega2, state_estimator, com_target,
stance, tube_radius=0.02, tube_margin=0.01):
start_com = state_estimator.com
start_comd = state_estimator.comd
tube = COMTube(
start_com, com_target.p, stance, tube_radius, tube_margin)
dt = duration / nb_steps
I = eye(3)
A = asarray(bmat([[I, dt * I], [zeros((3, 3)), I]]))
B = asarray(bmat([[.5 * dt ** 2 * I], [dt * I]]))
x_init = hstack([start_com, start_comd])
x_goal = hstack([com_target.p, com_target.pd])
C1, e1 = tube.primal_hrep
D2, e2 = tube.dual_hrep
C1 = hstack([C1, zeros(C1.shape)])
C2 = zeros((D2.shape[0], A.shape[1]))
D1 = zeros((C1.shape[0], B.shape[1]))
C = vstack([C1, C2])
D = vstack([D1, D2])
e = hstack([e1, e2])
lmpc = LinearPredictiveControl(
A, B, C, D, e, x_init, x_goal, nb_steps, wxt=1., wxc=1e-1, wu=1e-1)
lmpc.build()
try:
lmpc.solve()
U = lmpc.U
P = lmpc.X[:-1, 0:3]
V = lmpc.X[:-1, 3:6]
Z = P + (gravity - U) / omega2
preview = ZMPPreviewBuffer([stance])
preview.update(P, V, Z, [dt] * nb_steps, omega2)
except ValueError as e:
print "%s error:" % type(self).__name__, e
preview = None
self.lmpc = lmpc
self.preview = preview
self.tube = tube
def test_diag():
n0, n1, n2, n3 = 4, 5, 6, 7
A = npr.randn(n0, n1)
B = npr.randn(n2, n3)
K_ = np.bmat((
(A, np.zeros((n0, n3))),
(np.zeros((n2, n1)), B)
))
K = block_diag((A, B))
assert np.allclose(K_, K)
def test_linear_operator():
npr.seed(0)
nx, nineq, neq = 4, 6, 7
Q = npr.randn(nx, nx)
G = npr.randn(nineq, nx)
A = npr.randn(neq, nx)
D = np.diag(npr.rand(nineq))
K_ = np.bmat((
(Q, np.zeros((nx, nineq)), G.T, A.T),
(np.zeros((nineq, nx)), D, np.eye(nineq), np.zeros((nineq, neq))),
(G, np.eye(nineq), np.zeros((nineq, nineq + neq))),
(A, np.zeros((neq, nineq + nineq + neq)))
))
Q_lo = sla.aslinearoperator(Q)
G_lo = sla.aslinearoperator(G)
A_lo = sla.aslinearoperator(A)
D_lo = sla.aslinearoperator(D)
K = block((
(Q_lo, 0, G.T, A.T),
(0, D_lo, 'I', 0),
(G_lo, 'I', 0, 0),
(A_lo, 0, 0, 0)
), arrtype=sla.LinearOperator)
w1 = np.random.randn(K_.shape[1])
assert np.allclose(K_.dot(w1), K.dot(w1))
w2 = np.random.randn(K_.shape[0])
assert np.allclose(K_.T.dot(w2), K.H.dot(w2))
W = np.random.randn(*K_.shape)
assert np.allclose(K_.dot(W), K.dot(W))
def pad(mat, padrow, padcol):
"""
Add additional rows/columns to a np.matrix `mat`. The new rows/columns
will be initialized with zeros.
"""
if padrow < 0:
padrow = 0
if padcol < 0:
padcol = 0
rows, cols = mat.shape
return np.bmat([[mat, np.matrix(np.zeros((rows, padcol)))],
[np.matrix(np.zeros((padrow, cols + padcol)))]])
def doPhysics(rbd, force, torque, dtime):
globalcom = rbd.rotmat.dot(rbd.com)+rbd.pos
globalinertiatensor = rbd.rotmat.dot(rbd.inertiatensor).dot(rbd.rotmat.transpose())
globalcom_hat = rm.hat(globalcom)
# si = spatial inertia
Isi00 = rbd.mass * np.eye(3)
Isi01 = rbd.mass * globalcom_hat.transpose()
Isi10 = rbd.mass * globalcom_hat
Isi11 = rbd.mass * globalcom_hat.dot(globalcom_hat.transpose()) + globalinertiatensor
Isi = np.bmat([[Isi00, Isi01], [Isi10, Isi11]])
vw = np.bmat([rbd.linearv, rbd.angularw]).T
pl = Isi*vw
# print np.ravel(pl[0:3])
# print np.ravel(pl[3:6])
ft = np.bmat([force, torque]).T
angularw_hat = rm.hat(rbd.angularw)
linearv_hat = rm.hat(rbd.linearv)
vwhat_mat = np.bmat([[angularw_hat, np.zeros((3,3))], [linearv_hat, angularw_hat]])
dvw = Isi.I*(ft-vwhat_mat*Isi*vw)
# print dvw
rbd.dlinearv = np.ravel(dvw[0:3])
rbd.dangularw = np.ravel(dvw[3:6])
rbd.linearv = rbd.linearv + rbd.dlinearv * dtime
rbd.angularw = rbd.angularw + rbd.dangularw * dtime
return [np.ravel(pl[0:3]), np.ravel(pl[3:6])]
def _build_cov_mat_datapoints(self, axis):
'''
Builds the Cov_mat for the data points for the given axis. The cov_mat will take into account
whether two datasets are the same and correlate them, if self.corelate_datasets is True.
'''
dummy2 = []
__querry_dummy2 = []
for i,fit in enumerate(self.fit_list):
# Create a mask marking where a non-zero matrix is needed.
__querry = [False] * len(self.fit_list)
# Diagonal entries are never 0
__querry[i] = True
dummy2.append([0] * len(self.fit_list))
# Check if datasets are correlated. If so, change the off-diagonal elements.
if self.corelate_datasets:
if np.allclose(self.fit_list[i].dataset.get_data(axis), self.fit_list[i-1].dataset.get_data(axis),
atol=0, rtol=1e-4):
__querry[i-1] = True
__querry_dummy2.append(__querry)
for i, row in enumerate(dummy2):
for j, entry in enumerate(row):
if __querry_dummy2[i][j]:
dummy2[i][j] = self.fit_list[i].current_cov_mat
else:
dummy2[i][j] = np.zeros((self.fit_list[i].dataset.get_size(), self.fit_list[j].dataset.get_size()))
return np.bmat(dummy2)
def _get_weights(self, X, Y):
"""
This private method, given the original control points and the deformed
ones, returns the matrix with the weights and the polynomial terms, that
is :math:`W`, :math:`c^T` and :math:`Q^T`. The shape is
(n_control_points+1+3)-by-3.
:param numpy.ndarray X: it is an n_control_points-by-3 array with the
coordinates of the original interpolation control points before the
deformation.
:param numpy.ndarray Y: it is an n_control_points-by-3 array with the
coordinates of the interpolation control points after the
deformation.
:return: weights: the matrix with the weights and the polynomial terms.
:rtype: numpy.matrix
"""
n_points = X.shape[0]
dim = X.shape[1]
identity = np.ones((n_points, 1))
dist = self._distance_matrix(X, X)
H = np.bmat([
[dist, identity, X],
[identity.T, np.zeros((1, 1)), np.zeros((1, dim))],
[X.T, np.zeros((dim, 1)), np.zeros((dim, dim))]
])
rhs = np.bmat([[Y], [np.zeros((1, dim))], [np.zeros((dim, dim))]])
weights = np.linalg.solve(H, rhs)
return weights
def perform(self):
"""
This method performs the deformation of the mesh points. After the
execution it sets `self.modified_mesh_points`.
"""
n_points = self.original_mesh_points.shape[0]
dist = self._distance_matrix(
self.original_mesh_points, self.parameters.original_control_points
)
identity = np.ones((n_points, 1))
H = np.bmat([[dist, identity, self.original_mesh_points]])
self.modified_mesh_points = np.asarray(np.dot(H, self.weights))
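A standalone sketch of the same two-step pattern, assuming a Gaussian radial basis in place of the project's _distance_matrix (the actual basis function is not shown in this listing): assemble the bmat() system, solve for the weights, then evaluate new points against the control points.

import numpy as np

def gauss_rbf(X1, X2, r=0.5):
    # Pairwise Gaussian basis values between two point sets (assumed basis).
    d2 = np.sum((X1[:, None, :] - X2[None, :, :]) ** 2, axis=-1)
    return np.exp(-d2 / r ** 2)

rng = np.random.RandomState(0)
X = rng.rand(8, 3)              # original control points
Y = X + 0.05 * rng.randn(8, 3)  # deformed control points
n, dim = X.shape

ones = np.ones((n, 1))
H = np.bmat([[gauss_rbf(X, X), ones, X],
             [ones.T, np.zeros((1, 1)), np.zeros((1, dim))],
             [X.T, np.zeros((dim, 1)), np.zeros((dim, dim))]])
rhs = np.bmat([[Y], [np.zeros((1, dim))], [np.zeros((dim, dim))]])
weights = np.linalg.solve(H, rhs)

pts = rng.rand(5, 3)            # mesh points to deform
Hp = np.bmat([[gauss_rbf(pts, X), np.ones((5, 1)), pts]])
print(np.asarray(np.dot(Hp, weights)).shape)  # (5, 3)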
def crossEntrGrad(y, trueY, G):
k,n = G.shape
assert(len(y) == n)
y_ = np.copy(y)
eps = 1e-8
y_ = np.clip(y_, eps, 1.-eps)
# H = np.bmat([[np.diag(1./y_ + 1./(1.-y_)), G.T, np.zeros((n,1))],
# [G, np.zeros((k,k)), -np.ones((k,1))],
# [np.zeros((1,n)), -np.ones((1,k)), np.zeros((1,1))]])
# c = -np.linalg.solve(H, np.concatenate([trueY/y_-(1-trueY)/(1-y_), np.zeros(k+1)]))
# b = np.concatenate([trueY/y_-(1-trueY)/(1-y_), np.zeros(k+1)])
# cy, clam, ct = np.split(c, [n, n+k])
# cy[(y == 0) | (y == 1)] = 0
z = 1./y_ + 1./(1.-y_)
zinv = 1./z
G_zinv = G*zinv
G_zinv_GT = np.dot(G_zinv, G.T)
H = np.bmat([[G_zinv_GT, np.ones((k,1))], [np.ones((1,k)), np.zeros((1,1))]])
dl = trueY/y_-(1-trueY)/(1-y_)
b = np.concatenate([np.dot(G_zinv, dl), np.zeros(1)])
clamt = np.linalg.solve(H, b)
clam, ct = np.split(clamt, [k])
cy = zinv*dl - np.dot((G*zinv).T, clam)
cy[(y == 0) | (y == 1)] = 0
return cy, clam, ct
def mseGrad_full(y, trueY, G):
k,n = G.shape
assert(len(y) == n)
I = np.where((y > 1e-8) & (1.-y > 1e-8))
z = np.ones_like(y)
z[I] = (1./y[I] + 1./(1.-y[I]))
H = np.bmat([[np.diag(z), G.T, np.zeros((n,1))],
[G, np.zeros((k,k)), -np.ones((k,1))],
[np.zeros((1,n)), -np.ones((1,k)), np.zeros((1,1))]])
c = -np.linalg.solve(H, np.concatenate([(y - trueY), np.zeros(k+1)]))
return np.split(c, [n, n+k])
def mseGrad(y, trueY, G):
try:
k,n = G.shape
except:
import IPython; IPython.embed(); sys.exit(-1)
assert(len(y) == n)
# y_ = np.copy(y)
# eps = 1e-8
# y_ = np.clip(y_, eps, 1.-eps)
I = np.where((y > 1e-8) & (1.-y > 1e-8))
# print(len(I[0]))
z = np.ones_like(y)
z[I] = (1./y[I] + 1./(1.-y[I]))
z = 1./y + 1./(1.-y)
zinv = 1./z
G_zinv = G*zinv
G_zinv_GT = np.dot(G_zinv, G.T)
H = np.bmat([[G_zinv_GT, np.ones((k,1))], [np.ones((1,k)), np.zeros((1,1))]])
# Different scaling than the MSE plots.
dl = -(y-trueY)
b = np.concatenate([np.dot(G_zinv, dl), np.zeros(1)])
clamt = np.linalg.solve(H, b)
clam, ct = np.split(clamt, [k])
cy = zinv*dl - np.dot((G*zinv).T, clam)
cy[(y == 0) | (y == 1)] = 0
return cy, clam, ct
def generate_data_ajive_fig2(seed=None):
"""
Samples the data from AJIVE figure 2. Note here we use rows as observations
i.e. data matrices are n x d where n = # observations.
X_obs, X_joint, X_indiv, X_noise, Y_obs, Y_joint, Y_indiv, Y_noise =
generate_data_ajive_fig2()
"""
# TODO: return ndarray instead of matrix
if seed:
np.random.seed(seed)
# Sample X data
X_joint = np.bmat([[np.ones((50, 50))],
[-1*np.ones((50, 50))]])
X_joint = 5000 * np.bmat([X_joint, np.zeros((100, 50))])
X_indiv = 5000 * np.bmat([[-1 * np.ones((25, 100))],
[np.ones((25, 100))],
[-1 * np.ones((25, 100))],
[np.ones((25, 100))]])
X_noise = 5000 * np.random.normal(loc=0, scale=1, size=(100, 100))
X_obs = X_joint + X_indiv + X_noise
# Sample Y data
Y_joint = np.bmat([[-1 * np.ones((50, 2000))],
[np.ones((50, 2000))]])
Y_joint = np.bmat([np.zeros((100, 8000)), Y_joint])
Y_indiv_t = np.bmat([[np.ones((20, 5000))],
[-1 * np.ones((20, 5000))],
[np.zeros((20, 5000))],
[np.ones((20, 5000))],
[-1 * np.ones((20, 5000))]])
Y_indiv_b = np.bmat([[np.ones((25, 5000))],
[-1 * np.ones((50, 5000))],
[np.ones((25, 5000))]])
Y_indiv = np.bmat([Y_indiv_t, Y_indiv_b])
Y_noise = np.random.normal(loc=0, scale=1, size=(100, 10000))
Y_obs = Y_joint + Y_indiv + Y_noise
# TODO: make this into a list of dicts i.e. hierarchical
return X_obs, X_joint, X_indiv, X_noise, Y_obs, Y_joint, Y_indiv, Y_noise
def train(self):
if (self.status != 'init'):
print("Please load train data and init W first.")
return self.W
self.status = 'train'
original_X = self.train_X[:, 1:]
K = utility.Kernel.kernel_matrix(self, original_X)
# P = Q, q = p, G = -A, h = -c
P = cvxopt.matrix(np.bmat([[K, -K], [-K, K]]))
q = cvxopt.matrix(np.bmat([self.epsilon - self.train_Y, self.epsilon + self.train_Y]).reshape((-1, 1)))
G = cvxopt.matrix(np.bmat([[-np.eye(2 * self.data_num)], [np.eye(2 * self.data_num)]]))
h = cvxopt.matrix(np.bmat([[np.zeros((2 * self.data_num, 1))], [self.C * np.ones((2 * self.data_num, 1))]]))
# A = cvxopt.matrix(np.append(np.ones(self.data_num), -1 * np.ones(self.data_num)), (1, 2*self.data_num))
# b = cvxopt.matrix(0.0)
cvxopt.solvers.options['show_progress'] = False
solution = cvxopt.solvers.qp(P, q, G, h)
# Lagrange multipliers
alpha = np.array(solution['x']).reshape((2, -1))
self.alpha_upper = alpha[0]
self.alpha_lower = alpha[1]
self.beta = self.alpha_upper - self.alpha_lower
sv = abs(self.beta) > 1e-5
self.sv_index = np.arange(len(self.beta))[sv]
self.sv_beta = self.beta[sv]
self.sv_X = original_X[sv]
self.sv_Y = self.train_Y[sv]
free_sv_upper = np.logical_and(self.alpha_upper > 1e-5, self.alpha_upper < self.C)
self.free_sv_index_upper = np.arange(len(self.alpha_upper))[free_sv_upper]
self.free_sv_alpha_upper = self.alpha_upper[free_sv_upper]
self.free_sv_X_upper = original_X[free_sv_upper]
self.free_sv_Y_upper = self.train_Y[free_sv_upper]
free_sv_lower = np.logical_and(self.alpha_lower > 1e-5, self.alpha_lower < self.C)
self.free_sv_index_lower = np.arange(len(self.alpha_lower))[free_sv_lower]
self.free_sv_alpha_lower = self.alpha_lower[free_sv_lower]
self.free_sv_X_lower = original_X[free_sv_lower]
self.free_sv_Y_lower = self.train_Y[free_sv_lower]
short_b_upper = self.free_sv_Y_upper[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_upper[0], self.sv_X)) - self.epsilon
short_b_lower = self.free_sv_Y_lower[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_lower[0], self.sv_X)) + self.epsilon
self.sv_avg_b = (short_b_upper + short_b_lower) / 2
return self.W