Python numpy module: matrix() example source code
We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.matrix().
def transformation_from_points(points1, points2):
points1 = points1.astype(np.float64)
points2 = points2.astype(np.float64)
c1 = np.mean(points1, axis=0)
c2 = np.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = np.std(points1)
s2 = np.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = np.linalg.svd(points1.T * points2)
R = (U * Vt).T
return np.vstack([np.hstack(((s2 / s1) * R,
c2.T - (s2 / s1) * R * c1.T)),
np.matrix([0., 0., 1.])])
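A quick sanity check of transformation_from_points, as a hedged usage sketch: the function returns a 3x3 affine matrix mapping points1 onto points2, so applying it in homogeneous coordinates to a scaled-and-translated copy of a point set should recover that copy exactly. The point values below are made up for illustration.

import numpy as np

# Two corresponding 2-D landmark sets as np.matrix; pts_b is pts_a scaled by 2
# and translated by (3, 5), so the recovered transform should be exactly that.
pts_a = np.matrix([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
pts_b = pts_a * 2.0 + np.matrix([3.0, 5.0])

M = transformation_from_points(pts_a, pts_b)          # 3x3 affine matrix
ones = np.ones((pts_a.shape[0], 1))
mapped = (M * np.hstack([pts_a, ones]).T).T[:, :2]    # apply in homogeneous coords
print(np.allclose(mapped, pts_b))                     # expected: True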
def KeyGen(n, m, k, d, q):
'''
input:
q : polynomial size prime number
n, m, k : dimension specifiers
d : SIS parameter, hardest instances are where d ~ q^(n/m)
output:
Signing Key S : Matrix of dimension mxk with coefficients in [-d, d]
Verification Key A : Matrix of dimension nxm with coefficients from [-(q-1)/2, (q-1)/2]
T : the matrix AS; it is used in the verification of the signature
'''
S = crypt_secure_matrix(d, m, k)
A = crypt_secure_matrix((q-1)/2, n, m)
T = np.matmul(A, S)
return S, A, T
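The helper crypt_secure_matrix is not included in this excerpt. A minimal sketch consistent with the docstring (entries drawn uniformly from [-bound, bound] using a cryptographically secure source; the name and signature come from the call sites above, the body is an assumption):

import numpy as np
import secrets

def crypt_secure_matrix(bound, rows, cols):
    # Hypothetical reconstruction: each entry uniform in [-bound, bound],
    # drawn from the OS CSPRNG via the secrets module.
    b = int(bound)
    return np.matrix([[secrets.randbelow(2 * b + 1) - b for _ in range(cols)]
                      for _ in range(rows)])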
def test_calculate_slopes_no_mask():
mat = np.matrix([[1, 5, 7, 0, 12, 14],
[12, 5, 7, 18, 0, 0],
[6, 2, 9, 17, 0, 5]])
tp = np.array([0, 5, 7, 11, 15, 19])
mask = np.array(['1', '1', '1', '1', '1', '1'])
res = calculate_slopes(mat, tp, mask)
true_res = np.matrix([[0.8, 1, -1.75, 3, 0.5],
[-1.4, 1, 2.75, -4.5, 0],
[-0.8, 3.5, 2, -4.25, 1.25]])
assert np.abs(res - true_res).sum() <= 10e-10
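calculate_slopes itself is not shown in this excerpt. The expected values above are simply the finite differences of each row divided by the time-point spacing, so a minimal sketch of the function under that assumption (mask handling omitted, since this test passes an all-'1' mask) could be:

import numpy as np

def calculate_slopes(mat, timepoints, mask):
    # Hypothetical reconstruction: slope between consecutive time points for
    # every row; the mask argument is ignored in this sketch.
    mat = np.asarray(mat, dtype=float)
    dt = np.diff(np.asarray(timepoints, dtype=float))
    return np.matrix(np.diff(mat, axis=1) / dt)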
#def generate_STS_distance_matrix(slope_matrix, nthreads)
#def test_generate_STS_distance_matrix():
# pass
#def sts_matrix_generator(ind, slope_matrix)
#def test_sts_matrix_generator():
# pass
#def cluster_dbscan(dist_matrix, eps=1, distance_measure="sts")
#def test_cluster_dbscan():
# pass
#def zscore(x)
def timeseriesdata_constructor_existing_file():
"""Tests the TimeSeriesData class constructor when the file exists. Tests
that the data are as expected, and that the filled data flag is set."""
tsd = TimeSeriesData("./tests/data/small.h5")
assert tsd.filled_data
#Validate that the time-series matrix data are correct
dat = np.matrix([[0, 2, 0, 0],
[2, 1, 3, 4],
[1, 1, 1, 1],
[0, 0, 1, 0]])
assert (dat == tsd.get_sparse_matrix().todense()).all()
assert (tsd.get_time_points() == [0, 4, 10, 12]).all()
sequence_ids = np.array([b'9247ec5fd33e99387d41e8fc0d7ee278',
b'53f74905f7826fee79dd09b0c12caddf',
b'8829fefe91ead511a30df118060b1030',
b'7f97b4991342bf629966aeac0708c94f'])
assert (sequence_ids == tsd.get_array_by_chunks("genes/sequenceids")).all()
sample_names = np.array([b'72c', b'29', b'qpa', b'15'])
assert (sample_names == tsd.get_array_by_chunks("samples/names")).all()
# __del__(self)
#Test if HDF5 file closed
def gradientDescent(X, y, theta, alpha, iters):
temp = np.matrix(np.zeros(theta.shape))
params = int(theta.ravel().shape[1]) #flattens
cost = np.zeros(iters)
for i in range(iters):
err = (X * theta.T) - y
for j in range(params):
term = np.multiply(err, X[:,j])
temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
theta = temp
cost[i] = computeCost(X, y, theta)
return theta, cost
def computeCost(X, y, theta):
inner = np.power(((X * theta.T) - y), 2)
return np.sum(inner) / (2 * len(X))
#def gradientDescent(X, y, theta, alpha, iters):
# temp = np.matrix(np.zeros(theta.shape))
# params = int(theta.ravel().shape[1]) #flattens
# cost = np.zeros(iters)
#
# for i in range(iters):
# err = (X * theta.T) - y
#
# for j in range(params):
# term = np.multiply(err, X[:,j])
# temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
#
# theta = temp
# cost[i] = computeCost(X, y, theta)
#
# return theta, cost
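A hedged usage sketch for gradientDescent/computeCost above: fitting y = 1 + 2x on a tiny synthetic dataset (the data, learning rate, and iteration count are illustrative choices, not from the original project).

import numpy as np

x = np.arange(10, dtype=float)
X = np.matrix(np.column_stack([np.ones_like(x), x]))   # bias column + feature
y = np.matrix(1.0 + 2.0 * x).T
theta = np.matrix(np.zeros((1, 2)))

theta, cost = gradientDescent(X, y, theta, alpha=0.02, iters=2000)
print(theta)   # should approach [[1. 2.]]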
def adapt_z_state(self,main_rdd, cinfo,beta):
ainv = cinfo
def Updatez(tpl):
tt=[]
for ((tx,lam,state,z),index) in tpl:
random.setstate(state)
p = random.random()
state = random.getstate()
if p<self.ptr:
znew=float(-np.matrix(tx)*ainv*np.matrix(tx).T)
else:
znew=0.0
z=(1-beta)*z+beta*znew
tt.append(((tx,lam,state,z),index))
return tt
main_rdd = main_rdd.mapValues(Updatez).cache()
return main_rdd
def update_lam_z(self, main_rdd, cinfo,iStar,Gamma):
ainv = cinfo
def update(t, ainv):
p=[]
[tpl, gen] = t
for ((tx,lam,z),index) in tpl:
znew=0.0
if gen.random()<self.ptr:
znew=-(np.matrix(tx)*ainv*np.matrix(tx).T)[0,0]
else:
znew= 0.0
zupdtd = (1.0-self.beta)*z + self.beta*znew
if index != iStar:
lamupdt = (1-Gamma)*lam
else:
lamupdt = (1-Gamma)*lam +lam
p.append(((tx,lamupdt,zupdtd),index))
out = [p,gen]
return out
main_rdd = main_rdd.mapValues(lambda t:update(t,ainv)).persist()
return main_rdd
def update_lam_z(self, main_rdd, cinfo,iStar,Gamma):
ainv,ainv2 = cinfo
def update(t, ainv2):
p=[]
[tpl, gen] = t
for ((tx,lam,z),index) in tpl:
znew=0.0
if gen.random()<self.ptr:
znew=-(np.matrix(tx)*ainv2*np.matrix(tx).T)[0,0]
else:
znew= 0.0
zupdtd = (1.0-self.beta)*z + self.beta*znew
if index != iStar:
lamupdt = (1-Gamma)*lam
else:
lamupdt = (1-Gamma)*lam +lam
p.append(((tx,lamupdt,zupdtd),index))
out = [p,gen]
return out
main_rdd = main_rdd.mapValues(lambda t:update(t,ainv2)).persist()
return main_rdd
def update_comm_info(self,cinfo,iStar,mingrad,txmin,Gamma):
def UpdateAinv2(binv2,u,v,UVT,Denom,alpha,Xi):
return binv2-alpha*UVT/Denom-alpha*UVT.T/Denom+alpha**2*UVT*Xi*u.T/Denom**2
def UpdateAinv3(binv3,U1,U2,U3,D,Xi,Gamma):
return binv3-Gamma*U2*U2.T/D-Gamma*U1*U3.T/D-Gamma*U3*U1.T/D+Gamma**2*U1*U2.T*Xi*U2.T/D**2+Gamma**2*U2*U2.T*Xi*U1.T/D**2+Gamma**2*U1*U3.T*Xi*U1.T/D**2-Gamma**3*U1*U2.T*Xi*U2.T*Xi*U1.T/D**3
ainv,ainv2,ainv3=cinfo
binv=1.0/(1.0-Gamma)*ainv
binv2=1.0/(1.0-Gamma)**2*ainv2
binv3=1.0/(1.0-Gamma)**3*ainv3
u1=binv*np.matrix(txmin).T
u2=binv2*np.matrix(txmin).T
u3=binv3*np.matrix(txmin).T
UVT=u1*u2.T
Denom=1+Gamma*u1.T*np.matrix(txmin).T
ainv=rankOneInvUpdate(binv,Gamma*np.matrix(txmin).T,np.matrix(txmin).T)
ainv2=UpdateAinv2(binv2,u1,u2,UVT,Denom,Gamma,np.matrix(txmin).T)
ainv3=UpdateAinv3(binv3,u1,u2,u3,Denom,np.matrix(txmin).T,Gamma)
return ainv,ainv2,ainv3
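rankOneInvUpdate is referenced above but not defined in this excerpt. Given how it is called (rankOneInvUpdate(binv, Gamma*x, x), i.e. updating an inverse after a rank-one change B + u v^T), a sketch based on the Sherman-Morrison formula would be (this is an assumption about the helper, not the original implementation):

import numpy as np

def rankOneInvUpdate(binv, u, v):
    # Sherman-Morrison: (B + u v^T)^{-1}
    #   = B^{-1} - (B^{-1} u)(v^T B^{-1}) / (1 + v^T B^{-1} u)
    num = (binv * u) * (v.T * binv)
    denom = 1.0 + (v.T * (binv * u))[0, 0]
    return binv - num / denom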
def compute_mingrad_l1(self,main_rdd,cinfo,K):
R = cinfo
def maxmin_l1(tpl1,tpl2):
(z1,x1,lam1,i1)=tpl1
(z2,x2,lam2,i2)=tpl2
zt = max(abs(z1),abs(z2))
if zt>abs(z2):
out = (z1,x1,lam1,i1)
else:
out = (z2,x2,lam2,i2)
return out
def CompMingrad(tpl):
p=[]
for ((tx,lam),index) in tpl:
p.append(((np.matrix(tx)*R)[0,0],tx,lam,index))
return p
(mingrad,xmin,lambdaMin,iStar)=main_rdd.flatMapValues(CompMingrad).map(lambda (key, value):value).reduce(maxmin_l1)
s_star = -np.sign(mingrad)
return (mingrad,xmin,lambdaMin,iStar,s_star)
def update_lam_z(self, main_rdd, cinfo,iStar,Gamma):
R = cinfo
def update(t, R):
p=[]
[tpl, gen] = t
for ((tx,lam,z),index) in tpl:
znew=0.0
if gen.random()<self.ptr:
znew=(2*np.matrix(tx)*R)[0,0]
else:
znew= 0.0
zupdtd = (1.0-self.beta)*z + self.beta*znew
if index != iStar:
lamupdt = (1-Gamma)*lam
else:
lamupdt = (1-Gamma)*lam +lam
p.append(((tx,lamupdt,zupdtd),index))
out = [p,gen]
return out
main_rdd = main_rdd.mapValues(lambda t:update(t,R)).persist()
return main_rdd
def gen_comm_info(self,main_rdd):
def cominfo(tpl):
p=[]
for ((tx,lam),index) in tpl:
p.append(np.matrix(tx).T*lam)
return p
def findDim(tpl):
for ((tx,lam),index) in tpl:
d = len(tx)
return d
d = main_rdd.mapValues(findDim).values().reduce(lambda x,y:x)
c=main_rdd.flatMapValues(cominfo).map(lambda (key,value):value).reduce(lambda x,y:x+y)
V=matrix(0.0,(d,1))
for j in range(d):
V[j]=math.exp(-self.C*self.r[j]*c[j,0])
return d,V
def computegap(self,cinfo,main_rdd,iStar,mingrad,lambdaMin):
d,V = cinfo
z=matrix(0.0,(d,1))
vSum=float(np.sum(V))
for j in range(d):
z[j]=-V[j]*self.C*self.r[j]/vSum
def CompGap(tpl,lambdaMin,mingrad,iStar):
p=[]
for ((tx,lam),index) in tpl:
if index!=iStar:
p.append((np.matrix(tx)*np.matrix(z))[0,0]*lam)
else:
p.append((lambdaMin-1)*mingrad)
return p
gap=main_rdd.flatMapValues(lambda tpl:CompGap(tpl,lambdaMin,mingrad,iStar)).map(lambda (key, value):value).reduce(lambda x,y:x+y)
return gap
def get_landmarks(self,im,fname,n=2):
'''
Detect the facial landmarks of the faces found in an image.
im:
the image as a numpy array
fname:
the image filename, used in error messages
Returns:
a list with one matrix of (x, y) landmark coordinates per detected face
'''
rects = self.detector(im, 1)
if len(rects) >=5:
raise TooManyFaces('Too many faces in '+fname)
if len(rects) <2:
raise NoFace('Not enough faces in ' + fname)
return [np.matrix([[p.x, p.y] for p in self.predictor(im, rect).parts()]) for rect in rects]
def get_landmarks(self,im,fname,n=1):
'''
Detect the facial landmarks of a single face in an image.
im:
the image as a numpy array
fname:
the image filename, used in error messages
Returns:
a matrix of (x, y) landmark coordinates for the detected face
'''
rects = self.detector(im, 1)
if len(rects) > n:
raise TooManyFaces('Too many faces in ' + fname)
if len(rects) < 1:
raise NoFace('No face in ' + fname)
return np.matrix([[p.x, p.y] for p in self.predictor(im, rects[0]).parts()])
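For context, self.detector and self.predictor in these methods are the usual dlib face detector and landmark predictor. A minimal setup sketch (the model filename is an assumption, the standard dlib 68-point landmark model):

import dlib

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')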
def __init__(self, originFilename):
self._originFilename = originFilename
if originFilename is None:
self._name = 'None'
else:
self._name = os.path.basename(originFilename)
if '.' in self._name:
self._name = os.path.splitext(self._name)[0]
self._meshList = []
self._position = numpy.array([0.0, 0.0])
self._matrix = numpy.matrix([[1,0,0],[0,1,0],[0,0,1]], numpy.float64)
self._transformedMin = None
self._transformedMax = None
self._transformedSize = None
self._boundaryCircleSize = None
self._drawOffset = None
self._boundaryHull = None
self._printAreaExtend = numpy.array([[-1,-1],[ 1,-1],[ 1, 1],[-1, 1]], numpy.float32)
self._headAreaExtend = numpy.array([[-1,-1],[ 1,-1],[ 1, 1],[-1, 1]], numpy.float32)
self._headMinSize = numpy.array([1, 1], numpy.float32)
self._printAreaHull = None
self._headAreaHull = None
self._headAreaMinHull = None
self._loadAnim = None
def test_iter_allocate_output_subtype():
# Make sure that the subtype with priority wins
# matrix vs ndarray
a = np.matrix([[1, 2], [3, 4]])
b = np.arange(4).reshape(2, 2).T
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
assert_equal(type(a), type(i.operands[2]))
assert_(type(b) != type(i.operands[2]))
assert_equal(i.operands[2].shape, (2, 2))
# matrix always wants things to be 2D
b = np.arange(4).reshape(1, 2, 2)
assert_raises(RuntimeError, nditer, [a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate']])
# but if subtypes are disabled, the result can still work
i = nditer([a, b, None], [],
[['readonly'], ['readonly'], ['writeonly', 'allocate', 'no_subtype']])
assert_equal(type(b), type(i.operands[2]))
assert_(type(a) != type(i.operands[2]))
assert_equal(i.operands[2].shape, (1, 2, 2))
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_basic(self):
y1 = np.array([1, 2, 3])
assert_(average(y1, axis=0) == 2.)
y2 = np.array([1., 2., 3.])
assert_(average(y2, axis=0) == 2.)
y3 = [0., 0., 0.]
assert_(average(y3, axis=0) == 0.)
y4 = np.ones((4, 4))
y4[0, 1] = 0
y4[1, 0] = 2
assert_almost_equal(y4.mean(0), average(y4, 0))
assert_almost_equal(y4.mean(1), average(y4, 1))
y5 = rand(5, 5)
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
y6 = np.matrix(rand(5, 5))
assert_array_equal(y6.mean(0), average(y6, 0))
def test_return_type(self):
a = np.ones([2, 2])
m = np.asmatrix(a)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(m, m)), np.matrix)
assert_equal(type(kron(a, m)), np.matrix)
assert_equal(type(kron(m, a)), np.matrix)
class myarray(np.ndarray):
__array_priority__ = 0.0
ma = myarray(a.shape, a.dtype, a.data)
assert_equal(type(kron(a, a)), np.ndarray)
assert_equal(type(kron(ma, ma)), myarray)
assert_equal(type(kron(a, ma)), np.ndarray)
assert_equal(type(kron(ma, a)), myarray)
def test_allany_onmatrices(self):
x = np.array([[0.13, 0.26, 0.90],
[0.28, 0.33, 0.63],
[0.31, 0.87, 0.70]])
X = np.matrix(x)
m = np.array([[True, False, False],
[False, False, False],
[True, True, False]], dtype=np.bool_)
mX = masked_array(X, mask=m)
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
self.assertFalse(mXbig.all())
self.assertTrue(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
self.assertFalse(mXsmall.all())
self.assertTrue(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
def test_compressed(self):
# Tests compressed
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
a[0] = masked
b = a.compressed()
assert_equal(b, [2, 3, 4])
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
self.assertTrue(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
def test_ravel(self):
# Tests ravel
a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel._mask.shape, aravel.shape)
a = array([0, 0], mask=[1, 1])
aravel = a.ravel()
assert_equal(aravel._mask.shape, a.shape)
a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
aravel = a.ravel()
assert_equal(aravel.shape, (1, 5))
assert_equal(aravel._mask.shape, a.shape)
# Checks that small_mask is preserved
a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)
assert_equal(a.ravel()._mask, [0, 0, 0, 0])
# Test that the fill_value is preserved
a.fill_value = -99
a.shape = (2, 2)
ar = a.ravel()
assert_equal(ar._mask, [0, 0, 0, 0])
assert_equal(ar._data, [1, 2, 3, 4])
assert_equal(ar.fill_value, -99)
# Test index ordering
assert_equal(a.ravel(order='C'), [1, 2, 3, 4])
assert_equal(a.ravel(order='F'), [1, 3, 2, 4])
def test_view(self):
# Test view w/ flexible dtype
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
# Transform globally to simple dtype
test = a.view(float)
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
# Transform globally to dty
test = a.view((float, 2))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
self.assertTrue(isinstance(test, np.matrix))
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
def do(self, a, b):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
else:
assert_equal(rank, n)
assert_almost_equal(sv, sv.__array_wrap__(s))
if rank == n and m > n:
expect_resids = (
np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
expect_resids = np.asarray(expect_resids)
if len(np.asarray(b).shape) == 1:
expect_resids.shape = (1,)
assert_equal(residuals.shape, expect_resids.shape)
else:
expect_resids = np.array([]).view(type(x))
assert_almost_equal(residuals, expect_resids)
assert_(np.issubdtype(residuals.dtype, np.floating))
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
# Using `axis=<integer>` or passing in a 1-D array implies vector
# norms are being computed, so also using `ord='fro'`
# or `ord='nuc'` raises a ValueError.
assert_raises(ValueError, norm, A, 'fro', 0)
assert_raises(ValueError, norm, A, 'nuc', 0)
assert_raises(ValueError, norm, [3, 4], 'fro', None)
assert_raises(ValueError, norm, [3, 4], 'nuc', None)
# Similarly, norm should raise an exception when ord is any finite
# number other than 1, 2, -1 or -2 when computing matrix norms.
for order in [0, 3]:
assert_raises(ValueError, norm, A, order, None)
assert_raises(ValueError, norm, A, order, (0, 1))
assert_raises(ValueError, norm, B, order, (1, 2))
# Invalid axis
assert_raises(ValueError, norm, B, None, 3)
assert_raises(ValueError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
def test_matrix_rank(self):
# Full rank matrix
yield assert_equal, 4, matrix_rank(np.eye(4))
# rank deficient matrix
I = np.eye(4)
I[-1, -1] = 0.
yield assert_equal, matrix_rank(I), 3
# All zeros - zero rank
yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
# 1 dimension - rank 1 unless all 0
yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
yield assert_equal, matrix_rank(np.zeros((4,))), 0
# accepts array-like
yield assert_equal, matrix_rank([1]), 1
# greater than 2 dimensions raises error
yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
# works on scalar
yield assert_equal, matrix_rank(1), 1
def test_mode_all_but_economic(self):
a = array([[1, 2], [3, 4]])
b = array([[1, 2], [3, 4], [5, 6]])
for dt in "fd":
m1 = a.astype(dt)
m2 = b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
for dt in "fd":
m1 = 1 + 1j * a.astype(dt)
m2 = 1 + 1j * b.astype(dt)
self.check_qr(m1)
self.check_qr(m2)
self.check_qr(m2.T)
self.check_qr(matrix(m1))
def test_basic(self):
A = np.array([[1, 2], [3, 4]])
mA = matrix(A)
assert_(np.all(mA.A == A))
B = bmat("A,A;A,A")
C = bmat([[A, A], [A, A]])
D = np.array([[1, 2, 1, 2],
[3, 4, 3, 4],
[1, 2, 1, 2],
[3, 4, 3, 4]])
assert_(np.all(B.A == D))
assert_(np.all(C.A == D))
E = np.array([[5, 6], [7, 8]])
AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]])
assert_(np.all(bmat([A, E]) == AEresult))
vec = np.arange(5)
mvec = matrix(vec)
assert_(mvec.shape == (1, 5))
def test_sum(self):
"""Test whether matrix.sum(axis=1) preserves orientation.
Fails in NumPy <= 0.9.6.2127.
"""
M = matrix([[1, 2, 0, 0],
[3, 4, 0, 0],
[1, 2, 1, 2],
[3, 4, 3, 4]])
sum0 = matrix([8, 12, 4, 6])
sum1 = matrix([3, 7, 6, 14]).T
sumall = 30
assert_array_equal(sum0, M.sum(axis=0))
assert_array_equal(sum1, M.sum(axis=1))
assert_equal(sumall, M.sum())
assert_array_equal(sum0, np.sum(M, axis=0))
assert_array_equal(sum1, np.sum(M, axis=1))
assert_equal(sumall, np.sum(M))
def test_basic(self):
import numpy.linalg as linalg
A = np.array([[1., 2.],
[3., 4.]])
mA = matrix(A)
assert_(np.allclose(linalg.inv(A), mA.I))
assert_(np.all(np.array(np.transpose(A) == mA.T)))
assert_(np.all(np.array(np.transpose(A) == mA.H)))
assert_(np.all(A == mA.A))
B = A + 2j*A
mB = matrix(B)
assert_(np.allclose(linalg.inv(B), mB.I))
assert_(np.all(np.array(np.transpose(B) == mB.T)))
assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))
def skew(v):
return np.matrix([[0,-v[2,0],v[1,0]], [v[2,0],0,-v[0,0]], [-v[1,0],v[0,0],0]])
def vector6(a, b, c, x, y, z):
return np.matrix([[a],[b],[c],[x],[y],[z]])
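A quick check of the two helpers above: skew(v) builds the cross-product matrix, so skew(v) * w should equal the cross product v x w, and vector6 stacks six scalars into a 6x1 column (e.g. a twist with angular and linear parts). The values are illustrative.

import numpy as np

v = np.matrix([[1.0], [2.0], [3.0]])
w = np.matrix([[4.0], [5.0], [6.0]])
print(np.allclose(skew(v) * w, np.matrix(np.cross(v.T, w.T)).T))   # True
twist = vector6(0, 0, 1, 0.5, 0.0, 0.0)
print(twist.shape)                                                  # (6, 1)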
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
"""
Build a covariance matrix for a 2D multivariate Gaussian
--- INPUT ---
sigmax          Standard deviation of the x-component of the multivariate Gaussian
sigmay          Standard deviation of the y-component of the multivariate Gaussian
angle Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
covmatrix = tu.build_2D_cov_matrix(3,1,35)
"""
if verbose: print ' - Build 2D covariance matrix with variances (x,y)=('+str(sigmax)+','+str(sigmay)+\
') and then rotated '+str(angle)+' degrees'
cov_orig = np.zeros([2,2])
cov_orig[0,0] = sigmay**2.0
cov_orig[1,1] = sigmax**2.0
angle_rad = (180.0-angle) * np.pi/180.0 # The (180-angle) makes sure the same convention as DS9 is used
c, s = np.cos(angle_rad), np.sin(angle_rad)
rotmatrix = np.matrix([[c, -s], [s, c]])
cov_rot = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix)) # performing rot * cov * rot^T
return cov_rot
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def normalize_2D_cov_matrix(covmatrix,verbose=True):
"""
Calculate the normalization factor for a multivariate gaussian from its covariance matrix
However, note that the gaussian returned by tu.gen_2Dgauss() is normalized for scale=1
--- INPUT ---
covmatrix       covariance matrix to normalize
verbose Toggle verbosity
"""
detcov = np.linalg.det(covmatrix)
normfac = 1.0 / (2.0 * np.pi * np.sqrt(detcov) )
return normfac
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def analytic_convolution_gaussian(mu1,covar1,mu2,covar2):
"""
The analytic convolution of two Gaussians is simply the sum of the two mean vectors
and the two covariance matrices
--- INPUT ---
mu1             The mean of the first gaussian
covar1          The covariance matrix of the first gaussian
mu2             The mean of the second gaussian
covar2          The covariance matrix of the second gaussian
"""
muconv = mu1+mu2
covarconv = covar1+covar2
return muconv, covarconv
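A hedged usage sketch tying the three helpers above together (the means, widths, and angle are illustrative values): build two covariance matrices, convolve the Gaussians analytically, and compute the normalization factor of the result.

import numpy as np

mu1, mu2 = np.array([0.0, 0.0]), np.array([1.0, -0.5])
cov1 = build_2D_cov_matrix(3.0, 1.0, 35.0, verbose=False)
cov2 = build_2D_cov_matrix(2.0, 2.0, 0.0, verbose=False)
mu_conv, cov_conv = analytic_convolution_gaussian(mu1, cov1, mu2, cov2)
print(normalize_2D_cov_matrix(cov_conv, verbose=False))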
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_facial_landmarks_from_mask(img, pts):
rect = cv2.boundingRect(pts)
rect = dlib.rectangle(rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3])
return np.matrix([list(pt) for pt in pts]), rect
def get_facial_landmarks(img):
# No need to upsample
rects = face_detector(img, 0)
if len(rects) == 0:
print "No faces"
return None
rect = rects[0]
shape = shape_predictor(img, rect)
return np.matrix([[pt.x, pt.y] for pt in shape.parts()]), rect
def get_tm_opp(pts1, pts2):
# Transformation matrix - ( Translation + Scaling + Rotation )
# using Procrustes analysis
pts1 = np.float64(pts1)
pts2 = np.float64(pts2)
m1 = np.mean(pts1, axis = 0)
m2 = np.mean(pts2, axis = 0)
# Removing translation
pts1 -= m1
pts2 -= m2
std1 = np.std(pts1)
std2 = np.std(pts2)
std_r = std2/std1
# Removing scaling
pts1 /= std1
pts2 /= std2
U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)
# Finding the rotation matrix
R = np.transpose(U * V)
return np.vstack([np.hstack((std_r * R,
np.transpose(m2) - std_r * R * np.transpose(m1))), np.matrix([0.0, 0.0, 1.0])])
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
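A small hedged usage example of as_float_array (the input values are arbitrary): integer input takes the final branch and is promoted to a float dtype, while float32/float64 input is copied (or returned unchanged when copy=False).

import numpy as np

a = np.array([[1, 2], [3, 4]], dtype=np.int64)
print(as_float_array(a).dtype)   # float64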
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
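For reference, the "matrix inversion lemma" mentioned in the docstring is the Woodbury identity,

(A + U C V)^{-1} = A^{-1} - A^{-1} U (C^{-1} + V A^{-1} U)^{-1} V A^{-1},

which the code above applies with A = noise_variance * I, U = components_.T, C the diagonal matrix of exp_var_diff, and V = components_, so that only a small n_components x n_components matrix has to be inverted instead of the full n_features x n_features covariance.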
def diffD1D2(Flags):
# To check if the difference between D1 and D2 amplifies downstream
# First decide which model to use
delay = 1
if Flags == "Allsym":
(d1,d2,fsi,ti,ta,stn,gpi,ipctx,A,B,params) = calcRates(Flags,delay)
# D = Direct pathway, ID = Indirect pathway, HD = Hyperdirect pathway
# Reducing a full recurrent matrix leads to positive and negative contributions in ID and HD instead of purely positive contributions
D = params['gpid1']
print "Direct",D
de1 = 1. + (params['d1d1']*params['fsifsi']) - (params['stnti']*params['stnstn'])
Ex1 = (params['stnti']*params['d1d1']*params['fsifsi']*params['gpistn'])/de1
Ex2 = (params['gpid1']*params['stnstn']*params['d1ta']*params['fsifsi'])/de1
IDpos = params['stnti']*params['tid2']*(params['d1ta']*params['gpid1']*params['tistn']+ Ex1 + Ex2)
print "IDpos",IDpos
Ex3 = (params['d1ta']+params['d1ta']*params['tata']+((params['stnta']*params['fsiti']*params['d1fsi'])/de1))
IDneg = params['gpid1']+params['gpiti']*params['tid2']+params['gpid1']*params['stnstn']*params['tid2']*Ex3
print "IDneg",IDneg
HDpos = (params['jstnctx']*params['gpid1']*params['stnstn']*params['fsiti']*params['d1fsi'])/de1
print "HDpos",HDpos
Ex4 = params['jstnctx']*params['d1d1']*params['fsifsi']*params['stnti']*params['gpistn']
Ex5 = params['jstnctx']*params['gpid1']*params['stnstn']*params['d1ta']*params['fsifsi']
HDneg = (Ex4 + Ex5)/de1
print "HDneg",HDneg
d1fix = np.mean(d1[:-10])
d2fix = np.mean(d2[:-10])
DelMSN = d1fix - d2fix
DelGpi = (D*d1fix) + ((IDpos - IDneg)*d2fix)
return (DelMSN,DelGpi)