The following 47 code examples, extracted from open-source Python projects, demonstrate how to use the scipy.linalg module.
def logdet(C, eps=1e-6, safe=0):
    '''
    Logarithm of the determinant of a matrix.
    Works only with real-valued positive definite matrices.
    '''
    try:
        return 2.0*np.sum(np.log(np.diag(chol(C))))
    except numpy.linalg.linalg.LinAlgError:
        # Cholesky failed; fall back to the (clipped) eigenvalue spectrum
        if safe: C = check_covmat(C, eps=eps)
        w = np.linalg.eigh(C)[0]
        w = np.real(w)
        w[w<eps] = eps
        det = np.sum(np.log(w))
        return det
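A quick sanity check (my own sketch, not part of the source project; it assumes numpy is imported as np and that the helpers logdet calls, chol and check_covmat, are in scope as in the snippets above): the result should agree with np.linalg.slogdet on a positive definite matrix.

import numpy as np

# Hypothetical usage sketch: logdet() should agree with np.linalg.slogdet.
A = np.random.randn(5, 5)
C = A.dot(A.T) + 5*np.eye(5)        # random symmetric positive definite matrix
sign, ref = np.linalg.slogdet(C)    # reference log-determinant
assert np.isclose(logdet(C), ref)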
def nufft_T(N, J, K, alpha, beta):
    '''
    Equations (29) and (26) in Fessler and Sutton 2003.
    Create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points, and find the pseudo-inverse of CSSC.
    '''
    # import scipy.linalg
    L = numpy.size(alpha) - 1
    # print('L = ', L, 'J = ', J, 'a b', alpha, beta)
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
            # if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
            # if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)
            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * alf2 * tmp
    return mat_inv(cssc)
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
    # Params:
    #   J - column indices
    nnz_i = len(J)
    residual_i = y_i - mu0 - c[J]
    prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
    v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
    post_Phi_i = prior_Phi + \
        np.dot(v_T.T,
               np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
    post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))

    C, lower = scipy.linalg.cho_factor(post_Phi_i)
    post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)

    # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
    ru_i = math.sqrt(1 - self.relaxation ** 2) * \
        scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)), lower=lower)
    ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
    r_i = ru_i[0]
    u_i = ru_i[1:]

    return r_i, u_i
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):
    prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
    nnz_j = len(I)
    residual_j = y_j - mu0 - r[I]
    u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
    post_Phi_j = prior_Phi + \
        np.dot(u_T.T,
               np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
    post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))

    C, lower = scipy.linalg.cho_factor(post_Phi_j)
    post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)

    # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
    cv_j = math.sqrt(1 - self.relaxation ** 2) * \
        scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)), lower=lower)
    cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
    c_j = cv_j[0]
    v_j = cv_j[1:]

    return c_j, v_j
def test_scipy(pyi_builder):
    pyi_builder.test_source(
        """
        from distutils.version import LooseVersion

        # Test top-level SciPy importability.
        import scipy
        from scipy import *

        # Test hooked SciPy modules.
        import scipy.io.matlab
        import scipy.sparse.csgraph

        # Test problematic SciPy modules.
        import scipy.linalg
        import scipy.signal

        # SciPy >= 0.16 privatized the previously public "scipy.lib" package
        # as "scipy._lib". Since this package is problematic, test its
        # importability regardless of SciPy version.
        if LooseVersion(scipy.__version__) >= LooseVersion('0.16.0'):
            import scipy._lib
        else:
            import scipy.lib
        """)
def matrixMult(A, B):
    # Fall back to np.dot if scipy.linalg has no BLAS wrappers available.
    try:
        linalg.blas
    except AttributeError:
        return np.dot(A, B)

    # dgemm is fastest on Fortran-contiguous input; transpose as needed.
    if not A.flags['F_CONTIGUOUS']:
        AA = A.T
        transA = True
    else:
        AA = A
        transA = False

    if not B.flags['F_CONTIGUOUS']:
        BB = B.T
        transB = True
    else:
        BB = B
        transB = False

    return linalg.blas.dgemm(alpha=1., a=AA, b=BB, trans_a=transA, trans_b=transB)
def factor(X, rho):
    """
    Computes the Cholesky factorization of the kernel K = 1/rho*XX^T + I

    Input:
    X         design matrix: n_s x n_f (we assume n_s << n_f)
    rho       regularizer

    Output:
    U         upper triangular Cholesky factor of K
    """
    n_s, n_f = X.shape
    K = 1 / rho * scipy.dot(X, X.T) + scipy.eye(n_s)
    U = linalg.cholesky(K)
    return U
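A usage sketch for factor (mine, not from the source project): the returned upper factor should reconstruct the kernel defined in the docstring.

import numpy as np

# Hypothetical usage sketch: the returned factor satisfies U'U = K.
X = np.random.randn(10, 200)           # n_s = 10 samples, n_f = 200 features
rho = 2.0
U = factor(X, rho)
K = 1/rho * X.dot(X.T) + np.eye(10)    # rebuild the kernel for comparison
assert np.allclose(U.T.dot(U), K)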
def FeCalc(self, Aoptimality=True):
    '''
    Compute estimation efficiency.

    :param Aoptimality: Kind of optimality to optimize: A- or D-optimality
    :type Aoptimality: boolean
    '''
    try:
        invM = scipy.linalg.inv(self.X)
    except scipy.linalg.LinAlgError:
        try:
            invM = scipy.linalg.pinv(self.X)
        except np.linalg.linalg.LinAlgError:
            invM = np.nan
            sys.exc_clear()
    invM = np.array(invM)
    st1 = np.dot(self.CX, invM)
    CMC = np.dot(st1, t(self.CX))
    if Aoptimality == True:
        self.Fe = float(self.CX.shape[0] / np.matrix.trace(CMC))
    else:
        self.Fe = float(np.linalg.det(CMC)**(-1 / len(self.C)))
    self.Fe = self.Fe / self.experiment.FeMax
    return self
def FdCalc(self, Aoptimality=True):
    '''
    Compute detection power.

    :param Aoptimality: Kind of optimality to optimize: A- or D-optimality
    :type Aoptimality: boolean
    '''
    try:
        invM = scipy.linalg.inv(self.Z)
    except scipy.linalg.LinAlgError:
        try:
            invM = scipy.linalg.pinv(self.Z)
        except np.linalg.linalg.LinAlgError:
            invM = np.nan
            sys.exc_clear()
    invM = np.array(invM)
    CMC = np.matrix(self.C) * invM * np.matrix(t(self.C))
    if Aoptimality == True:
        self.Fd = float(len(self.C) / np.matrix.trace(CMC))
    else:
        self.Fd = float(np.linalg.det(CMC)**(-1 / len(self.C)))
    self.Fd = self.Fd / self.experiment.FdMax
    return self
def decompose(P):
    M = P[:3, :3]
    T = P[:3, 3]
    K, R = scipy.linalg.rq(M)

    # Fix signs of the RQ factors to follow the calibration-matrix convention
    for i in range(2):
        if K[i, i] < 0:
            K[:, i] *= -1
            R[i, :] *= -1
    if K[2, 2] > 0:
        K[:, 2] *= -1
        R[2, :] *= -1
    if det(R) < 0:
        R *= -1

    T = linalg.inv(dot(K, -R)).dot(T.reshape((3, 1)))
    K /= -K[2, 2]
    return K, R, T
def _gauss_from_coefficients_numpy(alpha, beta):
    assert isinstance(alpha, numpy.ndarray)
    assert isinstance(beta, numpy.ndarray)

    # eigh_tridiagonal is only available from scipy 1.0.0
    try:
        from scipy.linalg import eigh_tridiagonal
    except ImportError:
        # Fall back to eig_banded
        x, V = eig_banded(numpy.vstack((numpy.sqrt(beta), alpha)), lower=False)
        w = beta[0]*scipy.real(scipy.power(V[0, :], 2))
    else:
        x, V = eigh_tridiagonal(alpha, numpy.sqrt(beta[1:]))
        w = beta[0] * V[0, :]**2
    return x, w
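As a usage sketch (my own example; the Legendre recurrence coefficients below are an assumption about the expected alpha/beta convention), the routine should reproduce the Gauss-Legendre rule via the Golub-Welsch construction:

import numpy
from numpy.polynomial.legendre import leggauss

# Hypothetical usage sketch: recurrence coefficients of the (monic) Legendre
# polynomials -- alpha_k = 0, beta_0 = 2, beta_k = k^2/(4k^2-1) -- should
# reproduce the Gauss-Legendre quadrature nodes and weights.
n = 5
k = numpy.arange(1, n)
alpha = numpy.zeros(n)
beta = numpy.concatenate(([2.0], k**2 / (4.0*k**2 - 1.0)))
x, w = _gauss_from_coefficients_numpy(alpha, beta)
x_ref, w_ref = leggauss(n)
assert numpy.allclose(x, x_ref) and numpy.allclose(w, w_ref)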
def MVG_DKL(M0,P0,M1,P1):
    '''
    KL divergence between two Gaussians, parameterized by their means
    and precision matrices.

    Example
    -------
        N = 10
        M = randn(N)
        Q = randn(N,N)
        P = Q.dot(Q.T)
        MVG_DKL(M,P,M,P)
    '''
    MVG_check(M0,P0)
    MVG_check(M1,P1)
    N = len(M0)
    M1M0 = M1-M0
    return 0.5*(np.sum(P1*pinv(P0))+logdet(P0)-logdet(P1)-N+M1M0.T.dot(P1).dot(M1M0))
    #return 0.5*(np.sum(np.diag(P1.dot(np.linalg.pinv(P0))))+logdet(P0)-logdet(P1)-N+M1M0.T.dot(P1).dot(M1M0))
def gaussian1DblurOperator(n, sigma):
    '''
    Returns a 1D Gaussian blur operator of size n
    '''
    x   = np.linspace(0, n-1, n)  # 1D domain
    tau = 1.0/sigma**2            # precision
    k   = np.exp(-tau*x**2)       # compute (un-normalized) 1D kernel
    op  = scipy.linalg.special_matrices.toeplitz(k, k)  # convert to an operator from n -> n
    # normalize rows so density is conserved (keepdims makes the division row-wise)
    op /= np.sum(op, axis=1, keepdims=True)
    # truncate small entries
    big = np.max(op)
    toosmall = 1e-4*big
    op[op < toosmall] = 0
    # (re)normalize rows so density is conserved
    op /= np.sum(op, axis=1, keepdims=True)
    return op
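A usage sketch (mine, not from the source project): blurring a unit impulse with the operator spreads it into a Gaussian bump while approximately conserving total density.

import numpy as np

# Hypothetical usage sketch: blur a unit impulse with the operator above.
n = 32
op = gaussian1DblurOperator(n, sigma=2.0)
impulse = np.zeros(n)
impulse[n // 2] = 1.0
blurred = op @ impulse                               # impulse spreads into a Gaussian bump
assert np.isclose(np.sum(blurred), 1.0, atol=1e-2)   # density approximately conserved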
def gaussian2DblurOperator(n, sigma):
    '''
    Returns a 2D Gaussian blur operator for an n x n sized domain.
    Constructed as a tensor product of two 1D blurs of size n.
    '''
    x   = np.linspace(0, n-1, n)  # 1D domain
    tau = 1.0/sigma**2            # precision
    k   = np.exp(-tau*x**2)       # compute (un-normalized) 1D kernel
    tp  = scipy.linalg.special_matrices.toeplitz(k, k)  # convert to an operator from n -> n
    op  = scipy.linalg.special_matrices.kron(tp, tp)    # take the tensor product to get the 2D operator
    # normalize rows so density is conserved (keepdims makes the division row-wise)
    op /= np.sum(op, axis=1, keepdims=True)
    # truncate small entries
    big = np.max(op)
    toosmall = 1e-4*big
    op[op < toosmall] = 0
    # (re)normalize rows so density is conserved
    op /= np.sum(op, axis=1, keepdims=True)
    return op
def _getAplus(A):
    '''
    Please see the documentation for nearPDHigham
    '''
    eigval, eigvec = np.linalg.eig(A)
    Q = np.matrix(eigvec)
    xdiag = np.matrix(np.diag(np.maximum(eigval, 0)))
    return Q*xdiag*Q.T
def measure_background(image, Fibers, width=30, niter=3, order=3):
    t = []
    a, b = image.shape
    ygrid, xgrid = np.indices(image.shape)
    ygrid = 1. * ygrid.ravel() / a
    xgrid = 1. * xgrid.ravel() / b
    image = image.ravel()
    s = np.arange(a*b)
    for fiber in Fibers:
        t.append(fiber.D*fiber.yind + fiber.xind)
    t = np.hstack(t)
    t = np.array(t, dtype=int)
    ind = np.setdiff1d(s, t)
    mask = np.zeros((a*b))
    mask[ind] = 1.
    mask[ind] = 1. - is_outlier(image[ind])
    sel = np.where(mask == 1.)[0]
    for i in range(niter):
        V = polyvander2d(xgrid[sel], ygrid[sel], [order, order])
        sol = np.linalg.lstsq(V, image[sel])[0]
        vals = np.dot(V, sol) - image[sel]
        sel = sel[~is_outlier(vals)]
    V = polyvander2d(xgrid, ygrid, [order, order])
    back = np.dot(V, sol).reshape(a, b)
    return back
def mat_inv(A):
    # I = numpy.eye(A.shape[0], A.shape[1])
    B = scipy.linalg.pinv2(A)
    return B
def _batch_incremental_pca(x, G, S):
    r = G.shape[1]
    b = x.shape[0]

    xh = G.T.dot(x)
    H = x - G.dot(xh)
    J, W = scipy.linalg.qr(H, overwrite_a=True, mode='full', check_finite=False)

    Q = np.bmat([[np.diag(S), xh], [np.zeros((b, r), dtype=np.float32), W]])

    G_new, St_new, Vtoss = scipy.linalg.svd(Q, full_matrices=False, check_finite=False)
    St_new = St_new[:r]
    G_new = np.asarray(np.bmat([G, J]).dot(G_new[:, :r]))

    return G_new, St_new
def test_recoverG(self):
    ''' Test GCCA implementation by seeing if it can recover G. '''
    eps = 1.e-10
    Vs = [self.V1, self.V2, self.V3]
    wgcca = WGCCA.WeightedGCCA(3, [self.F1, self.F2, self.F3],
                               self.k, [eps, eps, eps], verbose=True)
    wgcca.learn(Vs)
    U1 = wgcca.U[0]
    U2 = wgcca.U[1]
    U3 = wgcca.U[2]
    Gprime = wgcca.G

    # Rotate G to minimize norm of difference between G and G'
    R, B = scipy.linalg.orthogonal_procrustes(self.G, Gprime)
    normDiff = scipy.linalg.norm(self.G.dot(R) - Gprime)

    print('Recovered G up to rotation; difference in norm:', normDiff)

    self.assertTrue(normDiff < 1.e-6)
    self.assertTrue(np.allclose(self.G.dot(R), Gprime))
def for_loop_update_row_param_blockwise(self, y_csr, phi_csr, mu0, c, v, r_prev, u_prev):
    nrow = y_csr.shape[0]
    num_factor = v.shape[1]
    prior_Phi = np.diag(np.hstack((self.prior_param['row_bias_scale'] ** -2,
                                   np.tile(self.prior_param['factor_scale'] ** -2, num_factor))))

    # Pre-allocate
    r = np.zeros(nrow)
    u = np.zeros((nrow, num_factor))

    # NOTE: The loop through 'i' is completely parallelizable.
    for i in range(nrow):
        j = y_csr[i, :].indices
        nnz_i = len(j)
        residual_i = y_csr[i, :].data - mu0 - c[j]
        phi_i = phi_csr[i, :].data.copy()

        v_T = np.hstack((np.ones((nnz_i, 1)), v[j, :]))
        post_Phi_i = prior_Phi + \
            np.dot(v_T.T,
                   np.tile(phi_i[:, np.newaxis], (1, 1 + num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
        post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))

        C, lower = scipy.linalg.cho_factor(post_Phi_i)
        post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)

        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        ru_i = math.sqrt(1 - self.relaxation ** 2) * \
            scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)), lower=lower)
        ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev[i]], u_prev[i, :])))
        r[i] = ru_i[0]
        u[i, :] = ru_i[1:]

    return r, u
def for_loop_update_col_param_blockwise(self, y_csc, phi_csc, mu0, r, u, c_prev, v_prev):
    ncol = y_csc.shape[1]
    num_factor = u.shape[1]
    prior_Phi = np.diag(np.hstack((self.prior_param['col_bias_scale'] ** -2,
                                   np.tile(self.prior_param['factor_scale'] ** -2, num_factor))))

    # Pre-allocate
    c = np.zeros(ncol)
    v = np.zeros((ncol, num_factor))

    # NOTE: The loop through 'j' is completely parallelizable.
    for j in range(ncol):
        i = y_csc[:, j].indices
        nnz_j = len(i)
        residual_j = y_csc[:, j].data - mu0 - r[i]
        phi_j = phi_csc[:, j].data

        u_T = np.hstack((np.ones((nnz_j, 1)), u[i, :]))
        post_Phi_j = prior_Phi + \
            np.dot(u_T.T,
                   np.tile(phi_j[:, np.newaxis], (1, 1 + num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
        post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))

        C, lower = scipy.linalg.cho_factor(post_Phi_j)
        post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)

        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        cv_j = math.sqrt(1 - self.relaxation ** 2) * \
            scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)), lower=lower)
        cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev[j]], v_prev[j, :])))
        c[j] = cv_j[0]
        v[j, :] = cv_j[1:]

    return c, v
def check_gradient(self, X, T, eps=1e-10):
    thetas = self.flatten()
    grad1 = self.numerical_gradient(thetas, X, T)
    _, grad2 = self.compute_cost(thetas, X, T)
    diff = linalg.norm(grad1 - grad2) / linalg.norm(grad1 + grad2)
    print(np.c_[grad1, grad2, np.abs(grad1 - grad2)])
    print('diff = {0}'.format(diff))
    return diff < eps
def ComputeCCA(X, Y):
    assert X.shape[0] == Y.shape[0], (X.shape, Y.shape, "Unequal number of rows")
    assert X.shape[0] > 1, (X.shape, "Must have more than 1 row")

    X = NormCenterMatrix(X)
    Y = NormCenterMatrix(Y)
    X_q, _, _ = decomp_qr.qr(X, overwrite_a=True, mode='economic', pivoting=True)
    Y_q, _, _ = decomp_qr.qr(Y, overwrite_a=True, mode='economic', pivoting=True)
    C = np.dot(X_q.T, Y_q)
    r = linalg.svd(C, full_matrices=False, compute_uv=False)
    d = min(X.shape[1], Y.shape[1])
    r = r[:d]
    r = np.minimum(np.maximum(r, 0.0), 1.0)  # remove roundoff errors
    return r.mean()
def compute_phi_prior(self):
    logZ_prior = 0
    for i in range(self.no_layers):
        Dout_i = self.layer_sizes[i+1]
        if not self.zu_tied:
            for d in range(Dout_i):
                (sign, logdet) = np.linalg.slogdet(self.Kuu[i][d])
                logZ_prior += 0.5 * logdet
        else:
            (sign, logdet) = np.linalg.slogdet(self.Kuu[i])
            logZ_prior += Dout_i * 0.5 * logdet
    return logZ_prior
def compute_phi_posterior(self):
    logZ_posterior = 0
    for i in range(self.no_layers):
        Dout_i = self.layer_sizes[i+1]
        for d in range(Dout_i):
            mud_val = self.mu[i][d]
            Sud_val = self.Su[i][d]
            (sign, logdet) = np.linalg.slogdet(Sud_val)
            # print 'phi_poste: ', 0.5 * logdet, 0.5 * np.sum(mud_val * spalg.solve(Sud_val, mud_val.T))
            logZ_posterior += 0.5 * logdet
            logZ_posterior += 0.5 * np.sum(mud_val * spalg.solve(Sud_val, mud_val.T))
    return logZ_posterior
def compute_phi_cavity(self):
    phi_cavity = 0
    for i in range(self.no_layers):
        Dout_i = self.layer_sizes[i+1]
        for d in range(Dout_i):
            muhatd_val = self.muhat[i][d]
            Suhatd_val = self.Suhat[i][d]
            (sign, logdet) = np.linalg.slogdet(Suhatd_val)
            phi_cavity += 0.5 * logdet
            phi_cavity += 0.5 * np.sum(muhatd_val * spalg.solve(Suhatd_val, muhatd_val.T))
    return phi_cavity
def CreateLmComp(self):
    '''
    This function generates components for the linear model:
    HRF, whitening matrix, autocorrelation matrix, and CX.
    '''
    # hrf
    self.canonical(0.1)

    # contrasts
    # expand contrasts to resolution of HRF (?)
    prec = int(self.laghrf/(self.hrf_precision/self.resolution))
    self.CX = np.kron(self.C, np.eye(prec))

    # drift
    self.S = self.drift(np.arange(0, self.n_scans))  # [tp x 1]
    self.S = np.matrix(self.S)

    # square of the whitening matrix
    base = [1 + self.rho**2, -1 * self.rho] + [0] * (self.n_scans - 2)
    self.V2 = scipy.linalg.toeplitz(base)
    self.V2[0, 0] = 1
    self.V2 = np.matrix(self.V2)
    self.V2[self.n_scans - 1, self.n_scans - 1] = 1

    self.white = self.V2 - self.V2 * \
        t(self.S) * np.linalg.pinv(self.S * self.V2 * t(self.S)) * self.S * self.V2

    return self
def find_frame_of_reference(target, source):
    target = copy(target)
    source = copy(source[:, :target.shape[1]])
    target /= target[3, :]
    source /= source[3, :]
    return dot(linalg.pinv(source.T), target.T).T
def reconstruct_columns(W, B):
    W = W.copy()
    for i in range(W.shape[1]):
        C = W[:, i]
        M = isntnan(C)
        if sum(M) != C.shape[0]:
            coeffs = dot(linalg.pinv(B[M, :]), C[M])
            W[:, i] = dot(B, coeffs)
    return W
def generate_synth_data(n_seq, time_steps, sizes, prng, Winit='svd'):
    n_input = sizes['n_input']
    n_hidden = sizes['n_hidden']

    # unit variance circular complex Gaussians in real-composite form
    Xaug = prng.randn(2*n_input, n_seq*time_steps).astype(np.float32)/np.sqrt(2)

    if (Winit == 'svd'):
        W = prng.randn(n_hidden, n_hidden).astype(np.complex64) \
            + 1j*prng.randn(n_hidden, n_hidden).astype(np.complex64)
        U, S, V = np.linalg.svd(W)
        W = np.dot(U, V)
        # convert W to real-composite form for right multiplication
        #
        # real-composite for right multiplication, g=h^T W, with h=x+jy and W=A+jB,
        # is grc=hrc^T Wrc, with Wrc=[A^T, B^T; -B^T, A^T] and hrc=[x; y]
        #
        # real-composite for left multiplication, g=Wh, with h=x+jy and W=A+jB,
        # is grc=Wrc hrc, with Wrc=[A, -B; B, A] and hrc=[x; y]
        A = np.transpose(np.real(W))
        B = np.transpose(np.imag(W))
        Wr = np.concatenate([A, B], axis=1)        # create [ A, B]
        Wc = np.concatenate([(-1)*B, A], axis=1)   # create [-B, A]
        Waug = np.concatenate([Wr, Wc], axis=0)    # create [A, B; -B, A]
    elif (Winit == 'adhoc'):
        Wparams = initialize_unitary(n_hidden, 'full', prng)
        Waug = Wparams[0].get_value()
    elif (Winit == 'adhoc2x'):
        Wparams1 = initialize_unitary(n_hidden, 'full', prng)
        Waug1 = Wparams1[0]
        Waug1np = Waug1.get_value()
        # only take first row of blocks to get correct augmented form
        # after multiplication within numerical precision
        Waug1np = Waug1np[:n_hidden, :]
        Wparams2 = initialize_unitary(n_hidden, 'full', prng)
        Waug2 = Wparams2[0]
        Waug_row1 = np.dot(Waug1np, Waug2.get_value())
        Waug = np.concatenate([Waug_row1,
                               np.concatenate([-Waug_row1[:, n_hidden:],
                                               Waug_row1[:, :n_hidden]], axis=1)],
                              axis=0)

    fidx0 = np.arange(0, n_seq*time_steps, time_steps)
    fidx1 = np.arange(time_steps, n_seq*time_steps + time_steps, time_steps)
    fidx = np.concatenate([np.reshape(fidx0, (n_seq, 1)),
                           np.reshape(fidx1, (n_seq, 1))], axis=1)

    return Xaug, Waug, fidx
def MVG_logPDF(X,M,P=None,C=None):
    '''
    Args:
        X : KxN vector of samples for which to compute the PDF
        M : N mean vector
        P : NxN precision matrix OR
        C : NxN covariance matrix

    Example:
        N = 10
        K = 100
        M = randn(N)
        Q = randn(N,N)
        C = Q.dot(Q.T)
        X = randn(K,N)
        MVG_logPDF(X,M,C=C) - MVG_logPDF(X,M,P=np.linalg.pinv(C))
    '''
    N = len(M)
    if (P is None and C is None):
        raise ValueError("Either a covariance or precision matrix is needed")
    normd = -0.5*N*np.log(2*pi)
    xM = X-M
    if P is None:
        # Use covariance: compute the product with the precision
        # (inverse covariance) via least-squares, rather than inverting C
        MVG_check(M,C)
        norm = normd-0.5*logdet(C)
        xMP = np.linalg.lstsq(C,xM.T)[0].T
    if C is None:
        # Use precision
        MVG_check(M,P)
        norm = normd+0.5*logdet(P)
        xMP = xM.dot(P)
    logpr = -0.5*np.sum(xMP*xM,axis=1)
    return norm+logpr
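A sanity-check sketch for MVG_logPDF (my own, not from the source project; it assumes the project helpers MVG_check and logdet are in scope, and that pi is numpy's): the covariance path should match scipy.stats.

import numpy as np
from scipy.stats import multivariate_normal

# Hypothetical usage sketch: compare against scipy.stats on random data.
N, K = 4, 7
M = np.random.randn(N)
Q = np.random.randn(N, N)
C = Q.dot(Q.T) + N*np.eye(N)       # well-conditioned covariance matrix
X = np.random.randn(K, N)
ref = multivariate_normal(mean=M, cov=C).logpdf(X)
assert np.allclose(MVG_logPDF(X, M, C=C), ref)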
def MVG_multiply(M1,P1,M2,P2,safe=1):
    '''
    Multiply two multivariate Gaussians based on precision
    '''
    if safe:
        MVG_check(M1,P1)
        MVG_check(M2,P2)
    P = P1 + P2
    M = scipy.linalg.lstsq(P,np.squeeze(P1.dot(M1)+P2.dot(M2)))[0]
    if safe:
        MVG_check(M,P)
    return M,P
def MVG_divide(M1,P1,M2,P2,eps=1e-6,handle_negative='repair',verbose=0):
    '''
    Divide two multivariate Gaussians based on precision

    Parameters
    ----------
    handle_negative :
        'repair' (default): returns a nearby distribution with positive variance
        'ignore': can return a distribution with negative variance
        'error': throws a ValueError if negative variances are produced
    '''
    MVG_check(M1,P1,eps=eps)
    MVG_check(M2,P2,eps=eps)
    P = P1 - P2
    w,v = real_eig(P)
    if any(w<eps):
        if handle_negative=='repair':
            w[w<eps]=eps
            P = v.dot(diag(w)).dot(v.T)
            if verbose:
                print('Warning: non-positive precision in Gaussian division')
        elif handle_negative=='ignore':
            pass
        elif handle_negative=='error':
            raise ValueError('Distribution resulting from division has non-positive precision!')
        else:
            raise ValueError('Argument handle_negative must be "repair", "ignore", or "error"')
    M = scipy.linalg.lstsq(P,P1.dot(M1) - P2.dot(M2))[0]
    MVG_check(M,P,eps=eps)
    return M,P
def MVG_projection(M,C,A):
    '''
    Compute a new multivariate Gaussian reflecting the distribution
    under the projection A

    Args:
        M : length N vector of the mean
        C : NxN covariance matrix
        A : KxN projection of the vector space (should be unitary?)
    '''
    MVG_check(M,C)
    M = A.dot(M)
    C = scipy.linalg.lstsq(A.T,C.dot(A.T))[0].T
    MVG_check(M,C)
    return M,C
def rcond(x):
    '''
    Reciprocal condition number
    '''
    return 1./np.linalg.cond(x)
def check_covmat_fast(C,N=None,eps=1e-6):
    '''
    Verify that matrix C is a size NxN precision or covariance matrix
    '''
    if not type(C)==np.ndarray:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if not len(C.shape)==2:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if N is None:
        N = C.shape[0]
    if not C.shape==(N,N):
        raise ValueError("Expected size %d x %d matrix"%(N,N))
    if np.any(~np.isreal(C)):
        raise ValueError("Covariance matrices should not contain complex numbers")
    C = np.real(C)
    if np.any(~np.isfinite(C)):
        raise ValueError("Covariance matrix contains NaN or ±inf!")
    if not np.all(np.abs(C-C.T)<eps):
        raise ValueError("Covariance matrix is not symmetric up to precision %0.1e"%eps)
    try:
        ch = chol(C)
    except numpy.linalg.linalg.LinAlgError:
        # Check smallest eigenvalue if cholesky fails
        mine = np.real(scipy.linalg.decomp.eigh(C,eigvals=(0,0))[0][0])
        if np.any(mine<-eps):
            raise ValueError('Covariance matrix contains eigenvalue %0.3e<%0.3e'%(mine,-eps))
        if (mine<eps):
            C = C + np.eye(N)*(eps-mine)
    C = 0.5*(C+C.T)
    return C
def solt(a,b):
    '''
    Wraps solve_triangular; automatically detects lower vs. upper triangular
    '''
    if np.allclose(a, scipy.linalg.special_matrices.tril(a)):  # check if lower triangular
        return scipy.linalg.solve_triangular(a,b,lower=1)
    if np.allclose(a, scipy.linalg.special_matrices.triu(a)):  # check if upper triangular
        return scipy.linalg.solve_triangular(a,b,lower=0)
    raise ValueError('a matrix is not triangular')
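A usage sketch for solt (mine): it should dispatch to the correct triangular solver for both lower- and upper-triangular inputs.

import numpy as np

# Hypothetical usage sketch: solt() picks the right triangular solver.
L = np.tril(np.random.randn(4, 4)) + 4*np.eye(4)   # well-conditioned, lower triangular
b = np.random.randn(4)
assert np.allclose(L.dot(solt(L, b)), b)           # lower-triangular branch
assert np.allclose(L.T.dot(solt(L.T, b)), b)       # upper-triangular branch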
def rsolve(a,b):
    '''
    Wraps solve, applied from the right:
    solves the system x b = a (i.e., right division a / b)
    '''
    return scipy.linalg.solve(b.T,a.T).T
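A usage sketch for rsolve (mine): the returned x satisfies x.dot(b) == a, without forming an explicit inverse.

import numpy as np

# Hypothetical usage sketch: rsolve(a, b) solves x b = a from the right.
b = np.random.randn(3, 3) + 3*np.eye(3)   # well-conditioned square matrix
a = np.random.randn(3, 3)
x = rsolve(a, b)
assert np.allclose(x.dot(b), a)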
def nufft_alpha_kb_fit(N, J, K):
    """
    Find the parameters alpha and beta for the scaling factor st['sn'].
    The alpha is hardwired as [1,0,0...] when J = 1 (uniform scaling factor).

    :param int N: the size of the image
    :param int J: the size of the interpolator
    :param int K: the size of the oversampled k-space
    """
    beta = 1
    Nmid = (N - 1.0) / 2.0
    if N > 40:
        L = 13
    else:
        L = numpy.ceil(N / 3).astype(numpy.int16)

    nlist = numpy.arange(0, N) * 1.0 - Nmid
    (kb_a, kb_m) = kaiser_bessel('string', J, 'best', 0, K / N)
    if J > 1:
        sn_kaiser = 1 / kaiser_bessel_ft(nlist / K, J, kb_a, kb_m, 1.0)
    elif J == 1:  # The case when samples are on regular grids
        sn_kaiser = numpy.ones((1, N), dtype=dtype)
    gam = 2 * numpy.pi / K
    X_ant = beta * gam * nlist.reshape((N, 1), order='F')
    X_post = numpy.arange(0, L + 1)
    X_post = X_post.reshape((1, L + 1), order='F')
    X = numpy.dot(X_ant, X_post)  # [N, L]
    X = numpy.cos(X)
    sn_kaiser = sn_kaiser.reshape((N, 1), order='F').conj()
    X = numpy.array(X, dtype=dtype)
    sn_kaiser = numpy.array(sn_kaiser, dtype=dtype)
    coef = numpy.linalg.lstsq(numpy.nan_to_num(X), numpy.nan_to_num(sn_kaiser))[0]
    alphas = coef
    if J > 1:
        alphas[0] = alphas[0]
        alphas[1:] = alphas[1:] / 2.0
    elif J == 1:  # cases on grids
        alphas[0] = 1.0
        alphas[1:] = 0.0
    alphas = numpy.real(alphas)
    return (alphas, beta)
def setUp(self):
    ### Generate sample data with 3 views ###
    self.N = 1000  # Number of examples
    self.F1 = 50   # Number of features in view 1
    self.F2 = 30
    self.F3 = 40
    self.k = 5     # Number of latent features

    def scale(X):
        X_mean = np.mean(X, axis=0)
        X -= X_mean
        X_std = np.std(X, axis=0)
        X_std[X_std == 0.] = 1.0
        X /= X_std
        return X

    def orth(X):
        ''' http://stackoverflow.com/questions/13940056/orthogonalize-matrix-numpy '''
        return X.dot(scipy.linalg.inv(scipy.linalg.sqrtm(X.T.dot(X))))

    # Maps to each view
    W1 = np.random.normal(size=(self.F1, self.k))
    W2 = np.random.normal(size=(self.F2, self.k))
    W3 = np.random.normal(size=(self.F3, self.k))

    W1 = scale(W1)
    W2 = scale(W2)
    W3 = scale(W3)

    G = np.random.normal(size=(self.N, self.k))  # Latent examples
    self.G = orth(G)

    # Observations
    self.V1 = W1.dot(self.G.T).T  # N x F1
    self.V2 = W2.dot(self.G.T).T  # N x F2
    self.V3 = W3.dot(self.G.T).T  # N x F3

    ### Write sample data to test file ###
    outFile = open('gcca_test_file.tsv', 'w')
    for i in range(self.N):
        vStrs = [' '.join([str(val) for val in v[i, :]])
                 for v in [self.V1, self.V2, self.V3]]
        # Assume each view is populated from a single document
        outFile.write('%d\t1\t1\t1\t%s\n' % (i, '\t'.join(vStrs)))
    outFile.close()
def euler_angles_1q(unitary_matrix):
    """Compute Euler angles for a single-qubit gate.

    Find angles (theta, phi, lambda) such that
    unitary_matrix = phase * Rz(phi) * Ry(theta) * Rz(lambda)

    Return (theta, phi, lambda, "U(theta,phi,lambda)"). The last element of
    the tuple is the OpenQASM gate name with parameter values substituted.
    """
    small = 1e-10
    if unitary_matrix.shape != (2, 2):
        raise MapperError("compiling.euler_angles_1q expected 2x2 matrix")
    phase = np.linalg.det(unitary_matrix)**(-1.0/2.0)
    U = phase * unitary_matrix  # U in SU(2)
    # OpenQASM SU(2) parameterization:
    # U[0, 0] = exp(-i(phi+lambda)/2) * cos(theta/2)
    # U[0, 1] = -exp(-i(phi-lambda)/2) * sin(theta/2)
    # U[1, 0] = exp(i(phi-lambda)/2) * sin(theta/2)
    # U[1, 1] = exp(i(phi+lambda)/2) * cos(theta/2)
    # Find theta
    if abs(U[0, 0]) > small:
        theta = 2 * math.acos(abs(U[0, 0]))
    else:
        theta = 2 * math.asin(abs(U[1, 0]))
    # Find phi and lambda
    phase11 = 0.0
    phase10 = 0.0
    if abs(math.cos(theta/2.0)) > small:
        phase11 = U[1, 1] / math.cos(theta/2.0)
    if abs(math.sin(theta/2.0)) > small:
        phase10 = U[1, 0] / math.sin(theta/2.0)
    phiplambda = 2 * math.atan2(np.imag(phase11), np.real(phase11))
    phimlambda = 2 * math.atan2(np.imag(phase10), np.real(phase10))
    phi = 0.0
    if abs(U[0, 0]) > small and abs(U[1, 0]) > small:
        phi = (phiplambda + phimlambda) / 2.0
        lamb = (phiplambda - phimlambda) / 2.0
    else:
        if abs(U[0, 0]) < small:
            lamb = -phimlambda
        else:
            lamb = phiplambda
    # Check the solution
    Rzphi = np.array([[np.exp(-1j*phi/2.0), 0],
                      [0, np.exp(1j*phi/2.0)]], dtype=complex)
    Rytheta = np.array([[np.cos(theta/2.0), -np.sin(theta/2.0)],
                        [np.sin(theta/2.0), np.cos(theta/2.0)]], dtype=complex)
    Rzlambda = np.array([[np.exp(-1j*lamb/2.0), 0],
                         [0, np.exp(1j*lamb/2.0)]], dtype=complex)
    V = np.dot(Rzphi, np.dot(Rytheta, Rzlambda))
    if np.linalg.norm(V - U) > small:
        raise MapperError("compiling.euler_angles_1q incorrect result")
    return theta, phi, lamb, "U(%.15f,%.15f,%.15f)" % (theta, phi, lamb)
def calcAvec(new, dQ, W, lambda_, active_set, M, positive):
    # TODO: comment
    r, c = np.nonzero(active_set)  # [r,c] = find(active_set);
    Mm = -M.take(r, axis=0).take(r, axis=1)
    Mm = (Mm + Mm.T) / 2

    # verify that there is no numerical instability
    if len(Mm) > 1:
        # print Mm.shape
        eigMm, _ = scipy.linalg.eig(Mm)
        eigMm = np.real(eigMm)
        # check_here
    else:
        eigMm = Mm

    if any(eigMm < 0):
        np.min(eigMm)
        # %error('The matrix Mm has negative eigenvalues')
        flag = 1

    b = np.sign(W)

    if new >= 0:
        b[new] = np.sign(dQ[new])

    b = b[active_set == 1]

    if len(Mm) > 1:
        avec = np.linalg.solve(Mm, b)
    else:
        avec = b / Mm

    if positive:
        if new >= 0:
            in_ = np.sum(active_set[:new])
            if avec[in_] < 0:
                # new;
                # %error('new component of a is negative')
                flag = 1

    one_vec = np.ones(W.shape)
    dQa = np.zeros(W.shape)
    for j in range(len(r)):
        dQa = dQa + np.expand_dims(avec[j] * M[:, r[j]], axis=1)

    gamma_plus = (lambda_ - dQ) / (one_vec + dQa)
    gamma_minus = (lambda_ + dQ) / (one_vec - dQa)

    return avec, gamma_plus, gamma_minus
def check_covmat(C,N=None,eps=1e-6):
    '''
    Verify that matrix C is a size NxN precision or covariance matrix
    '''
    if not type(C)==np.ndarray:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if not len(C.shape)==2:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if N is None:
        N = C.shape[0]
    if not C.shape==(N,N):
        raise ValueError("Expected size %d x %d matrix"%(N,N))
    if np.any(~np.isreal(C)):
        raise ValueError("Covariance matrices should not contain complex numbers")
    C = np.real(C)
    if np.any(~np.isfinite(C)):
        raise ValueError("Covariance matrix contains NaN or ±inf!")
    if not np.all(np.abs(C-C.T)<eps):
        raise ValueError("Covariance matrix is not symmetric up to precision %0.1e"%eps)
    # Get just the highest eigenvalue
    maxe = np.real(scipy.linalg.decomp.eigh(C,eigvals=(N-1,N-1))[0][0])
    # Get just the lowest eigenvalue
    mine = np.real(scipy.linalg.decomp.eigh(C,eigvals=(0,0))[0][0])
    #if np.any(w<-eps):
    #    raise ValueError('Covariance matrix contains eigenvalue %0.3e<%0.3e'%(np.min(w),-eps))
    if mine<0:
        raise ValueError('Covariance matrix contains negative eigenvalue %0.3e'%mine)
    if (mine<eps):
        C = C + eye(N)*(eps-mine)
    # truncate spectrum at some small value
    # w[w<eps]=eps
    # Very large eigenvalues can also cause numeric problems
    # w[w>1./eps]=1./eps;
    # maxe = np.max(np.abs(w))
    # if maxe>10./eps:
    #     raise ValueError('Covariance matrix eigenvalue %0.2e is larger than %0.2e'%(maxe,10./eps))
    # Rebuild matrix
    # C = v.dot(np.diag(w)).dot(v.T)
    # Ensure symmetry (only occurs as a numerical error for very large matrices?)
    C = 0.5*(C+C.T)
    return C

# need a faster covariance matrix checker
def nearPSDRebonatoJackel(A,epsilon=1e-10):
    '''
    Computes a positive semi-definite matrix "close" to matrix $A$ using
    the algorithm of Rebonato and Jackel (1999).

    This is based on a [Stackoverflow answer](https://stackoverflow.com/a/18542094/900749)

    Parameters
    ----------
    A : np.array
        Square, real-valued matrix
    epsilon : non-negative scalar, default 1e-10
        Minimum eigenvalue

    Returns
    -------
    np.array
        Positive semi-definite matrix close to $A$
    '''
    n = A.shape[0]
    eigval, eigvec = np.linalg.eig(A)
    val = np.matrix(np.maximum(eigval, epsilon))
    vec = np.matrix(eigvec)
    T = 1/(np.multiply(vec, vec) * val.T)
    T = np.matrix(np.sqrt(np.diag(np.array(T).reshape((n)))))
    B = T * vec * np.diag(np.array(np.sqrt(val)).reshape((n)))
    return B*B.T
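A usage sketch (mine, not from the source project): repairing a slightly indefinite correlation-like matrix yields a PSD matrix, and the Rebonato-Jackel scaling preserves the unit diagonal.

import numpy as np

# Hypothetical usage sketch: repair a slightly indefinite correlation-like matrix.
A = np.array([[1.0, 0.9, 0.0],
              [0.9, 1.0, 0.9],
              [0.0, 0.9, 1.0]])         # smallest eigenvalue is negative
B = np.asarray(nearPSDRebonatoJackel(A))
assert np.min(np.linalg.eigvalsh(B)) > -1e-8   # now (numerically) PSD
assert np.allclose(np.diag(B), 1.0)            # unit diagonal is preserved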