The following 29 code examples, extracted from open-source Python projects, illustrate how to use scipy.linalg.eig().
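As a quick orientation before the project code, here is a minimal sketch of my own (not taken from any of the projects below; the matrices A and B are made up for illustration). scipy.linalg.eig solves the standard eigenvalue problem A v = λ v and, when a second matrix is passed, the generalized problem A v = λ B v, which is the pattern the GEV/LDA/MAF examples below rely on.

import numpy as np
from scipy.linalg import eig

A = np.array([[2.0, 1.0],
              [1.0, 3.0]])
B = np.eye(2)

# Standard problem A v = lambda v: w holds the (complex) eigenvalues,
# the columns of v are the corresponding right eigenvectors.
w, v = eig(A)

# Generalized problem A v = lambda B v.
w_gen, v_gen = eig(A, B)

# Sort eigenpairs by descending real part, as several examples below do.
idx = np.argsort(w.real)[::-1]
w, v = w[idx], v[:, idx]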
def transform(self, X, y):
    """transform function"""
    XMat = np.array(X)
    yMat = np.array(y)

    if XMat.shape[0] != yMat.shape[0]:
        yMat = yMat.T
    assert XMat.shape[0] == yMat.shape[0]

    XMat -= XMat.mean(axis=0)
    Sw, Sb = calc_Sw_Sb(XMat, yMat)
    evals, evecs = eig(Sw, Sb)

    np.ascontiguousarray(evals)
    np.ascontiguousarray(evecs)

    idx = np.argsort(evals)
    idx = idx[::-1]
    evecs = evecs[:, idx]

    self.W = evecs[:, :self.n_components]
    X_transformed = np.dot(XMat, self.W)

    return X_transformed
def eigenbasis(se, nb):
    # generate number sector
    ns1 = se.model.numbersector(nb)
    # get the size of the basis
    ns1size = ns1.basis.len  # length of the number sector basis
    # G1i = range(ns1size)  # our Greens function?

    # self energy
    # sigma = self.sigma(nb, phi)

    # Effective Hamiltonian
    H1n = ns1.hamiltonian

    # Complete diagonalization
    E1, psi1r = linalg.eig(H1n.toarray(), left=False)
    psi1l = np.conj(np.linalg.inv(psi1r)).T
    # psi1l = np.conj(psi1r).T

    # check for dark states (throw a warning if one shows up)
    # if (nb > 0):
    #     Setup.check_for_dark_states(nb, E1)

    return E1, psi1l, psi1r
def contruct_ellipse_parallel(pars):
    Coor, cm, A_i, Vr, dims, dist, max_size, min_size, d = pars
    dist_cm = coo_matrix(np.hstack([Coor[c].reshape(-1, 1) - cm[k]
                                    for k, c in enumerate(['x', 'y', 'z'][:len(dims)])]))
    Vr.append(dist_cm.T * spdiags(A_i.toarray().squeeze(), 0, d, d) * dist_cm / A_i.sum(axis=0))

    if np.sum(np.isnan(Vr)) > 0:
        raise Exception('You cannot pass empty (all zeros) components!')

    D, V = eig(Vr[-1])
    dkk = [np.min((max_size**2, np.max((min_size**2, dd.real)))) for dd in D]

    # search indexes for each component
    return np.sqrt(np.sum([(dist_cm * V[:, k])**2 / dkk[k] for k in range(len(dkk))], 0)) <= dist
#%% threshold_components
def get_gev_vector(target_psd_matrix, noise_psd_matrix):
    """
    Returns the GEV beamforming vector.

    :param target_psd_matrix: Target PSD matrix with shape (bins, sensors, sensors)
    :param noise_psd_matrix: Noise PSD matrix with shape (bins, sensors, sensors)
    :return: Set of beamforming vectors with shape (bins, sensors)
    """
    bins, sensors, _ = target_psd_matrix.shape
    beamforming_vector = np.empty((bins, sensors), dtype=np.complex)
    for f in range(bins):
        try:
            eigenvals, eigenvecs = eigh(target_psd_matrix[f, :, :],
                                        noise_psd_matrix[f, :, :])
        except np.linalg.LinAlgError:
            eigenvals, eigenvecs = eig(target_psd_matrix[f, :, :],
                                       noise_psd_matrix[f, :, :])
        beamforming_vector[f, :] = eigenvecs[:, np.argmax(eigenvals)]
    return beamforming_vector
def get_gevd_vals_vecs(target_psd_matrix, noise_psd_matrix):
    """
    Returns the eigenvalues and eigenvectors of GEVD

    :param target_psd_matrix: Target PSD matrix with shape (bins, sensors, sensors)
    :param noise_psd_matrix: Noise PSD matrix with shape (bins, sensors, sensors)
    :return: Set of eigen values with shape (bins, sensors),
        eigenvectors with shape (bins, sensors, sensors)
    """
    bins, sensors, _ = target_psd_matrix.shape
    eigen_values = np.empty((bins, sensors), dtype=np.complex)
    eigen_vectors = np.empty((bins, sensors, sensors), dtype=np.complex)
    for f in range(bins):
        try:
            eigenvals, eigenvecs = eigh(target_psd_matrix[f, :, :],
                                        noise_psd_matrix[f, :, :])
        except np.linalg.LinAlgError:
            eigenvals, eigenvecs = eig(target_psd_matrix[f, :, :],
                                       noise_psd_matrix[f, :, :])
        # values in increasing order
        eigen_values[f, :] = eigenvals
        eigen_vectors[f, :] = eigenvecs
    return eigen_values, eigen_vectors
def get_dressed_info(H0):
    # assign the index of each dressed state according to its overlap with the bare state
    w_c, v_c = la.eig(H0)
    dressed_id = []
    for ii in range(len(v_c)):
        index = np.argmax(np.abs(v_c[:, ii]))
        if index not in dressed_id:
            dressed_id.append(index)
        else:
            temp = (np.abs(v_c[:, ii])).tolist()
            while index in dressed_id:
                temp[index] = 0
                index = np.argmax(temp)
            dressed_id.append(index)

    return w_c, v_c, dressed_id
def gen_A(self, V, n_classes, n_dims, return_S_b=False):
    """
    A = [B][inv(Λ ** .5)][Q^T] and assumes same number of data in each class v.
    """
    B = np.random.randint(-100, 100, (n_dims, n_dims)).astype(float)
    big_V = np.matmul(V.T, V)  # V is now a scatter matrix.
    vals, vecs = eig(big_V)
    A = B / np.sqrt(vals.real)
    A = np.matmul(A, vecs.T)

    D = np.matmul(np.matmul(vecs.T, big_V), vecs)
    assert np.allclose(D, np.diag(vals))

    if return_S_b is True:
        S_b = 1 / n_classes * np.matmul(np.matmul(A, big_V), A.T)
        x = np.matmul(A, V.T).T
        S_b_empirical = 1 / n_classes * np.matmul(x.T, x)
        assert np.allclose(S_b, S_b_empirical)
        return A, S_b
    else:
        return A
def isNormal(A, method='definition'):
    # use the Schur inequality to determine whether the matrix is normal
    if method == 'Schur':
        # initialize eigenValue
        eigenValue = la.eig(A)[0]
        if abs(np.sum(eigenValue**2) - la.norm(A, 'fro')**2) < 0.00001:
            return True
        else:
            return False
    # use definition
    else:
        if abs((A.conjugate().T.dot(A) - A.dot(A.conjugate().T)).all()) < 0.00001:
            return True
        else:
            return False
def get_esystem(basis, traj_edges, test_set=None, delay=1):
    """
    """
    if test_set is None:
        test_set = basis
    # Calculate Generator, Stiffness matrix
    L = get_generator(basis, traj_edges, test_set=test_set, delay=delay, dt_eff=1)
    # L = get_transop(basis, traj_edges, test_set=test_set, delay=delay)
    S = get_stiffness_mat(basis, traj_edges, test_set=test_set, delay=delay)
    # Calculate, sort eigensystem
    evals, evecs_l, evecs_r = spl.eig(L, b=S, left=True, right=True,
                                      overwrite_a=False, overwrite_b=False)
    idx = evals.argsort()[::-1]
    evals = evals[idx]
    evecs_l = evecs_l[:, idx]
    evecs_r = evecs_r[:, idx]
    # Expand eigenvectors into real space.
    expanded_evecs_l = np.dot(test_set, evecs_l)
    expanded_evecs_r = np.dot(basis, evecs_r)
    return evals, expanded_evecs_l, expanded_evecs_r
def FLQTQEB(engine, app):
    '''
    This method calculates the Floquet quasi-energy bands.
    '''
    if app.path is None:
        result = zeros((2, engine.nmatrix + 1))
        result[:, 0] = array(xrange(2))
        result[0, 1:] = angle(eig(engine.evolution(ts=app.ts.mesh('t')))[0]) / app.ts.volume('t')
        result[1, 1:] = result[0, 1:]
    else:
        rank, mesh = app.path.rank(0), app.path.mesh(0)
        result = zeros((rank, engine.nmatrix + 1))
        result[:, 0] = mesh if mesh.ndim == 1 else array(xrange(rank))
        for i, paras in app.path('+'):
            result[i, 1:] = angle(eig(engine.evolution(ts=app.ts.mesh('t'), **paras))[0]) / app.ts.volume('t')
    name = '%s_%s' % (engine, app.name)
    if app.savedata: savetxt('%s/%s.dat' % (engine.dout, name), result)
    if app.plot: app.figure('L', result, '%s/%s' % (engine.dout, name))
    if app.returndata: return result
def transform(self, X, y):
    """transform function"""
    XMat = np.array(X)
    yMat = np.array(y)

    if XMat.shape[0] != yMat.shape[0]:
        yMat = yMat.T
    assert XMat.shape[0] == yMat.shape[0]

    XMat -= XMat.mean(axis=0)
    Sw, Sb = calc_Sw_Sb(XMat, yMat)

    if self.method == 'svd':
        U, S, V = np.linalg.svd(Sw)
        S = np.diag(S)
        Sw_inversed = V * np.linalg.pinv(S) * U.T
        A = Sw_inversed * Sb
    elif self.method == 'auto':
        A = np.linalg.pinv(Sw) * Sb

    eigval, eigvec = np.linalg.eig(A)
    eigval = eigval[0:self.n_components]
    eigvec = eigvec[:, 0:self.n_components]

    X_transformed = np.dot(XMat, eigvec)
    self.W = eigvec

    return X_transformed
def control_systems(request):
    ct_sys, ref = request.param
    Ac, Bc, Cc = ct_sys.data
    Dc = np.zeros((Cc.shape[0], 1))

    Q = np.eye(Ac.shape[0])
    R = np.eye(Bc.shape[1] if len(Bc.shape) > 1 else 1)

    Sc = linalg.solve_continuous_are(Ac, Bc.reshape(-1, 1), Q, R,)
    Kc = linalg.solve(R, Bc.T @ Sc).reshape(1, -1)
    ct_ctr = LTISystem(Kc)

    evals = np.sort(np.abs(
        linalg.eig(Ac, left=False, right=False, check_finite=False)
    ))
    dT = 1 / (2 * evals[-1])

    Tsim = (8 / np.min(evals[~np.isclose(evals, 0)])
            if np.sum(np.isclose(evals[np.nonzero(evals)], 0)) > 0
            else 8)

    dt_data = signal.cont2discrete((Ac, Bc.reshape(-1, 1), Cc, Dc), dT)
    Ad, Bd, Cd, Dd = dt_data[:-1]
    Sd = linalg.solve_discrete_are(Ad, Bd.reshape(-1, 1), Q, R,)
    Kd = linalg.solve(Bd.T @ Sd @ Bd + R, Bd.T @ Sd @ Ad)

    dt_sys = LTISystem(Ad, Bd, dt=dT)
    dt_sys.initial_condition = ct_sys.initial_condition
    dt_ctr = LTISystem(Kd, dt=dT)

    yield ct_sys, ct_ctr, dt_sys, dt_ctr, ref, Tsim
def eigenbasis(self, nb, phi=0):
    """
    Calculates the generalized eigen-energies along with the left
    and right eigen-basis.

    Parameters
    ----------
    nb : int
        Number of bosons
    phi : float
        Phase factor for the relevant photonic state
    """
    phi = 0 if self.local else phi
    ckey = '{}-{}'.format(nb, phi)
    if ckey not in self._cache['eigen']:
        # generate number sector
        ns1 = self.model.numbersector(nb)
        # get the size of the basis
        ns1size = ns1.basis.len  # length of the number sector basis
        # G1i = xrange(ns1size)  # our Greens function?

        # self energy
        sigma = self.sigma(nb, phi)

        # Effective Hamiltonian
        H1n = ns1.hamiltonian + sigma

        # Complete diagonalization
        E1, psi1r = linalg.eig(H1n.toarray(), left=False)
        psi1l = np.conj(np.linalg.inv(psi1r)).T

        # check for dark states (throw a warning if one shows up)
        # if (nb > 0):
        #     Setup.check_for_dark_states(nb, E1)

        self._cache['eigen'][ckey] = (E1, psi1l, psi1r)

    return self._cache['eigen'][ckey]
def fitEllipse(x, y):
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]
    D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
    S = np.dot(D.T, D)
    C = np.zeros([6, 6])
    C[0, 2] = C[2, 0] = 2
    C[1, 1] = -1
    E, V = eig(np.dot(inv(S), C))
    n = np.argmax(np.abs(E))
    a = V[:, n]
    if a[0] < 0:
        a = -a
    return a
def Aint(qL, qR, d):
    """ Returns the Osher-Solomon jump matrix for A, in the dth direction
        NB: eig function should be replaced with analytical function, if known
    """
    ret = zeros(n, dtype=complex128)
    Δq = qR - qL
    for i in range(N + 1):
        q = qL + nodes[i] * Δq
        J = jacobian(q, d)
        λ, R = eig(J, overwrite_a=1, check_finite=0)
        b = solve(R, Δq, check_finite=0)
        ret += weights[i] * dot(R, abs(λ) * b)
    return ret.real
def PCA(self, data):
    """
    returns: data transformed in 2 dims/columns + regenerated original data
    pass in: data as 2D NumPy array
    """
    dims_rescaled_data = 2
    m, n = data.shape
    data -= data.mean(axis=0)
    # calculate the covariance matrix
    R = np.cov(data, rowvar=False)
    # calculate eigenvectors & eigenvalues of the covariance matrix
    # use 'eigh' rather than 'eig' since R is symmetric,
    # the performance gain is substantial
    evals, evecs = LA.eig(R)
    # sort eigenvalues in decreasing order
    idx = np.argsort(evals)[::-1]
    evecs = evecs[:, idx]  # sort eigenvectors according to the same index
    evals = evals[idx]
    # select the first n eigenvectors (n is the desired dimension
    # of the rescaled data array, or dims_rescaled_data)
    evecs = evecs[:, :dims_rescaled_data]
    # carry out the transformation on the data using eigenvectors
    # and return the re-scaled data, eigenvalues, and eigenvectors
    return np.dot(evecs.T, data.T).T, evals, evecs
def plot_eig(model, maps, t, condition=0, color=[0, 0, 1]):
    import scipy.linalg as la
    w = get_weights(model)
    wrec = w[1]
    bin_maps = maps.astype('int')
    bin_t = bin_maps[condition, t, :].reshape(np.shape(bin_maps)[2], 1)
    bin_mask = bin_t.dot(bin_t.T)
    eva = la.eig(wrec * bin_mask)
    plt.scatter(eva[0].real, eva[0].imag, 12, color)
    plt.xlabel('real')
    plt.ylabel('imaginary')

# Dave's visualization function
def compute_scores(self, Y):
    if Y.shape[1] == 1:
        Y = self.get_Y_from_y(Y)

    P, N = Y.shape
    R = Y * Y.H / N
    S, U = lg.eig(R)
    eigenvalues = np.sort(np.real(S))[::-1]
    scores = np.zeros(len(self.L_vect))

    for index, L in enumerate(self.L_vect):
        if self.corrected == 1:
            nb_free_parameters = L * (2 * P - 1 - L)  # see [WIL94]
        else:
            nb_free_parameters = L * (2 * P - L)  # see [WAX85]

        num = np.prod(eigenvalues[L:])
        den = np.mean(eigenvalues[L:]) ** (P - L)
        penalty_term = penalty_term_IC(nb_free_parameters, N, method=self.method)
        scores[index] = -2 * N * np.log(num / den) + penalty_term

    self.scores = scores
def hsvd(sys):
    """Compute Hankel singular values of a linear system.

    Parameters
    ----------
    sys : :data:`linear_system_like`
        Linear system representation.

    Returns
    -------
    ``(len(sys),) np.array``
        Hankel singular values.

    See Also
    --------
    :class:`.Hankel`
    :func:`.balanced_transformation`

    References
    ----------
    .. [#] Glover, Keith, and Jonathan R. Partington. "Bounds on the
       achievable accuracy in model reduction." Modelling, robustness and
       sensitivity reduction in control systems. Springer Berlin Heidelberg,
       1987. 95-118.

    .. [#] https://www.mathworks.com/help/control/ref/hsvd.html
    """
    sys = LinearSystem(sys)
    R, O = control_gram(sys), observe_gram(sys)
    # sort needed to be consistent across different versions
    return np.sort(np.sqrt(abs(eig(np.dot(O, R))[0])))[::-1]
def isSimple(A):
    '''
    ask whether matrix A is a simple matrix
    '''
    # check if A is a square matrix
    if A.shape[1] != A.shape[0]:
        return False

    eigenValues, eigenVectors = la.eig(A)
    while eigenValues.shape[0] != 0:
        # dictValues.update({eigenValues[0]: 1})
        index = np.argwhere(abs(eigenValues - eigenValues[0]) < 0.00001)
        algebraicMulti = len(index)
        geometricMulti = eigenVectors[:, index].shape[1]
        if algebraicMulti != geometricMulti:
            return False
        # dictValues.update({eigenValues[0]: len})
        eigenValues = np.delete(eigenValues, index)
        # stack another spaces of eigenvalue and eigenvector
    return True

# compute the spectrum decomposition of matrix A
def spectrum_decomposition(A):
    '''
    compute the spectrum decomposition of matrix A
    '''
    # check if A is a simple matrix
    if isSimple(A) != True:
        raise Exception('non-simple matrix cannot be spectrum-decomposed')

    eigenValues, eigenVectors = la.eig(A)
    invVectors = la.inv(eigenVectors)
    eigenVectors_row = eigenVectors.shape[0]
    eigenVectors_column = eigenVectors.shape[1]
    invVectors_row = invVectors.shape[0]
    invVectors_column = invVectors.shape[1]

    # a dict over all distinct eigenvalues
    # keys: eigen values
    # values: the associated spectral projector
    # {eigenValue: spectrum}
    dictValues = {}
    while eigenValues.shape[0] != 0:
        index = np.argwhere(abs(eigenValues - eigenValues[0]) < 0.00001)
        spectrum = eigenVectors[:, index].reshape((eigenVectors_row, len(index))).dot(
            invVectors[index, :].reshape((len(index), invVectors_column)))
        dictValues.update({eigenValues[0]: spectrum})
        eigenValues = np.delete(eigenValues, index)
        eigenVectors = np.delete(eigenVectors, index, 1)
        invVectors = np.delete(invVectors, index, 0)

    return dictValues
def verSchur(A):
    '''
    a function that verifies the Schur inequality:
    test whether the sum of all squared eigenvalues is less than
    or equal to the squared Frobenius norm of the matrix
    '''
    # initialize eigenValue
    eigenValue = la.eig(A)[0]
    if np.sum(eigenValue**2) <= la.norm(A, 'fro')**2:
        return True
    else:
        return False

# approximation of the upper bound of the absolute value of each eigenValue
def isSimple(A):
    # check if A is a square matrix
    if A.shape[1] != A.shape[0]:
        return False

    eigenValues, eigenVectors = la.eig(A)
    while (eigenValues.shape[0] != 0):
        # dictValues.update({eigenValues[0]: 1})
        index = np.argwhere(abs(eigenValues - eigenValues[0]) < 0.00001)
        algebraicMulti = len(index)
        geometricMulti = eigenVectors[:, index].shape[1]
        if algebraicMulti != geometricMulti:
            return False
        # dictValues.update({eigenValues[0]: len})
        eigenValues = np.delete(eigenValues, index)
        # stack another spaces of eigenvalue and eigenvector
    return True
def calcAvec(new, dQ, W, lambda_, active_set, M, positive):
    # TODO: comment
    r, c = np.nonzero(active_set)  # [r,c] = find(active_set);
    Mm = -M.take(r, axis=0).take(r, axis=1)
    Mm = (Mm + Mm.T) / 2

    #% verify that there is no numerical instability
    if len(Mm) > 1:
        # print Mm.shape
        eigMm, _ = scipy.linalg.eig(Mm)
        eigMm = np.real(eigMm)
        # check_here
    else:
        eigMm = Mm

    if any(eigMm < 0):
        np.min(eigMm)
        #%error('The matrix Mm has negative eigenvalues')
        flag = 1

    b = np.sign(W)

    if new >= 0:
        b[new] = np.sign(dQ[new])

    b = b[active_set == 1]

    if len(Mm) > 1:
        avec = np.linalg.solve(Mm, b)
    else:
        avec = b / Mm

    if positive:
        if new >= 0:
            in_ = np.sum(active_set[:new])
            if avec[in_] < 0:
                # new;
                #%error('new component of a is negative')
                flag = 1

    one_vec = np.ones(W.shape)
    dQa = np.zeros(W.shape)
    for j in range(len(r)):
        dQa = dQa + np.expand_dims(avec[j] * M[:, r[j]], axis=1)

    gamma_plus = (lambda_ - dQ) / (one_vec + dQa)
    gamma_minus = (lambda_ + dQ) / (one_vec - dQa)

    return avec, gamma_plus, gamma_minus
def eigensystem(matrix):
    """ Given an array-like 'matrix', returns:
        - An array of eigenvalues
        - An array of eigenvectors
        - A rotation matrix that rotates the eigenbasis
          into the original basis

    Example:
        mat = [[1,2,3],[2,4,5],[3,5,6]]
        evals, evecs, rot = eigensystem(mat)
        evals
            array([ 11.34481428+0.j, -0.51572947+0.j, 0.17091519+0.j])
        evecs
            array([[-0.32798528, -0.59100905, -0.73697623],
                   [-0.73697623, -0.32798528,  0.59100905],
                   [ 0.59100905, -0.73697623,  0.32798528]])
        rot
            array([[-0.32798528, -0.73697623,  0.59100905],
                   [-0.59100905, -0.32798528, -0.73697623],
                   [-0.73697623,  0.59100905,  0.32798528]])

    This allows you to translate between original and eigenbases:

        If [v1, v2, v3] are the components of a vector in eigenbasis e1, e2, e3
        Then:
            rot.dot([v1,v2,v3]) = [vx,vy,vz]
        will give the components in the original basis x, y, z

        If [wx, wy, wz] are the components of a vector in original basis x, y, z
        Then:
            inv(rot).dot([wx,wy,wz]) = [w1,w2,w3]
        will give the components in the eigenbasis e1, e2, e3

        inv(rot).dot(mat).dot(rot)
            array([[evals[0], 0, 0],
                   [0, evals[1], 0],
                   [0, 0, evals[2]]])

    Note: For symmetric input 'matrix', inv(rot) == evecs
    """
    evals, emat = eig(matrix)
    return evals, np.transpose(emat), emat
def solveCSE(en, rot, mu, R, VT):
    n, m, oo = VT.shape

    V = np.transpose(VT)

    # find channels that are open, as defined by E' > 0
    edash = en - np.diag(VT[:, :, -1])
    openchann = edash > 0
    nopen = edash[openchann].size

    mx = matching_point(en, rot, V, R, mu)

    if mx < oo - 5:
        out = leastsq(eigen, (en, ), args=(rot, mx, V, R, mu), xtol=0.01)
        en = float(out[0])

    # solve CSE according to Johnson renormalized Numerov method
    WI = WImat(en, rot, V, R, mu)
    RI = RImat(WI, mx)
    wf = []
    if nopen > 0:
        oc = 0
        for j, ed in enumerate(edash):
            if ed > 0:
                f = fmat(j, RI, WI, mx)
                wf.append(wavefunction(WI, oc, f))
                oc += 1
    else:
        f = fmat(0, RI, WI, mx)
        wf.append(wavefunction(WI, nopen, f))

    wf = np.array(wf)
    wf = np.transpose(wf)

    if nopen == 0:
        wf = normalize(wf, R)
    else:
        K, AI, B = amplitude(wf, R, edash, mu)   # shape = nopen x nopen

        # K = BA-1 = U tan xi UT
        eig, U = linalg.eig(K)

        # form A^-1 U cos xi exp(i xi) UT
        I = np.identity(nopen, dtype=complex)
        xi = np.arctan(eig) * I
        cosxi = np.cos(xi) * I
        expxi = np.exp((0 + 1j) * xi) * I

        expxiUT = expxi @ np.transpose(U)
        cosxiexpxiUT = cosxi @ expxiUT
        UcosxiexpxiUT = U @ cosxiexpxiUT
        Norm = AI @ UcosxiexpxiUT   # == (cu, su) complex

        # complex wavefunction array  oo x n x nopen
        wf = wf @ Norm

    return wf, en
def processing(self):
    '''
    Perform the MAF transformation
    '''
    # covariance of original bands
    sigma = numpy.ma.cov(self.variates_stack.T, allow_masked=True)
    # TODO: Jenkins can't run these lines with allow_masked=True, why?
    # covariance for horizontal and vertical shifts
    sigmadh = numpy.ma.cov(self.H.T, allow_masked=True)
    sigmadv = numpy.ma.cov(self.V.T, allow_masked=True)

    sigma = numpy.cov(self.variates_stack.T)
    # covariance for horizontal and vertical shifts
    sigmadh = numpy.cov(self.H.T)
    sigmadv = numpy.cov(self.V.T)

    # simple pooling of shifts
    sigmad = 0.5 * (numpy.array(sigmadh) + numpy.array(sigmadv))

    # evalues, vec1 = scipy.linalg.eig(sigmad, sigma)
    evalues, vec1 = linalg.eig(sigmad, sigma)

    # Sort eigenvalues from smallest to largest and apply this order to
    # the eigenvector matrix
    sort_index = evalues.argsort()
    evalues = evalues[sort_index]
    vec1 = vec1[:, sort_index]

    # autocorrelation
    # ac = 1 - 0.5 * vec1
    HH = 1 / numpy.std(self.variates_stack, 0, ddof=1)
    diagvariates = numpy.diag(HH)
    invstderrmaf = numpy.diag((1 / numpy.sqrt(numpy.diag(vec1.T * sigma * vec1))))
    HHH = numpy.zeros((self.number_of_maf_variates), float)
    for b in range(self.number_of_maf_variates):
        # logger.info("Calculating component %d of MAF transformation" % b)
        HHH[b] = cmp(numpy.sum((diagvariates * sigma * vec1 * invstderrmaf)[b]), 0)

    sgn = numpy.diag(HHH)  # assure positivity
    v = numpy.dot(vec1, sgn)
    N = numpy.shape(self.variates_stack)[0]
    X = self.variates_stack - numpy.tile(numpy.mean(self.variates_stack, 0), (N, 1))
    # scale v to give MAFs with unit variance
    aux1 = numpy.dot(numpy.dot(v.T, sigma), v)  # dispersion of MAFs
    aux2 = 1 / numpy.sqrt(numpy.diag(aux1))
    aux3 = numpy.tile(aux2.T, (self.number_of_maf_variates, 1))
    v = v * aux3  # now dispersion is unit matrix
    self.mafs = numpy.dot(X, v)