The following 50 code examples, extracted from open-source Python projects, illustrate how to use scipy.linalg.norm().
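Before the project examples, here is a minimal standalone sketch of the function's basic behavior (the arrays and values are illustrative only, not taken from any project below): by default scipy.linalg.norm returns the vector 2-norm, or the Frobenius norm for a 2-D array, and the ord argument selects other norms.

import numpy as np
from scipy.linalg import norm

v = np.array([3.0, 4.0])
print(norm(v))          # vector 2-norm -> 5.0
print(norm(v, np.inf))  # max-norm -> 4.0

A = np.array([[1.0, 2.0], [3.0, 4.0]])
print(norm(A))          # Frobenius norm of a matrix
print(norm(A, 2))       # spectral norm (largest singular value)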
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print 'QUERY: ' + query
    print 'NEAREST: '
    for i, s in enumerate(sentences):
        print s, sorted_args[i]
def get_lipschitz(data):
    """Get the Lipschitz constant for a specific loss function.

    Only square loss implemented.

    Parameters
    ----------
    data : (n, d) float ndarray
        data matrix

    Returns
    -------
    L : float
        the Lipschitz constant
    """
    n, p = data.shape
    if p > n:
        tmp = np.dot(data, data.T)
    else:
        tmp = np.dot(data.T, data)
    return la.norm(tmp, 2)
def prox_l1(w, alpha):
    r"""Proximity operator for the l1 norm.

    :math:`\hat{\alpha}_{l,m} = sign(u_{l,m})\left||u_{l,m}| - \alpha \right|_+`

    Parameters
    ----------
    w : ndarray
        The vector (of the n-dimensional space) on which we want to compute
        the proximal operator
    alpha : float
        regularisation parameter

    Returns
    -------
    ndarray : the vector corresponding to the application of the
        proximity operator to w
    """
    return np.sign(w) * np.maximum(np.abs(w) - alpha, 0.)
def dctii(x, axes=None):
    """
    Compute a multi-dimensional DCT-II over specified array axes. This
    function is implemented by calling the one-dimensional DCT-II
    :func:`scipy.fftpack.dct` with normalization mode 'ortho' for each of
    the specified axes.

    Parameters
    ----------
    x : array_like
        Input array
    axes : sequence of ints, optional (default None)
        Axes over which to compute the DCT-II.

    Returns
    -------
    y : ndarray
        DCT-II of input array
    """

    if axes is None:
        axes = list(range(x.ndim))
    for ax in axes:
        x = fftpack.dct(x, type=2, axis=ax, norm='ortho')
    return x
def idctii(x, axes=None):
    """
    Compute a multi-dimensional inverse DCT-II over specified array axes.
    This function is implemented by calling the one-dimensional inverse
    DCT-II :func:`scipy.fftpack.idct` with normalization mode 'ortho' for
    each of the specified axes.

    Parameters
    ----------
    x : array_like
        Input array
    axes : sequence of ints, optional (default None)
        Axes over which to compute the inverse DCT-II.

    Returns
    -------
    y : ndarray
        Inverse DCT-II of input array
    """

    if axes is None:
        axes = list(range(x.ndim))
    for ax in axes[::-1]:
        x = fftpack.idct(x, type=2, axis=ax, norm='ortho')
    return x
def fl2norm2(xf, axis=(0, 1)):
    r"""
    Compute the squared :math:`\ell_2` norm in the DFT domain, taking into
    account the unnormalised DFT scaling, i.e. given the DFT of a
    multi-dimensional array computed via :func:`fftn`, return the squared
    :math:`\ell_2` norm of the original array.

    Parameters
    ----------
    xf : array_like
        Input array
    axis : sequence of ints, optional (default (0, 1))
        Axes on which the input is in the frequency domain

    Returns
    -------
    x : float
        :math:`\|\mathbf{x}\|_2^2` where the input array is the result of
        applying :func:`fftn` to the specified axes of multi-dimensional
        array :math:`\mathbf{x}`
    """

    xfs = xf.shape
    return (linalg.norm(xf)**2) / np.prod(np.array([xfs[k] for k in axis]))
def rrs(ax, b):
    r"""
    Compute relative residual :math:`\|\mathbf{b} - A \mathbf{x}\|_2 /
    \|\mathbf{b}\|_2` of the solution to a linear equation :math:`A \mathbf{x}
    = \mathbf{b}`. Returns 1.0 if :math:`\mathbf{b} = 0`.

    Parameters
    ----------
    ax : array_like
        Linear component :math:`A \mathbf{x}` of equation
    b : array_like
        Constant component :math:`\mathbf{b}` of equation

    Returns
    -------
    x : float
        Relative residual
    """

    nrm = linalg.norm(b.ravel())
    if nrm == 0.0:
        return 1.0
    else:
        return linalg.norm((ax - b).ravel()) / nrm
def compute_residuals(self):
    """Compute residuals and stopping thresholds."""

    if self.opt['AutoRho', 'StdResiduals']:
        r = linalg.norm(self.rsdl_r(self.AXnr, self.Y))
        s = linalg.norm(self.rsdl_s(self.Yprev, self.Y))
        epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
            self.rsdl_rn(self.AXnr, self.Y)*self.opt['RelStopTol']
        edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
            self.rsdl_sn(self.U)*self.opt['RelStopTol']
    else:
        rn = self.rsdl_rn(self.AXnr, self.Y)
        if rn == 0.0:
            rn = 1.0
        sn = self.rsdl_sn(self.U)
        if sn == 0.0:
            sn = 1.0
        r = linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn
        s = linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn
        epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
            self.opt['RelStopTol']
        edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
            self.opt['RelStopTol']

    return r, s, epri, edua
def test_08(self):
    N = 64
    M = 2*N
    L = 4
    np.random.seed(12345)
    D = np.random.randn(N, M)
    x0 = np.zeros((M, 1))
    si = np.random.permutation(list(range(0, M-1)))
    x0[si[0:L]] = np.random.randn(L, 1)
    s0 = D.dot(x0)
    lmbda = 5e-3
    opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                             'RelStopTol': 5e-4})
    b = bpdn.BPDN(D, s0, lmbda, opt)
    b.solve()
    x1 = b.Y
    assert(np.abs(b.itstat[-1].ObjFun - 0.012009) < 1e-5)
    assert(np.abs(b.itstat[-1].DFid - 1.9636082e-06) < 1e-5)
    assert(np.abs(b.itstat[-1].RegL1 - 2.401446) < 1e-5)
    assert(linalg.norm(x1-x0) < 1e-3)
def costFunc(alpha, *args):
    i = args[2]
    original_thetai = args[0]
    delta_thetai = args[1]
    x = args[3]
    y = args[4]
    _lambda = args[5]
    labels = set(y)
    thetai = original_thetai
    thetai[i, :] = thetai[i, :] - alpha * delta_thetai
    k = 0
    sum_log_p = 0.0
    for label in labels:
        index = y == label
        xi = x[index]
        p = condProb(original_thetai, thetai[k, :], xi)
        log_p = np.log10(p)
        sum_log_p = sum_log_p + log_p.sum()
        k = k + 1
    r = -sum_log_p / x.shape[0] + (_lambda / 2.0) * pow(norm(thetai), 2)
    #print r, alpha
    return r
def distance_smooth_norm(expected, result):
    """
    Calculates 2-norm from difference in fitness between expected and given snakes
    @param expected: array of expected fitness
    @param result: array of given fitness
    @return:
    """
    global best_so_far, calculations

    n = result.size
    differences = abs(expected - result) ** 4 * np.arange(n * 2, 0, -2)
    distance = norm(differences) / np.sqrt(n)

    best_so_far = min(best_so_far, distance)
    calculations += 1
    show_progress(distance, calculations)

    return distance
def test_jw_restrict_operator(self):
    """Test the scheme for restricting JW encoded operators to number"""
    # Make a Hamiltonian that cares mostly about number of electrons
    n_qubits = 6
    target_electrons = 3
    penalty_const = 100.
    number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
    bias_sparse = jordan_wigner_sparse(
        sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i in
             range(n_qubits)], FermionOperator()))
    hamiltonian_sparse = penalty_const * (
        number_sparse - target_electrons *
        scipy.sparse.identity(2**n_qubits)).dot(
        number_sparse - target_electrons *
        scipy.sparse.identity(2**n_qubits)) + bias_sparse

    restricted_hamiltonian = jw_number_restrict_operator(
        hamiltonian_sparse, target_electrons, n_qubits)
    true_eigvals, _ = eigh(hamiltonian_sparse.A)
    test_eigvals, _ = eigh(restricted_hamiltonian.A)

    self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]), 0.0)
def get_Gt(u, gradf, t):
    # vector used for backtracking check with projected gradient descent
    u_n = u - t * gradf
    norm_u_n = norm(u_n)

    if norm_u_n > 1.0:
        # project to L2 unit ball
        u_norm = u_n / norm_u_n
    else:
        u_norm = u_n

    Gt = 1.0/t * (u - u_norm)
    return Gt


###################################################
##  DCA_ONE STOCH - STOCHASTIC GRADIENT DESCENT  ##
###################################################
def logistic_regression(x, t, w, eps=1e-2, max_iter=int(1e3)):
    N = x.shape[1]
    Phi = np.vstack([np.ones(N), phi(x)]).T
    for k in range(max_iter):
        y = expit(Phi.dot(w))
        R = np.diag(np.ones(N) * (y * (1 - y)))
        H = Phi.T.dot(R).dot(Phi)
        g = Phi.T.dot(y - t)
        w_new = w - linalg.solve(H, g)
        diff = linalg.norm(w_new - w) / linalg.norm(w)
        if (diff < eps):
            break
        w = w_new
        print('{0:5d} {1:10.6f}'.format(k, diff))
    return w
def build_encoder(tparams, options):
    """
    Construct encoder
    """
    # inputs (image, sentence)
    im = tensor.matrix('im', dtype='float32')
    s = tensor.matrix('s', dtype='float32')

    # embeddings
    eim = get_layer('ff')[1](tparams, im, options, prefix='ff_im', activ='linear')
    es = get_layer('ff')[1](tparams, s, options, prefix='ff_s', activ='linear')

    # L2 norm of rows
    lim = l2norm(eim)
    ls = l2norm(es)

    return [im, s], lim, ls

# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def doa(self, receiver, source):
    ''' Computes the direction of arrival wrt a source and receiver '''

    s_ind = self.key2ind(source)
    r_ind = self.key2ind(receiver)

    # vector from receiver to source
    v = self.X[:, s_ind] - self.X[:, r_ind]

    azimuth = np.arctan2(v[1], v[0])
    elevation = np.arctan2(v[2], la.norm(v[:2]))

    azimuth = azimuth + 2*np.pi if azimuth < 0. else azimuth
    elevation = elevation + 2*np.pi if elevation < 0. else elevation

    return np.array([azimuth, elevation])
def compute_criterion(self, y):
    self.N = len(y)

    # construct matrices
    A = np.matrix(self.A)
    b = np.matrix(self.b).T
    H = np.matrix(self.H)

    x = lg.inv(H.T*H)*H.T*y  # estimation of x

    if self.estimate_sigma2 is True:
        r, p = self.A.shape
        coef = (self.N-p)/r
        den = lg.norm(y-H*x)**2
    else:
        den = self.sigma2
        coef = 1

    term1 = A*x-b
    num = term1.T*lg.inv(A*lg.inv(H.T*H)*A.T)*term1
    self.criterion = coef*num/den
    ## See page 274 / 345
def linkage(df, n_groups):
    # create the distance matrix based on the Frobenius norm: |A-B|_F where A is
    # a 24 x N matrix with N the number of timeseries inside the dataframe df
    # TODO: We could save half the time as we only need the upper triangle once
    # because the distance matrix is symmetric
    if True:
        Y = np.empty((n_groups, n_groups,))
        Y[:] = np.NAN
        for i in range(len(Y)):
            for j in range(len(Y[i, :])):
                A = df.loc[i+1].values
                B = df.loc[j+1].values
                #print('Computing distance of:{},{}'.format(i,j))
                Y[i, j] = norm(A-B, ord='fro')

    # condensed distance matrix as vector for linkage (upper triangle as a vector)
    y = Y[np.triu_indices(n_groups, 1)]

    # create linkage matrix with Ward's algorithm and euclidean norm
    Z = hac.linkage(y, method='ward', metric='euclidean')
    # R = hac.inconsistent(Z, d=10)
    return Z
def qr_ls(A, b):
    ''' least squares using QR (A must be full column rank) '''
    m = A.shape[0]
    n = A.shape[1]
    if rank(A) < n:
        raise Exception('Rank deficient')
    A = qr_householder(A)
    for j in range(n):
        # rebuild the Householder vector stored below the diagonal (v[0] = 1)
        v = np.hstack((1, A[j+1:, j]))
        A[j+1:, j] = 0
        # apply the reflector I - 2*v*v^T/(v^T v) to the trailing part of b
        b[j:] = (np.eye(m - j) - 2 * np.outer(v, v) / la.norm(v, 2)**2).dot(b[j:])
    x_ls = la.solve(A[:n, :n], b[:n])
    return x_ls
def ls_qr(A, b):
    ''' least squares using QR (A must be full column rank) '''
    m = A.shape[0]
    n = A.shape[1]
    if rank(A) < n:
        raise Exception('Rank deficient')
    A = qr.qr_householder(A)
    for j in range(n):
        # rebuild the Householder vector stored below the diagonal (v[0] = 1)
        v = np.hstack((1, A[j+1:, j]))
        A[j+1:, j] = 0
        # apply the reflector I - 2*v*v^T/(v^T v) to the trailing part of b
        b[j:] = (np.eye(m - j) - 2 * np.outer(v, v) / la.norm(v, 2)**2).dot(b[j:])
    # back-substitute with the upper-triangular R block
    x_ls = la.solve(A[:n, :n], b[:n])
    return x_ls
def householder_vector(x):
    dimensionX = len(x)
    sigma = x[1:].conjugate().T.dot(x[1:])
    v = np.vstack((1, x[1:]))
    if sigma == 0:
        beta = 0
        return v, beta
    else:
        # mu = ||x||_2 (cf. Golub & Van Loan, Algorithm 5.1.1)
        miu = np.sqrt(x[0]**2 + sigma)
        if x[0] <= 0:
            v[0] = x[0] - miu
        else:
            v[0] = - sigma / (x[0] + miu)
        beta = 2 * v[0]**2 / (sigma + v[0]**2)
        v = v / la.norm(v, 2)
    return v, beta


# a test function that asks whether a particular matrix is one of the special matrices above
def debug_bidiag(i, s, inds, A, B, U, V, T):
    print("\n ********* DEBUGGING BLOCKBIDIAG: ************\n")
    # We will check the recurrence relations listed in Karimi, Toutounian
    print("\n Iteration i = {}, inds = {}\n".format(i, inds))
    E1 = np.zeros((inds+s, s))
    E1[0:s, :] = np.eye(s, s)
    errorRecurrence1 = sp.norm(B - np.dot(U[:, 0:inds+s], np.dot(E1, B1)))
    print("\n B - UU(i+1)*E1*B1 = {}\n".format(errorRecurrence1))
    #
    # AVk = Ukp1Tk
    errorRecurrence2 = sp.norm(np.dot(A, V[:, 0:inds])
                               - np.dot(U[:, 0:inds+s], T[0:inds+s, 0:inds]))
    print("\n A*VV(i) - UU(i+1)T(i) = {}\n".format(errorRecurrence2))
    #
    # ATUkp1 = VkTkT + Vkp1Akp1Ekp1T
    Eip1 = np.zeros((inds+s, s))
    Eip1[inds:inds+s, :] = np.eye(s, s)
    errorRecurrence3 = sp.norm(np.dot(A.T, U[:, 0:inds+s])
                               - np.dot(V[:, 0:inds], T[0:inds+s, 0:inds].T)
                               - np.dot(V[:, inds:inds+s], np.dot(Aip1, Eip1.T)))
    print("\n A.T*UU(i+1) - VV(i)*T(i).T - V(i+1)*A(i+1)*E(i+1).T = {}\n".format(errorRecurrence3))
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
    """Compute lipschitz constant for FISTA

    It uses a power iteration method.
    """
    n_times = M.shape[1]
    n_points = G.shape[1]
    iv = np.ones((n_points, n_times), dtype=np.float64)
    v = phi(iv)
    L = 1e100
    for it in range(100):
        L_old = L
        logger.info('Lipschitz estimation: iteration = %d' % it)
        iv = np.real(phiT(v))
        Gv = np.dot(G, iv)
        GtGv = np.dot(G.T, Gv)
        w = phi(GtGv)
        L = np.max(np.abs(w))  # l_inf norm
        v = w / L
        if abs((L - L_old) / L_old) < tol:
            break
    return L
def run(self, params, loss):
    m = theano.shared(np.zeros(params.shape.eval()), borrow=True, name='m')
    v = theano.shared(np.zeros(params.shape.eval()), borrow=True, name='v')
    grad = T.grad(loss, params)
    norm_grad = grad.norm(2)
    m_t = self.beta1 * m + (1 - self.beta1) * grad
    v_t = self.beta2 * v + (1 - self.beta2) * T.pow(grad, 2)
    step = T.iscalar(name='step')
    update_rules = [(params,
                     params - self.lr * (m_t / (1.0 - T.pow(self.beta1, step)) /
                                         (T.sqrt(v_t / (1.0 - T.pow(self.beta2, step))) + self.stable))),
                    (m, m_t),
                    (v, v_t)]
    train_epoch = theano.function([step], [loss, norm_grad], updates=update_rules)
    for epoch in xrange(self.max_epoch):
        loss, grad = train_epoch(epoch + 1)
        norm_l2 = norm(grad)
        print("epoch = %d\t loss = %f\t norm = %f" % (epoch + 1, loss, norm_l2), end='')
        print()
        if norm_l2 < self.eps:
            break
def multi_im_run(self, image_name):
    """detection and extraction with many boxes"""
    #caffe.set_mode_gpu()
    multi_im = self.detect(image_name, multi_box=True)
    features = []
    for im in multi_im:
        image = pad(im, size=224)
        feature = extraction.forward(self.net_e, image, self.transformer)
        r = np.squeeze(feature['pool5/7x7_s1'].data[0])
        #feature2 = extraction.forward(self.net_e2, image, self.transformer2)
        #r2 = np.squeeze(feature2['pool5/7x7_s1'].data[0])
        #r = np.hstack((r, r2)).copy()
        #r = r2
        if self.pca is not None:
            r = self.pca.transform(r)[0, :]
        r = r / norm(r)
        #print r.shape
        features.append(r)
    return features
def norm_of_columns(A, p=2):
    """Vector p-norm of each column of a matrix.

    Parameters
    ----------
    A : array_like
        Input matrix.
    p : int, optional
        p-th norm.

    Returns
    -------
    array_like
        p-norm of each column of A.
    """
    _, N = A.shape
    return np.asarray([linalg.norm(A[:, j], ord=p) for j in range(N)])
def coherence_of_columns(A):
    """Mutual coherence of columns of A.

    Parameters
    ----------
    A : array_like
        Input matrix.

    Returns
    -------
    float
        Mutual coherence of columns of A.
    """
    A = np.asmatrix(A)
    _, N = A.shape
    A = A * np.asmatrix(np.diag(1/norm_of_columns(A)))
    Gram_A = A.H*A
    for j in range(N):
        Gram_A[j, j] = 0
    return np.max(np.abs(Gram_A))
def test_fed():
    print 'test_fed'
    t1, U, m = 1.0, 4.0, 4
    lattice = Square('S1')('%sP-1O' % m)
    config = IDFConfig(priority=DEFAULT_FERMIONIC_PRIORITY, pids=lattice.pids,
                       map=lambda pid: Fermi(norbital=1, nspin=2, nnambu=1))
    basis = FBasis(m*2, m, 0.0)
    ed = FED(name='OneD_%s_%s' % (lattice.name, basis.rep), sectors=[basis],
             lattice=lattice, config=config,
             terms=[Hopping('t1', t1), Hubbard('U', U)], dtype=np.complex128)
    eigvals0 = eigh(ed.matrix(basis.rep).todense(), eigvals_only=True)
    basis = TRBasis(FBasis(m*2, m, 0.0), dk=2, nk=m)
    ed = TrFED(name='OneD_%s_%s' % (lattice.name, basis.rep), basis=basis,
               lattice=lattice, config=config,
               terms=[Hopping('t1', t1), Hubbard('U', U)], dtype=np.complex128)
    eigvals1 = []
    for k in xrange(m):
        eigvals1.append(eigh(ed.matrix(k=k).todense(), eigvals_only=True))
    eigvals1 = sorted(np.concatenate(eigvals1))
    print 'diff: %s' % norm(eigvals0-eigvals1)
    print
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]
    for alphas_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=0.9)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    alphas_min = [10, 0.9, 1e-4]
    # same test, with normalization
    for alphas_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=0.9)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
def test_barycenter_kneighbors_graph():
    X = np.array([[0, 1], [1.01, 1.], [2, 0]])

    A = barycenter_kneighbors_graph(X, 1)
    assert_array_almost_equal(
        A.toarray(),
        [[0., 1., 0.],
         [1., 0., 0.],
         [0., 1., 0.]])

    A = barycenter_kneighbors_graph(X, 2)
    # check that columns sum to one
    assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
    pred = np.dot(A.toarray(), X)
    assert_less(linalg.norm(pred - X) / X.shape[0], 1)


#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def switchcomp2(self, prev_para, prev_para2, curr_para):
    bet = 0
    ent = 6
    if prev_para.size == 12:
        distrec = np.finfo(np.float64).max
        indrec = [-1, -1]
        for i in xrange(2):
            for j in xrange(2):
                if i == j:
                    continue
                tempdist = np.linalg.norm(prev_para[bet:ent] - curr_para[6 * i + bet:6 * i + ent]) \
                    + np.linalg.norm(prev_para[6 + bet:6 + ent] - curr_para[6 * j + bet:6 * j + ent]) \
                    + np.linalg.norm(prev_para2[bet:ent] - curr_para[6 * i + bet:6 * i + ent]) \
                    + np.linalg.norm(prev_para2[6 + bet:6 + ent] - curr_para[6 * j + bet:6 * j + ent])
                if tempdist < distrec:
                    distrec = tempdist
                    indrec = [i, j]
        fin_para = np.concatenate(
            (curr_para[6 * indrec[0]:6 * indrec[0] + 6],
             curr_para[6 * indrec[1]:6 * indrec[1] + 6]))
        return fin_para
def word_features(table):
    """
    Extract word features into a normalized matrix
    """
    features = numpy.zeros((len(table), 620), dtype='float32')
    keys = table.keys()
    for i in range(len(table)):
        f = table[keys[i]]
        features[i] = f / norm(f)
    return features
def computeMatrixConvergence(prev, new):
    return la.norm(new - prev)
def computeListOfListsConvergence(prev, new):
    assert len(prev) == len(new)

    max_diff = 0
    for i in range(len(prev)):
        diff = la.norm(np.array(new[i]) - np.array(prev[i]))
        if diff > max_diff:
            max_diff = diff
    return max_diff
def computeEtaDifference(self):
    max_diff = 0
    for t in range(self.n_tasks):
        last_eta_list = self.last_eta[t, :]
        eta_list = self.eta[t, :]
        norm = la.norm(last_eta_list - eta_list)
        if norm > max_diff:
            max_diff = norm
    return max_diff
def kl_preds_v2(model, sess, s_test, a_test, n_rep_per_item=200):
    ## Compare sample distribution to ground truth
    Env = grid_env(False)
    n_test_items, state_size = s_test.shape
    distances = np.empty([n_test_items, 3])
    for i in range(n_test_items):
        state = s_test[i, :].astype('int32')
        action = np.round(a_test[i, :]).astype('int32')

        # ground truth
        state_truth = np.empty([n_rep_per_item, s_test.shape[1]])
        for o in range(n_rep_per_item):
            Env.set_state(state.flatten())
            s1, r, dead = Env.step(action.flatten())
            state_truth[o, :] = s1
        truth_count, bins = np.histogramdd(state_truth, bins=[np.arange(8)-0.5]*state_size)
        truth_prob = truth_count/n_rep_per_item

        # predictions of model
        y_sample = sess.run(model.y_sample, {
            model.x: state[None, :].repeat(n_rep_per_item, axis=0),
            model.y: np.zeros(np.shape(state[None, :])).repeat(n_rep_per_item, axis=0),
            model.a: action[None, :].repeat(n_rep_per_item, axis=0),
            model.Qtarget: np.zeros(np.shape(action[None, :])).repeat(n_rep_per_item, axis=0),
            model.lr: 0,
            model.lamb: 1,
            model.temp: 0.00001,
            model.is_training: False,
            model.k: 1})
        sample_count, bins = np.histogramdd(y_sample, bins=[np.arange(8)-0.5]*state_size)
        sample_prob = sample_count/n_rep_per_item

        distances[i, 0] = np.sum(truth_prob*(np.log(truth_prob+1e-5)-np.log(sample_prob+1e-5)))  # KL(p|p_tilde)
        distances[i, 1] = np.sum(sample_prob*(np.log(sample_prob+1e-5)-np.log(truth_prob+1e-5)))  # Inverse KL(p_tilde|p)
        distances[i, 2] = norm(np.sqrt(truth_prob) - np.sqrt(sample_prob))/np.sqrt(2)
    return np.mean(distances, axis=0)
def main():
    file1, file2 = sys.argv[1:1+2]
    # read images as 2D arrays (convert to grayscale for simplicity)
    img1 = to_grayscale(imread(file1).astype(float))
    img2 = to_grayscale(imread(file2).astype(float))
    # compare
    n_m, n_0 = compare_images(img1, img2)
    print "Manhattan norm:", n_m, "/ per pixel:", n_m/img1.size
    print "Zero norm:", n_0, "/ per pixel:", n_0*1.0/img1.size
def compare_images(img1, img2):
    # normalize to compensate for exposure difference, this may be unnecessary
    # consider disabling it
    img1 = normalize(img1)
    img2 = normalize(img2)
    # calculate the difference and its norms
    diff = img1 - img2  # elementwise for scipy arrays
    m_norm = sum(abs(diff))  # Manhattan norm
    z_norm = norm(diff.ravel(), 0)  # Zero norm
    return (m_norm, z_norm)
def _sigma(matrix, mu):
    n, p = matrix.shape

    if p > n:
        tmp = np.dot(matrix, matrix.T)
    else:
        tmp = np.dot(matrix.T, matrix)

    return (la.norm(tmp, 2)/n) + mu


##############################################################################
# Models
def ridge_regression(data, labels, mu=0.0):
    r"""Implementation of the Regularized Least Squares solver.

    It solves the ridge regression problem with parameter ``mu`` on the
    `l2-norm`.

    Parameters
    ----------
    data : (N, P) ndarray
        Data matrix.
    labels : (N,) or (N, 1) ndarray
        Labels vector.
    mu : float, optional (default is `0.0`)
        `l2-norm` penalty.

    Returns
    -------
    beta : (P, 1) ndarray
        Ridge regression solution.

    Examples
    --------
    >>> X = numpy.array([[0.1, 1.1, 0.3], [0.2, 1.2, 1.6], [0.3, 1.3, -0.6]])
    >>> beta = numpy.array([0.1, 0.1, 0.0])
    >>> Y = numpy.dot(X, beta)
    >>> beta = l1l2py.algorithms.ridge_regression(X, Y, 1e3).T
    >>> len(numpy.flatnonzero(beta))
    3

    """
    n, p = data.shape

    if n < p:
        tmp = np.dot(data, data.T)
        if mu:
            tmp += mu * n * np.eye(n)
        tmp = la.pinv(tmp)

        return np.dot(np.dot(data.T, tmp), labels.reshape(-1, 1))
    else:
        tmp = np.dot(data.T, data)
        if mu:
            tmp += mu * n * np.eye(p)
        tmp = la.pinv(tmp)

        return np.dot(tmp, np.dot(data.T, labels.reshape(-1, 1)))
def _fixed_step(self, t, fields, dt, pars, hook=null_hook):
    fields = fields.copy()
    fields, pars = hook(t, fields, pars)
    J = self._model.J(fields, pars)
    Id = self.__cache__(fields.uflat.size)
    self._A = A = Id - self._gamma[0, 0] * dt * J
    luf = sps.linalg.factorized(A)
    ks = []
    fields_i = fields.copy()
    for i in np.arange(self._s):
        fields_i.fill(fields.uflat +
                      sum([self._alpha[i, j] * ks[j] for j in range(i)]))
        F = self._model.F(fields_i, pars)
        ks.append(luf(dt * F +
                      dt * (J @ sum([self._gamma[i, j] * ks[j] for j in range(i)])
                            if i > 0 else 0)))
    U = fields.uflat.copy()
    U = U + sum([bi * ki for bi, ki in zip(self._b, ks)])

    U_pred = (U + sum([bi * ki for bi, ki in zip(self._b_pred, ks)])
              if self._b_pred is not None else None)

    fields.fill(U)

    return t + dt, fields, (norm(U - U_pred, np.inf)
                            if U_pred is not None else None)