The following 34 code examples, extracted from open-source Python projects, illustrate how to use scipy.ones().
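Note that scipy.ones() was simply a re-export of numpy.ones() that older SciPy releases placed in the top-level scipy namespace; the alias has since been deprecated and removed, so new code should call numpy.ones() directly. Below is a minimal sketch (using NumPy only) of the call patterns the examples rely on:

import numpy as np  # scipy.ones in the examples below is the same function

w = np.ones(5)                      # 1-D float64 array: [1. 1. 1. 1. 1.]
labels = np.ones(100, dtype=int)    # integer labels, as in y = sp.ones(len(X))
nan_mat = np.ones([3, 4]) * np.nan  # NaN-filled matrix, as in cvcompute below
weights = np.ones(10) / 10          # uniform weights summing to 1, as in the tests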
def glmnet_softmax(x):
    d = x.shape
    nas = scipy.any(scipy.isnan(x), axis=1)
    if scipy.any(nas):
        pclass = scipy.zeros([d[0], 1])*scipy.NaN
        if scipy.sum(nas) < d[0]:
            pclass2 = glmnet_softmax(x[~nas, :])
            pclass[~nas] = pclass2
        result = pclass
    else:
        # index of the per-row maximum, starting from column 0
        maxdist = x[:, 0].copy()  # copy so the loop does not mutate x
        pclass = scipy.zeros([d[0], 1])
        for i in range(1, d[1], 1):
            t = x[:, i] > maxdist
            pclass[t] = i
            maxdist[t] = x[t, i]
        result = pclass
    return result
#=========================
def softmax(x, gap=False):
    if gap:
        # The source this was ported from computes gaps = pmin(x) here; the
        # branch was never finished and gap is never called with True.
        raise ValueError('gap = True is not implemented yet')
    d = x.shape
    maxdist = x[:, 0].copy()  # copy so the loop does not mutate x
    pclass = scipy.zeros([d[0], 1], dtype=scipy.integer)
    for i in range(1, d[1], 1):
        l = x[:, i] > maxdist
        pclass[l] = i
        maxdist[l] = x[l, i]
    result = dict()
    result['pclass'] = pclass
    return result
# end of softmax
# =========================================
def cvcompute(mat, weights, foldid, nlams):
    if len(weights.shape) > 1:
        weights = scipy.reshape(weights, [weights.shape[0], ])
    wisum = scipy.bincount(foldid, weights=weights)
    nfolds = scipy.amax(foldid) + 1
    outmat = scipy.ones([nfolds, mat.shape[1]])*scipy.NaN
    good = scipy.zeros([nfolds, mat.shape[1]])
    mat[scipy.isinf(mat)] = scipy.NaN
    for i in range(nfolds):
        tf = foldid == i
        mati = mat[tf, ]
        wi = weights[tf, ]
        outmat[i, :] = wtmean(mati, wi)
        good[i, 0:nlams[i]] = 1
    N = scipy.sum(good, axis=0)
    cvcpt = dict()
    cvcpt['cvraw'] = outmat
    cvcpt['weights'] = wisum
    cvcpt['N'] = N
    return cvcpt
# end of cvcompute
#=========================
def cvglmnetPlot(cvobject, sign_lambda=1.0, **options):
    sloglam = sign_lambda*scipy.log(cvobject['lambdau'])

    fig = plt.gcf()
    ax1 = plt.gca()
    #fig, ax1 = plt.subplots()

    plt.errorbar(sloglam, cvobject['cvm'], cvobject['cvsd'],
                 ecolor=(0.5, 0.5, 0.5), **options)
    plt.plot(sloglam, cvobject['cvm'], linestyle='dashed',
             marker='o', markerfacecolor='r')

    xlim1 = ax1.get_xlim()
    ylim1 = ax1.get_ylim()

    xval = sign_lambda*scipy.log(scipy.array([cvobject['lambda_min'],
                                              cvobject['lambda_min']]))
    plt.plot(xval, ylim1, color='b', linestyle='dashed', linewidth=1)

    if cvobject['lambda_min'] != cvobject['lambda_1se']:
        xval = sign_lambda*scipy.log([cvobject['lambda_1se'],
                                      cvobject['lambda_1se']])
        plt.plot(xval, ylim1, color='b', linestyle='dashed', linewidth=1)

    ax2 = ax1.twiny()
    ax2.xaxis.tick_top()

    atdf = ax1.get_xticks()
    indat = scipy.ones(atdf.shape, dtype=scipy.integer)
    if sloglam[-1] >= sloglam[1]:
        for j in range(len(sloglam)-1, -1, -1):
            indat[atdf <= sloglam[j]] = j
    else:
        for j in range(len(sloglam)):
            indat[atdf <= sloglam[j]] = j
    prettydf = cvobject['nzero'][indat]

    ax2.set_xlim(xlim1)
    ax2.set_xticks(atdf)
    ax2.set_xticklabels(prettydf)
    ax2.grid()
    ax1.yaxis.grid()
    ax2.set_xlabel('Degrees of Freedom')

    # plt.plot(xlim1, [ylim1[1], ylim1[1]], 'b')
    # plt.plot([xlim1[1], xlim1[1]], ylim1, 'b')

    if sign_lambda < 0:
        ax1.set_xlabel('-log(Lambda)')
    else:
        ax1.set_xlabel('log(Lambda)')
    ax1.set_ylabel(cvobject['name'])
    #plt.show()
def __init__(self, nn_name, batch_size=1024, freeze=1,
             l_rates=sp.float32(0.05)*sp.ones(512, dtype=sp.float32),
             verbose=1, subnet=None):
    self.nn_name = nn_name
    self.subnet = subnet
    if subnet is not None and freeze:
        self.subnet.__freeze__()
    self.batch_size = batch_size
    self.verbose = verbose
    self.l_rates = l_rates
    self.__input_var__ = T.tensor4('X'+self.nn_name[:2])
    self.__target_var__ = T.ivector('y+'+self.nn_name[:2])
    self.max_epochs = self.l_rates.shape[0]
    if self.nn_name == '12-net':
        self.net = self.__build_12_net__()
    elif self.nn_name == '24-net':
        self.net = self.__build_24_net__()
    elif self.nn_name == '48-net':
        self.net = self.__build_48_net__()
    elif self.nn_name == '12-calib_net':
        self.net = self.__build_12_calib_net__()
    elif self.nn_name == '24-calib_net':
        self.net = self.__build_24_calib_net__()
    elif self.nn_name == '48-calib_net':
        self.net = self.__build_48_calib_net__()
    self.__build_loss_train__fn__()
def test_one_with_one_without_parameters(population_strategy: PopulationStrategy):
    n = 10
    kernels = []

    df_without = pd.DataFrame(index=list(range(n)))
    w_without = sp.ones(n) / n
    kernel_without = MultivariateNormalTransition()
    kernel_without.fit(df_without, w_without)
    kernels.append(kernel_without)

    df_with = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])
    w_with = sp.ones(n) / n
    kernel_with = MultivariateNormalTransition()
    kernel_with.fit(df_with, w_with)
    kernels.append(kernel_with)

    population_strategy.adapt_population_size(kernels, sp.array([.7, .3]))
    assert population_strategy.nr_particles > 0
def test_transitions_not_modified(population_strategy: PopulationStrategy):
    n = 10
    kernels = []
    test_points = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])

    for _ in range(2):
        df = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])
        w = sp.ones(n) / n
        kernel = MultivariateNormalTransition()
        kernel.fit(df, w)
        kernels.append(kernel)

    test_weights = [k.pdf(test_points) for k in kernels]
    population_strategy.adapt_population_size(kernels, sp.array([.7, .2]))
    after_adaptation_weights = [k.pdf(test_points) for k in kernels]

    same = all([(k1 == k2).all()
                for k1, k2 in zip(test_weights, after_adaptation_weights)])
    err_msg = ("Population strategy {}"
               " modified the transitions".format(population_strategy))
    assert same, err_msg
def test_download():
    """Test that fetch_mldata is able to download and cache a data set."""
    _urlopen_ref = datasets.mldata.urlopen
    datasets.mldata.urlopen = mock_mldata_urlopen({
        'mock': {
            'label': sp.ones((150,)),
            'data': sp.ones((150, 4)),
        },
    })
    try:
        mock = fetch_mldata('mock', data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "target", "data"]:
            assert_in(n, mock)

        assert_equal(mock.target.shape, (150,))
        assert_equal(mock.data.shape, (150, 4))

        assert_raises(datasets.mldata.HTTPError, fetch_mldata,
                      'not_existing_name')
    finally:
        datasets.mldata.urlopen = _urlopen_ref
def get_ld_tables(snps, ld_radius=100, ld_window_size=0):
    """
    Calculates LD tables, and the LD score in one go...
    """
    ld_dict = {}
    m, n = snps.shape
    print(m, n)
    ld_scores = sp.ones(m)
    ret_dict = {}
    for snp_i, snp in enumerate(snps):
        # Calculate D
        start_i = max(0, snp_i - ld_radius)
        stop_i = min(m, snp_i + ld_radius + 1)
        X = snps[start_i: stop_i]
        D_i = sp.dot(snp, X.T) / n
        r2s = D_i ** 2
        ld_dict[snp_i] = D_i
        lds_i = sp.sum(r2s - (1-r2s) / (n-2), dtype='float32')
        #lds_i = sp.sum(r2s - (1-r2s)*empirical_null_r2)
        ld_scores[snp_i] = lds_i
    ret_dict['ld_dict'] = ld_dict
    ret_dict['ld_scores'] = ld_scores

    if ld_window_size > 0:
        ref_ld_matrices = []
        for i, wi in enumerate(range(0, m, ld_window_size)):
            start_i = wi
            stop_i = min(m, wi + ld_window_size)
            curr_window_size = stop_i - start_i
            X = snps[start_i: stop_i]
            D = sp.dot(X, X.T) / n
            ref_ld_matrices.append(D)
        ret_dict['ref_ld_matrices'] = ref_ld_matrices
    return ret_dict
def auc_mat(y, prob, weights=None):
    if weights is None or len(weights) == 0:
        weights = scipy.ones([y.shape[0], 1])
    wweights = weights*y
    wweights = wweights.flatten()
    wweights = scipy.reshape(wweights, [1, wweights.size])
    ny = y.shape[0]
    a = scipy.zeros([ny, 1])
    b = scipy.ones([ny, 1])
    yy = scipy.vstack((a, b))
    pprob = scipy.vstack((prob, prob))
    result = auc(yy, pprob, wweights)
    return result
#=========================
def get_aflw_face_data(k=12, on_drive=False):
    dbpath = 'F:\\datasets\\image_data_sets\\faces\\AFLW'
    dbpath = join(dbpath, 'aflw.sqlite')
    rfpath = 'F:\\datasets\\image_data_sets\\faces\\AFLW\\img'
    conn = sqlite3.connect(dbpath)
    X = []
    c = 0
    for file_id, x, y, ra, rb, theta in conn.execute(
            'SELECT file_id,x,y,ra,rb,theta FROM Faces NATURAL JOIN FaceEllipse'):
        fpath = join(rfpath, file_id)
        frame = fr.get_frame(fpath)
        x1, y1, x2, y2 = util.ellipse2bbox(a=ra, b=rb, angle=theta, cx=x, cy=y)
        x = x1
        y = y1
        h = abs(y2-y1)
        w = abs(x2-x1)
        no_neg = sp.all(sp.array([x, y, h, w]) > 0)  # ignore bad data in the sql table
        if frame is not None and no_neg:
            y, x, w, h = [int(e) for e in (y, x, w, h)]
            face = fr.get_patch(frame, y, x, (w, h))
            face_r, good_example = Datasets.sample_resize(face, k, k)
            if good_example:
                print('face:', fpath)
                vec = fr.frame_to_vect(face_r)
                if not on_drive:
                    X.append(vec)
                    face_flip_r = fr.flip_frame(face_r)
                    vec = fr.frame_to_vect(face_flip_r)
                    X.append(vec)
                else:
                    for item in Datasets.data_augmentation(frame, y, x, w, h):
                        fr.write_frame('F:\\train_data\\pos\\' + str(c) + '_'
                                       + str(file_id)[:-4] + '_' + 'pos', item)
                        c += 1
    X = sp.array(X)
    y = sp.ones(len(X))
    return X, y
def get_train_face_wider_data(k=12, write_to_disk=False):
    '''
    cut faces (positive examples) by bboxes from all images in dataset
    return X - features
           y - labels
           cnt - count of examples
    '''
    X, y = [], []
    root = 'F:\\Datasets\\image_data_sets\\faces\\WIDERFace\\'
    pattern = "*.jpg"
    bboxs = Datasets.load_wider_face(os.path.join(root, 'wider_face_split',
                                                  'wider_face_train_v7.mat'))
    for path, subdirs, files in os.walk(root, 'WIDER_train'):
        for indx, iname in enumerate(files):
            if fnmatch(iname, pattern):
                ipath = os.path.join(path, iname)
                print('face:', ipath)
                img = fr.get_frame(ipath)
                H, W, dim = img.shape
                bbox_list = bboxs[iname[:-4]]
                for bbox in bbox_list:
                    face = fr.get_patch(img, bbox[1], bbox[0], (bbox[2], bbox[3]))
                    #fr.write_frame('F:\\1\\' + str(c), face)
                    face_r, good_example = Datasets.sample_resize(face, k, k)
                    if good_example:
                        vec = fr.frame_to_vect(face_r)
                        X.append(vec)
                        y.append(1)
                        face_r_flip = fr.flip_frame(face_r)
                        vec = fr.frame_to_vect(face_r_flip)
                        X.append(vec)
                        y.append(1)
    X = sp.array(X)
    y = sp.array(y)
    #y = sp.ones(len(X))
    return X, y
def get_train_data(n_pos=46443, n_neg=206940, k=12):
    '''
    merge positive and negative examples
    '''
    suff = str(k)
    X_name = 'train_data_' + suff + '.npz'
    y_name = 'labels_' + suff + '.npz'
    if not(os.path.exists(X_name) and os.path.exists(y_name)):
        X_pos = []
        # X_train_face,y_train_face = Datasets.get_train_face_wider_data(k = k)
        # X_pos = X_train_face[y_train_face==1]
        # X_pos = X_train_face
        X_aflw, y_train_face_aflw = Datasets.get_aflw_face_data(k=k)
        # if len(X_pos) > 0:
        #     X_pos = sp.vstack([X_pos, X_aflw])
        # else:
        #     X_pos = X_aflw
        X_pos = X_aflw
        X_train_non_face, y_train_non_face = Datasets.get_train_non_face_data(k=k)
        print('c1_pos:', len(X_pos))
        #print((X_train_face[y_train_face==0].shape,X_train_non_face.shape))
        # if len(X_train_face[y_train_face==0]) > 0:
        #     X_neg = sp.vstack((X_train_face[y_train_face==0], X_train_non_face))
        # else:
        #     X_neg = X_train_non_face
        X_neg = X_train_non_face
        X_pos = shuffle(X_pos, random_state=42)
        X_neg = shuffle(X_neg, random_state=42)
        X_pos = X_pos[:n_pos]
        X_neg = X_neg[:n_neg]
        n_neg = len(X_neg)
        n_pos = len(X_pos)
        y_pos = sp.ones(n_pos, int)
        y_neg = sp.zeros(n_neg, int)
        X = sp.vstack((X_pos, X_neg))
        y = sp.hstack((y_pos, y_neg))
        X, y = shuffle(X, y, random_state=42)
        sp.savez(X_name, X)
        sp.savez(y_name, y)
def _init_hyperparameters(self, X, T):
    n_samples = X.shape[0]

    if self.mean is None:
        self.mean = sp.zeros(n_samples + 1)
    if self.cov is None:
        self.cov = sp.ones(n_samples + 1)
    if self.beta is None:
        self.beta = 1

    return
def predict(self, X, T, X_new):
    """Predict ``X_new`` with given training data ``(X, T)``."""
    n_tests = X_new.shape[0]
    phi = sp.r_[sp.ones(n_tests).reshape(1, -1),
                self._compute_design_matrix(X_new, X)]  # Add x0
    phi = phi[self.rv_indices, :]
    predict_mean = sp.dot(self.mean, phi)
    predict_cov = 1 / self.beta + \
        sp.dot(phi.T, sp.dot(self.cov, phi)).diagonal()
    return predict_mean, predict_cov
def fit(self, X):
    n_samples, n_features = X.shape
    K = self.kernel.inner(X, X)

    # Center the Gram matrix: K' = K - 1_N K - K 1_N + 1_N K 1_N,
    # where 1_N is the n x n matrix with every entry equal to 1/n.
    I1N = sp.ones((n_samples, n_samples)) / n_samples
    K_centered = K - sp.dot(I1N, K) - sp.dot(K, I1N) + sp.dot(sp.dot(I1N, K), I1N)

    eigvals, eigvecs = self._eig_decomposition(K_centered)
    self.eigvals = eigvals
    self.eigvecs = eigvecs
    Y = sp.dot(K, eigvecs)
    return Y
def test_adapt_single_model(population_strategy: PopulationStrategy):
    n = 10
    df = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])
    w = sp.ones(n) / n
    kernel = MultivariateNormalTransition()
    kernel.fit(df, w)

    population_strategy.adapt_population_size([kernel], sp.array([1.]))
    assert population_strategy.nr_particles > 0
def test_adapt_two_models(population_strategy: PopulationStrategy):
    n = 10
    kernels = []
    for _ in range(2):
        df = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])
        w = sp.ones(n) / n
        kernel = MultivariateNormalTransition()
        kernel.fit(df, w)
        kernels.append(kernel)

    population_strategy.adapt_population_size(kernels, sp.array([.7, .2]))
    assert population_strategy.nr_particles > 0
def test_no_parameters(population_strategy: PopulationStrategy):
    n = 10
    df = pd.DataFrame(index=list(range(n)))
    w = sp.ones(n) / n

    kernels = []
    for _ in range(2):
        kernel = MultivariateNormalTransition()
        kernel.fit(df, w)
        kernels.append(kernel)

    population_strategy.adapt_population_size(kernels, sp.array([.7, .3]))
    assert population_strategy.nr_particles > 0
def read_data(instruments):
    '''
    Data pre-processing
    '''
    nins = len(instruments)
    instruments = sp.array([sp.loadtxt('datafiles/'+x) for x in instruments])

    def data(data, ins_no):
        Time, Radial_Velocity, Err = data.T[:3]  # the rv error
        Radial_Velocity -= sp.mean(Radial_Velocity)
        Flag = sp.ones(len(Time)) * ins_no  # marks which instrument the data belongs to
        Staract = data.T[3:]
        return sp.array([Time, Radial_Velocity, Err, Flag, Staract])

    def sortstuff(tryin):
        t, rv, er, flag = tryin
        order = sp.argsort(t)
        return sp.array([x[order] for x in [t, rv, er, flag]])

    fd = sp.array([]), sp.array([]), sp.array([]), sp.array([])
    for k in range(len(instruments)):  # appends all the data in megarg
        t, rv, er, flag, star = data(instruments[k], k)
        fd = sp.hstack((fd, [t, rv, er, flag]))  # careful: this is a list, not an array

    fd[0] = fd[0] - min(fd[0])
    alldat = sp.array([])
    try:
        staract = sp.array([data(instruments[i], i)[4] for i in range(nins)])
    except Exception:
        staract = sp.array([sp.array([]) for i in range(nins)])
    starflag = sp.array([sp.array([i for k in range(len(staract[i]))])
                         for i in range(len(staract))])
    tryin = sortstuff(fd)
    for i in range(len(starflag)):
        for j in range(len(starflag[i])):
            staract[i][j] -= sp.mean(staract[i][j])

    totcornum = 0
    for correlations in starflag:
        if len(correlations) > 0:
            totcornum += len(correlations)

    return fd, staract, starflag, totcornum
def get_fddb_face_data(k=12, on_drive=False):
    root = 'F:\\datasets\\image_data_sets\\faces\\FDDB\\'
    iroot = os.path.join(root, 'originalPics')
    eroot = os.path.join(root, 'FDDB-folds')
    pattern = '-ellipseList.txt'
    c = 0
    X, y = [], []
    for path, subdirs, files in os.walk(eroot):
        for fname in files:
            if fname.find(pattern) > 0:
                fpath = os.path.join(path, fname)
                print(fpath)
                with open(fpath) as f:
                    lines = sp.array(f.readlines())
                paths_indx = sp.where([line.find('/') > 0 for line in lines])[0]
                counts_indx = paths_indx + 1
                paths = sp.array([e.strip() for e in lines[paths_indx]])
                ellipces = []
                for i in counts_indx:
                    cnt = int(lines[i])
                    ellipces.append(lines[i+1:i+cnt+1])
                ellipces = [[[float(num) for num in line.split()[:-1]]
                             for line in e] for e in ellipces]
                ellipces = sp.array(ellipces)
                for iname, ells in zip(paths[:], ellipces[:]):
                    ppath = os.path.join(iroot, iname.replace('/', '\\')) + '.jpg'
                    file_id = iname.split('/')[-1]
                    frame = fr.get_frame(ppath)
                    for item in ells:
                        ra, rb, theta, x, y = item
                        x1, y1, x2, y2 = util.ellipse2bbox(a=ra, b=rb, angle=theta,
                                                           cx=x, cy=y)
                        x = x1
                        y = y1
                        h = abs(y2-y1)
                        w = abs(x2-x1)
                        print(file_id, (y, x, h, w))
                        non_neg = x > 0 and y > 0
                        if not non_neg:
                            continue
                        if on_drive:
                            for item in Datasets.data_augmentation(frame, y, x, w, h):
                                fr.write_frame('F:\\train_data\\pos\\' + str(c)
                                               + '_' + str(file_id) + '_pos', item)
                                c += 1
                        else:
                            pass
    X = sp.array(X)
    y = sp.ones(len(X))
    return X, y
def fit(self, X, T, max_iter=int(1e2), tol=1e-3, bound=1e10):
    """Fit a RVM model with the training data ``(X, T)``."""
    # Initialize the hyperparameters
    self._init_hyperparameters(X, T)

    # Compute design matrix
    n_samples = X.shape[0]
    phi = sp.c_[sp.ones(n_samples), self._compute_design_matrix(X)]  # Add x0

    alpha = self.cov
    beta = self.beta

    log_evidence = -1e10
    for iter in range(max_iter):
        alpha[alpha >= bound] = bound
        rv_indices = sp.nonzero(alpha < bound)[0]
        rv_phi = phi[:, rv_indices]
        rv_alpha = alpha[rv_indices]

        # Compute the posterior distribution
        post_cov = spla.inv(sp.diag(rv_alpha) + beta * sp.dot(rv_phi.T, rv_phi))
        post_mean = beta * sp.dot(post_cov, sp.dot(rv_phi.T, T))

        # Re-estimate the hyperparameters
        gamma = 1 - rv_alpha * post_cov.diagonal()
        rv_alpha = gamma / (post_mean * post_mean)
        beta = (n_samples + 1 - gamma.sum()) / \
            spla.norm(T - sp.dot(rv_phi, post_mean))**2

        # Evaluate the log evidence and test the relative change
        C = sp.eye(rv_phi.shape[0]) / beta + \
            rv_phi.dot(sp.diag(1.0 / rv_alpha)).dot(rv_phi.T)
        log_evidence_new = -0.5 * (sp.log(spla.det(C)) + T.dot(spla.inv(C)).dot(T))
        diff = spla.norm(log_evidence_new - log_evidence)
        if diff < tol * spla.norm(log_evidence):
            break

        log_evidence = log_evidence_new
        alpha[rv_indices] = rv_alpha

    # Should re-compute the posterior distribution
    self.rv_indices = rv_indices
    self.cov = post_cov
    self.mean = post_mean
    self.beta = beta

    return self