We extracted the following 29 code examples from open-source Python projects to illustrate how to use scipy.absolute().
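As a quick orientation before the extracted examples: in the SciPy versions these projects target, scipy.absolute is a top-level re-export of numpy.absolute, i.e. the element-wise absolute value. Below is a minimal standalone sketch of the thresholding pattern most examples rely on, assuming an older SciPy release where the alias still exists (recent releases removed these NumPy aliases, so numpy.absolute is the portable spelling):

    import numpy as np
    import scipy as sp  # older SciPy re-exports numpy.absolute as sp.absolute

    x = np.array([-3.0, -0.5, 2.0])
    print(sp.absolute(x))        # element-wise |x|: [3.  0.5 2. ]
    print(sp.absolute(x) > 1.0)  # boolean mask, as in the foreground examples: [ True False  True]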
def getForegroundMask(self):
    '''
    @return: A mask image indicating which pixels are considered foreground.
        Depending on whether soft-thresholding is used, this may be a binary
        image with values of [0 or 255], or an image of weights [0.0-255.0],
        which will have to be divided by 255 to get weights [0.0-1.0].
    @note: One may wish to perform additional morphological operations on the
        foreground mask prior to use.
    '''
    diff = self._computeBGDiff()

    if self._softThreshold:
        mask = 1 - (math.e)**(-(1.0*diff)/self._threshold)  # element-wise exp weighting
        #mask = (diff > self._threshold)
    else:
        mask = (sp.absolute(diff) > self._threshold)
        #mu = sp.mean(diff)
        #sigma = sp.std(diff)
        #mask = sp.absolute((diff-mu)/sigma) > self._threshold

    return pv.Image(mask*255.0)
def softmax(x, gap=False):
    d = x.shape
    maxdist = x[:, 0]
    pclass = scipy.zeros([d[0], 1], dtype=scipy.integer)
    for i in range(1, d[1], 1):
        l = x[:, i] > maxdist
        pclass[l] = i
        maxdist[l] = x[l, i]
    if gap == True:
        x = scipy.absolute(maxdist - x)
        x[0:d[0], pclass] = x*scipy.ones([d[1], d[1]])
        #gaps = pmin(x)  # not sure what this means; gap is never called with True
        raise ValueError('gap = True is not implemented yet')
    result = dict()
    if gap == True:
        result['pclass'] = pclass
        #result['gaps'] = gaps
        raise ValueError('gap = True is not implemented yet')
    else:
        result['pclass'] = pclass
    return(result)
# end of softmax
# =========================================
def _computeBGDiff(self):
    prevImg = self._imageBuffer[0].asMatrix2D()
    curImg = self._imageBuffer.getMiddle().asMatrix2D()
    nextImg = self._imageBuffer[-1].asMatrix2D()

    delta1 = sp.absolute(curImg - prevImg)   # frame diff 1
    delta2 = sp.absolute(nextImg - curImg)   # frame diff 2

    # Use the element-wise minimum of the two difference images, which is what
    # gets compared to the threshold to yield the foreground mask.
    return sp.minimum(delta1, delta2)
def get_amplitude(self, signal, l):
    # Return the cached magnitude spectrum for frame l, computing it on first use.
    if l in self.amplitude:
        return self.amplitude[l]
    else:
        amp = sp.absolute(sp.fft(get_frame(signal, self.winsize, l) * self.window))
        self.amplitude[l] = amp
        return amp
def compute_noise_avg_spectrum(self, nsignal):
    windownum = int(len(nsignal)//(self.winsize//2) - 1)
    avgamp = np.zeros(self.winsize)
    for l in range(windownum):
        avgamp += sp.absolute(sp.fft(get_frame(nsignal, self.winsize, l) * self.window))
    return avgamp/float(windownum)
def nonzeroCoef(beta, bystep=False):
    result = scipy.absolute(beta) > 0
    if len(result.shape) == 1:
        result = scipy.reshape(result, [result.shape[0], 1])
    if not bystep:
        result = scipy.any(result, axis=1)
    return(result)
# end of nonzeroCoef()
# =========================================
def nonzeroCoef(beta, bystep=False):
    result = scipy.absolute(beta) > 0
    if not bystep:
        result = scipy.any(result, axis=1)
    return(result)
# end of nonzeroCoef
# =========================================
def calc_par(self, frame):
    power = sp.absolute(sp.fft(frame * self._window)) ** 2
    avg_pow = power[:int(self._winsize / 2)].sum() / (self._winsize / 2)
    smax = -np.inf
    lenmax = 0
    # searching f0 with maximizing estimated power of periodic component
    for i in range(2, int(self._winsize / 10)):
        idx = list(range(i, int(self._winsize / 2), i + 1))
        score = power[:int(self._winsize / 2)][idx].sum() - (len(idx) * avg_pow)
        if score > smax:
            smax = score
            lenmax = len(idx)
    pp = (smax / (1.0 - self._eta * lenmax)) * self._eta
    pa = avg_pow - pp
    return calc_hypotes(pa, pp, beta=self._beta) / calc_nullhypotes(pa, pp, alpha=self._alpha)
def _init_params(self, X):
    init = self.init
    n_samples, n_features = X.shape
    n_components = self.n_components

    if (init == 'kmeans'):
        km = Kmeans(n_components)
        clusters, mean, cov = km.cluster(X)
        coef = sp.array([c.shape[0] / n_samples for c in clusters])
        comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                 for i in range(n_components)]
    elif (init == 'rand'):
        coef = sp.absolute(sprand.randn(n_components))
        coef = coef / coef.sum()

        means = X[sprand.permutation(n_samples)[0: n_components]]
        clusters = [[] for i in range(n_components)]
        for x in X:
            idx = sp.argmin([spla.norm(x - mean) for mean in means])
            clusters[idx].append(x)

        comps = []
        for k in range(n_components):
            mean = means[k]
            cov = sp.cov(clusters[k], rowvar=0, ddof=0)
            comps.append(multivariate_normal(mean, cov, allow_singular=True))

    self.coef = coef
    self.comps = comps
def _cov(self, indices, n):
    if len(indices) > 1:
        surrounding_indices = indices[n, 1:]
        nearest_vector_deltas = (self.X_arr[surrounding_indices]
                                 - self.X_arr[n])
        local_weights = self.w[surrounding_indices]
    else:
        nearest_vector_deltas = sp.absolute(self.X_arr)
        local_weights = sp.array([1])
    cov = smart_cov(nearest_vector_deltas,
                    local_weights / local_weights.sum())
    if sp.absolute(cov.sum()) == 0:
        for k in range(cov.shape[0]):
            cov[k, k] = sp.absolute(self.X_arr[0, k])
    return cov * self.scaling
def test_continuous_non_gaussian(db_path, sampler):
    def model(args):
        return {"result": sp.rand() * args['u']}

    models = [model]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(250)
    parameter_given_model_prior_distribution = [Distribution(u=RV("uniform", 0, 1))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    d_observed = .5
    abc.new(db_path, {"result": d_observed})
    abc.do_not_stop_when_only_single_model_alive()

    minimum_epsilon = -1
    history = abc.run(minimum_epsilon, max_nr_populations=2)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["u"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))

    @sp.vectorize
    def f_expected(u):
        return (sp.log(u)-sp.log(d_observed)) / (- sp.log(d_observed)) * \
            (u > d_observed)

    x = sp.linspace(0.1, 1)
    max_distribution_difference = sp.absolute(f_empirical(x) - f_expected(x)).max()
    assert max_distribution_difference < 0.12
def test_gaussian_multiple_populations(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [Distribution(x=st.norm(0, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})

    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))

    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_x**2 + 1 / sigma_y**2)
    mu_x_given_y = sigma_x_given_y**2 * y_observed / sigma_y**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.052
    assert history.max_t == nr_populations-1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .12
def _computeBGDiff(self):
    self._flow.update(self._imageBuffer.getLast())

    n = len(self._imageBuffer)

    # Compose the optical-flow transforms forward from the first frame
    # to the middle frame.
    prev_im = self._imageBuffer[0]
    forward = None
    for i in range(0, n/2):
        if forward == None:
            forward = self._imageBuffer[i].to_next
        else:
            forward = forward * self._imageBuffer[i].to_next

    # Build a mask that is zero along a two-pixel border and 255 in the interior.
    w, h = size = prev_im.size
    mask = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
    cv.Set(mask, 0)
    interior = cv.GetSubRect(mask, pv.Rect(2, 2, w-4, h-4).asOpenCV())
    cv.Set(interior, 255)
    mask = pv.Image(mask)

    prev_im = forward(prev_im)
    prev_mask = forward(mask)

    # Compose the transforms backward from the last frame to the middle frame.
    next_im = self._imageBuffer[n-1]
    back = None
    for i in range(n-1, n/2, -1):
        if back == None:
            back = self._imageBuffer[i].to_prev
        else:
            back = back * self._imageBuffer[i].to_prev

    next_im = back(next_im)
    next_mask = back(mask)

    curr_im = self._imageBuffer[n/2]

    prevImg = prev_im.asMatrix2D()
    curImg = curr_im.asMatrix2D()
    nextImg = next_im.asMatrix2D()

    prevMask = prev_mask.asMatrix2D()
    nextMask = next_mask.asMatrix2D()

    # Compute transformed images
    delta1 = sp.absolute(curImg - prevImg)   # frame diff 1
    delta2 = sp.absolute(nextImg - curImg)   # frame diff 2

    delta1 = sp.minimum(delta1, prevMask)
    delta2 = sp.minimum(delta2, nextMask)

    # Use the element-wise minimum of the two difference images, which is what
    # gets compared to the threshold to yield the foreground mask.
    return sp.minimum(delta1, delta2)
def glmnetPlot(x, xvar='norm', label=False, ptype='coef', **options):
    # process inputs
    xvar = getFromList(xvar, ['norm', 'lambda', 'dev'],
                       'xvar should be one of ''norm'', ''lambda'', ''dev'' ')
    ptype = getFromList(ptype, ['coef', '2norm'],
                        'ptype should be one of ''coef'', ''2norm'' ')

    if x['class'] in ['elnet', 'lognet', 'coxnet', 'fishnet']:
        handle = plotCoef(x['beta'], [], x['lambdau'], x['df'], x['dev'],
                          label, xvar, '', 'Coefficients', **options)
    elif x['class'] in ['multnet', 'mrelnet']:
        beta = x['beta']
        if xvar == 'norm':
            norm = 0
            nzbeta = beta
            for i in range(len(beta)):
                which = nonzeroCoef(beta[i])
                nzbeta[i] = beta[i][which, :]
                norm = norm + scipy.sum(scipy.absolute(nzbeta[i]), axis=0)
        else:
            norm = 0

        if ptype == 'coef':
            ncl = x['dfmat'].shape[0]
            if x['class'] == 'multnet':
                for i in range(ncl):
                    str = 'Coefficients: Class %d' % (i)
                    handle = plotCoef(beta[i], norm, x['lambdau'], x['dfmat'][i, :],
                                      x['dev'], label, xvar, '', str, **options)
                    if i < ncl - 1:
                        plt.figure()
            else:
                str = 'Coefficients: Response %d' % (i)
                handle = plotCoef(beta[i], norm, x['lambdau'], x['dfmat'][i, :],
                                  x['dev'], label, xvar, '', str, **options)
        else:
            dfseq = scipy.round_(scipy.mean(x['dfmat'], axis=0))
            coefnorm = beta[1]*0
            for i in range(len(beta)):
                coefnorm = coefnorm + scipy.absolute(beta[i])**2
            coefnorm = scipy.sqrt(coefnorm)
            if x['class'] == 'multnet':
                str = 'Coefficient 2Norms'
                handle = plotCoef(coefnorm, norm, x['lambdau'], dfseq, x['dev'],
                                  label, xvar, '', str, **options)
                if i < ncl - 1:
                    plt.figure()
            else:
                str = 'Coefficient 2Norms'
                handle = plotCoef(coefnorm, norm, x['lambdau'], x['dfmat'][0, :],
                                  x['dev'], label, xvar, '', str, **options)

    return(handle)
# end of glmnetPlot
# =========================================
#
# =========================================
# helper functions
# =========================================
def test_gaussian_multiple_populations_crossval_kde(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [Distribution(x=st.norm(0, sigma_x))]
    parameter_perturbation_kernels = [GridSearchCV(
        MultivariateNormalTransition(),
        {"scaling": sp.logspace(-1, 1.5, 5)})]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 transitions=parameter_perturbation_kernels,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})

    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))

    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_x**2 + 1 / sigma_y**2)
    mu_x_given_y = sigma_x_given_y**2 * y_observed / sigma_y**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.052
    assert history.max_t == nr_populations-1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .12
def pt_pos(self, kplanets, boundaries, inslims, acc_lims):
    ndim = 1 + 5 * kplanets + self.nins*2*(self.MOAV+1) + self.totcornum
    pos = sp.array([sp.zeros(ndim) for i in range(self.nwalkers)])
    k = -2
    l = -2
    ll = -2  ##
    for j in range(ndim):
        if j < 5 * kplanets:
            k += 2
            if j%5==1:
                fact = sp.absolute(boundaries[k] - boundaries[k+1]) / self.nwalkers
            else:
                #fact = sp.absolute(boundaries[k]) / (self.nwalkers)
                fact = (sp.absolute(boundaries[k] - boundaries[k+1]) * 2) / (5 * self.nwalkers)
            dif = sp.arange(self.nwalkers) * fact * np.random.uniform(0.9, 0.999)
            for i in range(self.nwalkers):
                if j%5==1:
                    pos[i][j] = boundaries[k] + (dif[i] + fact/2.0)
                else:
                    #pos[i][j] = boundaries[k] * 0.5 + (dif[i] + fact/2.0)
                    pos[i][j] = (boundaries[k+1]+3*boundaries[k])/4 + (dif[i] + fact/2.0)
        if j == 5 * kplanets:  # acc
            fact = sp.absolute(acc_lims[0] - acc_lims[1]) / self.nwalkers
            dif = sp.arange(self.nwalkers) * fact * np.random.uniform(0.9, 0.999)
            for i in range(self.nwalkers):
                pos[i][j] = acc_lims[0] + (dif[i] + fact/2.0)
        if 5 * kplanets < j < 5*kplanets + self.nins*2*(self.MOAV+1) + 1:
            l += 2
            fact = sp.absolute(inslims[l] - inslims[l+1]) / self.nwalkers
            dif = sp.arange(self.nwalkers) * fact * np.random.uniform(0.9, 0.999)
            if (j-5*kplanets-1) % self.nins*2*(self.MOAV+1) == 0:  # careful here
                jitt_ini = sp.sort(sp.fabs(sp.random.normal(0, 1, self.nwalkers))) * 0.1
                dif = jitt_ini * np.random.uniform(0.9, 0.999)
            for i in range(self.nwalkers):
                pos[i][j] = inslims[l] + (dif[i] + fact/2.0)
        if self.totcornum:
            if j > 5*kplanets + self.nins*2*(self.MOAV+1):
                fact = sp.absolute(acc_lims[0] - acc_lims[1]) / self.nwalkers
                dif = sp.arange(self.nwalkers) * fact * np.random.uniform(0.8, 0.999)
                for i in range(self.nwalkers):
                    pos[i][j] = acc_lims[0] + (dif[i] + fact/2.0)
                    #print(pos[i][j])
    pos = sp.array([pos for h in range(self.ntemps)])
    return pos
def test_gaussian_single_population(db_path, sampler):
    sigma_prior = 1
    sigma_ground_truth = 1
    observed_data = 1

    def model(args):
        return {"y": st.norm(args['x'], sigma_ground_truth).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 1
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [Distribution(x=RV("norm", 0,
                                                                  sigma_prior))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"y": observed_data})

    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))

    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_prior**2 + 1 / sigma_ground_truth**2)
    mu_x_given_y = sigma_x_given_y**2 * observed_data / sigma_ground_truth**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.12
    assert history.max_t == nr_populations-1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .1