The following 13 code examples, extracted from open-source Python projects, illustrate how scipy.std() is used in practice.
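Before the project examples, here is a minimal reference sketch (not taken from any of the projects below; the data are made up). In the SciPy versions these projects targeted, scipy.std() was simply a re-export of numpy.std(), i.e. the population standard deviation with ddof=0; the alias has since been deprecated in favour of numpy.std and is no longer available in the newest releases, so the equivalent NumPy call is shown here.

import numpy as np

values = [0.01, -0.02, 0.015, 0.03, -0.005]   # hypothetical sample data

sigma_pop = np.std(values)             # what scipy.std(values) returned (ddof=0, population std)
sigma_sample = np.std(values, ddof=1)  # Bessel-corrected sample standard deviation
print(sigma_pop, sigma_sample)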
def k_means_cluster_Predict(data_list, info):
    array_diagnal = np.array([[data_list[0][x], data_list[1][x]] for x in range(len(data_list[0]))])
    ks = list(range(1, len(info)))
    KMeans = [cluster.KMeans(n_clusters=i, init="k-means++").fit(array_diagnal) for i in ks]
    BIC = [compute_bic(kmeansi, array_diagnal) for kmeansi in KMeans]
    # pick the number of clusters with the highest BIC
    ks_picked = ks[BIC.index(max(BIC))]
    if ks_picked == 1:
        return [data_list]
    else:
        out = []
        # per-dimension standard deviations (computed but not used below)
        std_rec = [scipy.std(data_list[0]), scipy.std(data_list[1])]
        whitened = whiten(array_diagnal)
        centroids, distortion = kmeans(whitened, ks_picked)
        idx, _ = vq(whitened, centroids)
        for x in range(ks_picked):
            group1 = [[int(i) for i in array_diagnal[idx == x, 0]],
                      [int(i) for i in array_diagnal[idx == x, 1]]]
            out.append(group1)
        return out
def getForegroundMask(self):
    '''
    @return: A mask image indicating which pixels are considered foreground.
      Depending on whether soft-thresholding is used, this may be a binary image
      with values of [0 or 255], or an image of weights [0.0-255.0], which will
      have to be divided by 255 to get weights [0.0-1.0].
    @note: One may wish to perform additional morphological operations
        on the foreground mask prior to use.
    '''
    diff = self._computeBGDiff()
    if self._softThreshold:
        mask = 1 - (math.e)**(-(1.0*diff)/self._threshold)  # element-wise exp weighting
        #mask = (diff > self._threshold)
    else:
        mask = (sp.absolute(diff) > self._threshold)
        #mu = sp.mean(diff)
        #sigma = sp.std(diff)
        #mask = sp.absolute((diff-mu)/sigma) > self._threshold
    return pv.Image(mask*255.0)
def get_html(self, base_file_name: str, h_level: int) -> str:
    sp = None  # type: SingleProperty
    columns = [
        BOTableColumn("n", "{:5d}", lambda sp, _: sp.observations(), first),
        BOTableColumn("mean", "{:10.5f}", lambda sp, _: sp.mean(), first),
        BOTableColumn("mean / best mean", "{:5.5%}", lambda sp, means: sp.mean() / min(means), first),
        BOTableColumn("mean / mean of first impl", "{:5.5%}", lambda sp, means: sp.mean() / means[0], first),
        BOTableColumn("std / mean", "{:5.5%}", lambda sp, _: sp.std_dev_per_mean(), first),
        BOTableColumn("std / best mean", "{:5.5%}", lambda sp, means: sp.std_dev() / min(means), first),
        BOTableColumn("std / mean of first impl", "{:5.5%}", lambda sp, means: sp.std_dev() / means[0], first),
        BOTableColumn("median", "{:5.5f}", lambda sp, _: sp.median(), first)
    ]
    html = """
        <h{h}>Input: {input}</h{h}>
        The following plot shows the actual distribution of the measurements for each implementation.
        {box_plot}
    """.format(h=h_level, input=repr(self.input), box_plot=self.get_box_plot_html(base_file_name))
    html += self.table_html_for_vals_per_impl(columns, base_file_name)
    return html
def k_means_cluster(data_list):
    if max(data_list[0]) - min(data_list[0]) > 10 and max(data_list[1]) - min(data_list[1]) > 10:
        array_diagnal = np.array([[data_list[0][x], data_list[1][x]] for x in range(len(data_list[0]))])
        ks = list(range(1, min([5, len(data_list[0]) + 1])))
        KMeans = [cluster.KMeans(n_clusters=i, init="k-means++").fit(array_diagnal) for i in ks]
        KMeans_predict = [cluster.KMeans(n_clusters=i, init="k-means++").fit_predict(array_diagnal) for i in ks]
        BIC = []
        BIC_rec = []
        # keep only clusterings that actually use all k clusters and have a sane BIC
        for x in ks:
            if KMeans_predict[x-1].max() < x-1:
                continue
            else:
                BIC_i = compute_bic(KMeans[x-1], array_diagnal)
                if abs(BIC_i) < 10**8:
                    BIC.append(BIC_i)
                    BIC_rec.append(x)
        #BIC = [compute_bic(kmeansi,array_diagnal) for kmeansi in KMeans]
        #ks_picked=ks[BIC.index(max(BIC))]
        ks_picked = BIC_rec[BIC.index(max(BIC))]
        if ks_picked == 1:
            return [data_list]
        else:
            out = []
            # per-dimension standard deviations (computed but not used below)
            std_rec = [scipy.std(data_list[0]), scipy.std(data_list[1])]
            whitened = whiten(array_diagnal)
            centroids, distortion = kmeans(whitened, ks_picked)
            idx, _ = vq(whitened, centroids)
            for x in range(ks_picked):
                group1 = [[int(i) for i in array_diagnal[idx == x, 0]],
                          [int(i) for i in array_diagnal[idx == x, 1]]]
                out.append(group1)
            return out
    else:
        return [data_list]
def sharpeRatio(ticker, begdate=(2012,1,1), enddate=(2016,12,31)):
    """Objective: estimate the Sharpe ratio for a stock
        ticker  : stock symbol
        begdate : beginning date
        enddate : ending date

       Example #1:
            sharpeRatio("ibm")
            0.0068655583807256159

       Example #2:
            date1=(1990,1,1)
            date2=(2015,12,23)
            sharpeRatio("ibm",date1,date2)
            0.027831010497755326
    """
    import scipy as sp
    from matplotlib.finance import quotes_historical_yahoo_ochl as getData
    p = getData(ticker, begdate, enddate, asobject=True, adjusted=True)
    ret = p.aclose[1:] / p.aclose[:-1] - 1
    return sp.mean(ret) / sp.std(ret)
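A side note rather than one of the extracted examples: matplotlib.finance and the Yahoo quote downloader it relied on are no longer shipped with current matplotlib releases, so the function above will not run as-is. The sketch below uses a made-up price series to show the same calculation the function performs, mean(returns) / std(returns), with no risk-free rate and no annualisation.

import numpy as np

# Hypothetical adjusted closing prices; in the original the series comes from Yahoo.
prices = np.array([100.0, 101.5, 99.8, 102.3, 103.1, 102.7])
ret = prices[1:] / prices[:-1] - 1      # simple period-over-period returns
sharpe = ret.mean() / ret.std()         # what sp.mean(ret)/sp.std(ret) computes above
print(sharpe)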
def amean_std(values: t.List[float]) -> float:
    """
    Calculates the standard deviation (the spread measure accompanying the arithmetic mean).
    """
    return sp.std(values)
def get_html(self, base_file_name: str, h_level: int) -> str: html = """ <h{}>Program: {!r} ({} lines, {} entropy)</h{}> The following plot shows the mean score per input distribution for every implementation. """.format(h_level, self.name, self.line_number, self.entropy, h_level) html += self.get_box_plot_html(base_file_name) scores = self.get_impl_mean_scores() std_devs = self.get_statistical_property_scores(rel_std_dev_func) html += """ <table class="table"> <tr><th>implementation</th><th>geom mean over means relative to best (per input) aka mean score</th> <th>... std dev rel. to the best mean</th> </tr> """ for impl in scores.keys(): html += """ <tr><td>{}</td><td>{:5.2%}</td><td>{:5.2%}</td></tr> """.format(impl, stats.gmean(scores[impl]), stats.gmean(std_devs[impl])) html += "</table>" impl_names = list(scores.keys()) for (i, input) in enumerate(self.prog_inputs.keys()): app = html_escape_property(input) if len(app) > 20: app = str(i) html += self.prog_inputs[input].get_html(base_file_name + "_" + app, h_level + 1) return html
def std_f(ticker):
    x = ret_monthly(ticker)
    return sp.std(x)
def portfolio_var(R, w):
    cor = corr  # here!!!!  (`corr` and `n` come from the enclosing module scope)
    std_dev = sp.std(R, axis=0)
    var = 0.0
    for i in xrange(n):
        for j in xrange(n):
            var += w[i]*w[j]*std_dev[i]*std_dev[j]*cor[i, j]
    return var
def portfolio_var(R, w):
    cor = sp.corrcoef(R.T)
    std_dev = sp.std(R, axis=0)
    var = 0.0
    for i in xrange(n):
        for j in xrange(n):
            var += w[i]*w[j]*std_dev[i]*std_dev[j]*cor[i, j]
    return var

# function 3: estimate Sharpe ratio
def outlier_removed_fit(m, w=None, n_iter=10, polyord=7):
    """
    Remove outliers using fitted data.

    Args:
        m (:obj:`numpy array`): Phase curve.
        n_iter (:obj:`int`): Number of iterations of outlier removal.
        polyord (:obj:`int`): Order of polynomial used.

    Returns:
        fit (:obj:`numpy array`): Curve with outliers removed
    """
    if w is None:
        w = sp.ones_like(m)
    W = sp.diag(sp.sqrt(w))
    m2 = sp.copy(m)
    tv = sp.linspace(-1, 1, num=len(m))
    A = sp.zeros([len(m), polyord])
    for j in range(polyord):
        A[:, j] = tv**(float(j))
    A2 = sp.dot(W, A)
    m2w = sp.dot(m2, W)
    fit = None
    for i in range(n_iter):
        xhat = sp.linalg.lstsq(A2, m2w)[0]
        fit = sp.dot(A, xhat)
        # use gradient for central finite differences which keeps order
        resid = sp.gradient(fit - m2)
        std = sp.std(resid)
        bidx = sp.where(sp.absolute(resid) > 2.0*std)[0]
        # zero out the rows belonging to detected outliers before the next pass
        for bi in bidx:
            A2[bi, :] = 0.0
            m2[bi] = 0.0
            m2w[bi] = 0.0
    if debug_plot:
        plt.plot(m2, label="outlier removed")
        plt.plot(m, label="original")
        plt.plot(fit, label="fit")
        plt.legend()
        # use fit.min()/fit.max() here: sp.minimum/sp.maximum are element-wise
        # and require two arguments
        plt.ylim([fit.min() - std*3.0, fit.max() + std*3.0])
        plt.show()
    return fit
def get_html(self, base_file_name: str, h_level: int) -> str: html = """ <h{}>{}</h{}> """.format(h_level, self.name, h_level) scores = self.get_impl_mean_scores() std_devs = self.get_statistical_property_scores(rel_std_dev_func) if len(self.programs) > 1: html += """ Mean scores per implementation for this program category <p> """ html += self.get_box_plot_html(base_file_name) html += """ </p> <table class="table"> <tr><th>implementation</th><th>geom mean over means relative to best (per input and program) aka mean score</th> <th>... std devs relative to the best means </th> </tr> """ for impl in scores.keys(): html += """ <tr><td>{}</td><td>{:5.2%}</td><td>{:5.2%}</td></tr> """.format(impl, scores[impl], std_devs[impl]) html += "</table>" if len(self.get_input_strs()) > 1: html += """ <h{h}> Mean scores per input</h{h}> """.format(h=h_level + 1) for input in self.get_input_strs(): mean_scores = self.get_statistical_property_scores_per_input_per_impl(rel_mean_func, input) std_scores = self.get_statistical_property_scores_per_input_per_impl(rel_std_dev_func, input) html += """ <h{h}>Mean scores for input {!r}</h{h}> The plot shows the distribution of mean scores per program for each implementation. <p> """.format(input, h=h_level + 2) html += self.get_box_plot_per_input_per_impl_html(base_file_name, input) html += """ </p> <table class="table"> <tr><th>impl</th><th>geom mean over means relative to best (per program) aka mean score</th> <th>... std devs relative to the best means </th> </tr> """ for impl in mean_scores.keys(): html += """ <tr><td>{}</td><td>{:5.2%}</td><td>{:5.2%}</td></tr> """.format(impl, stats.gmean(mean_scores[impl]), stats.gmean(std_scores[impl])) html += "</table>" impl_names = list(scores.keys()) for (i, prog) in enumerate(self.programs): html += self.programs[prog].get_html(base_file_name + "_" + html_escape_property(prog), h_level + 1) return html
def c_config(inputs_per_category: InputsPerCategory, optimisation: str = "-O2", clang_version = "3.7") -> ConfigDict:
    """
    Generates a game config that compares gcc and clang.
    """

    def cat(category: str, numbers: t.List[int] = None):
        return bench_category(category, "gcc", inputs_per_category[category], numbers)

    config = {
        "language": "c",
        "categories": [
            cat("binarytrees"),
            cat("chameneosredux", [2]),
            cat("fannkuchredux", [1, 5]),
            cat("fasta", [1, 4, 5]),
            cat("fastaredux"),
            #cat("knucleotide", "gcc", [9]),  # doesn't compile
            cat("mandelbrot", [1, 2, 3, 4, 6, 9]),
            cat("meteor"),
            cat("nbody"),
            cat("pidigits"),
            #cat("regexdna", "gcc", [1, 2]),  # runs almost infinitely
            cat("revcomp", [1]),
            cat("spectralnorm", [1]),
            cat("threadring")
        ],
        "impls": [
            {
                "name": "gcc",  # todo: tcl8.6 vs 8.4???
                "build_cmd": "cp {file} {bfile}.c; gcc {bfile}.c $O -I/usr/include/tcl8.6 -ltcl8.4 -lglib-2.0 -lgmp "
                             "-D_GNU_SOURCE -Doff_t=__off64_t -fopenmp -D_FILE_OFFSET_BITS=64 -I/usr/include/apr-1.0 "
                             "-lapr-1 -lgomp -lm -std=c99 -mfpmath=sse -msse3 -I/usr/include/glib-2.0 "
                             "-I/usr/lib/x86_64-linux-gnu/glib-2.0/include -lglib-2.0 -lpcre -o {bfile}"
                             .replace("$O", optimisation),
                "run_cmd": "./{bfile} {input} > /dev/null"
            }, {
                "name": "clang",
                "build_cmd": "cp {file} {bfile}.c; clang-$CV {bfile}.c $O -I/usr/include/tcl8.6 -ltcl8.4 -fopenmp=libgomp "
                             "-lglib-2.0 -lgmp -D_GNU_SOURCE -Doff_t=__off64_t -D_FILE_OFFSET_BITS=64 "
                             "-I/usr/include/apr-1.0 -lapr-1 -lm -std=c99 -mfpmath=sse -msse3 -I/usr/include/glib-2.0 "
                             "-I/usr/lib/x86_64-linux-gnu/glib-2.0/include -lglib-2.0 -lpcre -o {bfile}"
                             .replace("$CV", clang_version).replace("$O", optimisation),
                "run_cmd": "./{bfile} {input} > /dev/null"
            }
        ]
    }

    return config