The following code examples, extracted from open-source Python projects, illustrate how to use scipy.array().
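For context: in the SciPy versions these projects target, scipy.array() was a plain re-export of numpy.array(); recent SciPy releases have removed these NumPy aliases, so new code should call numpy.array() directly. A minimal sketch:

import numpy

# numpy.array() (and thus the old scipy.array()) builds an ndarray from
# any array-like input, inferring dtype and shape.
a = numpy.array([[1.0, 2.0], [3.0, 4.0]])
print(a.shape, a.dtype)  # (2, 2) float64

# Equivalent spelling on older SciPy (the alias was later removed):
#   import scipy
#   b = scipy.array([[1.0, 2.0], [3.0, 4.0]])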
def PlotMultipleRuns(Alg, nruns=20, fname=None):
    '''Plot "nruns" runs of a given algorithm to show performance
    and variability across runs.'''
    if fname:
        runs = scipy.genfromtxt(fname)
    else:
        runs = []
        for i in range(nruns):
            bestSol, fitHistory = tsp.TSP(200, Alg, 3000, 30, seed=None,
                                          coordfile='tmp.txt')
            runs.append(fitHistory)
        fname = 'MultRuns-' + str(Alg) + '.txt'
        runs = scipy.array(runs)
        scipy.savetxt(fname, runs)

    # plotting
    Xs = scipy.linspace(0, runs.shape[1] * 1000, runs.shape[1])
    for i in range(runs.shape[0]):
        pl.plot(Xs, runs[i, :])
    pl.show()
def LongMC3(fname=None):
    '''Plot a single long MC3 run to demonstrate high performance
    but slow convergence.'''
    if fname:
        run = scipy.genfromtxt(fname)
    else:
        bestSol, run = tsp.TSP(200, 'MC3', 20000, 10, seed=None,
                               coordfile='tmp.txt')
        fname = 'ExampleOutput/MC3-Long.txt'
        run = scipy.array(run)
        scipy.savetxt(fname, run)

    # plotting
    Xs = range(0, run.shape[0] * 1000, 1000)
    pl.plot(Xs, run)
    pl.show()
def LongSA(fname=None):
    '''Plot a single long SA run to demonstrate performance under a
    slower cooling schedule.'''
    if fname:
        run = scipy.genfromtxt(fname)
    else:
        bestSol, run = tsp.TSP(200, 'SA', 20000, 'placeholder', seed=None,
                               coordfile='tmp.txt')
        fname = 'ExampleOutput/SA-Long.txt'
        run = scipy.array(run)
        scipy.savetxt(fname, run)

    # plotting
    Xs = range(0, run.shape[0] * 1000, 1000)
    pl.plot(Xs, run)
    pl.show()
def glmnetSet(opts=None):
    import scipy

    # default options
    options = {
        "weights": scipy.empty([0]),
        "offset": scipy.empty([0]),
        "alpha": scipy.float64(1.0),
        "nlambda": scipy.int32(100),
        "lambda_min": scipy.empty([0]),
        "lambdau": scipy.empty([0]),
        "standardize": True,
        "intr": True,
        "thresh": scipy.float64(1e-7),
        "dfmax": scipy.empty([0]),
        "pmax": scipy.empty([0]),
        "exclude": scipy.empty([0], dtype=scipy.integer),
        "penalty_factor": scipy.empty([0]),
        "cl": scipy.array([[scipy.float64(-scipy.inf)], [scipy.float64(scipy.inf)]]),
        "maxit": scipy.int32(1e5),
        "gtype": [],
        "ltype": 'Newton',
        "standardize_resp": False,
        "mtype": 'ungrouped'
    }

    # quick return if no user opts
    if opts is None:
        print('pdco default options:')
        print(options)
        return options

    # if options are passed in by user, update options with values from opts
    optsInOptions = set(opts.keys()) - set(options.keys())
    if len(optsInOptions) > 0:  # 'opts' keys must be a subset of 'options' keys
        print(optsInOptions, ' : unknown option for glmnetSet')
        raise ValueError('attempting to set glmnet options that are not known to glmnetSet')
    else:
        options = merge_dicts(options, opts)

    return options
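A short usage sketch (merge_dicts is a helper from the same glmnet-python module; option names must match the defaults table above):

import scipy

opts = glmnetSet({'alpha': scipy.float64(0.5), 'nlambda': scipy.int32(50)})
print(opts['alpha'], opts['nlambda'])  # overridden values: 0.5 50
print(opts['ltype'])                   # untouched default: 'Newton'
# Unknown keys fail fast:
# glmnetSet({'alpah': 0.5})  -> ValueError: ... not known to glmnetSet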
def extrap1d(interpolator):
    xs = interpolator.x
    ys = interpolator.y

    def pointwise(x):
        if x < xs[0]:
            return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
        elif x > xs[-1]:
            return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
        else:
            return interpolator(x)

    def ufunclike(xs):
        # list() keeps this working on Python 3, where map() is lazy
        return array(list(map(pointwise, array(xs))))

    return ufunclike

# Simple offset calibration if only one point available.
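A brief usage sketch with hypothetical data (it relies on SciPy's interp1d, whose objects expose the .x and .y attributes the wrapper reads):

from scipy import array
from scipy.interpolate import interp1d

xs = array([0.0, 1.0, 2.0])
ys = array([0.0, 2.0, 4.0])

f = extrap1d(interp1d(xs, ys))
print(f([-1.0, 0.5, 3.0]))  # [-2. 1. 6.]: linear extrapolation outside [0, 2]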
def median(values):
    return numpy.median(numpy.array(values))

# Class to hold a TiltHydrometer reading
def _convert_colorscale_to_rgb(colorscale):
    """
    Converts the colors in a colorscale to rgb colors

    A colorscale is an array of arrays, each with a numeric value as the
    first item and a color as the second. This function specifically is
    converting a colorscale with tuple colors (each coordinate between 0
    and 1) into a colorscale with the colors transformed into rgb colors
    """
    for color in colorscale:
        color[1] = FigureFactory._convert_to_RGB_255(color[1])

    for color in colorscale:
        color[1] = FigureFactory._label_rgb(color[1])

    return colorscale
def _validate_dataframe(array):
    """
    Validates all strings or numbers in each dataframe column

    :raises: (PlotlyError) If there are any two items in any list whose
        types differ
    """
    from numbers import Number
    for vector in array:
        if isinstance(vector[0], Number):
            if not all(isinstance(item, Number) for item in vector):
                raise exceptions.PlotlyError("Error in dataframe. "
                                             "Make sure all entries of "
                                             "each column are either "
                                             "numbers or strings.")
        elif isinstance(vector[0], str):
            if not all(isinstance(item, str) for item in vector):
                raise exceptions.PlotlyError("Error in dataframe. "
                                             "Make sure all entries of "
                                             "each column are either "
                                             "numbers or strings.")
def _validate_streamline(x, y):
    """
    Streamline-specific validations

    Specifically, this checks that x and y are both evenly spaced,
    and that the package numpy is available.

    See FigureFactory.create_streamline() for params

    :raises: (ImportError) If numpy is not available.
    :raises: (PlotlyError) If x is not evenly spaced.
    :raises: (PlotlyError) If y is not evenly spaced.
    """
    if _numpy_imported is False:
        raise ImportError("FigureFactory.create_streamline requires numpy")

    for index in range(len(x) - 1):
        if ((x[index + 1] - x[index]) - (x[1] - x[0])) > .0001:
            raise exceptions.PlotlyError("x must be a 1 dimensional, "
                                         "evenly spaced array")

    for index in range(len(y) - 1):
        if ((y[index + 1] - y[index]) - (y[1] - y[0])) > .0001:
            raise exceptions.PlotlyError("y must be a 1 dimensional, "
                                         "evenly spaced array")
def get_table_matrix(self):
    """
    Create z matrix to make heatmap with striped table coloring

    :rtype (list[list]) table_matrix: z matrix to make heatmap with
        striped table coloring.
    """
    header = [0] * len(self.table_text[0])
    odd_row = [.5] * len(self.table_text[0])
    even_row = [1] * len(self.table_text[0])
    table_matrix = [None] * len(self.table_text)
    table_matrix[0] = header
    for i in range(1, len(self.table_text), 2):
        table_matrix[i] = odd_row
    for i in range(2, len(self.table_text), 2):
        table_matrix[i] = even_row
    if self.index:
        for array in table_matrix:
            array[0] = 0
    return table_matrix
def get_table_font_color(self):
    """
    Fill font-color array.

    Table text color can vary by row so this extends a single color or
    creates an array to set a header color and two alternating colors to
    create the striped table pattern.

    :rtype (list[list]) all_font_colors: list of font colors for each row
        in table.
    """
    if len(self.font_colors) == 1:
        all_font_colors = self.font_colors * len(self.table_text)
    elif len(self.font_colors) == 3:
        all_font_colors = list(range(len(self.table_text)))
        all_font_colors[0] = self.font_colors[0]
        for i in range(1, len(self.table_text), 2):
            all_font_colors[i] = self.font_colors[1]
        for i in range(2, len(self.table_text), 2):
            all_font_colors[i] = self.font_colors[2]
    elif len(self.font_colors) == len(self.table_text):
        all_font_colors = self.font_colors
    else:
        all_font_colors = ['#000000'] * len(self.table_text)
    return all_font_colors
def _alignment(self, ssignal, ksignal):
    # find the first sample in each signal whose amplitude exceeds the
    # noise floor (+/- 100/32767 on normalized 16-bit audio)
    starta = 0
    for i in range(len(ssignal))[0::2]:
        if ssignal[i] < -100 / 32767.0 or ssignal[i] > 100 / 32767.0:
            starta = i
            break
    startb = 0
    for i in range(len(ksignal))[0::2]:
        if ksignal[i] < -100 / 32767.0 or ksignal[i] > 100 / 32767.0:
            startb = i
            break

    # slide a 5000-sample window over ksignal and keep the offset with
    # the smallest RMS difference against the reference window
    start = starta - 100
    base = ssignal[start:start + 5000]
    small = 1000000
    index = 0
    for i in range(startb - 1000, startb - 1000 + 10000)[0::2]:
        signal = ksignal[i:i + 5000]
        score = math.sqrt(sp.sum(sp.square(sp.array(list(base - signal), sp.float32))))
        if score < small:
            index = i
            small = score
    return start, index
    # return 0, 0
def extrap1d(interpolator):
    xs = interpolator.x
    ys = interpolator.y

    def pointwise(x):
        if x < xs[0]:
            return ys[0] + (x - xs[0]) * (ys[1] - ys[0]) / (xs[1] - xs[0])
        elif x > xs[-1]:
            return ys[-1] + (x - xs[-1]) * (ys[-1] - ys[-2]) / (xs[-1] - xs[-2])
        else:
            return interpolator(x)

    def ufunclike(xs):
        from scipy import array
        # list() keeps this working on Python 3, where map() is lazy
        return array(list(map(pointwise, array(xs))))

    return ufunclike
def load_wider_face(path_to_mat):
    ## load dataset event labels and bboxes
    ## works with mat files < v7.3; otherwise the file should be converted
    r_dict = {}
    mat = loadmat(path_to_mat)
    files = mat['file_list']
    bboxs = mat['face_bbx_list']
    for cell1, cell2 in zip(files, bboxs):
        for img, bx in zip(cell1[0], cell2[0]):
            fname = img[0][0]
            bbox_r = []
            for b in bx:
                b = sp.vectorize(lambda x: int(round(x)))(b)
                bbox_r.append(b)
            bbox_r = sp.array(bbox_r[0])
            r_dict[fname] = bbox_r
    return r_dict
def __build_loss_train__fn__(self):
    # create loss function
    prediction = layers.get_output(self.net)
    loss = objectives.categorical_crossentropy(prediction, self.__target_var__)
    loss = loss.mean() + 1e-4 * regularization.regularize_network_params(
        self.net, regularization.l2)

    val_acc = T.mean(T.eq(T.argmax(prediction, axis=1), self.__target_var__),
                     dtype=theano.config.floatX)

    # create parameter update expressions
    params = layers.get_all_params(self.net, trainable=True)
    self.eta = theano.shared(sp.array(sp.float32(0.05), dtype=sp.float32))
    update_rule = updates.nesterov_momentum(loss, params,
                                            learning_rate=self.eta,
                                            momentum=0.9)

    # compile training function that updates parameters and returns training loss
    self.__train_fn__ = theano.function(
        [self.__input_var__, self.__target_var__], loss, updates=update_rule)
    self.__predict_fn__ = theano.function(
        [self.__input_var__], layers.get_output(self.net, deterministic=True))
    self.__val_fn__ = theano.function(
        [self.__input_var__, self.__target_var__], [loss, val_acc])
def with_walking(time_arr, mins_per_square=1.3, transfer_constant=5):
    arr = time_arr.copy()
    cross_footprint = sp.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).astype(bool)
    diag_footprint = sp.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]).astype(bool)

    arr[sp.isnan(arr)] = sp.inf
    for i in range(60):
        cross_arr = sp.ndimage.minimum_filter(arr, footprint=cross_footprint)
        cross_arr[sp.isnan(cross_arr)] = sp.inf
        cross_changes = (cross_arr != arr)
        cross_arr[cross_changes] += 1 * mins_per_square

        diag_arr = sp.ndimage.minimum_filter(arr, footprint=diag_footprint)
        diag_arr[sp.isnan(diag_arr)] = sp.inf
        diag_changes = (diag_arr != arr)
        diag_arr[diag_changes] += 1.4 * mins_per_square

        arr = sp.minimum(cross_arr, diag_arr)
    arr[sp.isinf(arr)] = sp.nan

    return arr + transfer_constant
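A small usage sketch (hypothetical 5x5 grid with a single reachable cell; orthogonal walking costs mins_per_square per cell and diagonal walking 1.4x that, with transfer_constant added at the end):

import scipy as sp
import scipy.ndimage

grid = sp.ones((5, 5)) * sp.nan   # NaN = not reachable by transit
grid[2, 2] = 0.0                  # one station reachable in 0 minutes

walked = with_walking(grid)
print(walked[2, 4])  # two orthogonal steps: 2 * 1.3 + 5 = 7.6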
def test_one_with_one_without_parameters(population_strategy: PopulationStrategy):
    n = 10
    kernels = []

    df_without = pd.DataFrame(index=list(range(n)))
    w_without = sp.ones(n) / n
    kernel_without = MultivariateNormalTransition()
    kernel_without.fit(df_without, w_without)
    kernels.append(kernel_without)

    df_with = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])
    w_with = sp.ones(n) / n
    kernel_with = MultivariateNormalTransition()
    kernel_with.fit(df_with, w_with)
    kernels.append(kernel_with)

    population_strategy.adapt_population_size(kernels, sp.array([.7, .3]))
    assert population_strategy.nr_particles > 0
def test_transitions_not_modified(population_strategy: PopulationStrategy):
    n = 10
    kernels = []
    test_points = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])

    for _ in range(2):
        df = pd.DataFrame([{"s": sp.rand()} for _ in range(n)])
        w = sp.ones(n) / n
        kernel = MultivariateNormalTransition()
        kernel.fit(df, w)
        kernels.append(kernel)

    test_weights = [k.pdf(test_points) for k in kernels]

    population_strategy.adapt_population_size(kernels, sp.array([.7, .2]))

    after_adaptation_weights = [k.pdf(test_points) for k in kernels]

    same = all([(k1 == k2).all()
                for k1, k2 in zip(test_weights, after_adaptation_weights)])
    err_msg = ("Population strategy {}"
               " modified the transitions".format(population_strategy))
    assert same, err_msg
def model(THETA, time, kplanets):
    modelo = 0.0
    if kplanets == 0:
        return 0.0
    for i in range(kplanets):
        As, P, Ac, S, C = THETA[5 * i:5 * (i + 1)]
        A = As ** 2 + Ac ** 2
        ecc = S ** 2 + C ** 2
        w = sp.arccos(C / (ecc ** 0.5))  # longitude of periastron
        phase = sp.arccos(Ac / (A ** 0.5))
        ### test
        if S < 0:
            w = 2 * sp.pi - sp.arccos(C / (ecc ** 0.5))
        if As < 0:
            phase = 2 * sp.pi - sp.arccos(Ac / (A ** 0.5))
        ###
        per = sp.exp(P)
        freq = 2. * sp.pi / per
        M = freq * time + phase                     # mean anomaly
        E = sp.array([MarkleyKESolver().getE(m, ecc) for m in M])  # eccentric anomaly
        f = (sp.arctan(((1. + ecc) ** 0.5 / (1. - ecc) ** 0.5)
                       * sp.tan(E / 2.)) * 2.)      # true anomaly
        modelo += A * (sp.cos(f + w) + ecc * sp.cos(w))
    return modelo
def test_fetch_one_column():
    _urlopen_ref = datasets.mldata.urlopen
    try:
        dataname = 'onecol'
        # create fake data set in cache
        x = sp.arange(6).reshape(2, 3)
        datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})

        dset = fetch_mldata(dataname, data_home=tmpdir)
        for n in ["COL_NAMES", "DESCR", "data"]:
            assert_in(n, dset)
        assert_not_in("target", dset)

        assert_equal(dset.data.shape, (2, 3))
        assert_array_equal(dset.data, x)

        # transposing the data array
        dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
        assert_equal(dset.data.shape, (3, 2))
    finally:
        datasets.mldata.urlopen = _urlopen_ref
def calc_twostate_weights(data):
    weights = [0, 0, 0]  # the change cannot have occurred in the first 3 points
    means_mss = calc_mean_mss(data)
    i = 0
    try:
        for nA, mean2A, varA, nB, mean2B, varB in means_mss:
            # print("computing for data", nA, mean2A, varA, nB, mean2B, varB)
            numf1 = calc_alpha(nA, mean2A, varA)
            numf2 = calc_alpha(nB, mean2B, varB)
            denom = (varA + varB) * (mean2A * mean2B)
            weights.append((numf1 * numf2) / denom)
            i += 1
    except:
        print("failed at data", i)  # means_mss[i]
        print("---")
        print(means_mss)
        print("---")
        raise

    weights.extend([0, 0])  # the change cannot have occurred at the last 2 points
    return array(weights)
def calcdistance_mat(self, points, center, spatialmax):
    ## -- L2 norm, optimized --
    center = scipy.array(center)
    location_center = center[:2]
    color_center = center[2:]
    location_points = points[:, :, :2]
    color_points = points[:, :, 2:]

    difs_location = location_points - location_center
    difs_color = 1 - np.equal(color_points, color_center)
    if len(difs_color.shape) == 2:
        difs_color = np.expand_dims(difs_color, axis=2)
    difs = np.concatenate((difs_location, difs_color), axis=2)

    norm = (difs ** 2).astype(float)
    norm[:, :, 0:2] *= (float(self.MM) / (spatialmax * spatialmax))  # color weight on location term
    norm = scipy.sum(norm, 2)
    return norm
def resultimg(self, centers):
    print("show result")
    result = scipy.zeros(self.img.shape[:2], scipy.uint8)
    width, height = result.shape[:2]
    if len(result.shape) > 2:
        color_channels = result.shape[2]
    else:
        color_channels = 1
    colors = [scipy.array([int(random.uniform(0, 255)) for i in range(1)])
              for j in range(len(centers))]
    for x in range(width):
        for y in range(height):
            result[x, y] = colors[self.assignedindex[x][y]]
    # cv2.imshow("result", result)
    # cv2.waitKey(10)
    cv2.imwrite(os.path.join(self.result_dir, self.filename + '_superpixel.png'),
                result)
def parseCCFile(self):
    with open(self.ccFile, 'r') as f:
        dataArr = None
        data = []
        Index = []
        # skip the header lines up to the marker
        for line in f:
            if line.strip() == 'Correlation coefficients':
                break
        for line in f:
            dataline = line.rstrip().split()
            data.append(dataline)
            Index.append(int(dataline[0]) + 1)
            Index.append(int(dataline[1]) + 1)
        Dimension = max(Index)
        dataArr = np.array(data, dtype=float)
    return dataArr, Dimension
def tree(self):
    data = self.ccTable
    Matrix = np.zeros((self.Dimension, self.Dimension))
    reducedArray = []
    for line in data:
        # print(line)
        if line is not None and len(line) != 0:
            Matrix[line[0], line[1]] = line[2]
            Matrix[line[1], line[0]] = line[2]
    for x in range(0, self.Dimension):
        for y in range(x + 1, self.Dimension):
            reducedArray.append(Matrix[x, y])
    Distances = np.array(reducedArray, dtype=float)
    self.Tree = hierarchy.linkage(Distances, 'complete')
    return self.Tree

# new function: uses the average linkage
def avgTree(self):
    data = self.ccTable
    Matrix = np.zeros((self.Dimension, self.Dimension))
    reducedArray = []
    for line in data:
        # print(line)
        if line is not None and len(line) != 0:
            Matrix[line[0], line[1]] = line[2]
            Matrix[line[1], line[0]] = line[2]
    for x in range(0, self.Dimension):
        for y in range(x + 1, self.Dimension):
            reducedArray.append(Matrix[x, y])
    Distances = np.array(reducedArray, dtype=float)
    self.Tree = hierarchy.linkage(Distances, 'average')
    return self.Tree

# Function added to plot dendrogram in shell mode only.
# Still not functioning; uncomment when needed.
def plotTree(self, pos=None):
    P = hierarchy.dendrogram(self.Tree, color_threshold=0.3)
    icoord = scipy.array(P['icoord'])
    dcoord = scipy.array(P['dcoord'])
    color_list = scipy.array(P['color_list'])
    xmin, xmax = icoord.min(), icoord.max()
    ymin, ymax = dcoord.min(), dcoord.max()
    if pos:
        icoord = icoord[pos]
        dcoord = dcoord[pos]
        color_list = color_list[pos]
    for xs, ys, color in zip(icoord, dcoord, color_list):
        plt.plot(xs, ys, color)
    plt.xlim(xmin - 10, xmax + 0.1 * abs(xmax))
    plt.ylim(ymin, ymax + 0.1 * abs(ymax))
    plt.show()
def create_struct_from_obj(self, ob):
    struct = {}

    # relationship
    for childname in getattr(ob, '_single_child_containers', []):
        supported_containers = [subob.__name__.lower() + 's'
                                for subob in self.supported_objects]
        if childname in supported_containers:
            struct[childname] = []

    # attributes
    for i, attr in enumerate(ob._all_attrs):
        attrname, attrtype = attr[0], attr[1]

        #~ if attrname == '':
        #~     struct['array'] = ob.magnitude
        #~     struct['units'] = ob.dimensionality.string
        #~     continue

        if (hasattr(ob, '_quantity_attr') and ob._quantity_attr == attrname):
            struct[attrname] = ob.magnitude
            struct[attrname + '_units'] = ob.dimensionality.string
            continue

        if not (attrname in ob.annotations or hasattr(ob, attrname)):
            continue
        if getattr(ob, attrname) is None:
            continue

        if attrtype == pq.Quantity:
            # ndim = attr[2]
            struct[attrname] = getattr(ob, attrname).magnitude
            struct[attrname + '_units'] = getattr(ob, attrname).dimensionality.string
        elif attrtype == datetime:
            struct[attrname] = str(getattr(ob, attrname))
        else:
            struct[attrname] = getattr(ob, attrname)
    return struct
def _parse_plink_snps_(genotype_file, snp_indices):
    plinkf = plinkfile.PlinkFile(genotype_file)
    samples = plinkf.get_samples()
    num_individs = len(samples)
    num_snps = len(snp_indices)
    raw_snps = sp.empty((num_snps, num_individs), dtype='int8')

    # If these indices are not in order, place them in the right slot
    # while parsing SNPs.
    snp_order = sp.argsort(snp_indices)
    ordered_snp_indices = list(snp_indices[snp_order])
    ordered_snp_indices.reverse()

    print('Iterating over file to load SNPs')
    snp_i = 0
    next_i = ordered_snp_indices.pop()
    line_i = 0
    max_i = ordered_snp_indices[0]
    while line_i <= max_i:
        if line_i < next_i:
            plinkf.next()
        elif line_i == next_i:
            line = plinkf.next()
            snp = sp.array(line, dtype='int8')
            bin_counts = line.allele_counts()
            if bin_counts[-1] > 0:
                # impute missing genotypes (coded 3) with the mode allele
                mode_v = sp.argmax(bin_counts[:2])
                snp[snp == 3] = mode_v
            s_i = snp_order[snp_i]
            raw_snps[s_i] = snp
            if line_i < max_i:
                next_i = ordered_snp_indices.pop()
            snp_i += 1
        line_i += 1
    plinkf.close()
    assert snp_i == len(raw_snps), 'Failed to parse SNPs?'
    num_indivs = len(raw_snps[0])
    freqs = sp.sum(raw_snps, 1, dtype='float32') / (2 * float(num_indivs))
    return raw_snps, freqs
def GenerateMap(stops, fname=None, seed=None):
    '''Generate a map with "stops" stops for the salesman to traverse.
    Write coordinates to file if "fname" is specified. Return the
    distance matrix for all coordinates.'''
    random.seed(seed)

    # randomly place stop coordinates in the unit square
    xs = [random.uniform(0, 1) for x in range(stops)]
    ys = [random.uniform(0, 1) for x in range(stops)]
    coords = scipy.array([xs, ys])

    # calculate matrix of distances
    distMat = DistanceMatrix(coords)

    if fname is not None:
        scipy.savetxt(fname, coords)
    return distMat
def TSP(stops, Alg, steps, param, seed=None, coordfile='xycoords.txt'):
    '''A wrapper function that attempts to optimize the traveling
    salesperson problem using a specified algorithm. If coordfile
    exists, a preexisting set of coordinates will be used. Otherwise,
    a new set of "stops" coordinates will be generated for the person
    to traverse, and will be written to the specified file.'''

    # Create the distance matrix, which will be used to calculate
    # the fitness of a given path
    if os.path.isfile(coordfile):
        coords = scipy.genfromtxt(coordfile)
        distMat = DistanceMatrix(coords)
    else:
        distMat = GenerateMap(stops, fname=coordfile, seed=seed)

    if Alg == 'HC':
        # param is the number of solutions to try per step
        bestSol, fitHistory = hc.HillClimber(steps, param, distMat, seed)
    elif Alg == 'SA':
        # param is a placeholder
        bestSol, fitHistory = sa.SimulatedAnnealing(steps, param, distMat, seed)
    elif Alg == 'MC3':
        # param is the number of chains
        bestSol, fitHistory = mc3.MCMCMC(steps, param, distMat, seed)
    elif Alg == 'GA':
        # param is the population size
        bestSol, fitHistory = ga.GeneticAlgorithm(steps, param, distMat, seed)
    else:
        raise ValueError('Algorithm must be "HC", "SA", "MC3", or "GA".')

    outfname = coordfile + '-' + Alg + '-' + str(steps) + '-' + str(param) + '.txt'
    scipy.savetxt(outfname, scipy.array(bestSol), fmt='%i')
    return bestSol, fitHistory
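A usage sketch mirroring the calls made by the plotting helpers at the top of this page (it assumes the project's hc, sa, mc3, ga, and DistanceMatrix modules are importable; 'tmp.txt' is the coordinate cache those helpers use):

# 200 stops, genetic algorithm, 3000 steps, population size 30
bestSol, fitHistory = TSP(200, 'GA', 3000, 30, seed=42, coordfile='tmp.txt')
print(fitHistory[-1])  # last recorded tour fitness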
def LogNormalPaths(mu, cov, fwd, numPaths):
    '''mu and fwd are 1d lists/arrays (1xn); cov is a 2d scipy.array (nxn);
    numPaths is an int.'''
    return (fwd * scipy.exp(numpy.random.multivariate_normal(mu, cov, numPaths)
                            - 0.5 * cov.diagonal())).transpose()
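A usage sketch with hypothetical two-asset inputs. Since the -0.5 * cov.diagonal() term corrects the lognormal drift, the simulated means stay close to the forwards:

import numpy
import scipy

mu = scipy.array([0.0, 0.0])
cov = scipy.array([[0.04, 0.01],
                   [0.01, 0.09]])
fwd = scipy.array([100.0, 50.0])

paths = LogNormalPaths(mu, cov, fwd, numPaths=100000)
print(paths.shape)         # (2, 100000): one row per asset, one column per path
print(paths.mean(axis=1))  # approximately [100., 50.]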
def _flatten(array):
    """
    Uses list comprehension to flatten array

    :param (array): An iterable to flatten
    :raises (PlotlyError): If iterable is not nested.
    :rtype (list): The flattened list.
    """
    try:
        return [item for sublist in array for item in sublist]
    except TypeError:
        raise exceptions.PlotlyError("Your data array could not be "
                                     "flattened! Make sure your data is "
                                     "entered as lists or ndarrays!")
def __init__(self, x, y, u, v, density, angle, arrow_scale, **kwargs):
    self.x = np.array(x)
    self.y = np.array(y)
    self.u = np.array(u)
    self.v = np.array(v)
    self.angle = angle
    self.arrow_scale = arrow_scale
    self.density = int(30 * density)  # Scale similarly to other functions
    self.delta_x = self.x[1] - self.x[0]
    self.delta_y = self.y[1] - self.y[0]
    self.val_x = self.x
    self.val_y = self.y

    # Set up spacing
    self.blank = np.zeros((self.density, self.density))
    self.spacing_x = len(self.x) / float(self.density - 1)
    self.spacing_y = len(self.y) / float(self.density - 1)
    self.trajectories = []

    # Rescale speed onto axes-coordinates
    self.u = self.u / (self.x[-1] - self.x[0])
    self.v = self.v / (self.y[-1] - self.y[0])
    self.speed = np.sqrt(self.u ** 2 + self.v ** 2)

    # Rescale u and v for integrations.
    self.u *= len(self.x)
    self.v *= len(self.y)

    self.st_x = []
    self.st_y = []
    self.get_streamlines()
    streamline_x, streamline_y = self.sum_streamlines()
    arrows_x, arrows_y = self.get_streamline_arrows()
def set_axis_layout(self, axis_key):
    """
    Sets and returns default axis object for dendrogram figure.

    :param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', 'yaxis1', etc.
    :rtype (dict): An axis_key dictionary with set parameters.
    """
    axis_defaults = {
        'type': 'linear',
        'ticks': 'outside',
        'mirror': 'allticks',
        'rangemode': 'tozero',
        'showticklabels': True,
        'zeroline': False,
        'showgrid': False,
        'showline': True,
    }

    if len(self.labels) != 0:
        axis_key_labels = self.xaxis
        if self.orientation in ['left', 'right']:
            axis_key_labels = self.yaxis
        if axis_key_labels not in self.layout:
            self.layout[axis_key_labels] = {}
        self.layout[axis_key_labels]['tickvals'] = \
            [zv * self.sign[axis_key] for zv in self.zero_vals]
        self.layout[axis_key_labels]['ticktext'] = self.labels
        self.layout[axis_key_labels]['tickmode'] = 'array'

    self.layout[axis_key].update(axis_defaults)
    return self.layout[axis_key]