The following code examples, extracted from open-source Python projects, illustrate how to use scipy.sqrt().
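Note that scipy.sqrt was never a SciPy-specific routine: it was NumPy's scimath sqrt re-exported at the top of the scipy namespace, and these aliases have been deprecated and removed in modern SciPy releases. A minimal sketch of the present-day equivalents:

import numpy as np

# Element-wise square root of scalars and arrays
np.sqrt(4.0)                         # 2.0
np.sqrt(np.array([1.0, 4.0, 9.0]))   # array([1., 2., 3.])

# scipy.sqrt behaved like numpy.lib.scimath.sqrt: a negative input
# yields a complex result instead of nan
np.lib.scimath.sqrt(-1.0)            # 1j

The examples below are therefore historical usage; substituting np.sqrt (or np.lib.scimath.sqrt where complex results matter) gives the same behaviour.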
def compute_residuals(self):
    """Compute residuals and stopping thresholds."""
    if self.opt['AutoRho', 'StdResiduals']:
        r = linalg.norm(self.rsdl_r(self.AXnr, self.Y))
        s = linalg.norm(self.rsdl_s(self.Yprev, self.Y))
        epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
            self.rsdl_rn(self.AXnr, self.Y)*self.opt['RelStopTol']
        edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
            self.rsdl_sn(self.U)*self.opt['RelStopTol']
    else:
        rn = self.rsdl_rn(self.AXnr, self.Y)
        if rn == 0.0:
            rn = 1.0
        sn = self.rsdl_sn(self.U)
        if sn == 0.0:
            sn = 1.0
        r = linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn
        s = linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn
        epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
            self.opt['RelStopTol']
        edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
            self.opt['RelStopTol']
    return r, s, epri, edua
def lsInversion(self):
    """
    LS Inversion from Hwang et al (2002)
    """
    At = np.transpose(self.A)
    St = np.transpose(self.S)
    N = At.dot(self.P).dot(self.A)
    # solution:
    self.X = np.linalg.inv(N + self.S.dot(St)).dot(At).dot(self.P).dot(self.Obs)
    self.r = self.A.dot(self.X) - self.Obs
    rt = np.transpose(self.r)
    self.VtPV = rt.dot(self.P).dot(self.r)
    var_post_norm = self.VtPV / self.dof
    self.SDaposteriori = np.sqrt(var_post_norm)
    cov_post = np.linalg.inv(N) * var_post_norm
    self.var = np.diagonal(cov_post)
def lsStatistics(self, alpha):
    """
    a priori variance of unit weight = 1
    """
    self.chi2 = self.VtPV
    # rational approximation of the (1-alpha) normal quantile
    t = np.sqrt(2*np.log(1/alpha))
    chi_1_alpha = t - (2.515517 + 0.802853*t + 0.010328*t**2) / \
        (1 + 1.432788*t + 0.189269*t**2 + 0.001308*t**3)
    dof = float(self.dof)
    # cube approximation of the chi-square critical value
    self.chi2c = dof*(chi_1_alpha*sqrt(2/(9*dof)) + 1 - 2/(9*dof))**3
    print("SD a posteriori: %6.2f" % float(self.SDaposteriori))
    print("chi square value: %6.2f" % float(self.chi2))
    print("critical chi square value: %6.2f" % float(self.chi2c))
    if self.chi2 < self.chi2c:
        print("Chi-test accepted")
    else:
        print("Chi-test rejected")
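The chi_1_alpha expression above is the Abramowitz and Stegun rational approximation of the normal quantile, and chi2c applies the Wilson-Hilferty cube approximation of the chi-square quantile. A quick cross-check against the exact quantile using scipy.stats (the inputs below are hypothetical):

from scipy import stats

alpha = 0.05   # hypothetical significance level
dof = 10       # hypothetical degrees of freedom
chi2c_exact = stats.chi2.ppf(1 - alpha, dof)   # exact critical value
print(chi2c_exact)                             # ~18.31 for these inputs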
def loss(r_0, a, alpha, x_focus, x, y_focus, y, overlap):
    loss = np.zeros(np.size(x))
    loss_squared = np.zeros(np.size(x))
    dx = np.zeros(len(x))
    dy = np.zeros(len(y))
    for i in range(0, np.size(x)):
        dx = x_focus - x[i]
        dy = abs(y_focus - y[i])
        R = r_0[i] + dx*alpha
        if dx > 0:
            loss[i] = overlap[i]*2.*a*(r_0[i]/(r_0[i]+alpha*(dx)))**2 * \
                0.5*(np.cos(-dy*pi/(R+4*r_0[i])))
            loss_squared[i] = loss[i]**2
        else:
            loss[i] = 0
            loss_squared[i] = 0
    total_loss = sp.sqrt(np.sum(loss_squared))
    return total_loss
def __init__(self, w, h, r, n):
    # w and h are the width and height of the field
    self.w = w
    self.h = h
    # n is the number of test points
    self.n = n
    self.r2 = r**2.0
    self.A = 3.0*self.r2
    # cs is the cell size
    self.cs = r / scipy.sqrt(2)
    # gw and gh are the number of grid cells
    self.gw = int(scipy.ceil(self.w/self.cs))
    self.gh = int(scipy.ceil(self.h/self.cs))
    # create a grid and a queue
    self.grid = [None] * self.gw * self.gh
    self.queue = list()
    # set the queue size and sample size to zero
    self.qs, self.ss = 0, 0
def rvs(self):
    if self.ss == 0:
        x = random.random() * self.w
        y = random.random() * self.h
        self.set_point(x, y)
    while self.qs:
        x_idx = int(random.random() * self.qs)
        s = self.queue[x_idx]
        for y_idx in range(self.n):
            a = 2 * scipy.pi * random.random()
            b = scipy.sqrt(self.A * random.random() + self.r2)
            x = s[0] + b*scipy.cos(a)
            y = s[1] + b*scipy.sin(a)
            if (x >= 0) and (x < self.w):
                if (y >= 0) and (y < self.h):
                    if self.distance(x, y):
                        self.set_point(x, y)
        del self.queue[x_idx]
        self.qs -= 1
    sample = list(filter(None, self.grid))
    sample = scipy.asfarray(sample)
    return sample
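These two methods implement Bridson-style Poisson-disc sampling: candidate points are drawn from the annulus between r and 2r around an active point, since b = sqrt(A*random + r2) with A = 3*r**2 yields radii in [r, 2r). A hypothetical usage sketch, assuming the methods live on a class named PoissonDiskSampler whose set_point and distance helpers are defined elsewhere:

# Hypothetical class name; set_point/distance are assumed helpers
sampler = PoissonDiskSampler(w=100.0, h=100.0, r=5.0, n=30)
points = sampler.rvs()   # (m, 2) array; points are pairwise at least r apart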
def fit(self, X, w):
    if len(X) == 0:
        raise NotEnoughParticles("Fitting not possible.")
    self.X_arr = X.as_matrix()
    ctree = cKDTree(X)
    _, indices = ctree.query(X, k=min(self.k + 1, X.shape[0]))
    covs, inv_covs, dets = list(zip(*[self._cov_and_inv(n, indices)
                                      for n in range(X.shape[0])]))
    self.covs = sp.array(covs)
    self.inv_covs = sp.array(inv_covs)
    self.determinants = sp.array(dets)
    self.normalization = sp.sqrt(
        (2 * sp.pi) ** self.X_arr.shape[1] * self.determinants)
    if not sp.isreal(self.normalization).all():
        raise Exception("Normalization not real")
    self.normalization = sp.real(self.normalization)
def KMV_f(E, D, T, r, sigmaE):
    # N(.) denotes the cumulative standard normal distribution
    # (norm.cdf), defined elsewhere in the source module
    n = 10000
    m = 2000
    diffOld = 1e6   # a very big number
    for i in sp.arange(1, 10):
        for j in sp.arange(1, m):
            A = E + D/2 + i*D/n
            sigmaA = 0.05 + j*(1.0-0.001)/m
            d1 = (log(A/D) + (r+sigmaA*sigmaA/2.)*T)/(sigmaA*sqrt(T))
            d2 = d1 - sigmaA*sqrt(T)
            diff4E = (A*N(d1) - D*exp(-r*T)*N(d2) - E)/A   # scale by assets
            diff4A = A/E*N(d1)*sigmaA - sigmaE   # a small number already
            diffNew = abs(diff4E) + abs(diff4A)
            if diffNew < diffOld:
                diffOld = diffNew
                output = (round(A, 2), round(sigmaA, 4), round(diffNew, 5))
    return output
def implied_vol_put_min(S, X, T, r, p):
    implied_vol = 1.0
    min_value = 100.0
    for i in range(1, 10000):
        sigma = 0.0001*(i+1)
        d1 = (log(S/X) + (r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
        d2 = d1 - sigma*sqrt(T)
        put = X*exp(-r*T)*stats.norm.cdf(-d2) - S*stats.norm.cdf(-d1)
        abs_diff = abs(put-p)
        if abs_diff < min_value:
            min_value = abs_diff
            implied_vol = sigma
            k = i
            put_out = put
    print('k, implied_vol, put, abs_diff')
    return k, implied_vol, put_out, min_value
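The linear scan above evaluates up to 10,000 grid points; since the Black-Scholes put price is monotone in sigma, a bracketing root finder does the same job in a few iterations. A sketch using scipy.optimize.brentq (the bracket endpoints are assumptions; the observed price p must lie between the model prices at 1e-4 and 5.0):

from math import exp, log, sqrt
from scipy import optimize, stats

def bs_put(S, X, T, r, sigma):
    d1 = (log(S/X) + (r + sigma*sigma/2.)*T)/(sigma*sqrt(T))
    d2 = d1 - sigma*sqrt(T)
    return X*exp(-r*T)*stats.norm.cdf(-d2) - S*stats.norm.cdf(-d1)

def implied_vol_put_brentq(S, X, T, r, p):
    # root of (model price - observed price) as a function of sigma
    return optimize.brentq(lambda sig: bs_put(S, X, T, r, sig) - p,
                           1e-4, 5.0)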
def up_and_out_call(s0, x, T, r, sigma, n_simulation, barrier):
    n_steps = 100.
    dt = T/n_steps
    total = 0
    for j in sp.arange(0, n_simulation):
        sT = s0
        out = False
        for i in range(0, int(n_steps)):
            e = sp.random.normal()
            sT *= sp.exp((r-0.5*sigma*sigma)*dt + sigma*e*sp.sqrt(dt))
            if sT > barrier:
                out = True
        if out == False:
            total += bsCall(s0, x, T, r, sigma)
    return total/n_simulation
def upCall(s, x, T, r, sigma, nSimulation, barrier):
    import scipy as sp
    import p4f
    n_steps = 100
    dt = T/n_steps
    inTotal = 0
    outTotal = 0
    for j in range(0, nSimulation):
        sT = s
        inStatus = False
        outStatus = True
        for i in range(0, int(n_steps)):
            e = sp.random.normal()
            sT *= sp.exp((r-0.5*sigma*sigma)*dt + sigma*e*sp.sqrt(dt))
            if sT > barrier:
                outStatus = False
                inStatus = True
        if outStatus == True:
            outTotal += p4f.bs_call(s, x, T, r, sigma)
        else:
            inTotal += p4f.bs_call(s, x, T, r, sigma)
    return outTotal/nSimulation, inTotal/nSimulation
def lookback_min_price_as_strike(s, T, r, sigma, n_simulation):
    n_steps = 100
    dt = T/n_steps
    total = 0
    for j in range(n_simulation):
        min_price = 100000.   # a very big number
        sT = s
        for i in range(int(n_steps)):
            e = sp.random.normal()
            sT *= sp.exp((r-0.5*sigma*sigma)*dt + sigma*e*sp.sqrt(dt))
            if sT < min_price:
                min_price = sT
                #print 'j=',j,'i=',i,'total=',total
        total += p4f.bs_call(s, min_price, T, r, sigma)
    return total/n_simulation
def up_and_out_call(s0, x, T, r, sigma, n_simulation, barrier):
    n_steps = 100.
    dt = T/n_steps
    total = 0
    for j in range(0, n_simulation):
        sT = s0
        out = False
        for i in range(0, int(n_steps)):
            e = sp.random.normal()
            sT *= sp.exp((r-0.5*sigma*sigma)*dt + sigma*e*sp.sqrt(dt))
            if sT > barrier:
                out = True
        if out == False:
            total += p4f.bs_call(s0, x, T, r, sigma)
    return total/n_simulation
def _BlackScholesCall(S, K, T, sigma, r, q):
    d1 = (log(S/K) + (r - q + (sigma**2)/2)*T)/(sigma*sqrt(T))
    d2 = d1 - sigma*sqrt(T)
    return S*exp(-q*T)*norm.cdf(d1) - K*exp(-r*T)*norm.cdf(d2)
def _BlackScholesPut(S, K, T, sigma, r, q):
    d1 = (log(S/K) + (r - q + (sigma**2)/2)*T)/(sigma*sqrt(T))
    d2 = d1 - sigma*sqrt(T)
    return K*exp(-r*T)*norm.cdf(-d2) - S*exp(-q*T)*norm.cdf(-d1)
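A put-call parity check is a convenient sanity test for the pair of functions above; with a continuous dividend yield q, C - P = S*exp(-q*T) - K*exp(-r*T). The inputs below are hypothetical, and the snippet assumes both functions and their log/sqrt/exp/norm imports are in scope:

from math import exp

S, K, T, sigma, r, q = 100.0, 95.0, 0.5, 0.2, 0.03, 0.01   # hypothetical inputs
lhs = _BlackScholesCall(S, K, T, sigma, r, q) - _BlackScholesPut(S, K, T, sigma, r, q)
rhs = S*exp(-q*T) - K*exp(-r*T)
assert abs(lhs - rhs) < 1e-10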
def _fprime(self, sigma):
    logSoverK = log(self.S/self.K)
    n12 = ((self.r + sigma**2/2)*self.T)
    numerd1 = logSoverK + n12
    d1 = numerd1/(sigma*sqrt(self.T))
    return self.S*sqrt(self.T)*norm.pdf(d1)*exp(-self.r*self.T)
def computeFaceRecord(self, img, rect=None, eyes=None):
    '''Given an image and face detection box, compute a face identification record'''
    assert self.trained
    img = self.cropFace(img, eyes)
    vec = self.computeVector(img)
    fir = self.pca.project(vec, whiten=True)
    if self.measure == PCA_COS:
        scale = scipy.sqrt((fir*fir).sum())
        fir = (1.0/scale)*fir
    return fir
def similarity(self, fir1, fir2):
    '''Compute the similarity of two faces'''
    assert self.trained == True
    if self.measure == PCA_L1:
        return (scipy.abs(fir1-fir2)).sum()
    if self.measure == PCA_L2:
        return scipy.sqrt(((fir1-fir2)*(fir1-fir2)).sum())
    if self.measure == PCA_COS:
        return (fir1*fir2).sum()
    raise NotImplementedError("Unknown distance measure: %d" % self.measure)
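When self.measure == PCA_COS, computeFaceRecord has already scaled each record to unit length, so the dot product in similarity is exactly cosine similarity. For raw (unscaled) vectors the same quantity looks like this, as a minimal NumPy sketch:

import numpy as np

def cosine_similarity(u, v):
    # equals (u*v).sum() once u and v are pre-scaled to unit length
    return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))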
def norm(self):
    x = centered1d(self.x)
    y = centered1d(self.y)
    return scipy.sqrt(trapz2(self.intensity(), x=x, y=y))
def update_rho(self, k, r, s):
    """Automatic rho adjustment."""
    if self.opt['AutoRho', 'Enabled']:
        tau = self.rho_tau
        mu = self.rho_mu
        xi = self.rho_xi
        if k != 0 and scipy.mod(k+1, self.opt['AutoRho', 'Period']) == 0:
            if self.opt['AutoRho', 'AutoScaling']:
                if s == 0.0 or r == 0.0:
                    rhomlt = tau
                else:
                    rhomlt = scipy.sqrt(r/(s*xi) if r > s*xi else (s*xi)/r)
                    if rhomlt > tau:
                        rhomlt = tau
            else:
                rhomlt = tau
            rsf = 1.0
            if r > xi*mu*s:
                rsf = rhomlt
            elif s > (mu/xi)*r:
                rsf = 1.0/rhomlt
            self.rho = self.dtype.type(rsf*self.rho)
            self.U /= rsf
            if rsf != 1.0:
                self.rhochange()
def rsdl_rn(self, AX, Y):
    """Compute primal residual normalisation term.

    Overriding this method is required if methods :meth:`cnst_A`,
    :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
    overridden.
    """
    if not hasattr(self, '_cnst_nrm_c'):
        self._cnst_nrm_c = np.sqrt(linalg.norm(self.cnst_c0())**2 +
                                   linalg.norm(self.cnst_c1())**2)
    return max((linalg.norm(AX), linalg.norm(Y), self._cnst_nrm_c))
def rsdl_s(self, Yprev, Y):
    """Compute dual residual vector."""
    # Since s = rho A^T B (y^(k+1) - y^(k)) and B = -(I I I ...)^T,
    # the correct calculation here would involve replicating (Yprev - Y)
    # on the axis on which the blocks of X are stacked. Since this would
    # require allocating additional memory, and since it is only the norm
    # of s that is required, instead of replicating the vector it is
    # scaled to have the same l2 norm as the replicated vector
    return np.sqrt(self.Nb)*self.rho*(Yprev - Y)
def rsdl_rn(self, AX, Y):
    """Compute primal residual normalisation term."""
    # The primal residual normalisation term is
    # max( ||A x^(k)||_2, ||B y^(k)||_2 ) and B = -(I I I ...)^T.
    # The scaling by sqrt(Nb) of the l2 norm of Y accounts for the
    # block replication introduced by multiplication by B
    return max((linalg.norm(AX), np.sqrt(self.Nb)*linalg.norm(Y)))
def c1(R1):
    return c2 * sc.sqrt(R2 / R1)

# Equilibrium magnetic field strength within the slab:
def m0(W):
    return sc.sqrt((c0**2 - W**2)*(vA**2 - W**2) /
                   ((c0**2 + vA**2)*(cT**2 - W**2)))
def m00(W):
    return sc.sqrt(1 - W**2/c0**2)
def m1(W, R1):
    return sc.sqrt(1 - W**2*(c2*sc.sqrt(R2/R1))**(-2))
def m2(W):
    return sc.sqrt(1 - W**2/c2**2)
def rmse(ymodel, yreal):
    '''root-mean-square error'''
    return sp.sqrt(sp.mean((ymodel - yreal) ** 2))
def writeCgxCFile(self, filename, base_station):
    """
    write cgx type c file (without header)
    need to check which is corrected from tides
    re-write the sd as former SD/sqrt(duration)
    last column is the first gravity value of the 'o' file, hence the
    first of the dictionary, corrected for the tides
    should add a checkbox in the tree object to select the base station
    """
    offset = self.station_dic[base_station].grav[0]
    print("filename: %s" % filename)
    file = open(filename, 'w')
    for station_id, sta in sorted(self.station_dic.items(),
                                  key=lambda x: x[1].t[1]):
        if sta.keepitem == 1:
            for i in range(len(sta.t)):
                if sta.keepdata[i] == 1:
                    file.write("%6d %4.3f %1.3f %2d %2d %2.1f %2.1f %2.2f "
                               "%1.3f %3d %3.3f %02d%02d%02d %02d%02d%02d "
                               "0 0.000 %3.4f\n" % (
                                   sta.station[i], sta.grav[i]-sta.etc[i],
                                   sta.sd[i]/sqrt(sta.dur[i]),
                                   sta.dur[i], sta.rej[i],
                                   sta.tiltx[i], sta.tilty[i],
                                   sta.temp[i], sta.etc[i],
                                   sta.t[i].timetuple()[7],
                                   sta.t[i].hour*60 + sta.t[i].minute + sta.t[i].second/60,
                                   sta.t[i].day, sta.t[i].month, sta.t[i].year-2000,
                                   sta.t[i].hour, sta.t[i].minute, sta.t[i].second,
                                   sta.grav[i]-offset))
    file.close()
def writeModifCFile(self, filename, base_station):
    """
    write modified cgx type c file (without header)
    need to check which is corrected from tides
    re-write the sd as former SD/sqrt(duration)
    last column is the first gravity value of the 'o' file, hence the
    first of the dictionary, corrected for the tides
    add an extra column with the keepdata value (0 or 1) for further loading
    should add a checkbox in the tree object to select the base station

    IMPORTANT: grav is corrected for tides (as in the classical raw data),
    which differs from classical c files, and SD is SD, not SD/sqrt(dur)
    as for classical c files. Another option would be to really write 'c'
    files and modify the reading function (read_c_file).
    """
    offset = self.station_dic[base_station].grav[0]
    print("filename: %s" % filename)
    file = open(filename, 'w')
    for station_id, sta in sorted(self.station_dic.items(),
                                  key=lambda x: x[1].t[1]):
        if sta.keepitem == 1:
            for i in range(len(sta.t)):
                file.write("%6d %4.3f %1.3f %2d %2d %2.1f %2.1f %2.2f %1.3f "
                           "%3d %3.3f %02d%02d%02d %02d%02d%02d 0 0.000 "
                           "%3.4f %1d\n" % (
                               sta.station[i], sta.grav[i], sta.sd[i],
                               sta.dur[i], sta.rej[i],
                               sta.tiltx[i], sta.tilty[i],
                               sta.temp[i], sta.etc[i],
                               sta.t[i].timetuple()[7],
                               sta.t[i].hour*60 + sta.t[i].minute + sta.t[i].second/60,
                               sta.t[i].day, sta.t[i].month, sta.t[i].year-2000,
                               sta.t[i].hour, sta.t[i].minute, sta.t[i].second,
                               sta.grav[i]-offset, sta.keepdata[i]))
    file.close()
def rmse(y_test, y):
    return sp.sqrt(sp.mean((y_test - y) ** 2))
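Both rmse variants above compute the same root-mean-square error. A self-contained check with NumPy in place of the removed scipy aliases (inputs are hypothetical):

import numpy as np

y = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.1, 1.9, 3.2])
rms = np.sqrt(np.mean((y_pred - y) ** 2))   # ~0.1414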
def kde(data, N=None, MIN=None, MAX=None):
    # Parameters to set up the mesh on which to calculate
    N = 2**14 if N is None else int(2**sci.ceil(sci.log2(N)))
    if MIN is None or MAX is None:
        minimum = min(data)
        maximum = max(data)
        Range = maximum - minimum
        MIN = minimum - Range/10 if MIN is None else MIN
        MAX = maximum + Range/10 if MAX is None else MAX
    # Range of the data
    R = MAX-MIN
    # Histogram the data to get a crude first approximation of the density
    M = len(data)
    DataHist, bins = sci.histogram(data, bins=N, range=(MIN, MAX))
    DataHist = DataHist/M
    DCTData = scipy.fftpack.dct(DataHist, norm=None)
    I = [iN*iN for iN in range(1, N)]
    SqDCTData = (DCTData[1:]/2)**2
    # The fixed point calculation finds the bandwidth = t_star
    guess = 0.1
    try:
        t_star = scipy.optimize.brentq(fixed_point, 0, guess,
                                       args=(M, I, SqDCTData))
    except ValueError:
        print('Oops!')
        return None
    # Smooth the DCTransformed data using t_star
    SmDCTData = DCTData*sci.exp(-sci.arange(N)**2*sci.pi**2*t_star/2)
    # Inverse DCT to get density
    density = scipy.fftpack.idct(SmDCTData, norm=None)*N/R
    mesh = [(bins[i]+bins[i+1])/2 for i in range(N)]
    bandwidth = sci.sqrt(t_star)*R
    density = density/sci.trapz(density, mesh)
    return bandwidth, mesh, density
def fixed_point(t, M, I, a2):
    l = 7
    I = sci.float128(I)
    M = sci.float128(M)
    a2 = sci.float128(a2)
    f = 2*sci.pi**(2*l)*sci.sum(I**l*a2*sci.exp(-I*sci.pi**2*t))
    for s in range(l, 1, -1):
        K0 = sci.prod(range(1, 2*s, 2))/sci.sqrt(2*sci.pi)
        const = (1 + (1/2)**(s + 1/2))/3
        time = (2*const*K0/M/f)**(2/(3+2*s))
        f = 2*sci.pi**(2*s)*sci.sum(I**s*a2*sci.exp(-I*sci.pi**2*time))
    return t - (2*M*sci.sqrt(sci.pi)*f)**(-2/5)
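kde() and fixed_point() together implement Botev-style bandwidth selection: the histogram is DCT-transformed, the fixed-point equation is solved with brentq for t_star, and the returned bandwidth is sqrt(t_star)*R. A hypothetical usage sketch, assuming both functions and the module's sci/scipy imports are in scope (note that sci.float128 is unavailable on some platforms):

import numpy as np

data = np.random.normal(size=1000)
result = kde(data)          # returns None if the root find fails
if result is not None:
    bandwidth, mesh, density = result
    print("selected bandwidth:", bandwidth)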
def __init__(self, data, mleValue, fitParameters=True, mean=None, sigma=None):
    super(normalModel, self).__init__(data)
    self.MLE = mleValue
    if None in [mean, sigma]:
        fitParameters = True
    if fitParameters:
        mean = Mean(self.getDataSet())
        sigma = standardDeviation(self.getDataSet())
        try:
            def normDist(x, x0, sigma):
                output = 1.0/sqrt(2*np.pi*(sigma**2))
                output *= exp(-0.5*((x - x0)/sigma)**2)
                return output
            self.n, self.bins, patches = plt.hist(
                self.getDataSet(), self.getDatasetSize() // 10,
                normed=1, facecolor='blue', alpha=0.55)
            popt, pcov = curve_fit(normDist, self.bins[:-1], self.n,
                                   p0=[mean, sigma])
            ##plt.plot(bins[:-1], gaus(bins[:-1],*popt),'c-',label="Gaussian Curve with params\na=%f\nx0=%f\nsigma=%f" % (popt[0], popt[1], popt[2]), alpha=0.5)
            print("Fitted gaussian curve to data with params x0 %f, sigma %f"
                  % (popt[0], popt[1]))
            self.x0 = popt[0]
            self.sigma = popt[1]
            self.fitted = True
        except RuntimeError:
            print("Unable to fit data to normal curve, runtime error")
            raise
        except Warning:
            raise RuntimeError
    else:
        self.x0 = mean
        self.sigma = sigma
def getModelpdf(self, x):
    output = 1.0/math.sqrt(2*np.pi*(self.getSigmaValue()**2))
    output *= math.exp(-0.5*((x - self.getx0Value())/self.getSigmaValue())**2)
    return output
def getModelpdf(self, x):
    if x >= 0:
        output = 1.0/((x*self.getSigmaValue())*sqrt(2*np.pi))
        output *= exp(-0.5*((log(x) - self.getx0Value())/self.getSigmaValue())**2)
    else:
        output = x
    return scipy.where((x < 0), 0.0, output)
def _calculate_whitening_transformation_matrix(self, sample_from_prior):
    samples_vec = sp.asarray([self._dict_to_to_vect(x)
                              for x in sample_from_prior])
    # samples_vec is an array of shape nr_samples x nr_features
    means = samples_vec.mean(axis=0)
    centered = samples_vec - means
    covariance = centered.T.dot(centered)
    w, v = la.eigh(covariance)
    self._whitening_transformation_matrix = (
        v.dot(sp.diag(1. / sp.sqrt(w))).dot(v.T))
def weighted_std(points, weights):
    mean = weighted_mean(points, weights)
    std = sp.sqrt(((points - mean)**2 * weights).sum())
    return std
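weighted_std relies on a weighted_mean helper defined elsewhere and on weights that sum to 1. The same computation as a self-contained NumPy sketch (inputs are hypothetical):

import numpy as np

points = np.array([1.0, 2.0, 3.0])
weights = np.array([0.2, 0.5, 0.3])   # assumed normalized to sum to 1
mean = (points * weights).sum()
std = np.sqrt(((points - mean) ** 2 * weights).sum())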
def test_gaussian_multiple_populations(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(0, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})

    minimum_epsilon = -1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))

    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_x**2 + 1 / sigma_y**2)
    mu_x_given_y = sigma_x_given_y**2 * y_observed / sigma_y**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.052
    assert history.max_t == nr_populations-1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .12
def test_two_competing_gaussians_single_population(db_path, sampler,
                                                   transition):
    sigma_x = .5
    sigma_y = .5
    y_observed = 1

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model, model]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(500)
    mu_x_1, mu_x_2 = 0, 1
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(mu_x_1, sigma_x)),
        Distribution(x=st.norm(mu_x_2, sigma_x))]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 transitions=[transition(), transition()],
                 eps=MedianEpsilon(.02),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})

    minimum_epsilon = -1
    nr_populations = 1
    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=1)
    mp = history.get_model_probabilities(history.max_t)

    def p_y_given_model(mu_x_model):
        return st.norm(mu_x_model,
                       sp.sqrt(sigma_y**2 + sigma_x**2)).pdf(y_observed)

    p1_expected_unnormalized = p_y_given_model(mu_x_1)
    p2_expected_unnormalized = p_y_given_model(mu_x_2)
    p1_expected = p1_expected_unnormalized / (p1_expected_unnormalized
                                              + p2_expected_unnormalized)
    p2_expected = p2_expected_unnormalized / (p1_expected_unnormalized
                                              + p2_expected_unnormalized)
    assert history.max_t == nr_populations - 1
    assert abs(mp.p[0] - p1_expected) + abs(mp.p[1] - p2_expected) < .07
def distance(p1, p2):
    return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)
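The same Euclidean distance can be had from np.linalg.norm, which also generalizes beyond three dimensions (inputs below are hypothetical):

import numpy as np

p1 = np.array([0.0, 0.0, 0.0])
p2 = np.array([1.0, 2.0, 2.0])
assert np.isclose(np.linalg.norm(p1 - p2), 3.0)   # same value as distance(p1, p2)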
def bsCall(S, X, T, r, sigma):
    from scipy import log, exp, sqrt, stats
    d1 = (log(S/X) + (r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
    d2 = d1 - sigma*sqrt(T)
    return S*stats.norm.cdf(d1) - X*exp(-r*T)*stats.norm.cdf(d2)
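A usage sketch for bsCall with hypothetical inputs (an at-the-money call, six months to expiry):

price = bsCall(S=40.0, X=40.0, T=0.5, r=0.05, sigma=0.2)
print(round(price, 2))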
def sharpe(R, w):
    var = portfolio_var(R, w)
    mean_return = sp.mean(R, axis=0)
    ret = sp.array(mean_return)
    return (sp.dot(w, ret) - rf)/sp.sqrt(var)

# function 4: for given n-1 weights, return a negative sharpe ratio
def BIS_f(R, s, n):
    # R0 and z are expected to be defined in the enclosing scope; the R
    # argument is immediately overwritten by R0 in the original source
    output = []
    R = R0
    for i in sp.arange(0, n):
        deltaR = z[i]*s/sp.sqrt(2.)
        logR = sp.log(R)
        R = sp.exp(logR + deltaR)
        output.append(round(R, 5))
    return output
def bsCall(S, X, T, r, sigma):
    d1 = (log(S/X) + (r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
    d2 = d1 - sigma*sqrt(T)
    return S*stats.norm.cdf(d1) - X*exp(-r*T)*stats.norm.cdf(d2)
def bs_call(S, X, T, r, sigma):
    d1 = (log(S/X) + (r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
    d2 = d1 - sigma*sqrt(T)
    return S*stats.norm.cdf(d1) - X*exp(-r*T)*stats.norm.cdf(d2)
def implied_vol_call(S, X, T, r, c):
    for i in range(200):
        sigma = 0.005*(i+1)
        d1 = (log(S/X) + (r+sigma*sigma/2.)*T)/(sigma*sqrt(T))
        d2 = d1 - sigma*sqrt(T)
        diff = c - (S*stats.norm.cdf(d1) - X*exp(-r*T)*stats.norm.cdf(d2))
        if abs(diff) <= 0.01:
            return i, sigma, diff
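Note that one grid step of 0.005 in sigma can move the model price by more than the 0.01 tolerance (roughly vega times 0.005), in which case the loop falls through and the function implicitly returns None; callers should handle that case. A hypothetical usage sketch:

result = implied_vol_call(S=40.0, X=40.0, T=0.5, r=0.05, c=3.3)
if result is not None:
    i, sigma, diff = result
    print(sigma, diff)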