The code examples below, extracted from open-source Python projects, illustrate how to use scipy.special.erf().
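Before the project examples, a minimal sketch (not taken from any of the projects below) of what scipy.special.erf computes: it is vectorized, odd, and saturates at ±1.

import numpy as np
from scipy.special import erf

print(erf(0.0))                          # 0.0
print(erf(np.array([-1.0, 0.0, 1.0])))   # [-0.8427  0.      0.8427]
print(erf(np.inf), erf(-np.inf))         # 1.0 -1.0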
def skewgauss(x, sigma, mu, alpha, a):
    """Skewed Gaussian.

    Args:
        x (np.ndarray): Dataset.
        sigma (float): Standard deviation.
        mu (float): Mean.
        alpha (float): Skewness.
        a (float): Normalization factor.

    Returns:
        skewed gaussian (np.ndarray): Skewed Gaussian.
    """
    normpdf = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-(x - mu)**2 / (2 * sigma**2))
    normcdf = 0.5 * (1 + erf((alpha * ((x - mu) / sigma)) / (np.sqrt(2))))
    return 2 * a * normpdf * normcdf
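A brief usage sketch for skewgauss with hypothetical values (assumes numpy as np and erf imported as in the project): alpha = 0 recovers the ordinary Gaussian, and larger |alpha| skews the profile.

import numpy as np
from scipy.special import erf

x = np.linspace(-5, 5, 201)
plain = skewgauss(x, sigma=1.0, mu=0.0, alpha=0.0, a=1.0)    # symmetric Gaussian
skewed = skewgauss(x, sigma=1.0, mu=0.0, alpha=4.0, a=1.0)   # right-skewed profile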
def test_time(res, off, t, signal):
    """Time domain analytical half-space solution.

    - Source at x = y = z = 0 m
    - Receiver at y = z = 0 m; x = off
    - Resistivity of halfspace res
    - Times t, t > 0 s
    - Impulse response if signal = 0
    - Switch-on response if signal = 1
    - Switch-off response if signal = -1
    """
    tau = np.sqrt(mu_0*off**2/(res*t))
    fact1 = res/(2*np.pi*off**3)
    fact2 = tau/np.sqrt(np.pi)
    if signal == 0:
        return fact1*tau**3/(4*t*np.sqrt(np.pi))*np.exp(-tau**2/4)
    else:
        resp = fact1*(2 - special.erf(tau/2) + fact2*np.exp(-tau**2/4))
        if signal < 0:
            DC = test_time(res, off, 1000000, 1)
            resp = DC - resp
        return resp

# Time-domain solution
def particle(x0, y0, d):
    # Note: relies on module-level pixel grids `x` and `y`.
    sigma = 0.42*d
    C = np.pi/8.*sigma**2
    out = (erf((x - x0 + 0.5)/(sigma*np.sqrt(2))) -
           erf((x - x0 - 0.5)/(sigma*np.sqrt(2))))
    out *= (erf((y - y0 + 0.5)/(sigma*np.sqrt(2))) -
            erf((y - y0 - 0.5)/(sigma*np.sqrt(2))))
    return C*out
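Because particle() reads the grids x and y from module scope, a usage sketch has to define them first (hypothetical setup; assumes particle is defined as above):

import numpy as np
from scipy.special import erf

y, x = np.mgrid[0:64, 0:64].astype(float)  # module-level pixel grids used by particle()
img = particle(x0=32.0, y0=32.0, d=4.0)    # pixel-integrated Gaussian spot of diameter d
print(img.max())                           # peak value lands near pixel (32, 32)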
def sequential_colormap(x):
    from numpy import array
    from scipy.special import erf

    x = array(x)
    if any(x < 0) or any(x > 1):
        raise ValueError('x must be between 0 and 1 inclusive.')

    red = 1.000 - 0.392 * (1 + erf((x - 0.869) / 0.255))
    grn = 1.021 - 0.456 * (1 + erf((x - 0.527) / 0.376))
    blu = 1.000 - 0.493 * (1 + erf((x - 0.272) / 0.309))
    return array([red, grn, blu]).T
def __init__(self, x, min_limit=-np.inf, max_limit=np.inf, weights=1.0):
    self.points = x
    self.N = x.size
    self.min_limit = min_limit
    self.max_limit = max_limit
    self.sigma = get_sigma(x, min_limit=min_limit, max_limit=max_limit)
    self.weights = (2 / (erf((max_limit - x) / (np.sqrt(2.) * self.sigma)) -
                         erf((min_limit - x) / (np.sqrt(2.) * self.sigma))) * weights)
    self.W_sum = np.sum(self.weights)
def Erf2D(x, y, xc, yc, a, b, sigma):
    '''A simple 2D error function PSF model.'''
    c = np.sqrt(2) * sigma
    ex = (erf((x + 1 - xc) / c) - erf((x - xc) / c))
    ey = (erf((y + 1 - yc) / c) - erf((y - yc) / c))
    return a * ex * ey + b
def _cdf_y_gt_f(self, y, f):
    return (erf((y - f) / (self.l * np.sqrt(2))) * self.l * SQRTPI2 + f) \
        / (f + self.l * SQRTPI2)
def _cdf_f_lt_0(self, y, f):
    return erf((y - f) / (self.l * np.sqrt(2)))
def gaussian_sum(self, centers, weights, settings):
    """Calculates a discrete version of a sum of Gaussian distributions.

    The calculation is done through the cumulative distribution function
    that is better at keeping the integral of the probability function
    constant with coarser grids. The values are normalized by dividing
    with the maximum value of a gaussian with the given standard deviation.

    Args:
        centers (1D np.ndarray): The means of the gaussians.
        weights (1D np.ndarray): The weights for the gaussians.
        settings (dict): The grid settings.

    Returns:
        Value of the gaussian sums on the given grid.
    """
    start = settings["min"]
    stop = settings["max"]
    sigma = settings["sigma"]
    n = settings["n"]

    max_val = 1/(sigma*math.sqrt(2*math.pi))

    dx = (stop - start)/(n-1)
    x = np.linspace(start-dx/2, stop+dx/2, n+1)
    pos = x[np.newaxis, :] - centers[:, np.newaxis]
    y = weights[:, np.newaxis]*1/2*(1 + erf(pos/(sigma*np.sqrt(2))))
    f = np.sum(y, axis=0)
    f /= max_val

    f_rolled = np.roll(f, -1)
    pdf = (f_rolled - f)[0:-1]/dx  # PDF is the derivative of CDF

    return pdf
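A self-contained sketch of the same CDF-differencing idea (a hypothetical standalone variant without the class, settings dict, or the max_val normalization): each bin value is the CDF difference across the bin divided by the bin width, so the total integral stays close to the weight sum even on coarse grids.

import numpy as np
from scipy.special import erf

def gaussian_sum_1d(centers, weights, start, stop, sigma, n):
    dx = (stop - start) / (n - 1)
    edges = np.linspace(start - dx/2, stop + dx/2, n + 1)  # bin edges
    pos = edges[np.newaxis, :] - centers[:, np.newaxis]
    cdf = weights[:, np.newaxis] * 0.5 * (1 + erf(pos / (sigma * np.sqrt(2))))
    f = cdf.sum(axis=0)
    return (f[1:] - f[:-1]) / dx                           # mean PDF per bin

pdf = gaussian_sum_1d(np.array([0.0]), np.array([1.0]),
                      start=-5, stop=5, sigma=1.0, n=11)
print(pdf.sum() * 1.0)  # dx is 1.0 here; total area ~ 1.0 even on a coarse grid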
def _cdf(z):
    """Cumulative distribution function (CDF) of the standard normal distribution."""
    return .5 * (1. + erf(z/np.sqrt(2.)))
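The identity Phi(z) = (1 + erf(z/sqrt(2)))/2 can be checked directly against scipy.stats.norm (a quick sanity check, not part of the original project):

import numpy as np
from scipy.special import erf
from scipy.stats import norm

z = np.linspace(-3, 3, 7)
phi_erf = 0.5 * (1.0 + erf(z / np.sqrt(2.0)))
assert np.allclose(phi_erf, norm.cdf(z))  # erf-based CDF matches scipy.stats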
def truncated_normal(shape=None, mu=0., sigma=1., x_min=None, x_max=None):
    """
    Generates random variates from a lower- and upper-bounded normal
    distribution.

    @param shape: shape of the random sample
    @param mu: location parameter
    @param sigma: width of the distribution (sigma >= 0.)
    @param x_min: lower bound of variate
    @param x_max: upper bound of variate
    @return: random variates of lower-bounded normal distribution
    """
    from scipy.special import erf, erfinv
    from numpy.random import standard_normal
    from numpy import inf, sqrt

    if x_min is None and x_max is None:
        return standard_normal(shape) * sigma + mu
    elif x_min is None:
        x_min = -inf
    elif x_max is None:
        x_max = inf

    x_min = max(-1e300, x_min)
    x_max = min(+1e300, x_max)
    var = sigma ** 2 + 1e-300
    sigma = sqrt(2 * var)

    a = erf((x_min - mu) / sigma)
    b = erf((x_max - mu) / sigma)

    return probability_transform(shape, erfinv, a, b) * sigma + mu
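probability_transform is defined elsewhere in that project. A self-contained equivalent using the same inverse-CDF idea (a sketch under that assumption, with a hypothetical function name): draw u uniformly between the rescaled CDF values erf((x_min-mu)/s) and erf((x_max-mu)/s), then map back through erfinv.

import numpy as np
from scipy.special import erf, erfinv

def truncated_normal_simple(shape, mu=0.0, sigma=1.0, x_min=-np.inf, x_max=np.inf):
    s = np.sqrt(2) * sigma
    a, b = erf((x_min - mu) / s), erf((x_max - mu) / s)  # rescaled CDF at the bounds
    u = np.random.uniform(a, b, size=shape)              # uniform in the allowed CDF range
    return erfinv(u) * s + mu                            # invert back to x space

samples = truncated_normal_simple((10000,), mu=0.0, sigma=1.0, x_min=0.0)
assert samples.min() >= 0.0  # all variates respect the lower bound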
def anomalies(log_z, row_label=None, prefix=''):
    from scipy.special import erf

    ns = log_z.shape[1]
    if row_label is None:
        row_label = list(map(str, range(ns)))
    a_score = np.sum(log_z[:, :, 0], axis=0)
    mean, std = np.mean(a_score), np.std(a_score)
    a_score = (a_score - mean) / std
    percentile = 1. / ns
    anomalies = np.where(0.5 * (1 - erf(a_score / np.sqrt(2))) < percentile)[0]
    f = safe_open(prefix + '/text_files/anomalies.txt', 'w+')
    for i in anomalies:
        f.write(row_label[i] + ', %0.1f\n' % a_score[i])
    f.close()
def flow_para_function_with_vibration(x, beta, relaxation_rate, flow_velocity,
                                      freq, amp, baseline=1):
    vibration_part = (1 + amp * np.cos(2 * np.pi * freq * x))
    Diff_part = np.exp(-2 * relaxation_rate * x)
    Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(
        erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity)))**2
    return beta * vibration_part * Diff_part * Flow_part + baseline
def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1):
    '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle))'''
    Diff_part = np.exp(-2 * relaxation_rate * x)
    Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(
        erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity)))**2
    return beta * Diff_part * Flow_part + baseline
def flow_para_function_explicitq(x, beta, relaxation_rate, flow_velocity,
                                 baseline=1, qr=1, q_ang=0):
    '''Nov 9, 2017. Basically, express the q vector as (qr, angle);
    relaxation_rate is actually a diffusion rate.
    flow_velocity: q.v (q vector dot v vector = q*v*cos(angle))'''
    Diff_part = np.exp(-2 * relaxation_rate * qr**2 * x)
    Flow_part = np.pi**2 / (16 * x * flow_velocity * qr * np.cos(q_ang)) * abs(
        erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity * qr * np.cos(q_ang))))**2
    return beta * Diff_part * Flow_part + baseline
def stretched_flow_para_function(x, beta, relaxation_rate, alpha, flow_velocity,
                                 baseline=1):
    '''flow_velocity: q.v (q vector dot v vector = q*v*cos(angle))'''
    Diff_part = np.exp(-2 * (relaxation_rate * x)**alpha)
    Flow_part = np.pi**2 / (16 * x * flow_velocity) * abs(
        erf(np.sqrt(4 / np.pi * 1j * x * flow_velocity)))**2
    return beta * Diff_part * Flow_part + baseline
def shape(xs, center, amplitude, sigma, gamma):
    norm = (amplitude) / (sigma * sqrt(2 * pi)) * \
        exp(-((xs - center) ** 2) / (2 * sigma ** 2))
    skew = (1 + erf((gamma * (xs - center)) / (sigma * sqrt(2))))
    return norm * skew
def evaluate(self, F, xo, yo, s):
    return (F / 4 *
            ((erf((self.x - xo + 0.5) / (np.sqrt(2) * s)) -
              erf((self.x - xo - 0.5) / (np.sqrt(2) * s))) *
             (erf((self.y - yo + 0.5) / (np.sqrt(2) * s)) -
              erf((self.y - yo - 0.5) / (np.sqrt(2) * s)))))
def errfunc(c):
    arg = 1.00 * c / sqrt(2.00)
    return special.erf(arg)
def plot(begin, end, step):
    c = cvector(begin, end, step)
    erf = errfunc(c)
    print(erf)
    plt.plot(c, erf)
    plt.xlabel('0 <= c <= 5')
    plt.ylabel('I(c)')
    plt.show()
def calc_theta_i(mag, mag_err, maxmag, limmag):
    """
    Calculate theta_i. This is reproduced from calclambda_chisq_theta_i.pr

    parameters
    ----------
    mag:
    mag_err:
    maxmag:
    limmag:

    returns
    -------
    theta_i:
    """
    theta_i = np.ones((len(mag)))

    eff_lim = np.clip(maxmag, 0, limmag)
    dmag = eff_lim - mag
    calc = dmag < 5.0
    N_calc = np.count_nonzero(calc)
    if N_calc > 0:
        theta_i[calc] = 0.5 + 0.5*erf(dmag[calc]/(np.sqrt(2)*mag_err[calc]))

    hi = mag > limmag
    N_hi = np.count_nonzero(hi)
    if N_hi > 0:
        theta_i[hi] = 0.0

    return theta_i
def StandardGaussianCdf(x):
    """Evaluates the CDF of the standard Gaussian distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution
    #Cumulative_distribution_function

    Args:
        x: float

    Returns:
        float
    """
    return (erf(x / ROOT2) + 1) / 2
def erf(x: T.Tensor) -> T.Tensor:
    """
    Elementwise error function of a tensor.

    Args:
        x: A tensor.

    Returns:
        tensor: Elementwise error function
    """
    return special.erf(x)
def normal_cdf(x: T.Tensor) -> T.Tensor:
    """
    Elementwise cumulative distribution function of the standard normal
    distribution.

    For the CDF of a normal distribution with mean u and standard deviation
    sigma, use normal_cdf((x-u)/sigma).

    Args:
        x (tensor)

    Returns:
        tensor: Elementwise cdf
    """
    return 0.5 * (1 + erf(x/SQRT2))
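Following the docstring's shift-and-scale recipe, the CDF of N(u, sigma^2) at x is normal_cdf((x - u)/sigma). A quick illustration with plain numpy standing in for the project's tensor backend T:

import numpy as np
from scipy.special import erf

SQRT2 = np.sqrt(2.0)

def normal_cdf(x):
    return 0.5 * (1 + erf(x / SQRT2))

u, sigma = 10.0, 2.0
print(normal_cdf((10.0 - u) / sigma))  # 0.5: half the mass lies below the mean
print(normal_cdf((14.0 - u) / sigma))  # ~0.977: two sigma above the mean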
def Edeta(deta, x):
    if deta != 0:
        g = 0.5*(1 + erf(x/deta))
        return g
    else:
        g = np.zeros(x.shape)
        g[x >= 0] = 1
        return g
def predict_proba(self, X):
    """Return probability estimates for the test vector X.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)

    Returns
    -------
    C : array-like, shape = (n_samples, n_classes)
        Returns the probability of the samples for each class in the model.
        The columns correspond to the classes in sorted order, as they
        appear in the attribute ``classes_``.
    """
    check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])

    # Based on Algorithm 3.2 of GPML
    K_star = self.kernel_(self.X_train_, X)  # K_star = k(x_star)
    f_star = K_star.T.dot(self.y_train_ - self.pi_)  # Line 4
    v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star)  # Line 5
    # Line 6 (compute np.diag(v.T.dot(v)) via einsum)
    var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)

    # Line 7:
    # Approximate \int log(z) * N(z | f_star, var_f_star)
    # Approximation is due to Williams & Barber, "Bayesian Classification
    # with Gaussian Processes", Appendix A: Approximate the logistic
    # sigmoid by a linear combination of 5 error functions.
    # For information on how this integral can be computed see
    # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
    alpha = 1 / (2 * var_f_star)
    gamma = LAMBDAS * f_star
    integrals = np.sqrt(np.pi / alpha) \
        * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
        / (2 * np.sqrt(var_f_star * 2 * np.pi))
    pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()

    return np.vstack((1 - pi_star, pi_star)).T
def compute_crossing_fraction(cathode_temp, phi, zmesh):
    """
    Returns the % of particles expected to cross the gap

    Arguments:
        cathode_temp (float) : cathode temperature in K
        phi (scipy)          : Interpolation (scipy.interpolate.interpolate.interp1d)
                               of Phi from initial solve
        zmesh (ndArray)      : 1D array with positions of the z-coordinates of the grid

    Returns:
        e_frac (float) : fraction of beam that overcomes the peak potential barrier phi
    """
    # Conditions at cathode edge
    var_xy = kb_J*cathode_temp/m   # Define the variance of the distribution in the x,y planes
    var_z = 2*kb_J*cathode_temp/m  # Variance in z plane is twice that in the horizontal

    # Phi is in eV
    vz_phi = np.sqrt(2.*e*np.max(phi(zmesh))/m_e)

    # Cumulative distribution function for a Maxwellian
    CDF_Maxwell = lambda v, a: erf(v/(np.sqrt(2)*a)) - \
        np.sqrt(2./np.pi)*v*np.exp(-1.*v**2/(2.*a**2))/a

    e_frac = CDF_Maxwell(vz_phi, np.sqrt(var_z))  # fraction with velocity below vz_phi

    return (1. - e_frac)*100.  # Multiply by 100 to get % value
def E_from_ElectricDipoleWholeSpace(XYZ, srcLoc, sig, t, current=1., length=1.,
                                    orientation='X', kappa=0., epsr=1.):
    """
    Computing the analytic electric fields (E) from an electrical dipole in
    a wholespace

    - You have the option of computing E for multiple times at a single
      receiver location or a single time at multiple locations

    :param numpy.array XYZ: receiver locations at which to evaluate E
    :param numpy.array srcLoc: [x,y,z] triplet defining the location of the
        electric dipole source
    :param float sig: value specifying the conductivity (S/m) of the wholespace
    :param numpy.array t: array of times at which to measure
    :param float current: size of the injected current (A), default is 1.0 A
    :param float length: length of the dipole (m), default is 1.0 m
    :param str orientation: orientation of dipole: 'X', 'Y', or 'Z'
    :param float kappa: magnetic susceptibility value (unitless), default is 0.
    :param float epsr: relative permittivity value (unitless), default is 1.0

    :rtype: numpy.array
    :return: Ex, Ey, Ez: arrays containing all 3 components of E evaluated at
        the specified locations and times.
    """
    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr
    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)

    # Check
    if XYZ.shape[0] > 1 and t.shape[0] > 1:
        raise Exception(
            "I/O type error: For multiple field locations only a single time can be specified.")

    dx = XYZ[:, 0]-srcLoc[0]
    dy = XYZ[:, 1]-srcLoc[1]
    dz = XYZ[:, 2]-srcLoc[2]

    r = np.sqrt(dx**2. + dy**2. + dz**2.)
    theta = np.sqrt((mu*sig)/(4*t))

    front = current * length / (4.*pi*sig*r**3)
    mid = 3*erf(theta*r) - (4/np.sqrt(pi)*(theta)**3*r**3 +
                            6/np.sqrt(pi)*theta*r)*np.exp(-(theta)**2*(r)**2)
    extra = (erf(theta*r) - (4/np.sqrt(pi)*(theta)**3*r**3 +
                             2/np.sqrt(pi)*theta*r)*np.exp(-(theta)**2*(r)**2))

    if orientation.upper() == 'X':
        Ex = front*(dx**2/r**2)*mid - front*extra
        Ey = front*(dx*dy/r**2)*mid
        Ez = front*(dx*dz/r**2)*mid
        return Ex, Ey, Ez

    elif orientation.upper() == 'Y':
        # x --> y, y --> z, z --> x
        Ey = front*(dy**2/r**2)*mid - front*extra
        Ez = front*(dy*dz/r**2)*mid
        Ex = front*(dy*dx/r**2)*mid
        return Ex, Ey, Ez

    elif orientation.upper() == 'Z':
        # x --> z, y --> x, z --> y
        Ez = front*(dz**2/r**2)*mid - front*extra
        Ex = front*(dz*dx/r**2)*mid
        Ey = front*(dz*dy/r**2)*mid
        return Ex, Ey, Ez
def H_from_ElectricDipoleWholeSpace(XYZ, srcLoc, sig, t, current=1., length=1.,
                                    orientation='X', kappa=1., epsr=1.):
    """
    Computing the analytic magnetic fields (H) from an electrical dipole in
    a wholespace

    - You have the option of computing H for multiple times at a single
      receiver location or a single time at multiple locations

    :param numpy.array XYZ: receiver locations at which to evaluate H
    :param numpy.array srcLoc: [x,y,z] triplet defining the location of the
        electric dipole source
    :param float sig: value specifying the conductivity (S/m) of the wholespace
    :param numpy.array t: array of times at which to measure
    :param float current: size of the injected current (A), default is 1.0 A
    :param float length: length of the dipole (m), default is 1.0 m
    :param str orientation: orientation of dipole: 'X', 'Y', or 'Z'
    :param float kappa: magnetic susceptibility value (unitless), default is 1.
    :param float epsr: relative permittivity value (unitless), default is 1.0

    :rtype: numpy.array
    :return: Hx, Hy, Hz: arrays containing all 3 components of H evaluated at
        the specified locations and times.
    """
    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr
    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)

    # Check
    if XYZ.shape[0] > 1 and t.shape[0] > 1:
        raise Exception(
            "I/O type error: For multiple field locations only a single time can be specified.")

    dx = XYZ[:, 0]-srcLoc[0]
    dy = XYZ[:, 1]-srcLoc[1]
    dz = XYZ[:, 2]-srcLoc[2]

    r = np.sqrt(dx**2. + dy**2. + dz**2.)
    theta = np.sqrt((mu*sig)/(4*t))

    front = (current * length) / (4.*pi*(r)**3)
    mid = erf(theta*r) - (2/np.sqrt(pi)) * theta * r * np.exp(-(theta)**2 * (r)**2)

    if orientation.upper() == 'X':
        Hy = front * mid * -dz
        Hz = front * mid * dy
        Hx = np.zeros_like(Hy)
        return Hx, Hy, Hz

    elif orientation.upper() == 'Y':
        Hx = front * mid * dz
        Hz = front * mid * -dx
        Hy = np.zeros_like(Hx)
        return Hx, Hy, Hz

    elif orientation.upper() == 'Z':
        Hx = front * mid * -dy
        Hy = front * mid * dx
        Hz = np.zeros_like(Hx)
        return Hx, Hy, Hz
def H_from_MagneticDipoleWholeSpace(XYZ, srcLoc, sig, t, current=1., length=1.,
                                    orientation='X', kappa=0., epsr=1.):
    """
    Computing the analytic magnetic fields (H) from a magnetic dipole in a
    wholespace

    - You have the option of computing H for multiple times at a single
      receiver location or a single time at multiple locations

    :param numpy.array XYZ: receiver locations at which to evaluate H
    :param numpy.array srcLoc: [x,y,z] triplet defining the location of the
        magnetic dipole source
    :param float sig: value specifying the conductivity (S/m) of the wholespace
    :param numpy.array t: array of times at which to measure
    :param float current: size of the injected current (A), default is 1.0 A
    :param float length: length of the dipole (m), default is 1.0 m
    :param str orientation: orientation of dipole: 'X', 'Y', or 'Z'
    :param float kappa: magnetic susceptibility value (unitless), default is 0.
    :param float epsr: relative permittivity value (unitless), default is 1.0

    :rtype: numpy.array
    :return: Hx, Hy, Hz: arrays containing all 3 components of H evaluated at
        the specified locations and times.
    """
    mu = mu_0*(1+kappa)
    epsilon = epsilon_0*epsr
    XYZ = Utils.asArray_N_x_Dim(XYZ, 3)

    # Check
    if XYZ.shape[0] > 1 and t.shape[0] > 1:
        raise Exception(
            "I/O type error: For multiple field locations only a single time can be specified.")

    dx = XYZ[:, 0]-srcLoc[0]
    dy = XYZ[:, 1]-srcLoc[1]
    dz = XYZ[:, 2]-srcLoc[2]

    r = np.sqrt(dx**2. + dy**2. + dz**2.)
    theta = np.sqrt((mu*sig)/(4*t))

    front = current * length / (4.*pi*r**3)
    mid = 3*erf(theta*r) - (4/np.sqrt(pi)*(theta)**3*r**3 +
                            6/np.sqrt(pi)*theta*r)*np.exp(-(theta)**2*(r)**2)
    extra = (erf(theta*r) - (4/np.sqrt(pi)*(theta)**3*r**3 +
                             2/np.sqrt(pi)*theta*r)*np.exp(-(theta)**2*(r)**2))

    if orientation.upper() == 'X':
        Hx = front*(dx**2/r**2)*mid - front*extra
        Hy = front*(dx*dy/r**2)*mid
        Hz = front*(dx*dz/r**2)*mid
        return Hx, Hy, Hz

    elif orientation.upper() == 'Y':
        # x --> y, y --> z, z --> x
        Hy = front*(dy**2/r**2)*mid - front*extra
        Hz = front*(dy*dz/r**2)*mid
        Hx = front*(dy*dx/r**2)*mid
        return Hx, Hy, Hz

    elif orientation.upper() == 'Z':
        # x --> z, y --> x, z --> y
        Hz = front*(dz**2/r**2)*mid - front*extra
        Hx = front*(dz*dx/r**2)*mid
        Hy = front*(dz*dy/r**2)*mid
        return Hx, Hy, Hz
def process(self, **kwargs):
    """Process module."""
    kwargs = self.prepare_input(self.key('luminosities'), **kwargs)
    self._rest_t_explosion = kwargs[self.key('resttexplosion')]
    self._times = kwargs[self.key('rest_times')]
    self._seds = kwargs[self.key('seds')]
    self._bands = kwargs['all_bands']
    self._band_indices = kwargs['all_band_indices']
    self._sample_wavelengths = kwargs['sample_wavelengths']
    self._frequencies = kwargs['all_frequencies']
    self._luminosities = kwargs[self.key('luminosities')]
    self._line_wavelength = kwargs[self.key('line_wavelength')]
    self._line_width = kwargs[self.key('line_width')]
    self._line_time = kwargs[self.key('line_time')]
    self._line_duration = kwargs[self.key('line_duration')]
    self._line_amplitude = kwargs[self.key('line_amplitude')]
    lw = self._line_wavelength
    ls = self._line_width
    cc = self.C_CONST
    zp1 = 1.0 + kwargs[self.key('redshift')]
    amps = [
        self._line_amplitude * np.exp(-0.5 * (
            (x - self._rest_t_explosion - self._line_time) /
            self._line_duration) ** 2) for x in self._times]

    seds = self._seds
    evaled = False
    for li, lum in enumerate(self._luminosities):
        bi = self._band_indices[li]
        if lum == 0.0:
            if bi >= 0:
                seds.append(np.zeros_like(self._sample_wavelengths[bi]))
            else:
                seds.append([0.0])
            continue
        if bi >= 0:
            rest_wavs = (self._sample_wavelengths[bi] *
                         u.Angstrom.cgs.scale / zp1)
        else:
            rest_wavs = [cc / (self._frequencies[li] * zp1)]  # noqa: F841

        amp = lum * amps[li]

        if not evaled:
            sed = ne.evaluate('amp * exp(-0.5 * ((rest_wavs - lw) / ls) ** 2)')
            evaled = True
        else:
            sed = ne.re_evaluate()

        sed = np.nan_to_num(sed)

        norm = (lum + amp / zp1 * np.sqrt(np.pi / 2.0) * (
            1.0 + erf(lw / (np.sqrt(2.0) * ls)))) / lum

        seds[li] += sed
        seds[li] /= norm

    return {'sample_wavelengths': self._sample_wavelengths,
            self.key('seds'): seds}
def test_cdf(self):
    """Test that the implementation of the gaussian value through the
    cumulative distribution function works as expected.
    """
    from scipy.stats import norm
    from scipy.special import erf

    start = -5
    stop = 5
    n_points = 9
    centers = np.array([0])
    sigma = 1

    area_cum = []
    area_pdf = []

    # Calculate errors for different number of points
    for n_points in range(2, 10):
        axis = np.linspace(start, stop, n_points)

        # Calculate with cumulative function
        dx = (stop - start)/(n_points-1)
        x = np.linspace(start-dx/2, stop+dx/2, n_points+1)
        pos = x[np.newaxis, :] - centers[:, np.newaxis]
        y = 1/2*(1 + erf(pos/(sigma*np.sqrt(2))))
        f = np.sum(y, axis=0)
        f_rolled = np.roll(f, -1)
        pdf_cum = (f_rolled - f)[0:-1]/dx

        # Calculate with probability function
        dist2 = axis[np.newaxis, :] - centers[:, np.newaxis]
        dist2 *= dist2
        f = np.sum(np.exp(-dist2/(2*sigma**2)), axis=0)
        f *= 1/math.sqrt(2*sigma**2*math.pi)
        pdf_pdf = f

        true_axis = np.linspace(start, stop, 200)
        pdf_true = norm.pdf(true_axis, centers[0], sigma)  # + norm.pdf(true_axis, centers[1], sigma)

        # Calculate differences
        sum_cum = np.sum(0.5*dx*(pdf_cum[:-1]+pdf_cum[1:]))
        sum_pdf = np.sum(0.5*dx*(pdf_pdf[:-1]+pdf_pdf[1:]))
        area_cum.append(sum_cum)
        area_pdf.append(sum_pdf)

        # mpl.plot(axis, pdf_pdf, linestyle=":", linewidth=3, color="r")
        # mpl.plot(axis, pdf_cum, linewidth=1, color="g")
        # mpl.plot(true_axis, pdf_true, linestyle="--", color="b")
        # mpl.show()

    mpl.plot(area_cum, linestyle=":", linewidth=3, color="r")
    mpl.plot(area_pdf, linewidth=1, color="g")
    # mpl.plot(true_axis, pdf_true, linestyle="--", color="b")
    mpl.show()
def run_mnist():
    np.random.seed(42)

    # import dataset
    f = gzip.open('./tmp/data/mnist.pkl.gz', 'rb')
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
    f.close()

    Y = x_train[:100, :]
    labels = t_train[:100]
    Y[Y < 0.5] = -1
    Y[Y > 0.5] = 1

    # inference
    print("inference ...")
    M = 30
    D = 2
    # lvm = vfe.SGPLVM(Y, D, M, lik='Gaussian')
    lvm = vfe.SGPLVM(Y, D, M, lik='Probit')
    # lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
    lvm.optimise(method='L-BFGS-B')

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1], c=labels)
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 28
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            t = x_mean / np.sqrt(1 + x_var)
            Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
            canvas[(nx - i - 1) * sx:(nx - i) * sx,
                   j * sy:(j + 1) * sy] = Z.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()
def run_mnist():
    np.random.seed(42)

    # import dataset
    f = gzip.open('./tmp/data/mnist.pkl.gz', 'rb')
    (x_train, t_train), (x_valid, t_valid), (x_test, t_test) = cPickle.load(f)
    f.close()

    Y = x_train[:100, :]
    labels = t_train[:100]
    Y[Y < 0.5] = -1
    Y[Y > 0.5] = 1

    # inference
    print("inference ...")
    M = 30
    D = 2
    # lvm = aep.SGPLVM(Y, D, M, lik='Gaussian')
    lvm = aep.SGPLVM(Y, D, M, lik='Probit')
    # lvm.train(alpha=0.5, no_epochs=10, n_per_mb=100, lrate=0.1, fixed_params=['sn'])
    lvm.optimise(method='L-BFGS-B', alpha=0.1)

    plt.figure()
    mx, vx = lvm.get_posterior_x()
    zu = lvm.sgp_layer.zu
    plt.scatter(mx[:, 0], mx[:, 1], c=labels)
    plt.plot(zu[:, 0], zu[:, 1], 'ko')

    nx = ny = 30
    x_values = np.linspace(-5, 5, nx)
    y_values = np.linspace(-5, 5, ny)
    sx = 28
    sy = 28
    canvas = np.empty((sx * ny, sy * nx))
    for i, yi in enumerate(x_values):
        for j, xi in enumerate(y_values):
            z_mu = np.array([[xi, yi]])
            x_mean, x_var = lvm.predict_f(z_mu)
            t = x_mean / np.sqrt(1 + x_var)
            Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
            canvas[(nx - i - 1) * sx:(nx - i) * sx,
                   j * sy:(j + 1) * sy] = Z.reshape(sx, sy)

    plt.figure(figsize=(8, 10))
    Xi, Yi = np.meshgrid(x_values, y_values)
    plt.imshow(canvas, origin="upper", cmap="gray")
    plt.tight_layout()
    plt.show()
def run_xor():
    from operator import xor
    from scipy import special

    # create dataset
    print("generating dataset...")
    n = 25
    Y = np.zeros((0, 3))
    for i in [0, 1]:
        for j in [0, 1]:
            a = i * np.ones((n, 1))
            b = j * np.ones((n, 1))
            c = xor(bool(i), bool(j)) * np.ones((n, 1))
            Y_ij = np.hstack((a, b, c))
            Y = np.vstack((Y, Y_ij))
    Y = 2 * Y - 1

    # inference
    print("inference ...")
    M = 10
    D = 2
    lvm = aep.SGPLVM(Y, D, M, lik='Probit')
    lvm.optimise(method='L-BFGS-B', alpha=0.1, maxiter=200)

    # predict given inputs
    mx, vx = lvm.get_posterior_x()
    lims = [-1.5, 1.5]
    x = np.linspace(*lims, num=101)
    y = np.linspace(*lims, num=101)
    X, Y = np.meshgrid(x, y)
    X_ravel = X.ravel()
    Y_ravel = Y.ravel()
    inputs = np.vstack((X_ravel, Y_ravel)).T
    my, vy = lvm.predict_f(inputs)
    t = my / np.sqrt(1 + vy)
    Z = 0.5 * (1 + special.erf(t / np.sqrt(2)))
    for d in range(3):
        plt.figure()
        plt.scatter(mx[:, 0], mx[:, 1])
        zu = lvm.sgp_layer.zu
        plt.plot(zu[:, 0], zu[:, 1], 'ko')
        plt.contour(X, Y, np.log(Z[:, d] + 1e-16).reshape(X.shape))
        plt.xlim(*lims)
        plt.ylim(*lims)

    # Y_test = np.array([[1, -1, 1], [-1, 1, 1], [-1, -1, -1], [1, 1, -1]])
    # # impute missing data
    # for k in range(3):
    #     Y_test_k = Y_test
    #     missing_mask = np.ones_like(Y_test_k)
    #     missing_mask[:, k] = 0
    #     my_pred, vy_pred = lvm.impute_missing(
    #         Y_test_k, missing_mask,
    #         alpha=0.1, no_iters=100, add_noise=False)
    #     print(k, my_pred, vy_pred, Y_test_k)

    plt.show()
def create_diffusion_profiles(diff_matrix, x_points, exchange_vectors,
                              noise_level=0.02, seed=0):
    """
    Compute theoretical concentration profiles, given the diffusion matrix
    and exchange directions.

    Parameters
    ----------
    diff_matrix : tuple
        tuple of eigenvalues and eigenvectors
    x_points : list of arrays
        points at which profiles are measured. There are as many profiles
        as exchange vectors.
    exchange_vectors : array
        array where each column encodes the species that are exchanged in
        the diffusion experiment. There are as many columns as diffusion
        experiments.
    noise_level : float
        Gaussian noise can be added to the theoretical profiles, in order
        to simulate experimental noise.
    """
    gen = np.random.RandomState(seed)
    diags, P = diff_matrix
    P = np.matrix(P)
    exchanges = exchange_vectors[:-1]
    n_comps = exchanges.shape[0]
    if n_comps != P.shape[0]:
        raise ValueError("Exchange vectors must be given in the full basis")
    concentration_profiles = []
    for i_exp, x_prof in enumerate(x_points):
        orig = P.I * exchanges[:, i_exp][:, None] / 2.
        profs = np.empty((n_comps, len(x_prof)))
        for i in range(n_comps):
            profs[i] = orig[i] * erf(x_prof / np.sqrt(4 * diags[i]))
        profiles = np.array(P * np.matrix(profs))
        profiles = np.vstack((profiles, - profiles.sum(axis=0)))
        concentration_profiles.append(np.array(profiles) +
                                      noise_level * gen.randn(*profiles.shape))
    return concentration_profiles
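The erf term here is the classic solution of the 1D diffusion equation for a step initial condition, c(x) proportional to erf(x/sqrt(4Dt)), with D*t folded into the eigenvalues. A minimal single-species illustration with hypothetical values, independent of the function above:

import numpy as np
from scipy.special import erf

x = np.linspace(-10, 10, 201)              # position, arbitrary units
Dt = 4.0                                   # diffusivity times time for one mode
profile = 0.5 * erf(x / np.sqrt(4 * Dt))   # step of amplitude 1 smoothed by diffusion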
def nt(n, gm, gsd, dmin=None, dmax=10.):
    """Evaluate the total number of particles between two diameters.

    The CDF of a lognormal distribution is calculated using equation 8.39
    from Seinfeld and Pandis. Mathematically, it is represented as:

    .. math::

        N_t(D_p)=\\int_{D_{min}}^{D_{max}}n_N(D_p^*)dD^*_p=
        \\frac{N_t}{2}+\\frac{N_t}{2}
        erf\\Big(\\frac{\\ln(D_p/D_{pg})}{\\sqrt{2}\\,\\ln\\sigma_g}\\Big)
        \\;\\;(cm^{-3})

    Parameters
    ----------
    n : float
        Total aerosol number concentration in units of #/cc
    gm : float
        Median particle diameter (geometric mean) in units of microns.
    gsd : float
        Geometric Standard Deviation of the distribution.
    dmin : float
        The minimum particle diameter in microns. Default value is 0 :math:`\\mu m`.
    dmax : float
        The maximum particle diameter in microns. Default value is 10 :math:`\\mu m`.

    Returns
    -------
    N | float
        Returns the total number of particles between dmin and dmax in units
        of [:math:`particles*cm^{-3}`]

    See Also
    --------
    opcsim.equations.pdf.dn_ddp
    opcsim.equations.pdf.dn_dlndp
    opcsim.equations.pdf.dn_dlogdp

    Examples
    --------

    Evaluate the number of particles in a simple distribution between 0 and
    2.5 :math:`\\mu m`:

    >>> d = opcsim.AerosolDistribution()
    >>> d.add_mode(1e3, 100, 1.5, "mode 1")
    >>> n = opcsim.equations.cdf.nt(1e3, 0.1, 1.5, dmax=2.5)

    """
    res = (n/2.) * (1 + erf((np.log(dmax/gm)) / (np.sqrt(2) * np.log(gsd))))

    if dmin is not None and dmin > 0.0:
        res -= nt(n, gm, gsd, dmin=None, dmax=dmin)

    return res