The following 21 code examples, extracted from open-source Python projects, illustrate how to use scipy.special.erfc().
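
Before the extracted examples, here is a minimal orientation sketch (not taken from any of the projects below) showing what scipy.special.erfc() computes and how it relates to the normal CDF, the pattern that recurs throughout the examples:

import numpy as np
from scipy.special import erfc

# erfc(x) = 1 - erf(x) = (2/sqrt(pi)) * integral of exp(-t**2) from x to infinity
one = erfc(0.0)                          # exactly 1.0
vals = erfc(np.array([-1.0, 0.0, 1.0]))  # accepts arrays and broadcasts elementwise

# Standard normal survival function and CDF expressed through erfc:
z = 1.96
sf = 0.5 * erfc(z / np.sqrt(2))    # P(Z > z)  ~= 0.025
cdf = 0.5 * erfc(-z / np.sqrt(2))  # P(Z <= z) ~= 0.975
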
def __test_ks(self, x):
    x = x[~np.isnan(x)]
    n = x.size
    x.sort()
    yCDF = np.arange(1, n + 1) / float(n)
    notdup = np.hstack([np.diff(x, 1), [1]])
    notdup = notdup > 0
    x_expcdf = x[notdup]
    y_expcdf = np.hstack([[0], yCDF[notdup]])
    zScores = (x_expcdf - np.mean(x)) / np.std(x, ddof=1)
    mu = 0
    sigma = 1
    theocdf = 0.5 * erfc(-(zScores - mu) / (np.sqrt(2) * sigma))
    delta1 = y_expcdf[:-1] - theocdf
    delta2 = y_expcdf[1:] - theocdf
    deltacdf = np.abs(np.hstack([delta1, delta2]))
    KSmax = deltacdf.max()
    return KSmax

def frequency(generator, n_bits, misc=None):
    """Frequency (Monobit) Test.

    Test purpose as described in [NIST10, section 2.1]:
    "The focus of the test is the proportion of zeroes and ones for the entire
    sequence. The purpose of this test is to determine whether the number of
    ones and zeros in a sequence are approximately the same as would be
    expected for a truly random sequence. The test assesses the closeness of
    the fraction of ones to 1/2, that is, the number of ones and zeroes in a
    sequence should be about the same. All subsequent tests depend on the
    passing of this test."
    """
    s_n = 0
    for _ in range(n_bits):
        s_n += 2 * generator.random_bit() - 1  # 1 if generator.random_bit() else -1
    s_obs = abs(s_n) / sqrt(n_bits)
    p_value = erfc(s_obs / sqrt(2))
    if type(misc) is dict:
        misc.update(s_n=s_n, s_obs=s_obs)
    return p_value

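A hand-checkable sketch of the same monobit p-value computation on a fixed bit list (the bit values below are illustrative, not from the project above):

from math import sqrt
from scipy.special import erfc

bits = [1, 0, 1, 1, 0, 1, 0, 1, 0, 1]   # illustrative 10-bit sequence
s_n = sum(2 * b - 1 for b in bits)       # map bits to +/-1 and sum: here 2
s_obs = abs(s_n) / sqrt(len(bits))       # ~0.632
p_value = erfc(s_obs / sqrt(2))          # ~0.527, well above a 0.01 significance level
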
def frequency_test(generator, n_bits, sig_level=None, misc=None, n1=None):
    if n1 is None:
        n0, n1 = _calculate_n0_n1(generator, n_bits)
    else:
        n0 = n_bits - n1
    x1 = ((n0 - n1) ** 2) / n_bits
    p_value = erfc(sqrt(x1 / 2))
    if type(misc) is dict:
        misc.update(n0=n0, n1=n1, p_value=p_value)
    if sig_level is None:
        return x1
    else:
        limit = chi2.ppf(1 - sig_level, 1)
        if type(misc) is dict:
            misc.update(x=x1, limit=limit)
        return x1 <= limit

def monobitfrequencytest(binin):
    ''' The focus of the test is the proportion of zeroes and ones for the entire sequence.
    The purpose of this test is to determine whether the number of ones and zeros in a
    sequence are approximately the same as would be expected for a truly random sequence.
    The test assesses the closeness of the fraction of ones to 1/2, that is, the number
    of ones and zeroes in a sequence should be about the same.'''
    ss = [int(el) for el in binin]
    sc = map(sumi, ss)
    sn = reduce(su, sc)
    sobs = np.abs(sn) / np.sqrt(len(binin))
    pval = spc.erfc(sobs / np.sqrt(2))
    return pval > 0.01

def runstest(binin):
    ''' The focus of this test is the total number of zero and one runs in the entire
    sequence, where a run is an uninterrupted sequence of identical bits. A run of
    length k means that a run consists of exactly k identical bits and is bounded
    before and after with a bit of the opposite value. The purpose of the runs test
    is to determine whether the number of runs of ones and zeros of various lengths
    is as expected for a random sequence. In particular, this test determines whether
    the oscillation between such substrings is too fast or too slow.'''
    binin2 = [str(el) for el in binin]
    binin = ''.join(binin2)
    ss = [int(el) for el in binin]
    n = len(binin)
    pi = 1.0 * reduce(su, ss) / n
    vobs = len(binin.replace('0', ' ').split()) + len(binin.replace('1', ' ').split())
    pval = spc.erfc(abs(vobs - 2 * n * pi * (1 - pi)) / (2 * pi * (1 - pi) * np.sqrt(2 * n)))
    return pval > 0.01

def spectraltest(binin):
    '''The focus of this test is the peak heights in the discrete Fast Fourier Transform.
    The purpose of this test is to detect periodic features (i.e., repetitive patterns
    that are near each other) in the tested sequence that would indicate a deviation
    from the assumption of randomness.'''
    n = len(binin)
    ss = [int(el) for el in binin]
    sc = map(sumi, ss)
    ft = sff.fft(sc)
    af = abs(ft)[1:n/2+1:]
    t = np.sqrt(np.log(1/0.05)*n)
    n0 = 0.95*n/2
    n1 = len(np.where(af < t)[0])
    d = (n1 - n0)/np.sqrt(n*0.95*0.05/4)
    pval = spc.erfc(abs(d)/np.sqrt(2))
    return pval > 0.01

def getfreq(linn, nu):
    val = 0
    for (x, y) in linn:
        if x == nu:
            val = y
    return val


def randomexcursionsvarianttest(binin):
    # NOTE: this function header was missing from the extracted snippet; the
    # signature is reconstructed from the docstring and the code body below.
    ''' The focus of this test is the number of times that a particular state occurs in a
    cumulative sum random walk. The purpose of this test is to detect deviations from the
    expected number of occurrences of various states in the random walk.'''
    ss = [int(el) for el in binin]
    sc = map(sumi, ss)
    cs = np.cumsum(sc)
    li = []
    for xs in sorted(set(cs)):
        if np.abs(xs) <= 9:
            li.append([xs, len(np.where(cs == xs)[0])])
    j = getfreq(li, 0) + 1
    pval = []
    for xs in xrange(-9, 9 + 1):
        if not xs == 0:
            # pval.append([xs, spc.erfc(np.abs(getfreq(li, xs) - j) / np.sqrt(2 * j * (4 * np.abs(xs) - 2)))])
            pval.append(spc.erfc(np.abs(getfreq(li, xs) - j) / np.sqrt(2 * j * (4 * np.abs(xs) - 2))))
    return pval


def aproximateentropytest(binin, m):
    # NOTE: this function header is likewise reconstructed; the body needs the
    # bit string `binin` and the block length `m`.
    ''' The focus of this test is the frequency of each and every overlapping m-bit pattern.
    The purpose of the test is to compare the frequency of overlapping blocks of two
    consecutive/adjacent lengths (m and m+1) against the expected result for a random
    sequence.'''
    n = len(binin)
    f1a = [(binin + binin[0:m - 1:])[xs:m + xs:] for xs in xrange(n)]
    f1 = [[xs, f1a.count(xs)] for xs in sorted(set(f1a))]
    f2a = [(binin + binin[0:m:])[xs:m + 1 + xs:] for xs in xrange(n)]
    f2 = [[xs, f2a.count(xs)] for xs in sorted(set(f2a))]
    c1 = [1.0 * f1[xs][1] / n for xs in xrange(len(f1))]
    c2 = [1.0 * f2[xs][1] / n for xs in xrange(len(f2))]
    phi1 = reduce(su, map(logo, c1))
    phi2 = reduce(su, map(logo, c2))
    apen = phi1 - phi2
    chisqr = 2.0 * n * (np.log(2) - apen)
    pval = spc.gammaincc(2 ** (m - 1), chisqr / 2.0)
    return pval

def berawgn(EbN0):
    """ Calculates theoretical bit error rate in AWGN (for BPSK and given Eb/N0) """
    return 0.5 * erfc(math.sqrt(10**(float(EbN0)/10)))

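A short usage sketch that vectorizes the same BPSK formula over a range of Eb/N0 values in dB (the grid below is illustrative):

import numpy as np
from scipy.special import erfc

ebn0_db = np.arange(0, 11)                         # Eb/N0 from 0 to 10 dB
ber = 0.5 * erfc(np.sqrt(10.0 ** (ebn0_db / 10)))  # theoretical BPSK bit error rate in AWGN
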
def _calc_real_matrix(self):
    """
    Determines the Ewald real-space sum.
    """
    fcoords = self.system.relative_pos
    coords = self.system.cartesian_pos
    n_atoms = len(self.system)
    prefactor = 0.5
    ereal = np.zeros((n_atoms, n_atoms), dtype=np.float)

    for i in range(n_atoms):
        # Get points that are within the real space cutoff
        nfcoords, rij, js = self.system.lattice.get_points_in_sphere(
            fcoords, coords[i], self.rmax, zip_results=False
        )
        # Remove the rii term, because a charge does not interact with
        # itself (but does interact with copies of itself).
        mask = rij > 1e-8
        js = js[mask]
        rij = rij[mask]
        nfcoords = nfcoords[mask]

        qi = self.q[i]
        qj = self.q[js]

        erfcval = erfc(self.a * rij)
        new_ereals = erfcval * qi * qj / rij

        # Insert new_ereals
        for k in range(n_atoms):
            ereal[k, i] = np.sum(new_ereals[js == k])

    ereal *= prefactor
    return ereal

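In the example above, erfc() screens the Coulomb interaction so that the real-space part of the Ewald sum converges quickly. A minimal standalone sketch of that pairwise term (the splitting parameter, charges, and distances are illustrative values, not from the project):

import numpy as np
from scipy.special import erfc

alpha = 0.3                    # Ewald splitting parameter (illustrative)
q_i, q_j = 1.0, -1.0           # point charges (illustrative)
r = np.linspace(0.5, 8.0, 5)   # pair distances

pair_term = q_i * q_j * erfc(alpha * r) / r   # decays much faster than the bare 1/r
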
def transport_equation_test(self):
    '''Check the transport equation integrator'''
    lab = create_lab()
    D = 40
    lab.add_species(True, 'O2', D, 0, bc_top=1, bc_top_type='dirichlet',
                    bc_bot=0, bc_bot_type='neumann')
    lab.dcdt.O2 = '0'
    lab.solve()
    x = np.linspace(0, lab.length, lab.length / lab.dx + 1)
    sol = 1 / 2 * (special.erfc((x - lab.w * lab.tend) / 2 / np.sqrt(D * lab.tend)) +
                   np.exp(lab.w * x / D) *
                   special.erfc((x + lab.w * lab.tend) / 2 / np.sqrt(D * lab.tend)))

    assert max(lab.O2.concentration[:, -1] - sol) < 1e-4

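The analytical reference used in this test (and in the two plotting variants below) is the classic advection-dispersion solution for a constant concentration applied at the top boundary. A self-contained sketch of just that expression (the helper name is ours, not the project's):

import numpy as np
from scipy.special import erfc

def advection_dispersion_solution(x, t, w, D):
    """C(x, t) for C(0, t) = 1 and C(x, 0) = 0, with velocity w and dispersion D."""
    return 0.5 * (erfc((x - w * t) / (2 * np.sqrt(D * t))) +
                  np.exp(w * x / D) * erfc((x + w * t) / (2 * np.sqrt(D * t))))
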
def transport_equation_boundary_effect():
    '''Check the transport equation integrator'''
    w = 5
    tend = 5
    dx = 0.1
    length = 30
    phi = 1
    dt = 0.001
    lab = Column(length, dx, tend, dt, w)
    D = 5
    lab.add_species(phi, 'O2', D, 0, bc_top=1, bc_top_type='dirichlet',
                    bc_bot=0, bc_bot_type='flux')
    lab.solve()
    x = np.linspace(0, lab.length, lab.length / lab.dx + 1)
    sol = 1 / 2 * (special.erfc((x - lab.w * lab.tend) / 2 / np.sqrt(D * lab.tend)) +
                   np.exp(lab.w * x / D) *
                   special.erfc((x + lab.w * lab.tend) / 2 / np.sqrt(D * lab.tend)))

    plt.figure()
    plt.plot(x, sol, 'k', label='Analytical solution')
    plt.scatter(lab.x[::10], lab.species['O2'].concentration[:, -1][::10],
                marker='x', label='Numerical')
    plt.xlim([x[0], x[-1]])
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    ax.grid(linestyle='-', linewidth=0.2)
    plt.legend()
    plt.tight_layout()
    plt.show()

def transport_equation_plot():
    '''Check the transport equation integrator'''
    w = 5
    tend = 5
    dx = 0.1
    length = 100
    phi = 1
    dt = 0.001
    lab = Column(length, dx, tend, dt, w)
    D = 5
    lab.add_species(phi, 'O2', D, 0, bc_top=1, bc_top_type='dirichlet',
                    bc_bot=0, bc_bot_type='dirichlet')
    lab.solve()
    x = np.linspace(0, lab.length, lab.length / lab.dx + 1)
    sol = 1 / 2 * (special.erfc((x - lab.w * lab.tend) / 2 / np.sqrt(D * lab.tend)) +
                   np.exp(lab.w * x / D) *
                   special.erfc((x + lab.w * lab.tend) / 2 / np.sqrt(D * lab.tend)))

    plt.figure()
    plt.plot(x, sol, 'k', label='Analytical solution')
    plt.scatter(lab.x[::10], lab.species['O2'].concentration[:, -1][::10],
                marker='x', label='Numerical')
    plt.xlim([x[0], x[-1]])
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    ax.grid(linestyle='-', linewidth=0.2)
    plt.legend()
    plt.tight_layout()
    plt.show()

def fls_double(z2, z1, t, dis, alpha, reaSource=True, imgSource=True):
    """
    FLS expression for double integral solution.
    """
    r_pos = np.sqrt(dis**2 + (z2 - z1)**2)
    r_neg = np.sqrt(dis**2 + (z2 + z1)**2)
    fls = 0.
    if reaSource:
        fls += 0.5*erfc(r_pos/np.sqrt(4*alpha*t))/r_pos
    if imgSource:
        fls += -0.5*erfc(r_neg/np.sqrt(4*alpha*t))/r_neg
    return fls

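A quick call sketch, assuming fls_double above is in scope; the depths, time, spacing, and thermal diffusivity are made-up illustrative values:

import numpy as np
from scipy.special import erfc

# Evaluate the finite line source kernel for one (z1, z2) pair.
val = fls_double(z2=10.0, z1=5.0, t=30 * 24 * 3600.0,  # one month, in seconds
                 dis=5.0, alpha=1e-6)                   # spacing [m], diffusivity [m^2/s]
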
def my_normcdf(x, mu, sigma):
    z = (x - mu) / sigma
    p = .5 * erfc(-z / sqrt(2))
    return p

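A small sanity check one can run against scipy.stats (the sample values are arbitrary); both expressions should agree to machine precision:

from math import sqrt
from scipy.special import erfc
from scipy.stats import norm

x, mu, sigma = 1.3, 0.0, 2.0
p_erfc = 0.5 * erfc(-((x - mu) / sigma) / sqrt(2))
p_ref = norm.cdf(x, loc=mu, scale=sigma)   # same value, ~0.742
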
def runs(generator, n_bits, misc=None):
    """Runs Test.

    Test purpose as described in [NIST10, section 2.3]:
    "The focus of this test is the total number of runs in the sequence, where
    a run is an uninterrupted sequence of identical bits. A run of length k
    consists of exactly k identical bits and is bounded before and after with
    a bit of the opposite value. The purpose of the runs test is to determine
    whether the number of runs of ones and zeros of various lengths is as
    expected for a random sequence. In particular, this test determines
    whether the oscillation between such zeros and ones is too fast or too
    slow."
    """
    pi = 0
    v_obs = 0
    b0 = None
    for _ in range(n_bits):
        b1 = generator.random_bit()
        pi += b1
        v_obs += b0 != b1
        b0 = b1
    pi /= n_bits
    if type(misc) is dict:
        misc.update(pi=pi, v_obs=v_obs)
    if abs(pi - 1 / 2) >= 2 / sqrt(n_bits):
        return 0
    p_value = erfc(abs(v_obs - 2 * n_bits * pi * (1 - pi)) /
                   (2 * sqrt(2 * n_bits) * pi * (1 - pi)))
    return p_value

def discrete_fourier_transform(generator, n_bits, misc=None):
    """Discrete Fourier Transform (Spectral) Test.

    Test purpose as described in [NIST10, section 2.6]:
    "The focus of this test is the peak heights in the Discrete Fourier
    Transform of the sequence. The purpose of this test is to detect periodic
    features (i.e., repetitive patterns that are near each other) in the
    tested sequence that would indicate a deviation from the assumption of
    randomness. The intention is to detect whether the number of peaks
    exceeding the 95 % threshold is significantly different than 5 %."
    """
    if n_bits < 1000:
        print("Warning: Sequence should be at least 1000 bits long", file=stderr)

    x = [1 if generator.random_bit() else -1 for _ in range(n_bits)]
    s = fft(x)  # S = DFT(X)
    # print(s)
    m = abs(s)[0:n_bits // 2]  # modulus(S')
    # print("m =", m)
    t = sqrt(log(1 / 0.05) * n_bits)  # the 95% peak height threshold value
    n_0 = 0.95 * n_bits / 2  # expected theoretical number of peaks under t
    n_1 = len([1 for p in m if p < t])
    d = (n_1 - n_0) / sqrt(n_bits * 0.95 * 0.05 / 4)
    p_value = erfc(abs(d) / sqrt(2))
    if type(misc) == dict:
        misc.update(m=m, t=t, n_0=n_0, n_1=n_1, d=d)
    return p_value

def random_excursions_variant(generator, n_bits, misc=None):
    """Random Excursions Variant Test.

    Test purpose as described in [NIST10, section 2.15]:
    "The focus of this test is the total number of times that a particular
    state is visited (i.e., occurs) in a cumulative sum random walk. The
    purpose of this test is to detect deviations from the expected number of
    visits to various states in the random walk. This test is actually a
    series of eighteen tests (and conclusions), one test and conclusion for
    each of the states: -9, -8, ..., -1 and +1, +2, ..., +9."
    """
    if n_bits < 10 ** 6:
        print("Warning: Sequence should be at least 10^6 bits long", file=stderr)

    s = [0] * n_bits
    for i in range(n_bits):
        x = 1 if generator.random_bit() else -1
        s[i] = s[i - 1] + x
    s.append(0)  # leading zero not needed for our implementation

    ksi = [0] * 18
    j = 0
    for x in s:
        if x == 0:
            j += 1
        elif -9 <= x <= 9:
            ksi[x + 9 - (x > 0)] += 1

    if j < 500:
        print("Warning: The number of cycles (zero crossings) should be at least 500 (is %d)" % j,
              file=stderr)

    p_value = list(map(lambda ksi_i, x: erfc(abs(ksi_i - j) / sqrt(2 * j * (4 * abs(x) - 2))),
                       ksi, list(range(-9, 0)) + list(range(1, 10))))
    if type(misc) == dict:
        misc.update(cumsum=s, j=j, ksi=ksi)
    return p_value

def polder_function(self, x, y):
    s = .5 * np.exp(2 * x) * erfc(x / y + y) + \
        .5 * np.exp(-2 * x) * erfc(x / y - y)
    return s

def analytical_rate(td):
    '''
    Returns the dimensionless production rate

    Input:
        td: np.array(n)
            Array of dimensionless times

    Returns:
        qd: np.array(n)
            Array of dimensionless production rates
    '''
    pi = np.pi
    term1 = 2 * np.exp(-pi**2 * td / 4.)
    term2 = erfc(1.5 * pi * np.sqrt(td)) / np.sqrt(pi * td)
    return term1 + term2

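A minimal call sketch for the function above, assuming numpy and erfc are imported as in the snippet; the time grid is arbitrary:

import numpy as np
from scipy.special import erfc

td = np.logspace(-2, 1, 50)   # dimensionless times (illustrative grid)
qd = analytical_rate(td)      # dimensionless rates, large at early time and decaying later
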
def pf(parameters, psyfun='cGauss'):
    """Generate conditional probabilities from psychometric function.

    Arguments
    ---------
        parameters: ndarray (float64) containing parameters as columns
            mu     : threshold
            sigma  : slope
            gamma  : guessing rate (optional), default is 0.2
            lambda : lapse rate (optional), default is 0.04
            x      : stimulus intensity

        psyfun : type of psychometric function.
            'cGauss' cumulative Gaussian
            'Gumbel' Gumbel, aka log Weibull

    Returns
    -------
    1D-array of conditional probabilities p(response | mu,sigma,gamma,lambda,x)
    """
    # Unpack parameters
    if np.size(parameters, 1) == 5:
        [mu, sigma, gamma, llambda, x] = np.transpose(parameters)
    elif np.size(parameters, 1) == 4:
        [mu, sigma, llambda, x] = np.transpose(parameters)
        gamma = llambda
    elif np.size(parameters, 1) == 3:
        [mu, sigma, x] = np.transpose(parameters)
        gamma = 0.2
        llambda = 0.04
    else:
        # insufficient number of parameters will give a flat line
        psyfun = None
        gamma = 0.2
        llambda = 0.04

    # Psychometric function
    ones = np.ones(np.shape(mu))
    if psyfun == 'cGauss':
        # F(x; mu, sigma) = Normcdf(mu, sigma) = 1/2 * erfc(-(x-mu) / (sigma*sqrt(2)))
        z = np.divide(np.subtract(x, mu), sigma)
        p = 0.5 * erfc(-z / np.sqrt(2))
    elif psyfun == 'Gumbel':
        # F(x; mu, sigma) = 1 - exp(-10^(sigma(x-mu)))
        p = ones - np.exp(-np.power((np.multiply(ones, 10.0)),
                                    (np.multiply(sigma, (np.subtract(x, mu))))))
    elif psyfun == 'Weibull':
        # F(x; mu, sigma)
        p = 1 - np.exp(-(np.divide(x, mu)) ** sigma)
    else:
        # flat line if no psychometric function is specified
        p = np.ones(np.shape(mu))
    y = gamma + np.multiply((ones - gamma - llambda), p)
    return y

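A usage sketch for pf() with the five-column parameter layout described in its docstring; the threshold, slope, and intensity grid below are made-up values, and pf is assumed to be in scope with its module-level imports (numpy, scipy.special.erfc):

import numpy as np

x = np.linspace(-3, 3, 7)            # stimulus intensities
params = np.column_stack([
    np.zeros_like(x),                # mu (threshold)
    np.ones_like(x),                 # sigma (slope)
    np.full_like(x, 0.2),            # gamma (guessing rate)
    np.full_like(x, 0.04),           # lambda (lapse rate)
    x,
])
probs = pf(params, psyfun='cGauss')  # p(response | mu, sigma, gamma, lambda, x)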