We extracted the following 38 code examples from open-source Python projects to illustrate how to use scipy.stats.gamma().
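Most of the examples below rely on the frozen-distribution pattern of scipy.stats.gamma. As a quick orientation, here is a minimal, self-contained sketch (all parameter values are illustrative):

import numpy as np
from scipy import stats

# Frozen gamma distribution with shape a=2, loc=0, scale=4
# (the same parameterization used in several examples below).
rv = stats.gamma(2, 0, 4)

x = np.linspace(0.1, 30, 5)
print(rv.pdf(x))   # density evaluated at x
print(rv.mean())   # a * scale = 8
samples = rv.rvs(size=1000, random_state=0)

# Recover shape, loc, scale from samples by maximum likelihood.
a_hat, loc_hat, scale_hat = stats.gamma.fit(samples)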
def setUp(self):
    '''
    Saves the current random state for later recovery, sets the random
    seed to get reproducible results and manually constructs a mixed vine.
    '''
    # Save random state for later recovery
    self.random_state = np.random.get_state()
    # Set fixed random seed
    np.random.seed(0)
    # Manually construct mixed vine
    self.dim = 3  # Dimension
    self.vine = MixedVine(self.dim)
    # Specify marginals
    self.vine.set_marginal(0, norm(0, 1))
    self.vine.set_marginal(1, poisson(5))
    self.vine.set_marginal(2, gamma(2, 0, 4))
    # Specify pair copulas
    self.vine.set_copula(1, 0, GaussianCopula(0.5))
    self.vine.set_copula(1, 1, FrankCopula(4))
    self.vine.set_copula(2, 0, ClaytonCopula(5))
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
             tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
             cache_size=200, verbose=False, max_iter=-1,
             target_transform='identity', ml_score=False):
    super(TransformedSVR, self).__init__(
        kernel=kernel,
        degree=degree,
        gamma=gamma,
        coef0=coef0,
        tol=tol,
        C=C,
        epsilon=epsilon,
        verbose=verbose,
        shrinking=shrinking,
        cache_size=cache_size,
        max_iter=max_iter)

    # used in training
    if isinstance(target_transform, str):
        target_transform = transforms.transforms[target_transform]()
    self.target_transform = target_transform
    self.ml_score = ml_score
def poisson_dist(bin_values, K):
    r"""
    Poisson Distribution

    Parameters
    ----------
    K : int
        average counts of photons
    bin_values : array
        scattering bin values

    Returns
    -------
    poisson_dist : array
        Poisson Distribution

    Notes
    -----
    These implementations are based on the references listed in the
    Notes of the nbinom_distribution() function.

    .. math::
        P(K) = \frac{<K>^K}{K!}\exp(-<K>)
    """
    # poisson_dist = stats.poisson.pmf(K, bin_values)
    K = float(K)
    poisson_dist = np.exp(-K) * np.power(K, bin_values) / gamma(bin_values + 1)
    return poisson_dist
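Note that `gamma` in this snippet must be the gamma *function* Γ (e.g. scipy.special.gamma), not scipy.stats.gamma, since Γ(k + 1) = k! for integer k. A small sketch verifying the manual formula against scipy.stats.poisson.pmf (parameter values are illustrative):

import numpy as np
from scipy import stats
from scipy.special import gamma  # gamma *function*, so gamma(k + 1) == k!

K = 3.0
bin_values = np.arange(10)
manual = np.exp(-K) * np.power(K, bin_values) / gamma(bin_values + 1)
reference = stats.poisson.pmf(bin_values, K)
assert np.allclose(manual, reference)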
def gen(self, N, trials, normal_p_range, anomaly_p_range, anomaly_scale=1.0):
    self.N = N
    self.trials = trials

    self.gens = [
        compound_distribution(
            stats.uniform(loc=normal_p_range[0],
                          scale=normal_p_range[1] - normal_p_range[0]),
            lambda a: stats.gamma(a=a, scale=1.0)
        ),
        compound_distribution(
            stats.uniform(loc=anomaly_p_range[0],
                          scale=anomaly_p_range[1] - anomaly_p_range[0]),
            lambda a: stats.gamma(a=a, scale=anomaly_scale)
        )
    ]

    self.priors = np.array([0.9, 0.1])
    self.cats, self.params, self.X = compound_rvs(self.gens, self.priors,
                                                  self.N, self.trials)
def testGammaLogPDF(self):
    with self.test_session():
        batch_size = 6
        alpha = constant_op.constant([2.0] * batch_size)
        beta = constant_op.constant([3.0] * batch_size)
        alpha_v = 2.0
        beta_v = 3.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
        log_pdf = gamma.log_prob(x)
        self.assertEqual(log_pdf.get_shape(), (6,))
        self.assertAllClose(log_pdf.eval(), expected_log_pdf)
        pdf = gamma.prob(x)
        self.assertEqual(pdf.get_shape(), (6,))
        self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensional(self):
    with self.test_session():
        batch_size = 6
        alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
        beta = constant_op.constant([[3.0, 4.0]] * batch_size)
        alpha_v = np.array([2.0, 4.0])
        beta_v = np.array([3.0, 4.0])
        x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
        log_pdf = gamma.log_prob(x)
        log_pdf_values = log_pdf.eval()
        self.assertEqual(log_pdf.get_shape(), (6, 2))
        self.assertAllClose(log_pdf_values, expected_log_pdf)
        pdf = gamma.prob(x)
        pdf_values = pdf.eval()
        self.assertEqual(pdf.get_shape(), (6, 2))
        self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
    with self.test_session():
        batch_size = 6
        alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
        beta = constant_op.constant(3.0)
        alpha_v = np.array([2.0, 4.0])
        beta_v = 3.0
        x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
        log_pdf = gamma.log_prob(x)
        log_pdf_values = log_pdf.eval()
        self.assertEqual(log_pdf.get_shape(), (6, 2))
        self.assertAllClose(log_pdf_values, expected_log_pdf)
        pdf = gamma.prob(x)
        pdf_values = pdf.eval()
        self.assertEqual(pdf.get_shape(), (6, 2))
        self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaSampleSmallAlpha(self):
    with session.Session():
        alpha_v = 0.05
        beta_v = 1.0
        alpha = constant_op.constant(alpha_v)
        beta = constant_op.constant(beta_v)
        n = 100000
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        samples = gamma.sample(n, seed=137)
        sample_values = samples.eval()
        self.assertEqual(samples.get_shape(), (n,))
        self.assertEqual(sample_values.shape, (n,))
        self.assertAllClose(
            sample_values.mean(),
            stats.gamma.mean(alpha_v, scale=1 / beta_v),
            atol=.01)
        self.assertAllClose(
            sample_values.var(),
            stats.gamma.var(alpha_v, scale=1 / beta_v),
            atol=.15)
        self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSample(self):
    with session.Session():
        alpha_v = 4.0
        beta_v = 3.0
        alpha = constant_op.constant(alpha_v)
        beta = constant_op.constant(beta_v)
        n = 100000
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        samples = gamma.sample(n, seed=137)
        sample_values = samples.eval()
        self.assertEqual(samples.get_shape(), (n,))
        self.assertEqual(sample_values.shape, (n,))
        self.assertAllClose(
            sample_values.mean(),
            stats.gamma.mean(alpha_v, scale=1 / beta_v),
            atol=.01)
        self.assertAllClose(
            sample_values.var(),
            stats.gamma.var(alpha_v, scale=1 / beta_v),
            atol=.15)
        self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaPdfOfSampleMultiDims(self):
    with session.Session() as sess:
        gamma = gamma_lib.Gamma(alpha=[7., 11.], beta=[[5.], [6.]])
        num = 50000
        samples = gamma.sample(num, seed=137)
        pdfs = gamma.prob(samples)
        sample_vals, pdf_vals = sess.run([samples, pdfs])
        self.assertEqual(samples.get_shape(), (num, 2, 2))
        self.assertEqual(pdfs.get_shape(), (num, 2, 2))
        self.assertAllClose(
            stats.gamma.mean([[7., 11.], [7., 11.]],
                             scale=1 / np.array([[5., 5.], [6., 6.]])),
            sample_vals.mean(axis=0),
            atol=.1)
        self.assertAllClose(
            stats.gamma.var([[7., 11.], [7., 11.]],
                            scale=1 / np.array([[5., 5.], [6., 6.]])),
            sample_vals.var(axis=0),
            atol=.1)
        self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
        self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
        self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
        self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def define_parameters(self):
    params = Parameters()
    params.append(Parameter("trend", multivariate_normal, (self.size, 1)))
    params.append(Parameter("sigma2", invgamma, (1, 1)))
    params.append(Parameter("lambda2", gamma, (1, 1)))
    params.append(Parameter("omega", invgauss,
                            (self.size - self.total_variation_order, 1)))
    self.parameters = params
def get_var(var):
    if isinstance(var, float):
        var = gamma(a=var, scale=1)  # Initial target noise
        var = Parameter(var, Positive())
    return var
def get_regularizer(regularizer):
    if isinstance(regularizer, float):
        reg = gamma(a=regularizer, scale=1)  # Initial weight prior
        regularizer = Parameter(reg, Positive())
    return regularizer
def __init__(self, target_transform='identity', ml_score=False,
             max_depth=3, learning_rate=0.1, n_estimators=100,
             silent=True, objective="reg:linear", nthread=1, gamma=0,
             min_child_weight=1, max_delta_step=0, subsample=1,
             colsample_bytree=1, colsample_bylevel=1, reg_alpha=0,
             reg_lambda=1, scale_pos_weight=1, base_score=0.5, seed=1,
             missing=None):

    if isinstance(target_transform, str):
        target_transform = transforms.transforms[target_transform]()
    self.target_transform = target_transform
    self.ml_score = ml_score

    super(XGBoost, self).__init__(max_depth=max_depth,
                                  learning_rate=learning_rate,
                                  n_estimators=n_estimators,
                                  silent=silent,
                                  objective=objective,
                                  nthread=nthread,
                                  gamma=gamma,
                                  min_child_weight=min_child_weight,
                                  max_delta_step=max_delta_step,
                                  subsample=subsample,
                                  colsample_bytree=colsample_bytree,
                                  colsample_bylevel=colsample_bylevel,
                                  reg_alpha=reg_alpha,
                                  reg_lambda=reg_lambda,
                                  scale_pos_weight=scale_pos_weight,
                                  base_score=base_score,
                                  seed=seed,
                                  missing=missing)
def gammaDist(x, params):
    '''Gamma distribution function
    K, M = params, where K is the average photon count <x> and
    M is the number of coherent modes.
    At high intensity the beam behaves like a wave, and the photon
    probability density P(x) satisfies this gamma function.
    '''
    K, M = params
    K = float(K)
    M = float(M)
    coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x)
                   - gammaln(M) - M * np.log(K))
    Gd = coeff * np.exp(-M * x / K)
    return Gd
def gamma_dist(bin_values, K, M):
    r"""
    Gamma distribution function

    Parameters
    ----------
    bin_values : array
        scattering intensities
    K : int
        average number of photons
    M : int
        number of coherent modes

    Returns
    -------
    gamma_dist : array
        Gamma distribution

    Notes
    -----
    These implementations are based on the references listed in the
    Notes of the nbinom_distribution() function.

    .. math::
        P(K) = \left(\frac{M}{<K>}\right)^M
               \frac{K^{M-1}}{\Gamma(M)}\exp\left(-M\frac{K}{<K>}\right)
    """
    # gamma_dist = (stats.gamma(M, 0., K/M)).pdf(bin_values)
    x = bin_values
    coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x)
                   - gammaln(M) - M * np.log(K))
    gamma_dist = coeff * np.exp(-M * x / K)
    return gamma_dist
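As the commented-out line suggests, this density coincides with a gamma distribution of shape M and scale <K>/M, so it can be cross-checked against scipy.stats directly (a small verification sketch; the parameter values are illustrative):

import numpy as np
from scipy import stats
from scipy.special import gammaln

K, M = 4.0, 2.0
x = np.linspace(0.5, 20, 50)

# Manual density from the snippet above.
coeff = np.exp(M * np.log(M) + (M - 1) * np.log(x) - gammaln(M) - M * np.log(K))
manual = coeff * np.exp(-M * x / K)

# Same density as a gamma distribution with shape M and scale K / M.
reference = stats.gamma(M, 0., K / M).pdf(x)
assert np.allclose(manual, reference)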
def poisson(x, K):
    '''Poisson distribution function.
    K is the average photon count.
    At low intensity the beam behaves like particles, and the photon
    probability density P(x) satisfies this Poisson function.
    '''
    K = float(K)
    Pk = np.exp(-K) * power(K, x) / gamma(x + 1)
    return Pk
def __init__(self, a, b, K):
    from operator import mul
    self._a = a
    self._b = b
    self._K = K
    theta = 1. / b
    self.gamma_dist = gamma_dist(a, 0, theta)
    # self._alpha = np.array(alpha)
    # self._coef = gamma(np.sum(self._alpha)) / \
    #     reduce(mul, [gamma(a) for a in self._alpha])
def gdir_pdf_integ(self, alpha1, alpha2, alpha3):
    from math import gamma
    from operator import mul
    alpha = np.array([alpha1, alpha2, alpha3])
    coef1 = gamma(np.sum(alpha)) / reduce(mul, [gamma(a) for a in alpha])
    coef2 = reduce(mul, [xx ** (aa - 1) for (xx, aa) in zip(self.x, alpha)])
    coef3 = reduce(mul, [self.gamma_dist.pdf(local) for local in alpha])
    return coef1 * coef2 * coef3
def value(self, samples=1):
    """
    Samples the given number of values from the specific distribution.
    ------------------------------------------------------------------------
    - samples: number of values that will be returned.
    """
    value = 0
    try:
        for item in self.__params:
            if item == 0:
                break
        if item == 0:
            value = [0] * samples
        else:
            if self.__name == "b":
                value = self.binom(samples)
            if self.__name == "e":
                value = self.exponential(samples)
            if self.__name == "f":
                value = self.fixed(samples)
            if self.__name == "g":
                value = self.gamma(samples)
            if self.__name == "g1":
                value = self.gamma1(samples)
            if self.__name == "ln":
                value = self.lognormal(samples)
            if self.__name == "n":
                value = self.normal(samples)
            if self.__name == "nb":
                value = self.nbinom(samples)
            if self.__name == "p":
                value = self.poisson(samples)
            if self.__name == "u":
                value = self.uniform(samples)
    except Exception as ex:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        message = "\n\tUnexpected: {0} | {1} - File: {2} - Line:{3}".format(
            ex, exc_type, fname, exc_tb.tb_lineno)
        status = False
        raise Exception(message)
        # self.appLogger.error(message)
        # sys.exit()
    return value
def gamma1(self, samples):
    """
    Sampling from a Gamma distribution with mean 1
    ------------------------------------------------------------------------
    - samples: number of values that will be returned.
    - gamma parameterized with shape and scale
    """
    # E[x] = k * theta (alpha * theta)
    # For mean 1, theta = E[x] / alpha = 1 / alpha
    shape = float(self.__params[0] * 1.0)
    theta = float(1 / (shape * 1.0))
    distro = gamma(a=shape, scale=theta)
    f = distro.rvs(size=samples)
    return f
def gamma(self, samples):
    """
    Sampling from a Gamma distribution

    The parameterization with alpha and beta is more common in Bayesian
    statistics, where the gamma distribution is used as a conjugate prior
    for various types of inverse scale (aka rate) parameters, such as the
    lambda of an exponential distribution or a Poisson distribution, or,
    for that matter, the beta of the gamma distribution itself. (The
    closely related inverse gamma distribution is used as a conjugate
    prior for scale parameters, such as the variance of a normal
    distribution.)

    shape, scale = 2., 2.  # mean=4, std=2*sqrt(2)
    (Wikipedia: https://en.wikipedia.org/wiki/Gamma_distribution)
    s = np.random.gamma(shape, scale, 1000)

    E[x] = k * theta (alpha * theta)
    For a specific mean, theta = E[x] / alpha
    ------------------------------------------------------------------------
    - samples: number of values that will be returned.
    """
    shape = float(self.__params[0] * 1.0)
    theta = float(self.__params[1] * 1.0)
    distro = gamma(a=shape, scale=theta)
    f = distro.rvs(size=samples)
    return f
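The mean relationship quoted in the docstring, E[x] = shape * theta, is easy to verify numerically; a short sketch with arbitrary parameter values:

import numpy as np
from scipy.stats import gamma

shape, target_mean = 2.5, 10.0
theta = target_mean / shape  # E[x] = shape * theta, so theta = E[x] / shape

distro = gamma(a=shape, scale=theta)
draws = distro.rvs(size=100000, random_state=0)
assert np.isclose(draws.mean(), target_mean, rtol=0.05)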
def __init__(self, parameter_distribution=stats.gamma,
             signal_family=stats.poisson):
    self.parameter_distribution = parameter_distribution
    self.signal_family = signal_family
def testGammaShape(self):
    with self.test_session():
        alpha = constant_op.constant([3.0] * 5)
        beta = constant_op.constant(11.0)
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        self.assertEqual(gamma.batch_shape().eval(), (5,))
        self.assertEqual(gamma.get_batch_shape(), tensor_shape.TensorShape([5]))
        self.assertAllEqual(gamma.event_shape().eval(), [])
        self.assertEqual(gamma.get_event_shape(), tensor_shape.TensorShape([]))
def testGammaCDF(self):
    with self.test_session():
        batch_size = 6
        alpha = constant_op.constant([2.0] * batch_size)
        beta = constant_op.constant([3.0] * batch_size)
        alpha_v = 2.0
        beta_v = 3.0
        x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
        gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
        expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
        cdf = gamma.cdf(x)
        self.assertEqual(cdf.get_shape(), (6,))
        self.assertAllClose(cdf.eval(), expected_cdf)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
    with self.test_session():
        alpha_v = np.array([5.5, 3.0, 2.5])
        beta_v = np.array([1.0, 4.0, 5.0])
        gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
        expected_modes = (alpha_v - 1) / beta_v
        self.assertEqual(gamma.mode().get_shape(), (3,))
        self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
    with self.test_session():
        # Mode will not be defined for the first entry.
        alpha_v = np.array([0.5, 3.0, 2.5])
        beta_v = np.array([1.0, 4.0, 5.0])
        gamma = gamma_lib.Gamma(alpha=alpha_v,
                                beta=beta_v,
                                allow_nan_stats=False)
        with self.assertRaisesOpError("x < y"):
            gamma.mode().eval()
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
    with self.test_session():
        # Mode will not be defined for the first entry.
        alpha_v = np.array([0.5, 3.0, 2.5])
        beta_v = np.array([1.0, 4.0, 5.0])
        gamma = gamma_lib.Gamma(alpha=alpha_v,
                                beta=beta_v,
                                allow_nan_stats=True)
        expected_modes = (alpha_v - 1) / beta_v
        expected_modes[0] = np.nan
        self.assertEqual(gamma.mode().get_shape(), (3,))
        self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaVariance(self):
    with self.test_session():
        alpha_v = np.array([1.0, 3.0, 2.5])
        beta_v = np.array([1.0, 4.0, 5.0])
        gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
        expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
        self.assertEqual(gamma.variance().get_shape(), (3,))
        self.assertAllClose(gamma.variance().eval(), expected_variances)
def testGammaStd(self):
    with self.test_session():
        alpha_v = np.array([1.0, 3.0, 2.5])
        beta_v = np.array([1.0, 4.0, 5.0])
        gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
        expected_stddev = stats.gamma.std(alpha_v, scale=1. / beta_v)
        self.assertEqual(gamma.stddev().get_shape(), (3,))
        self.assertAllClose(gamma.stddev().eval(), expected_stddev)
def testGammaSampleMultiDimensional(self):
    with session.Session():
        alpha_v = np.array([np.arange(1, 101, dtype=np.float32)])  # 1 x 100
        beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T  # 10 x 1
        gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
        n = 10000
        samples = gamma.sample(n, seed=137)
        sample_values = samples.eval()
        self.assertEqual(samples.get_shape(), (n, 10, 100))
        self.assertEqual(sample_values.shape, (n, 10, 100))
        zeros = np.zeros_like(alpha_v + beta_v)  # 10 x 100
        alpha_bc = alpha_v + zeros
        beta_bc = beta_v + zeros
        self.assertAllClose(
            sample_values.mean(axis=0),
            stats.gamma.mean(alpha_bc, scale=1 / beta_bc),
            rtol=.035)
        self.assertAllClose(
            sample_values.var(axis=0),
            stats.gamma.var(alpha_bc, scale=1 / beta_bc),
            atol=4.5)
        fails = 0
        trials = 0
        for ai, a in enumerate(np.reshape(alpha_v, [-1])):
            for bi, b in enumerate(np.reshape(beta_v, [-1])):
                s = sample_values[:, bi, ai]
                trials += 1
                fails += 0 if self._kstest(a, b, s) else 1
        self.assertLess(fails, trials * 0.03)
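The broadcasting in this test follows ordinary NumPy rules: a (1, 100) alpha against a (10, 1) beta yields a (10, 100) batch of distributions. The same shapes can be reproduced with scipy.stats alone (a small sketch, values illustrative):

import numpy as np
from scipy import stats

alpha_v = np.arange(1, 101, dtype=np.float32)[np.newaxis, :]  # shape (1, 100)
beta_v = np.arange(1, 11, dtype=np.float32)[:, np.newaxis]    # shape (10, 1)

# NumPy broadcasting gives one gamma distribution per (beta, alpha) pair.
means = stats.gamma.mean(alpha_v, scale=1 / beta_v)  # shape (10, 100)
assert means.shape == (10, 100)
assert np.allclose(means, alpha_v / beta_v)  # mean of Gamma(alpha, rate=beta)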
def _kstest(self, alpha, beta, samples):
    # Uses the Kolmogorov-Smirnov test for goodness of fit.
    ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
    # Return True when the test passes.
    return ks < 0.02
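The same scipy.stats.kstest pattern works outside a test harness; a standalone sketch using the snippet's 0.02 threshold (sample size and parameters are illustrative):

import numpy as np
from scipy import stats

alpha, beta = 4.0, 3.0
samples = stats.gamma(alpha, scale=1 / beta).rvs(size=100000, random_state=0)

# Compare the empirical CDF of the samples against the analytic gamma CDF.
ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
assert ks < 0.02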
def testGammaWithSoftplusAlphaBeta(self):
    with self.test_session():
        alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
        beta_v = constant_op.constant([1.0, -3.6], name="beta")
        gamma = gamma_lib.GammaWithSoftplusAlphaBeta(alpha=alpha_v,
                                                     beta=beta_v)
        self.assertAllEqual(nn_ops.softplus(alpha_v).eval(), gamma.alpha.eval())
        self.assertAllEqual(nn_ops.softplus(beta_v).eval(), gamma.beta.eval())
def fit(samples, is_continuous):
    '''
    Fits a distribution to the given samples.

    Parameters
    ----------
    samples : array_like
        Array of samples.
    is_continuous : bool
        If `True` then a continuous distribution is fitted.  Otherwise,
        a discrete distribution is fitted.

    Returns
    -------
    best_marginal : Marginal
        The distribution fitted to `samples`.
    '''
    # Mean and variance
    mean = np.mean(samples)
    var = np.var(samples)
    # Set suitable distributions
    if is_continuous:
        if np.any(samples <= 0):
            options = [norm]
        else:
            options = [norm, gamma]
    else:
        if var > mean:
            options = [poisson, binom, nbinom]
        else:
            options = [poisson, binom]
    params = np.empty(len(options), dtype=object)
    marginals = np.empty(len(options), dtype=object)
    # Fit parameters and construct marginals
    for i, dist in enumerate(options):
        if dist == poisson:
            params[i] = [mean]
        elif dist == binom:
            param_n = np.max(samples)
            param_p = np.sum(samples) / (param_n * len(samples))
            params[i] = [param_n, param_p]
        elif dist == nbinom:
            param_n = mean * mean / (var - mean)
            param_p = mean / var
            params[i] = [param_n, param_p]
        else:
            params[i] = dist.fit(samples)
        rv_mixed = dist(*params[i])
        marginals[i] = Marginal(rv_mixed)
    # Calculate Akaike information criterion
    aic = np.zeros(len(options))
    for i, marginal in enumerate(marginals):
        aic[i] = 2 * len(params[i]) \
            - 2 * np.sum(marginal.logpdf(samples))
    best_marginal = marginals[np.argmin(aic)]
    return best_marginal
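The AIC-based selection at the end of fit() can be reproduced with scipy.stats alone. Below is a simplified sketch for the continuous case; it drops the Marginal wrapper (assumed here to be a thin shim around a frozen distribution) and compares candidates by 2k - 2*log-likelihood:

import numpy as np
from scipy.stats import norm, gamma

# Synthetic positive data; gamma should win the AIC comparison.
samples = gamma(2, 0, 4).rvs(size=500, random_state=0)

aic = {}
for dist in (norm, gamma):
    params = dist.fit(samples)                       # maximum-likelihood fit
    log_lik = np.sum(dist(*params).logpdf(samples))  # log-likelihood of the fit
    aic[dist.name] = 2 * len(params) - 2 * log_lik

best = min(aic, key=aic.get)  # name of the distribution with lowest AIC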
def analyze(sdae, datafile_norm,
            labels, mapped_labels=None,
            bias_node=False, prefix=None):
    """
    Speaks to R, and submits analysis jobs to it.
    """
    # Get some R functions on the Python environment
    def_colors = robjects.globalenv['def_colors']
    do_analysis = robjects.globalenv['do_analysis']

    # labels.reset_index(level=0, inplace=True)
    def_colors(labels)
    act = np.float32(datafile_norm)
    try:
        do_analysis(act, sdae.get_weights, sdae.get_biases,
                    pjoin(FLAGS.output_dir, "{}_R_Layer_".format(prefix)),
                    bias_node=bias_node)
    except RRuntimeError as e:
        pass

    # for layer in sdae.get_layers:
    #     fixed = False if layer.which > sdae.nHLayers - 1 else True
    #
    #     try:
    #         act = sdae.get_activation(act, layer.which, use_fixed=fixed)
    #         print("Analysis for layer {}:".format(layer.which + 1))
    #         temp = pd.DataFrame(data=act)
    #         do_analysis(temp, pjoin(FLAGS.output_dir,
    #                                 "{}_Layer_{}".format(prefix, layer.which)))
    #
    #         # if not fixed:
    #         #     weights = sdae.get_weights[layer.which]
    #         #     for node in weights.transpose():
    #         #         sns.distplot(node, kde=False, fit=stats.gamma, rug=True)
    #         #         sns.plt.show()
    #
    #         try:
    #             plot_tSNE(act, mapped_labels,
    #                       plot_name="Pyhton_{}_tSNE_layer_{}".format(prefix, layer.which))
    #         except IndexError as e:
    #             pass
    #     except FailedPreconditionError as e:
    #         break