我们从Python开源项目中,提取了以下12个代码示例,用于说明如何使用theano.tensor.gammaln()。
def log_negative_binomial(x, p, log_r, eps = 0.0):
    """Log pdf of a negative binomial distribution, evaluated elementwise at x.

    p is the success probability and r = exp(log_r) the number of failures
    until the experiment is stopped.  Inputs are clipped with eps to keep the
    log/gammaln terms finite.
    """
    # Clip inputs away from the singular points of log and gammaln.
    counts = T.clip(x, eps, x)
    prob = T.clip(p, eps, 1.0 - eps)
    failures = T.clip(T.exp(log_r), eps, T.exp(log_r))
    # log C(x + r - 1, x) written via gammaln (Stirling-style).
    log_binom = T.gammaln(counts + failures) - T.gammaln(counts + 1) - T.gammaln(failures)
    # x log(p) + r log(1 - p)
    log_prob = counts * T.log(prob) + failures * T.log(1 - prob)
    return log_binom + log_prob
def _negCLL(self, z, X):
    """Estimate the negative conditional log-likelihood -log p[x|z].

    Dispatches on self.params['data_type'] ('binary', 'bow', 'real') and,
    for 'bow', on self.params['likelihood'] ('mult', 'poisson').

    Returns (p_x_z, negCLL) where p_x_z are the decoder's parameters for
    p(x|z) and negCLL is the per-sample NLL, summed over features
    (shape kept with keepdims=True).
    """
    if self.params['data_type'] == 'binary':
        p_x_z = self._conditionalXgivenZ(z)
        negCLL_m = T.nnet.binary_crossentropy(p_x_z, X)
    elif self.params['data_type'] == 'bow':
        # Likelihood under a multinomial distribution
        if self.params['likelihood'] == 'mult':
            lsf = self._conditionalXgivenZ(z)  # log-softmax output
            p_x_z = T.exp(lsf)
            negCLL_m = -1 * (X * lsf)
        elif self.params['likelihood'] == 'poisson':
            loglambda_p = self._conditionalXgivenZ(z)
            p_x_z = T.exp(loglambda_p)
            # Poisson NLL: -x*log(lambda) + lambda + log(x!)
            negCLL_m = -X * loglambda_p + T.exp(loglambda_p) + T.gammaln(X + 1)
        else:
            # FIX: Python-2-only `raise ValueError, msg` replaced with call form.
            raise ValueError('Invalid choice for likelihood: ' + self.params['likelihood'])
    elif self.params['data_type'] == 'real':
        params = self._conditionalXgivenZ(z)
        mu, logvar = params[0], params[1]
        p_x_z = mu
        # Gaussian NLL.  FIX: original referenced undefined `mu_p`; the mean
        # is bound to `mu` on the line above, so use it here.
        negCLL_m = 0.5 * np.log(2 * np.pi) + 0.5 * logvar \
            + 0.5 * ((X - mu) ** 2) / T.exp(logvar)
    else:
        assert False, 'Bad data_type: ' + str(self.params['data_type'])
    return p_x_z, negCLL_m.sum(1, keepdims=True)
def log_poisson(x, log_lambda, eps = 0.0):
    """Log pmf of a Poisson distribution with rate exp(log_lambda), at values x.

    Inputs are clipped with eps to keep the log/gammaln terms finite.
    """
    counts = T.clip(x, eps, x)
    rate = T.exp(log_lambda)
    rate = T.clip(rate, eps, rate)
    # x*log(lambda) - lambda - log(x!)  (log x! via gammaln)
    return counts * log_lambda - rate - T.gammaln(counts + 1)
def ll_gg(e, beta):
    """Return the log likelihood of e under a generalized Gaussian with shape beta.

    For beta = 1 this reduces to the standard normal log-density.
    """
    beta = float(beta)
    half_inv_beta = 0.5 / beta
    # Scale constant chosen so the distribution has unit variance.
    m = gamma(half_inv_beta) / ((2 ** (1 / beta)) * gamma(3 / (2 * beta)))
    log_density = (
        -0.5 * power((e ** 2) / m, beta)
        + log(beta)
        - gammaln(half_inv_beta)
        - half_inv_beta * log(2)
        - 0.5 * log(m)
    )
    return log_density
def __init__(self, mu=0.0, beta=None, cov=None, *args, **kwargs):
    """Generalized Gaussian with mean mu, shape beta and covariance cov.

    Stores the pseudo-inverse of cov as the precision and precomputes the
    log normalizing constant logk.
    """
    super(GeneralizedGaussian, self).__init__(*args, **kwargs)
    dim = mu.shape[0]
    self.mu = mu
    self.beta = beta
    self.prec = tt.nlinalg.pinv(cov)
    half_dim = dim / 2.0
    shape_term = 1 + dim / (2 * beta)
    # log of k = dim * Gamma(dim/2) / (pi^(dim/2) * Gamma(1 + dim/(2*beta))
    #            * 2^(1 + dim/(2*beta))), written with gammaln for stability.
    self.logk = (tt.log(dim)
                 + tt.gammaln(half_dim)
                 - half_dim * tt.log(np.pi)
                 - tt.gammaln(shape_term)
                 - shape_term * tt.log(2.0))
def Beta_fn(a, b):
    """Beta function B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b), via gammaln."""
    log_beta = T.gammaln(a) + T.gammaln(b) - T.gammaln(a + b)
    return T.exp(log_beta)
def _log_partition_symfunc():
    """Build compiled Theano functions for a Dirichlet-style log partition.

    Returns (func, grad_func): the log normalizer of the natural parameters
    and its gradient with respect to them.
    """
    eta = T.vector()
    # sum_i log Gamma(eta_i + 1) - log Gamma(sum_i (eta_i + 1))
    log_Z = T.sum(T.gammaln(eta + 1.)) - T.gammaln(T.sum(eta + 1))
    value_fn = theano.function([eta], log_Z)
    grad_fn = theano.function([eta], T.grad(T.sum(log_Z), eta))
    return value_fn, grad_fn
def kldiv_gamma(a1, b1, a0=a0, b0=b0):
    """KL( Gamma(a1, b1) || Gamma(a0, b0) ), summed over all elements.

    Standard closed form with digamma (nnu.Psi) and gammaln terms.
    """
    digamma_term = (a1 - a0) * nnu.Psi()(a1)
    log_gamma_term = T.gammaln(a0) - T.gammaln(a1)
    rate_term = a0 * (T.log(b1) - T.log(b0)) + a1 * ((b0 - b1) / b1)
    return T.sum(digamma_term + log_gamma_term + rate_term)
def kldiv_r(self, a1, b1):
    """Negative KL( Gamma(a1, b1) || Gamma(self.a0, self.b0) ), first element.

    Same closed form as the gamma KL divergence, negated and indexed at [0].
    """
    kl = ((a1 - self.a0) * nnu.Psi()(a1)
          - T.gammaln(a1) + T.gammaln(self.a0)
          + self.a0 * (T.log(b1) - T.log(self.b0))
          + a1 * ((self.b0 - b1) / b1))
    return -kl[0]