我们从Python开源项目中，提取了以下7个代码示例，用于说明如何使用scipy.special.polygamma()。
def update_dir_prior(prior, N, logphat, rho):
    """Update a Dirichlet prior in place with one Newton step.

    Method described in **Huang: Maximum Likelihood Estimation of Dirichlet
    Distribution Parameters.**
    http://jonathan-huang.org/research/dirichlet/dirichlet.pdf

    Parameters
    ----------
    prior : np.ndarray
        Current prior vector; mutated in place when the step keeps it positive.
    N : float
        Number of observations the sufficient statistics were averaged over.
    logphat : np.ndarray
        Averaged log of the expected sufficient statistics, same shape as `prior`.
    rho : float
        Step-size factor applied to the Newton direction.

    Returns
    -------
    np.ndarray
        The (possibly updated) prior.
    """
    # Gradient of the Dirichlet log-likelihood w.r.t. the prior.
    gradf = N * (psi(np.sum(prior)) - psi(prior) + logphat)

    # The Hessian has the special form diag(q) + c * 1*1^T, so the Newton
    # direction can be computed in O(K) without forming a matrix.
    c = N * polygamma(1, np.sum(prior))
    q = -N * polygamma(1, prior)

    b = np.sum(gradf / q) / (1 / c + np.sum(1 / q))
    dprior = -(gradf - b) / q
    # NOTE: the original code pre-initialized dprior with np.copy(prior);
    # that value was never read, so the dead assignment is removed.

    # Apply the step only if every updated component stays strictly positive.
    if all(rho * dprior + prior > 0):
        prior += rho * dprior
    else:
        logger.warning("updated prior not positive")

    return prior
def update_alpha(self, gammat, rho):
    """Update parameters for the Dirichlet prior on the per-document topic
    weights `alpha` given the last `gammat`.

    Uses Newton's method:
    http://www.stanford.edu/~jhuang11/research/dirichlet/dirichlet.pdf

    Parameters
    ----------
    gammat : sequence of np.ndarray
        Per-document variational Dirichlet parameters from the last chunk.
    rho : callable
        Zero-argument callable returning the current learning rate.

    Returns
    -------
    np.ndarray
        The (possibly updated) `self.alpha`.
    """
    N = float(len(gammat))
    # Average log of the expected topic proportions over the chunk.
    logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N

    # Gradient of the Dirichlet log-likelihood w.r.t. alpha.
    gradf = N * (psi(numpy.sum(self.alpha)) - psi(self.alpha) + logphat)

    # Hessian is diag(q) + c * 1*1^T, enabling an O(K) Newton step.
    c = N * polygamma(1, numpy.sum(self.alpha))
    q = -N * polygamma(1, self.alpha)

    b = numpy.sum(gradf / q) / (1 / c + numpy.sum(1 / q))
    dalpha = -(gradf - b) / q
    # NOTE: the original pre-initialized dalpha with numpy.copy(self.alpha);
    # that value was never read, so the dead assignment is removed.

    # Apply the step only if every updated component stays strictly positive.
    if all(rho() * dalpha + self.alpha > 0):
        self.alpha += rho() * dalpha
    else:
        logger.warning("updated alpha not positive")
    # Lazy %-style args: the list() conversion only happens if INFO is enabled.
    logger.info("optimized alpha %s", list(self.alpha))

    return self.alpha
def update_alpha(self, gammat, rho):
    """Update parameters for the Dirichlet prior on the per-document topic
    weights `alpha` given the last `gammat`.

    Uses Newton's method, described in **Huang: Maximum Likelihood Estimation
    of Dirichlet Distribution Parameters.**
    (http://www.stanford.edu/~jhuang11/research/dirichlet/dirichlet.pdf)

    Parameters
    ----------
    gammat : sequence of np.ndarray
        Per-document variational Dirichlet parameters from the last chunk.
    rho : callable
        Zero-argument callable returning the current learning rate.

    Returns
    -------
    np.ndarray
        The (possibly updated) `self.alpha`.
    """
    N = float(len(gammat))
    # Average log of the expected topic proportions over the chunk.
    logphat = sum(dirichlet_expectation(gamma) for gamma in gammat) / N

    # Gradient of the Dirichlet log-likelihood w.r.t. alpha.
    gradf = N * (psi(numpy.sum(self.alpha)) - psi(self.alpha) + logphat)

    # Hessian is diag(q) + c * 1*1^T, enabling an O(K) Newton step.
    c = N * polygamma(1, numpy.sum(self.alpha))
    q = -N * polygamma(1, self.alpha)

    b = numpy.sum(gradf / q) / (1 / c + numpy.sum(1 / q))
    dalpha = -(gradf - b) / q
    # NOTE: the original pre-initialized dalpha with numpy.copy(self.alpha);
    # that value was never read, so the dead assignment is removed.

    # Apply the step only if every updated component stays strictly positive.
    if all(rho() * dalpha + self.alpha > 0):
        self.alpha += rho() * dalpha
    else:
        logger.warning("updated alpha not positive")
    # Lazy %-style args: the list() conversion only happens if INFO is enabled.
    logger.info("optimized alpha %s", list(self.alpha))

    return self.alpha
def lnZ_Exponential_var(M):
    """Return the trigamma function of ``M`` (i.e. ``polygamma(1, M)``),
    the variance of ln Z for the Exponential model."""
    trigamma_of_M = polygamma(1, M)
    return trigamma_of_M
def perform(self, node, inputs, output_storage):
    """Evaluate ``polygamma(self.n, x)`` on the first input and store the
    result in the first output cell (Op-style perform signature)."""
    # First (and only used) input value.
    argument = inputs[0]
    # Write the elementwise polygamma result into the output slot in place.
    result = sp.polygamma(self.n, argument)
    output_storage[0][0] = result