Python numpy.linalg module: inv() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.linalg.inv().
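Before the project examples, a minimal self-contained sketch of what inv() computes (the matrix and vector values below are arbitrary):

import numpy as np
from numpy.linalg import inv

# A small invertible matrix (values are arbitrary).
A = np.array([[3., 1.],
              [2., 4.]])
A_inv = inv(A)
print(np.allclose(A @ A_inv, np.eye(2)))  # True

# To solve A x = b, numpy.linalg.solve is usually preferred over
# forming the inverse explicitly (faster and more numerically stable).
b = np.array([1., 2.])
x = np.linalg.solve(A, b)
print(np.allclose(A @ x, b))  # True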
def __call__(self, params):
    # `sample`, `maths` and `la` (numpy.linalg) are module-level names
    # in the source project.
    print('???', params)
    sd1 = params[0]
    sd2 = params[1]
    cor = params[2]
    if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
        return np.inf
    bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
    bandwidthdet = la.det(bandwidth)
    bandwidthinv = la.inv(bandwidth)
    diff = sample[self.__iidx] - sample[self.__jidx]
    temp = diff.dot(bandwidthinv.T)
    temp *= temp
    e = np.exp(np.sum(temp, axis=1))
    s = np.sum(e**(-.25) - 4 * e**(-.5))
    cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
    print('!!!', cost)
    return cost / 10000.
def distribution_parameters(self, parameter_name):
    if parameter_name == 'trend':
        E = dot(dot(self.derivative_matrix.T, inv(diag(self.parameters.list['omega'].current_value))), self.derivative_matrix)
        mean = dot(inv(eye(self.size) + E), self.data)
        cov = (self.parameters.list['sigma2'].current_value) * inv(eye(self.size) + E)
        return {'mean': mean, 'cov': cov}
    elif parameter_name == 'sigma2':
        E = dot(dot(self.derivative_matrix.T, inv(diag(self.parameters.list['omega'].current_value))), self.derivative_matrix)
        pos = self.size
        loc = 0
        scale = 0.5 * dot((self.data - dot(eye(self.size), self.parameters.list['trend'].current_value)).T,
                          (self.data - dot(eye(self.size), self.parameters.list['trend'].current_value))) \
            + 0.5 * dot(dot(self.parameters.list['trend'].current_value.T, E), self.parameters.list['trend'].current_value)
    elif parameter_name == 'lambda2':
        pos = self.size - self.total_variation_order - 1 + self.alpha
        loc = 0.5 * (norm(dot(self.derivative_matrix, self.parameters.list['trend'].current_value), ord=1)) / self.parameters.list['sigma2'].current_value + self.rho
        scale = 1
    elif parameter_name == 'omega':
        pos = [sqrt(((self.parameters.list['lambda2'].current_value ** 2) * self.parameters.list['sigma2'].current_value) / (dj ** 2))
               for dj in dot(self.derivative_matrix, self.parameters.list['trend'].current_value)]
        loc = 0
        scale = self.parameters.list['lambda2'].current_value ** 2
    return {'pos': pos, 'loc': loc, 'scale': scale}
def test_byteorder_check():
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'

    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        n_arr = arr.newbyteorder(native)
        sw_arr = arr.newbyteorder('S').byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def test_basic(self):
    import numpy.linalg as linalg

    A = np.array([[1., 2.],
                  [3., 4.]])
    mA = matrix(A)
    assert_(np.allclose(linalg.inv(A), mA.I))
    assert_(np.all(np.array(np.transpose(A) == mA.T)))
    assert_(np.all(np.array(np.transpose(A) == mA.H)))
    assert_(np.all(A == mA.A))

    B = A + 2j * A
    mB = matrix(B)
    assert_(np.allclose(linalg.inv(B), mB.I))
    assert_(np.all(np.array(np.transpose(B) == mB.T)))
    assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))
def test_basic(self):
    import numpy.linalg as linalg

    A = np.array([[1., 2.], [3., 4.]])
    mA = matrix(A)

    B = np.identity(2)
    for i in range(6):
        assert_(np.allclose((mA ** i).A, B))
        B = np.dot(B, A)

    Ainv = linalg.inv(A)
    B = np.identity(2)
    for i in range(6):
        assert_(np.allclose((mA ** -i).A, B))
        B = np.dot(B, Ainv)

    assert_(np.allclose((mA * mA).A, np.dot(A, A)))
    assert_(np.allclose((mA + mA).A, (A + A)))
    assert_(np.allclose((3 * mA).A, (3 * A)))

    mA2 = matrix(A)
    mA2 *= 3
    assert_(np.allclose(mA2.A, 3 * A))
def test_model_pq(self):
    """Test the model PQ matrix generation"""
    new_model = self.bayes.update(models={
        'simp': self.model.update_dof([4, 2])})
    P, q = new_model._get_model_pq()
    epsilon = np.array([2, 1])
    sigma = inv(np.diag(np.ones(2)))
    P_true = sigma
    q_true = -np.dot(epsilon, sigma)
    npt.assert_array_almost_equal(P, P_true, decimal=8)
    npt.assert_array_almost_equal(q, q_true, decimal=8)
def test_sim_pq(self):
    """Test the simulation PQ matrix generation"""
    initial_data = self.bayes.get_data()
    sens_matrix = self.bayes._get_sens(initial_data=initial_data)
    P, q = self.bayes._get_sim_pq(initial_data, sens_matrix)
    sigma = self.exp1.get_sigma()
    P_true = np.dot(np.dot(sens_matrix['simple'].T, inv(sigma)),
                    sens_matrix['simple'])
    npt.assert_array_almost_equal(P, P_true, decimal=8)
    epsilon = self.bayes.simulations['simple']['exp']\
        .compare(initial_data['simple'])
    q_true = -np.dot(np.dot(epsilon, inv(sigma)), sens_matrix['simple'])
    npt.assert_array_almost_equal(q, q_true, decimal=8)
def get_log_like(self, sim_data):
    """Gets the log likelihood of the current simulation

    Args:
        sim_data(list): Length-three list corresponding to the `__call__`
            from an Experiment object
        experiment(Experiment): A valid Experiment object

    Return:
        (float): The log of the likelihood of the simulation
    """
    epsilon = self.compare(sim_data)
    return -0.5 * np.dot(epsilon, np.dot(inv(self.get_sigma()), epsilon))
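For reference, the same Gaussian log-likelihood term as a standalone sketch; the names and values below are illustrative, not from the project above:

import numpy as np
from numpy.linalg import inv

def gaussian_log_like(epsilon, sigma):
    # -0.5 * eps^T Sigma^-1 eps: the data-dependent part of a
    # multivariate-normal log likelihood (normalizing constant omitted).
    return -0.5 * np.dot(epsilon, np.dot(inv(sigma), epsilon))

sigma = np.array([[2.0, 0.3],
                  [0.3, 1.0]])
epsilon = np.array([0.5, -1.2])
print(gaussian_log_like(epsilon, sigma))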
def get_pq(self, scale=False):
    """Returns the P and q matrix components for the model

    Keyword Args:
        scale(bool): Flag to scale the model values
    """
    prior_scale = self.get_scaling()
    prior_var = inv(self.get_sigma())
    prior_delta = self.get_dof() - self.prior.get_dof()

    if scale:
        return np.dot(prior_scale, np.dot(prior_var, prior_scale)),\
            -np.dot(prior_scale, np.dot(prior_delta, prior_var))
    else:
        return prior_var, -np.dot(prior_delta, prior_var)
# end
def trainClassifer(self, labels, vectors, verbose=False, ilog=None):
    '''
    Do not call this function directly; call train instead.
    '''
    self.training_size = len(labels)
    c = len(labels)
    r = len(vectors[0])
    y = array(labels, 'd')
    X = zeros((r, c), 'd')
    for i in range(len(vectors)):
        X[:, i] = vectors[i]
    # Ridge solution: w = (lam*I + X X^T)^-1 X y
    tmp1 = inv(self.lam * eye(r) + dot(X, X.transpose()))
    tmp2 = dot(y, X.transpose())
    self.w = w = dot(tmp1, tmp2)
def trainRidgeRegression(self, labels, vectors, verbose=False):
    self.training_size = len(labels)
    c = len(labels)
    r = len(vectors[0])
    self.c = c
    self.r = r
    y = array(labels, 'd')
    X = zeros((r, c), 'd')
    for i in range(len(vectors)):
        X[:, i] = vectors[i]
    self.X = X
    kernel_matrix = zeros((c, c), 'd')
    for i in range(c):
        kernel_matrix[:, i] = self.kernel(X, X[:, i:i + 1])
    # Kernel ridge solution: w = y (K + lam*I)^-1
    self.w = w = dot(y, inv(kernel_matrix + self.lam * eye(c)))
def coefficient_variance(number_of_coefficient, y_arr, y_explained, x_matrix):
    """
    :param number_of_coefficient: number from range(n)
    :param y_arr: input y
    :param y_explained: y explained array
    :param x_matrix: x matrix from docs
    :return: variance of coefficient
    """
    # n, k, rss and linal (numpy.linalg) are defined at module level
    # in the source project.
    variance_e_2 = rss(y_arr, y_explained) / (n - k)
    # (X^T * X)^-1
    mmatrix = linal.inv(numpy.dot(x_matrix.T, x_matrix))
    v_matrix = numpy.dot(variance_e_2, mmatrix)
    return v_matrix[number_of_coefficient][number_of_coefficient]
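For context, the same estimator in self-contained form: the coefficient covariance is sigma2_hat * (X^T X)^-1 with sigma2_hat = RSS / (n - k). The data below is simulated for illustration:

import numpy as np
from numpy.linalg import inv

rng = np.random.default_rng(0)
n, k = 50, 2
x_matrix = np.column_stack([np.ones(n), rng.normal(size=n)])  # intercept + regressor
beta_true = np.array([1.0, 2.0])
y_arr = x_matrix @ beta_true + rng.normal(scale=0.5, size=n)

beta_hat = inv(x_matrix.T @ x_matrix) @ x_matrix.T @ y_arr
y_explained = x_matrix @ beta_hat
rss = np.sum((y_arr - y_explained) ** 2)
variance_e_2 = rss / (n - k)
v_matrix = variance_e_2 * inv(x_matrix.T @ x_matrix)
print(np.sqrt(np.diag(v_matrix)))  # standard errors of the coefficients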
##############################################################
# (Original non-ASCII banner, partially garbled in extraction;
#  it announces building the regression of y on x2, x3 and x4.)
##############################################################
def fast_optimize(endog, exog, n_obs=0, n_vars=0, max_iter=10000, tolerance=1e-10):
    """
    A convenience function for the Newton-Raphson method to evaluate a logistic model.

    :param endog: An Nx1 vector of endogenous predictions
    :param exog: An NxK vector of exogenous predictors
    :param n_obs: The number of observations N
    :param n_vars: The number of exogenous predictors K
    :param max_iter: The maximum number of iterations
    :param tolerance: Margin of error for convergence
    :return: The error-minimizing parameters for the model.
    """
    iterations = 0
    oldparams = np.inf
    newparams = np.repeat(0, n_vars)
    while iterations < max_iter and np.any(np.abs(newparams - oldparams) > tolerance):
        oldparams = newparams
        try:
            H = logit_hessian(exog, oldparams, n_obs)
            # Newton-Raphson step: subtract H^-1 times the score
            newparams = oldparams - dot(inv(H), logit_score(endog, exog, oldparams, n_obs))
        except LinAlgError:
            # re-raise the original error rather than a fresh, empty LinAlgError
            raise
        iterations += 1
    return newparams
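A self-contained sketch of the same Newton-Raphson iteration for a logistic model; logit_score and logit_hessian are not shown above, so the score and Hessian are written out explicitly here (illustrative, not the project's implementations):

import numpy as np
from numpy.linalg import inv

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def newton_logit(y, X, max_iter=100, tol=1e-10):
    beta = np.zeros(X.shape[1])
    for _ in range(max_iter):
        p = sigmoid(X @ beta)
        score = X.T @ (y - p)             # gradient of the log-likelihood
        H = -(X.T * (p * (1 - p))) @ X    # Hessian: -X^T W X
        step = inv(H) @ score
        beta = beta - step                # Newton-Raphson update
        if np.max(np.abs(step)) < tol:
            break
    return beta

rng = np.random.default_rng(1)
X = np.column_stack([np.ones(200), rng.normal(size=200)])
y = (rng.random(200) < sigmoid(X @ np.array([-0.5, 1.5]))).astype(float)
print(newton_logit(y, X))  # roughly recovers [-0.5, 1.5]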
def update(self, z):
    sigmas_f, sigmas_h = self.sigmas_f, self.sigmas_h

    # Pass the sigma points through the measurement function
    for i in range(self.M.num_sigmas):
        sigmas_h[i] = self.h(sigmas_f[i])

    zp, Pz = self.unscented_transform(sigmas_h, self.M.Wm, self.M.Wc, self._R)

    # Cross covariance of state and measurement
    Pxz = np.zeros((self._n, self._k))
    for i in range(self.M.num_sigmas):
        Pxz += self.M.Wc[i] * np.outer(sigmas_f[i] - self.xp, sigmas_h[i] - zp)

    K = Pxz.dot(inv(Pz))  # Kalman gain

    self._x = self.xp + K.dot(z - zp)
    self._P = self.Pp - K.dot(Pz).dot(K.T)
def get_dimensions(self):
    # Calculate within-class scatter
    Sw = np.sum(list(self._scatter_matrix_by_class.values()), axis=0)

    # Calculate between-class scatter
    s = (self._p, self._p)
    Sb = np.zeros(s)
    for k in self._classes:
        a = self._mean_vector_by_class[k] - self._global_mean_vector
        Sb += self._N_by_class[k] * a.dot(a.T)

    # Compute eigenvectors of Sw^-1 Sb; eig returns eigenvectors as
    # columns, so sort the columns by decreasing eigenvalue
    Sw_inv = inv(Sw)
    A = Sw_inv.dot(Sb)
    eigen_values, eigen_vectors = eig(A)
    idx = np.argsort(eigen_values)[::-1]
    eigen_vectors = eigen_vectors[:, idx]
    return eigen_vectors
def _udpate_item_features(self):
    # Gibbs sampling for item features
    for item_id in xrange(self.n_item):
        indices = self.ratings_csc_[:, item_id].indices
        features = self.user_features_[indices, :]
        rating = self.ratings_csc_[:, item_id].data - self.mean_rating_
        rating = np.reshape(rating, (rating.shape[0], 1))

        covar = inv(self.alpha_item +
                    self.beta * np.dot(features.T, features))
        lam = cholesky(covar)

        temp = (self.beta * np.dot(features.T, rating) +
                np.dot(self.alpha_item, self.mu_item))
        mean = np.dot(covar, temp)

        # Sample from N(mean, covar) via the Cholesky factor
        temp_feature = mean + np.dot(
            lam, self.rand_state.randn(self.n_feature, 1))
        self.item_features_[item_id, :] = temp_feature.ravel()
def _update_user_features(self):
    # Gibbs sampling for user features
    for user_id in xrange(self.n_user):
        indices = self.ratings_csr_[user_id, :].indices
        features = self.item_features_[indices, :]
        rating = self.ratings_csr_[user_id, :].data - self.mean_rating_
        rating = np.reshape(rating, (rating.shape[0], 1))

        covar = inv(
            self.alpha_user + self.beta * np.dot(features.T, features))
        lam = cholesky(covar)

        # alpha * sum(V_j * R_ij) + LAMBDA_U * mu_u
        temp = (self.beta * np.dot(features.T, rating) +
                np.dot(self.alpha_user, self.mu_user))
        # mu_i_star
        mean = np.dot(covar, temp)

        temp_feature = mean + np.dot(
            lam, self.rand_state.randn(self.n_feature, 1))
        self.user_features_[user_id, :] = temp_feature.ravel()
def _update_user_feature(self):
    """Fix item features and update user features"""
    for i in xrange(self.n_user):
        _, item_idx = self.ratings_csr_[i, :].nonzero()
        # number of ratings of user i
        n_u = item_idx.shape[0]
        if n_u == 0:
            logger.debug("no ratings for user %d", i)
            continue
        item_features = self.item_features_.take(item_idx, axis=0)
        ratings = self.ratings_csr_[i, :].data - self.mean_rating_

        # Regularized least squares: solve A_i w = V_i
        A_i = (np.dot(item_features.T, item_features) +
               self.reg * n_u * np.eye(self.n_feature))
        V_i = np.dot(item_features.T, ratings)
        self.user_features_[i, :] = np.dot(inv(A_i), V_i)
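A note on the final line: forming inv(A_i) and multiplying is mathematically a linear solve, and numpy.linalg.solve is generally preferred for it. A quick equivalence check with arbitrary values:

import numpy as np

A_i = np.array([[4.0, 1.0],
                [1.0, 3.0]])
V_i = np.array([1.0, 2.0])
x_inv = np.linalg.inv(A_i) @ V_i      # explicit inverse, as above
x_solve = np.linalg.solve(A_i, V_i)   # direct solve, better conditioned
print(np.allclose(x_inv, x_solve))    # True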
def warpImage(background, image, parallelogram):
    '''
    @params:
        background is the unchanged image
        image is the image to be warped
        parallelogram is the coordinates to warp the image to, starting at
            the upper left and going clockwise
    returns a new image that is the composition of background and image
    after image has been warped
    '''
    mapped = np.array([[parallelogram[0].x, parallelogram[1].x, parallelogram[2].x],
                       [parallelogram[0].y, parallelogram[1].y, parallelogram[2].y],
                       [1, 1, 1]])
    width, height = image.size
    original = np.array([[0, width, width], [0, 0, height]])

    # solve for the affine matrix
    solution = np.dot(original, inv(mapped))

    # unroll the matrix into a sequence
    affine = (solution[0][0], solution[0][1], solution[0][2],
              solution[1][0], solution[1][1], solution[1][2])
    transformed = image.transform(background.size, Image.AFFINE, affine)

    white = Image.new("RGBA", (width, height), "white")
    transformedMask = white.transform(background.size, Image.AFFINE, affine)
    background.paste(transformed, (0, 0), transformedMask)
    return background
def tforminv(trans, uv):
    """
    Function:
    ----------
    apply the inverse of affine transform 'trans' to uv

    Parameters:
    ----------
    @trans: 3x3 np.array
        transform matrix
    @uv: Kx2 np.array
        each row is a pair of coordinates (x, y)

    Returns:
    ----------
    @xy: Kx2 np.array
        each row is a pair of inverse-transformed coordinates (x, y)
    """
    Tinv = inv(trans)
    xy = tformfwd(Tinv, uv)
    return xy
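A usage sketch; tformfwd is not shown above, so a plausible row-vector, homogeneous-coordinate version is assumed here for illustration:

import numpy as np
from numpy.linalg import inv

def tformfwd(trans, uv):
    # Assumed forward transform: append a 1 to each row and multiply.
    uv1 = np.hstack([uv, np.ones((uv.shape[0], 1))])
    return (uv1 @ trans)[:, :2]

# A translation by (2, 3) in the row-vector convention.
trans = np.array([[1., 0., 0.],
                  [0., 1., 0.],
                  [2., 3., 1.]])
uv = np.array([[0., 0.], [1., 1.]])
xy = tformfwd(inv(trans), uv)  # what tforminv computes
print(xy)                      # points shifted back by (-2, -3)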
def aryule(c, k):
    """Solve the Yule-Walker equations.

    Args:
        c (numpy array): Coefficients (i.e. autocorrelation)
        k (int): Assuming the AR(k) model

    Returns:
        numpy array: k model parameters

    Some formulations solve C a = -c, but we actually solve C a = c.
    """
    a = np.zeros(k)
    # ignore a singular matrix
    C = toeplitz(c[:k])
    if not np.all(C == 0.0) and np.isfinite(ln.cond(C)):
        a = np.dot(ln.inv(C), c[1:])
    return a
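A usage sketch, with autocorrelations constructed from the Yule-Walker relations of an AR(2) model with known (illustrative) parameters:

import numpy as np
from numpy.linalg import inv
from scipy.linalg import toeplitz

a_true = np.array([0.6, -0.2])
# Autocorrelations c[0..2] consistent with a_true via Yule-Walker:
# c1 = a1 / (1 - a2) * c0, and c2 = a1*c1 + a2*c0.
c0 = 1.0
c1 = a_true[0] / (1 - a_true[1]) * c0
c2 = a_true[0] * c1 + a_true[1] * c0
c = np.array([c0, c1, c2])

C = toeplitz(c[:2])
a_hat = inv(C) @ c[1:]  # what aryule(c, 2) computes
print(a_hat)            # recovers [0.6, -0.2]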
def estimate_X_TLS(y, H):
    """Estimator of x for the Linear Model using Total Least Squares (TLS).

    Compared to the classical Least Squares estimator, the TLS estimator is
    better suited when H is not exactly known or is contaminated by noise
    [GOL80]_.

    .. [GOL80] Golub, Gene H., and Charles F. Van Loan. "An analysis of the
       total least squares problem." SIAM Journal on Numerical Analysis 17.6
       (1980): 883-893.
    """
    N, L = H.shape
    H = np.matrix(H)
    Z = np.hstack([H, y])
    U, S, V = lg.svd(Z)
    V = np.matrix(V)
    V = V.H
    VHY = V[:L, L:]
    VYY = V[L:, L:]
    x = -VHY * lg.inv(VYY)
    return x
def make_eigvals_positive(am, targetprod):
    """For the symmetric square matrix `am`, increase any zero eigenvalues
    such that the total product of eigenvalues is greater than or equal to
    `targetprod`. Returns a (possibly) new, non-singular matrix."""
    w, v = linalg.eigh(am)  # use eigh since am is symmetric
    mask = w < 1.e-10
    if np.any(mask):
        nzprod = np.prod(w[~mask])  # product of nonzero eigenvalues
        nzeros = mask.sum()  # number of zero eigenvalues
        new_val = max(1.e-10, (targetprod / nzprod) ** (1. / nzeros))
        w[mask] = new_val  # adjust zero eigvals
        am_new = np.dot(np.dot(v, np.diag(w)), linalg.inv(v))  # re-form cov
    else:
        am_new = am
    return am_new
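A usage sketch with a rank-deficient matrix (eigenvalues 2 and 0), so the zero eigenvalue is floored until the eigenvalue product reaches targetprod:

import numpy as np
from numpy import linalg

# (assumes make_eigvals_positive from above is in scope)
am = np.array([[1.0, 1.0],
               [1.0, 1.0]])  # symmetric, eigenvalues 2 and 0
am_new = make_eigvals_positive(am, targetprod=1.0)
print(linalg.det(am_new))  # 1.0: the zero eigenvalue was raised to 0.5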
def compSpecSample(self, angle):
    """
    Computes the spectrum in a sample with the Capon beamformer.

    The covariance matrix MUST be computed (call `compCov()`) before
    calling this function.

    Parameters
    ----------
    angle : float
        Direction-of-arrival (DOA) angle in the range [0, pi).

    Returns
    -------
    p : float
        The response of the Capon beamformer.
    """
    a = self.sarr.steer(angle)
    p = 1. / (a.T.conj().dot(inv(self.r).dot(a)))
    # Discard imaginary part
    return p.real
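A standalone sketch of the same Capon spectrum, 1 / (a^H R^-1 a), for a uniform linear array; the steering-vector convention below is an assumption for illustration, not the project's sarr object:

import numpy as np
from numpy.linalg import inv

def steer(angle, m=8):
    # Assumed m-element ULA steering vector, half-wavelength spacing.
    return np.exp(-1j * np.pi * np.arange(m) * np.cos(angle))

rng = np.random.default_rng(0)
m, snapshots, doa = 8, 200, np.pi / 3
sig = rng.normal(size=snapshots) + 1j * rng.normal(size=snapshots)
noise = 0.1 * (rng.normal(size=(m, snapshots)) +
               1j * rng.normal(size=(m, snapshots)))
X = np.outer(steer(doa, m), sig) + noise
r = X @ X.conj().T / snapshots  # sample covariance matrix

angles = np.linspace(0.01, np.pi - 0.01, 360)
p = [1. / np.real(steer(th, m).conj() @ inv(r) @ steer(th, m))
     for th in angles]
print(angles[int(np.argmax(p))])  # peaks near doa (pi/3 ~ 1.047)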
def compSpecSample(self, angle):
    """
    Computes the spatial spectrum response at a specified angle with the
    FLOM matrix.

    The FLOM matrix MUST be computed (call `compFLOM()`) before calling
    this function.

    Parameters
    ----------
    angle : float
        Direction-of-arrival (DOA) angle in the range [0, pi).

    Returns
    -------
    p : float
        The response of the spatial spectrum.
    """
    a = self.sarr.steer(angle)
    p = 1. / (a.T.conj().dot(inv(self.gamma).dot(a)))
    # Discard imaginary part
    return p.real
def _mvee(self, points, tolerance):
    # Minimum-volume enclosing ellipsoid. Taken from:
    # http://stackoverflow.com/questions/14016898/port-matlab-bounding-ellipsoid-code-to-python
    points = np.asmatrix(points)
    n, d = points.shape
    q = np.column_stack((points, np.ones(n))).T
    err = tolerance + 1.0
    u = np.ones(n) / n
    while err > tolerance:
        # assert u.sum() == 1  # invariant
        x = q * np.diag(u) * q.T
        m = np.diag(q.T * la.inv(x) * q)
        jdx = np.argmax(m)
        step_size = (m[jdx] - d - 1.0) / ((d + 1) * (m[jdx] - 1.0))
        new_u = (1 - step_size) * u
        new_u[jdx] += step_size
        err = la.norm(new_u - u)
        u = new_u
    c = u * points
    a = la.inv(points.T * np.diag(u) * points - c.T * c) / d
    return np.asarray(a), np.squeeze(np.asarray(c))
def _int_var_rbf_hyp(self, hyp, X, jitter=1e-8):
    """
    Posterior integral variance as a function of hyper-parameters.

    :param hyp: RBF kernel hyper-parameters [s2, el_1, ..., el_d]
    :param X: sigma-points
    :param jitter: numerical jitter (for stabilizing computations)
    :return: posterior integral variance
    """
    # reshape X to an SP matrix
    X = np.reshape(X, (self.n, self.d))
    # set kernel hyper-parameters
    s2, el = 1, hyp  # sig_var hyper-parameter always set to 1
    self.kern.param_array[0] = s2  # variance
    self.kern.param_array[1:] = el  # lengthscale
    K = self.kern.K(X)
    L = np.diag(el ** 2)
    # posterior variance of the integral
    ks = s2 * np.sqrt(det(L + np.eye(self.d))) * multivariate_normal(mean=np.zeros(self.d), cov=L).pdf(X)
    postvar = s2 * np.sqrt(det(2 * inv(L) + np.eye(self.d))) ** -1 - ks.dot(
        solve(K + jitter * np.eye(self.n), ks.T))
    return postvar
def cov(self):
    """
    Compute parameter covariance

    Returns
    -------
    c : ndarray
        Parameter covariance
    """
    s = self.s
    nobs = self._xe.shape[0]
    scale = 1 / (nobs - int(self._debiased) * self._df)
    if self.square:
        ji = self.inv_jacobian
        out = ji @ s @ ji.T
    else:
        j = self.jacobian
        out = inv(j.T @ inv(s) @ j)
    out = (scale / 2) * (out + out.T)
    return out
def w(self, moments):
    """
    Score/moment condition weighting matrix

    Parameters
    ----------
    moments : ndarray
        Moment conditions (nobs by nmoments)

    Returns
    -------
    w : ndarray
        Weighting matrix computed from moment conditions
    """
    if self._center:
        moments = moments - moments.mean(0)[None, :]
    nobs = moments.shape[0]
    out = moments.T @ moments / nobs
    return inv((out + out.T) / 2.0)
def w(self, moments):
    """
    Score/moment condition weighting matrix

    Parameters
    ----------
    moments : ndarray
        Moment conditions (nobs by nmoments)

    Returns
    -------
    w : ndarray
        Weighting matrix computed from moment conditions
    """
    if self._center:
        moments = moments - moments.mean(0)[None, :]
    out = self._kernel_cov(moments)
    return inv(out)
def _f_statistic(self, params, cov, debiased):
    non_const = ~(self._x.ptp(0) == 0)
    test_params = params[non_const]
    test_cov = cov[non_const][:, non_const]
    test_stat = test_params.T @ inv(test_cov) @ test_params
    test_stat = float(test_stat)
    nobs, nvar = self._x.shape
    null = 'All parameters ex. constant are zero'
    name = 'Model F-statistic'
    df = test_params.shape[0]
    if debiased:
        wald = WaldTestStatistic(test_stat / df, null, df, nobs - nvar,
                                 name=name)
    else:
        wald = WaldTestStatistic(test_stat, null, df, name=name)
    return wald
def cov(self):
    """Covariance of estimated parameters"""
    x, z = self.x, self.z
    nobs, nvar = x.shape

    pinvz = self._pinvz
    v = (x.T @ z) @ (pinvz @ x) / nobs
    if self._kappa != 1:
        kappa = self._kappa
        xpx = x.T @ x / nobs
        v = (1 - kappa) * xpx + kappa * v

    vinv = inv(v)
    c = vinv @ self.s @ vinv / nobs
    return (c + c.T) / 2
def _gls_cov(self):
    x = self._x
    sigma = self._sigma
    sigma_inv = inv(sigma)

    xpx = blocked_inner_prod(x, sigma_inv)
    # Handles the case where sigma_inv is not the inverse of full_sigma
    xeex = blocked_inner_prod(x, sigma_inv @ self._full_sigma @ sigma_inv)

    if self._constraints is None:
        xpxi = inv(xpx)
        cov = xpxi @ xeex @ xpxi
    else:
        cons = self._constraints
        xpx = cons.t.T @ xpx @ cons.t
        xpxi = inv(xpx)
        xeex = cons.t.T @ xeex @ cons.t
        cov = cons.t @ (xpxi @ xeex @ xpxi) @ cons.t.T

    cov = (cov + cov.T) / 2
    return cov
def __init__(self, x, eps, sigma, full_sigma, gls=False, debiased=False, constraints=None):
    super(HeteroskedasticCovariance, self).__init__(x, eps, sigma, full_sigma,
                                                    gls=gls,
                                                    debiased=debiased,
                                                    constraints=constraints)
    self._name = 'Heteroskedastic (Robust) Covariance'
    k = len(x)
    weights = inv(sigma) if gls else eye(k)
    bigx = blocked_diag_product(x, weights)
    nobs = eps.shape[0]
    e = eps.T.ravel()[:, None]
    bigxe = bigx * e
    m = bigx.shape[1]
    xe = zeros((nobs, m))
    for i in range(nobs):
        xe[i, :] = bigxe[i::nobs].sum(0)[None, :]
    self._moments = xe