Python numpy module: trace() code examples
The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.trace().
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""Returns the sum along the diagonals of an array.
It computes the sum along the diagonals at ``axis1`` and ``axis2``.
Args:
a (cupy.ndarray): Array to take trace.
offset (int): Index of diagonals. Zero indicates the main diagonal, a
positive value an upper diagonal, and a negative value a lower
diagonal.
axis1 (int): The first axis along which the trace is taken.
axis2 (int): The second axis along which the trace is taken.
dtype: Data type specifier of the output.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.
.. seealso:: :func:`numpy.trace`
"""
# TODO(okuta): check type
return a.trace(offset, axis1, axis2, dtype, out)
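Before the project examples, a minimal standalone sketch (not taken from any of the projects below) of what numpy.trace computes: the sum along the diagonal selected by offset, axis1 and axis2.

import numpy as np

a = np.arange(9).reshape(3, 3)
print(np.trace(a))                           # 0 + 4 + 8 = 12, the main diagonal
print(np.trace(a, offset=1))                 # 1 + 5 = 6, the first upper diagonal
b = np.arange(24).reshape(2, 3, 4)
print(np.trace(b, axis1=1, axis2=2).shape)   # (2,): one trace per leading index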
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
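A short sketch (assuming a unit normal n; not part of the project code) of why the doctest expects numpy.trace(R) == 2: the 3x3 Householder block I - 2*n*n^T has trace 3 - 2 = 1, and the homogeneous diagonal entry contributes another 1.

import numpy

n = numpy.array([1.0, 0.0, 0.0])                 # any unit normal works
H = numpy.identity(3) - 2.0 * numpy.outer(n, n)  # Householder reflection block
print(numpy.trace(H))                            # 1.0
R4 = numpy.identity(4)
R4[:3, :3] = H
print(numpy.trace(R4))                           # 2.0, matching the doctest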
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def trace(mpa, axes=(0, 1)):
"""Compute the trace of the given MPA.
If you specify axes (see partialtrace() for details), you must
ensure that the result has no physical legs anywhere.
:param mpa: MParray
:param axes: Axes for trace, ``(axis1, axis2)`` or ``(axes1, axes2, ...)``
with ``axesN=(axisN_1, axisN_2)`` or ``axesN=None``.
(default: ``(0, 1)``)
:returns: A single scalar of type ``mpa.dtype``
"""
out = partialtrace(mpa, axes)
out = out.to_array()
assert out.size == 1, 'trace must return a single scalar'
return out[None][0]
def test_sandwich(nr_sites, local_dim, rank, rgen, dtype):
mps = factory.random_mpa(nr_sites, local_dim, rank,
randstate=rgen, dtype=dtype, normalized=True)
mps2 = factory.random_mpa(nr_sites, local_dim, rank,
randstate=rgen, dtype=dtype, normalized=True)
mpo = factory.random_mpa(nr_sites, [local_dim] * 2, rank,
randstate=rgen, dtype=dtype)
mpo.canonicalize()
mpo /= mp.trace(mpo)
vec = mps.to_array().ravel()
op = mpo.to_array_global().reshape([local_dim**nr_sites] * 2)
res_arr = np.vdot(vec, np.dot(op, vec))
res_mpo = mp.inner(mps, mp.dot(mpo, mps))
res_sandwich = mp.sandwich(mpo, mps)
assert_almost_equal(res_mpo, res_arr)
assert_almost_equal(res_sandwich, res_arr)
vec2 = mps2.to_array().ravel()
res_arr = np.vdot(vec2, np.dot(op, vec))
res_mpo = mp.inner(mps2, mp.dot(mpo, mps))
res_sandwich = mp.sandwich(mpo, mps, mps2)
assert_almost_equal(res_mpo, res_arr)
assert_almost_equal(res_sandwich, res_arr)
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def test_dipolar_tensor(self):
# initial stupid test...
###### TODO : do a reasonable test!!! ######
p = np.array([[0.,0.,0.]])
        fc = np.array([[0.,0.,1.]], dtype=complex)
k = np.array([0.,0.,0.0])
phi= np.array([0.,])
mu = np.array([0.5,0.5,0.5])
sc = np.array([10,10,10],dtype=np.int32)
latpar = np.diag([2.,2.,2.])
r = 10.
res = lfcext.DipolarTensor(p,mu,sc,latpar,r)
np.testing.assert_array_almost_equal(res, np.zeros([3,3]))
mu = np.array([0.25,0.25,0.25])
res = lfcext.DipolarTensor(p,mu,sc,latpar,r)
np.testing.assert_array_almost_equal(np.trace(res), np.zeros([3]))
np.testing.assert_array_almost_equal(res, res.copy().T)
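A hedged pure-numpy sketch (the vector r and tensor D below are illustrative, not lfcext objects) of the property the last assertions check: a point-dipole coupling tensor D_ij = (3 r_i r_j - delta_ij |r|^2) / |r|^5 is symmetric and traceless.

import numpy as np

r = np.array([1.0, 2.0, 3.0])          # arbitrary example separation vector
r2 = np.dot(r, r)
D = (3.0 * np.outer(r, r) - r2 * np.eye(3)) / r2**2.5
print(np.isclose(np.trace(D), 0.0))    # True: the tensor is traceless
print(np.allclose(D, D.T))             # True: and symmetric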
def get_neg_log_post(Phi, sigma_J_list, ROI_list, G, MMT, q, Sigma_E, GL,
nu, V, prior_on = False):
eps = 1E-13
p = Phi.shape[0]
n_ROI = len(sigma_J_list)
Qu = Phi.dot(Phi.T)
G_Sigma_G = np.zeros(MMT.shape)
for i in range(n_ROI):
G_Sigma_G += sigma_J_list[i]**2 * np.dot(G[:,ROI_list[i]], G[:,ROI_list[i]].T)
cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T)
inv_cov = np.linalg.inv(cov)
eigs = np.real(np.linalg.eigvals(cov)) + eps
log_det_cov = np.sum(np.log(eigs))
result = q*log_det_cov + np.trace(MMT.dot(inv_cov))
if prior_on:
inv_Q = np.linalg.inv(Qu)
#det_Q = np.linalg.det(Qu)
log_det_Q = np.sum(np.log(np.diag(Phi)**2))
        result = result + float(nu+p+1)*log_det_Q + np.trace(V.dot(inv_Q))
return result
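A small sketch (illustrative matrix only) of the log-determinant trick used above: summing the log-eigenvalues of a positive definite covariance matches np.linalg.slogdet.

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(5, 5)
cov = A.dot(A.T) + 5.0 * np.eye(5)               # symmetric positive definite
log_det_eig = np.sum(np.log(np.real(np.linalg.eigvals(cov))))
sign, log_det = np.linalg.slogdet(cov)
print(np.allclose(log_det_eig, log_det))         # True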
#==============================================================================
# update both Qu and Sigma_J, gradient of Qu and Sigma J
def get_neg_log_post(Phi, sigma_J_list, ROI_list, G, MMT, q, Sigma_E, GL,
nu, V, prior_on = False):
eps = 1E-13
p = Phi.shape[0]
n_ROI = len(sigma_J_list)
Qu = Phi.dot(Phi.T)
G_Sigma_G = np.zeros(MMT.shape)
for i in range(n_ROI):
G_Sigma_G += sigma_J_list[i]**2 * np.dot(G[:,ROI_list[i]], G[:,ROI_list[i]].T)
cov = Sigma_E + G_Sigma_G + GL.dot(Qu).dot(GL.T)
inv_cov = np.linalg.inv(cov)
eigs = np.real(np.linalg.eigvals(cov)) + eps
log_det_cov = np.sum(np.log(eigs))
result = q*log_det_cov + np.trace(MMT.dot(inv_cov))
if prior_on:
inv_Q = np.linalg.inv(Qu)
#det_Q = np.linalg.det(Qu)
log_det_Q = np.sum(np.log(np.diag(Phi)**2))
        result = result + float(nu+p+1)*log_det_Q + np.trace(V.dot(inv_Q))
return result
#==============================================================================
# update both Qu and Sigma_J, gradient of Qu and Sigma J
def calc_mean_var_loss(epochsInds,loss_train):
    #Loss train is of dimension #epochs X #batches
num_of_epochs = loss_train.shape[0]
    #Average over the batches
loss_train_mean = np.mean(loss_train,1)
#The diff divided by the sampled indexes
d_mean_loss_to_dt = np.sqrt(np.abs(np.diff(loss_train_mean) / np.diff(epochsInds[:])))
var_loss = []
#Go over the epochs
for epoch_index in range(num_of_epochs):
        #The loss for the specific epoch
current_loss = loss_train[epoch_index, :]
        #The derivative between the batches
current_loss_dt = np.diff(current_loss)
        #The mean of its derivative
        average_loss = np.mean(current_loss_dt)
        current_loss_minus_mean = current_loss_dt - average_loss
        #The covariance between the batches
cov_mat = np.dot(current_loss_minus_mean[:, None], current_loss_minus_mean[None, :])
# The trace of the cov matrix
trac_cov = np.trace(cov_mat)
var_loss.append(trac_cov)
return np.array(var_loss), d_mean_loss_to_dt
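A one-line identity behind the loop above (values made up): the trace of the rank-one outer product of a centered vector is just its sum of squares.

import numpy as np

v = np.array([0.5, -1.0, 0.25])
print(np.trace(np.outer(v, v)), np.dot(v, v))   # both 1.3125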
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def process_fidelity(self, reference_unitary):
"""
Compute the quantum process fidelity of the estimated state with respect to a unitary
        process. For a non-sparse reference_unitary, this implementation will be expensive in
higher dimensions.
:param (qutip.Qobj|matrix-like) reference_unitary: A unitary operator that induces a process
as ``rho -> other*rho*other.dag()``, can also be a superoperator or Pauli-transfer matrix.
:return: The process fidelity, a real number between 0 and 1.
:rtype: float
"""
if isinstance(reference_unitary, qt.Qobj):
if not reference_unitary.issuper or reference_unitary.superrep != "super":
sother = qt.to_super(reference_unitary)
else:
sother = reference_unitary
tm_other = self.pauli_basis.transfer_matrix(sother)
else:
tm_other = csr_matrix(reference_unitary)
dimension = self.pauli_basis.ops[0].shape[0]
return np.trace(tm_other.T * self.r_est).real / dimension ** 2
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def one_time_from_two_time(two_time_corr):
"""
This will provide the one-time correlation data from two-time
correlation data.
Parameters
----------
two_time_corr : array
matrix of two time correlation
shape (number of labels(ROI's), number of frames, number of frames)
Returns
-------
one_time_corr : array
matrix of one time correlation
shape (number of labels(ROI's), number of frames)
"""
one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2]))
for g in two_time_corr:
for j in range(two_time_corr.shape[2]):
one_time_corr[:, j] = np.trace(g, offset=j)/two_time_corr.shape[2]
return one_time_corr
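A minimal sketch (made-up matrix) of how the inner loop works: np.trace(g, offset=j) sums the j-th upper diagonal of the two-time matrix.

import numpy as np

g = np.arange(16, dtype=float).reshape(4, 4)
print(np.trace(g, offset=0))   # 0 + 5 + 10 + 15 = 30
print(np.trace(g, offset=1))   # 1 + 6 + 11 = 18
print(np.trace(g, offset=2))   # 2 + 7 = 9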
def one_time_from_two_time(two_time_corr):
"""
This will provide the one-time correlation data from two-time
correlation data.
Parameters
----------
two_time_corr : array
matrix of two time correlation
shape (number of labels(ROI's), number of frames, number of frames)
Returns
-------
one_time_corr : array
matrix of one time correlation
shape (number of labels(ROI's), number of frames)
"""
one_time_corr = np.zeros((two_time_corr.shape[0], two_time_corr.shape[2]))
for g in two_time_corr:
for j in range(two_time_corr.shape[2]):
one_time_corr[:, j] = np.trace(g, offset=j)/two_time_corr.shape[2]
return one_time_corr
def __trace_middle_dims(sys, dims, reverse=True):
"""
Get system dimensions for __trace_middle.
Args:
        sys (int): system to trace over.
        dims (list[int]): dimensions of all subsystems.
reverse (bool): if true system-0 is right-most system tensor product.
Returns:
        Tuple (dim1, dim2, dim3)
"""
dpre = dims[:sys]
dpost = dims[sys + 1:]
if reverse:
dpre, dpost = (dpost, dpre)
dim1 = int(np.prod(dpre))
dim2 = int(dims[sys])
dim3 = int(np.prod(dpost))
return (dim1, dim2, dim3)
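A sketch of how (dim1, dim2, dim3) is typically consumed by a partial-trace helper such as the __trace_middle mentioned in the docstring (dimensions here are illustrative): reshape the matrix into subsystem blocks and trace out the middle one with np.trace's axis1/axis2.

import numpy as np

dim1, dim2, dim3 = 2, 3, 2
d = dim1 * dim2 * dim3
rho = np.eye(d) / d                                      # maximally mixed state
rho6 = rho.reshape(dim1, dim2, dim3, dim1, dim2, dim3)
reduced = np.trace(rho6, axis1=1, axis2=4)               # trace out the middle system
reduced = reduced.reshape(dim1 * dim3, dim1 * dim3)
print(np.isclose(np.trace(reduced), 1.0))                # True: trace is preserved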
def quadratic_loss(covariance, precision):
"""Computes ...
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the model to be tested
Returns
-------
Quadratic loss
"""
assert covariance.shape == precision.shape
dim, _ = precision.shape
return np.trace((np.dot(covariance, precision) - np.eye(dim))**2)
def multivariate_prior_KL(meanA, covA, meanB, covB):
    # KL[ qA | qB ] = E_{qA} \log [qA / qB] where qA and qB are
# K dimensional multivariate normal distributions.
# Analytically tractable and equal to...
# 0.5 * (Tr(covB^{-1} covA) + (meanB - meanA)^T covB^{-1} (meanB - meanA)
# - K + log(det(covB)) - log (det(covA)))
K = covA.shape[0]
traceTerm = 0.5 * np.trace(np.linalg.solve(covB, covA))
delta = meanB - meanA
mahalanobisTerm = 0.5 * np.dot(delta.T, np.linalg.solve(covB, delta))
constantTerm = -0.5 * K
priorLogDeterminantTerm = 0.5*np.linalg.slogdet(covB)[1]
variationalLogDeterminantTerm = -0.5 * np.linalg.slogdet(covA)[1]
return (traceTerm +
mahalanobisTerm +
constantTerm +
priorLogDeterminantTerm +
variationalLogDeterminantTerm)
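A quick usage check, assuming multivariate_prior_KL above is in scope: the KL divergence between identical Gaussians is zero.

import numpy as np

mean = np.zeros(3)
cov = np.diag([1.0, 2.0, 3.0])
print(np.isclose(multivariate_prior_KL(mean, cov, mean, cov), 0.0))  # True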
def contract_internal(self, label1, label2, index1=0, index2=0):
"""By default will contract the first index with label1 with the
first index with label2. index1 and index2 can be specified to contract
indices that are not the first with the specified label."""
label1_indices = [i for i, x in enumerate(self.labels) if x == label1]
label2_indices = [i for i, x in enumerate(self.labels) if x == label2]
index_to_contract1 = label1_indices[index1]
index_to_contract2 = label2_indices[index2]
self.data = np.trace(self.data, axis1=index_to_contract1, axis2=
index_to_contract2)
# The following removes the contracted indices from the list of labels
self.labels = [label for j, label in enumerate(self.labels)
if j not in [index_to_contract1, index_to_contract2]]
# aliases for contract_internal
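A standalone sketch (illustrative tensor) of the contraction performed above: tracing two axes of the same array with np.trace matches the corresponding einsum.

import numpy as np

T = np.arange(2 * 3 * 2, dtype=float).reshape(2, 3, 2)
contracted = np.trace(T, axis1=0, axis2=2)                 # shape (3,)
print(np.allclose(contracted, np.einsum('iji->j', T)))     # True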
def estimate_cov(self, samples, mean):
"""
Estimate the empirical covariance of the weight vectors, possibly
with regularization.
"""
d = mean.shape[0]
# Accumulate statistics
Sigma = np.zeros((d, d))
for t in range(len(samples)):
zm = samples[t] - mean
Sigma = Sigma + zm.dot(zm.T)
# Normalize factor of estimate
if self._norm_style == 'ML':
norm = 1.0/(len(samples)-1)
elif self._norm_style == 'Trace':
norm = 1.0/np.trace(Sigma)
else:
raise ValueError('Norm style {} not known'.format(self._norm_style))
Sigma = norm*Sigma
# Add diagonal loading term
self.diag_eps = 0.1*np.mean(np.abs(np.linalg.eig(Sigma)[0])) # TODO
return Sigma + self.diag_eps*self._id
def compute_gradient_totalcverr_wrt_lambda(self,matrix_results,lambda_val,sigmasq_z):
# 0: K_tst_tr; 1: K_tr_tr; 2: D_tst_tr; 3: D_tr_tr
num_sample_cv = self.num_samples
ttl_num_folds = np.shape(matrix_results)[1]
gradient_cverr_per_fold = np.zeros(ttl_num_folds)
for jj in range(ttl_num_folds):
uu = np.shape(matrix_results[3][jj])[0] # number of training samples
M_tst_tr = exp(matrix_results[2][jj]*float(-1/2)*sigmasq_z**(-1))
M_tr_tr = exp(matrix_results[3][jj]*float(-1/2)*sigmasq_z**(-1))
lower_ZZ = cholesky(M_tr_tr+ lambda_val*eye(uu), lower=True)
ZZ = cho_solve((lower_ZZ,True),eye(uu))
first_term = matrix_results[0][jj].dot(ZZ.dot(ZZ.dot(M_tst_tr.T)))
second_term = M_tst_tr.dot(ZZ.dot(ZZ.dot(
matrix_results[1][jj].dot(ZZ.dot(M_tst_tr.T)))))
gradient_cverr_per_fold[jj] = trace(first_term-second_term)
return 2*sum(gradient_cverr_per_fold)/float(num_sample_cv)
# lambda = exp(eta)
def compute_gradient_totalcverr_wrt_sqsigma(self,matrix_results,lambda_val,sigmasq_z):
# 0: K_tst_tr; 1: K_tr_tr; 2: D_tst_tr; 3: D_tr_tr
num_sample_cv = self.num_samples
ttl_num_folds = np.shape(matrix_results)[1]
gradient_cverr_per_fold = np.zeros(ttl_num_folds)
for jj in range(ttl_num_folds):
uu = np.shape(matrix_results[3][jj])[0]
log_M_tr_tst = matrix_results[2][jj].T*float(-1/2)*sigmasq_z**(-1)
M_tr_tst = exp(log_M_tr_tst)
log_M_tr_tr = matrix_results[3][jj]*float(-1/2)*sigmasq_z**(-1)
M_tr_tr = exp(log_M_tr_tr)
lower_ZZ = cholesky(M_tr_tr+ lambda_val*eye(uu), lower=True)
ZZ = cho_solve((lower_ZZ,True),eye(uu))
term_1 = matrix_results[0][jj].dot(ZZ.dot((M_tr_tr*sigmasq_z**(-1)*(-log_M_tr_tr)).dot(ZZ.dot(M_tr_tst))))
term_2 = -matrix_results[0][jj].dot(ZZ.dot(M_tr_tst*(-log_M_tr_tst*sigmasq_z**(-1))))
term_3 = (sigmasq_z**(-1)*(M_tr_tst.T)*(-log_M_tr_tst.T)).dot(ZZ.dot(matrix_results[1][jj].dot(ZZ.dot(M_tr_tst))))
term_4 = -(M_tr_tst.T).dot(ZZ.dot((M_tr_tr*sigmasq_z**(-1)*(-log_M_tr_tr)).dot(ZZ.dot(matrix_results[1][jj].dot(
ZZ.dot(M_tr_tst))))))
term_5 = -(M_tr_tst.T).dot(ZZ.dot(matrix_results[1][jj].dot(ZZ.dot((M_tr_tr*sigmasq_z**(-1)*(-log_M_tr_tr)).dot(
ZZ.dot(M_tr_tst))))))
term_6 = (M_tr_tst.T).dot(ZZ.dot(matrix_results[1][jj].dot(ZZ.dot(M_tr_tst*sigmasq_z**(-1)*(-log_M_tr_tst)))))
gradient_cverr_per_fold[jj] = trace(2*term_1 + 2*term_2 + term_3 + term_4 + term_5 + term_6)
return sum(gradient_cverr_per_fold)/float(num_sample_cv)
def compute_totalcverr(self,matrix_results,lambda_val,sigmasq_z):
# 0: K_tst_tr; 1: K_tr_tr; 2: K_tst_tst; 3: D_tst_tr; 4: D_tr_tr
num_sample_cv = self.num_samples
ttl_num_folds = np.shape(matrix_results)[1]
cverr_per_fold = np.zeros(ttl_num_folds)
for jj in range(ttl_num_folds):
uu = np.shape(matrix_results[4][jj])[0] # number of training samples
M_tst_tr = exp(matrix_results[3][jj]*float(-1/2)*sigmasq_z**(-1))
M_tr_tr = exp(matrix_results[4][jj]*float(-1/2)*sigmasq_z**(-1))
lower_ZZ = cholesky(M_tr_tr+ lambda_val*eye(uu), lower=True)
ZZ = cho_solve((lower_ZZ,True),eye(uu))
first_term = matrix_results[2][jj]
second_term = - matrix_results[0][jj].dot(ZZ.dot(M_tst_tr.T))
third_term = np.transpose(second_term)
fourth_term = M_tst_tr.dot(ZZ.dot(
matrix_results[1][jj].dot(ZZ.dot(M_tst_tr.T))))
cverr_per_fold[jj] = trace(first_term + second_term + third_term + fourth_term)
return sum(cverr_per_fold)/float(num_sample_cv)
def covariance_distance_from_matrices(m1, m2, mul_factor=1):
"""
Covariance distance between matrices m1 and m2, defined as
    d = factor * (1 - trace(m1 * m2) / (norm_fro(m1) * norm_fro(m2)))
:param m1: matrix
:param m2: matrix
    :param mul_factor: multiplicative factor for the formula; it equals the maximal value the distance can reach
    :return: mul_factor * (1 - np.trace(m1.dot(m2)) / (np.linalg.norm(m1, ord='fro') * np.linalg.norm(m2, ord='fro')))
"""
    if not (np.isnan(m1).any() or np.isnan(m2).any()):
return \
mul_factor * (1 - (np.trace(m1.dot(m2)) / (np.linalg.norm(m1, ord='fro') * np.linalg.norm(m2, ord='fro'))))
else:
return np.nan
# --- global distances: (segm, segm) |-> real
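A usage sketch, assuming covariance_distance_from_matrices above is in scope (matrix values are made up): for a symmetric matrix m, trace(m.dot(m)) equals its squared Frobenius norm, so the distance of m to itself is numerically zero.

import numpy as np

m = np.array([[2.0, 0.5],
              [0.5, 1.0]])
print(np.isclose(covariance_distance_from_matrices(m, m), 0.0))   # True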
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def test_sum_bit0(self):
n = 32
x = np.random.random(n)
# x = np.arange(n).astype(np.float64)
x_gpu = drv.to_device(x)
trace(
x_gpu, np.int32(0), block=(
n, 1, 1), grid=(
1, 1, 1), shared=8 * 128)
x2 = drv.from_device_like(x_gpu, x)
print(x)
print(x2)
assert np.allclose(x2[1], np.sum(x[::2]))
assert np.allclose(x2[0], np.sum(x[1::2]))
def test_sum_bit1(self):
n = 32
x = np.random.random(n)
# x = np.arange(n).astype(np.float64)
x_gpu = drv.to_device(x)
trace(
x_gpu, np.int32(1), block=(
n, 1, 1), grid=(
1, 1, 1), shared=8 * 128)
x2 = drv.from_device_like(x_gpu, x)
print(x)
print(x2)
assert np.allclose(x2[1], np.sum(x[::4]) + np.sum(x[1::4]))
assert np.allclose(x2[0], np.sum(x[2::4]) + np.sum(x[3::4]))
def test_preserve_trace_ground_state(self, dm):
dm.hadamard(2)
assert np.allclose(dm.trace(), 1)
dm.hadamard(4)
assert np.allclose(dm.trace(), 1)
dm.hadamard(0)
assert np.allclose(dm.trace(), 1)
# @pytest.mark.skip
# def test_squares_to_one(self, dm_random):
# dm = dm_random
# a0 = dm.to_array()
# dm.hadamard(4)
# dm.hadamard(4)
# # dm.hadamard(2)
# # dm.hadamard(2)
# # dm.hadamard(0)
# # dm.hadamard(0)
# a1 = dm.to_array()
# assert np.allclose(np.triu(a0), np.triu(a1))
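A plain-numpy sketch of the property these tests check (it does not use the density-matrix backend above): conjugating a density matrix by a unitary, here a Hadamard, preserves its trace.

import numpy as np

H = np.array([[1.0, 1.0],
              [1.0, -1.0]]) / np.sqrt(2.0)   # Hadamard gate
rho = np.array([[1.0, 0.0],
                [0.0, 0.0]])                 # ground state |0><0|
rho2 = H.dot(rho).dot(H.conj().T)
print(np.isclose(np.trace(rho2), 1.0))       # True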
def norm_fro_err(X, W, H, norm_X):
""" Compute the approximation error in Frobeinus norm
norm(X - W.dot(H.T)) is efficiently computed based on trace() expansion
when W and H are thin.
Parameters
----------
X : numpy.array or scipy.sparse matrix, shape (m,n)
W : numpy.array, shape (m,k)
H : numpy.array, shape (n,k)
norm_X : precomputed norm of X
Returns
-------
float
"""
sum_squared = norm_X * norm_X - 2 * np.trace(H.T.dot(X.T.dot(W))) \
+ np.trace((W.T.dot(W)).dot(H.T.dot(H)))
return math.sqrt(np.maximum(sum_squared, 0))
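A usage check of the trace expansion, assuming norm_fro_err above (and its module's math import) is in scope; shapes are illustrative.

import numpy as np

rng = np.random.RandomState(0)
X = rng.rand(6, 5)
W = rng.rand(6, 2)
H = rng.rand(5, 2)
direct = np.linalg.norm(X - W.dot(H.T))
print(np.isclose(norm_fro_err(X, W, H, np.linalg.norm(X)), direct))   # True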
def sdp_km(D, n_clusters):
ones = np.ones((D.shape[0], 1))
Z = cp.Semidef(D.shape[0])
objective = cp.Maximize(cp.trace(D * Z))
constraints = [Z >= 0,
Z * ones == ones,
cp.trace(Z) == n_clusters]
prob = cp.Problem(objective, constraints)
prob.solve(solver=cp.SCS, verbose=False)
Q = np.asarray(Z.value)
rs = Q.sum(axis=1)
print('Q', Q.min(), Q.max(), '|',
rs.min(), rs.max(), '|',
np.trace(Q), np.trace(D.dot(Q)))
print('Final objective', np.trace(D.dot(Q)))
return np.asarray(Z.value)
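A pure-numpy sketch (cluster sizes are made up; no cvxpy needed) of why trace(Z) == n_clusters is the natural constraint: an ideal block-diagonal solution with 1/n_c blocks has exactly that trace and rows summing to one.

import numpy as np

Z = np.block([[np.full((3, 3), 1.0 / 3), np.zeros((3, 2))],
              [np.zeros((2, 3)),         np.full((2, 2), 0.5)]])
print(np.trace(Z))                        # 2.0 == number of clusters
print(np.allclose(Z.sum(axis=1), 1.0))    # True: each row sums to one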
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2, numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def neg_log_likelihood(self, Lam, Theta, fixed, vary):
"compute the negative log-likelihood of the GCRF"
return -log(np.linalg.det(Lam)) + \
np.trace(np.dot(fixed.Syy, Lam) + \
2*np.dot(fixed.Sxy.T, Theta) + \
np.dot(vary.Psi, Lam))
def neg_log_likelihood_wrt_Lam(self, Lam, fixed, vary):
# compute the negative log-likelihood of the GCRF when Theta is fixed
return -log(np.linalg.det(Lam)) + \
np.trace(np.dot(fixed.Syy, Lam) + \
np.dot(vary.Psi, Lam))
def check_descent(self, newton_lambda, alpha, fixed, vary):
        # check if we have made sufficient descent
DLam = np.trace(np.dot(self.grad_wrt_Lam(fixed, vary), newton_lambda)) + \
self.lamL * np.linalg.norm(self.Lam + newton_lambda, ord=1) - \
self.lamL * np.linalg.norm(self.Lam, ord=1)
nll_a = self.l1_neg_log_likelihood_wrt_Lam(self.Lam + alpha * newton_lambda, fixed, vary)
nll_b = self.l1_neg_log_likelihood_wrt_Lam(self.Lam, fixed, vary) + alpha * self.slack * DLam
return nll_a <= nll_b
def check_descent2(self, newton_lambda, alpha, fixed, vary):
lhs = self.l1_neg_log_likelihood(self.Lam + alpha*newton_lambda, self.Theta, fixed, vary)
mu = np.trace(np.dot(self.grad_wrt_Lam(fixed, vary), newton_lambda)) + \
self.lamL*self.l1_norm_off_diag(self.Lam + newton_lambda) +\
self.lamT*np.linalg.norm(self.Theta, ord=1)
rhs = self.neg_log_likelihood(self.Lam, self.Theta, fixed, vary) +\
alpha * self.slack * mu
return lhs <= rhs
def updateTransform(self):
muX = np.divide(np.sum(np.dot(self.P, self.X), axis=0), self.Np)
muY = np.divide(np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)
self.XX = self.X - np.tile(muX, (self.N, 1))
YY = self.Y - np.tile(muY, (self.M, 1))
self.A = np.dot(np.transpose(self.XX), np.transpose(self.P))
self.A = np.dot(self.A, YY)
U, _, V = np.linalg.svd(self.A, full_matrices=True)
C = np.ones((self.D, ))
C[self.D-1] = np.linalg.det(np.dot(U, V))
self.R = np.dot(np.dot(U, np.diag(C)), V)
self.YPY = np.dot(np.transpose(self.P1), np.sum(np.multiply(YY, YY), axis=1))
self.s = np.trace(np.dot(np.transpose(self.A), self.R)) / self.YPY
self.t = np.transpose(muX) - self.s * np.dot(self.R, np.transpose(muY))
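A sketch (random illustrative A) of the identity behind self.s above: with R built from the SVD of A and the last singular direction sign-corrected, np.trace(A.T @ R) equals the sum of the sign-corrected singular values.

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(3, 3)
U, S, Vt = np.linalg.svd(A)
C = np.ones(3)
C[-1] = np.linalg.det(U.dot(Vt))                          # +1 or -1 sign correction
R = U.dot(np.diag(C)).dot(Vt)
print(np.isclose(np.trace(A.T.dot(R)), np.sum(S * C)))    # True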
def updateVariance(self):
qprev = self.q
trAR = np.trace(np.dot(self.A, np.transpose(self.R)))
xPx = np.dot(np.transpose(self.Pt1), np.sum(np.multiply(self.XX, self.XX), axis =1))
self.q = (xPx - 2 * self.s * trAR + self.s * self.s * self.YPY) / (2 * self.sigma2) + self.D * self.Np/2 * np.log(self.sigma2)
self.err = np.abs(self.q - qprev)
self.sigma2 = (xPx - self.s * trAR) / (self.Np * self.D)
if self.sigma2 <= 0:
self.sigma2 = self.tolerance / 10
def updateVariance(self):
qprev = self.q
trAB = np.trace(np.dot(self.A, np.transpose(self.B)))
xPx = np.dot(np.transpose(self.Pt1), np.sum(np.multiply(self.XX, self.XX), axis =1))
trBYPYP = np.trace(np.dot(np.dot(self.B, self.YPY), np.transpose(self.B)))
self.q = (xPx - 2 * trAB + trBYPYP) / (2 * self.sigma2) + self.D * self.Np/2 * np.log(self.sigma2)
self.err = np.abs(self.q - qprev)
self.sigma2 = (xPx - trAB) / (self.Np * self.D)
if self.sigma2 <= 0:
self.sigma2 = self.tolerance / 10
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
True
"""
return numpy.identity(4, dtype=numpy.float64)
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
    >>> v3[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
l, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
l, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
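A quick check (theta is an arbitrary example angle) of the relation used above: for a 3x3 rotation, trace(R33) = 1 + 2*cos(angle).

import math
import numpy

theta = 0.7
Rz = numpy.array([[math.cos(theta), -math.sin(theta), 0.0],
                  [math.sin(theta),  math.cos(theta), 0.0],
                  [0.0,              0.0,             1.0]])
cosa = (numpy.trace(Rz) - 1.0) / 2.0
print(numpy.isclose(cosa, math.cos(theta)))   # True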
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
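A sketch (factor and direction are made up) of why factor = trace(M33) - 2: a directional scaling block has the form I + (factor - 1) * outer(d, d) for a unit direction d, whose trace is factor + 2.

import numpy

factor = 2.5
d = numpy.array([1.0, 0.0, 0.0])                  # unit direction
M33 = numpy.identity(3) + (factor - 1.0) * numpy.outer(d, d)
print(numpy.trace(M33) - 2.0)                     # 2.5, recovering the factor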
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4))
True
"""
return numpy.identity(4)
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
w, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
w, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
# Function to translate handshape coding to degrees of rotation, adduction, flexion
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
w, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
# Function to translate handshape coding to degrees of rotation, adduction, flexion
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
True
"""
return numpy.identity(4, dtype=numpy.float64)