Python numpy module: abs() example source code
We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.abs(). The snippets are reproduced as they appear in their projects, so each one assumes its own module-level imports (typically import numpy as np or import numpy) and project-specific helpers.
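As a quick refresher before the project snippets (this example is ours, not taken from any of the projects below), np.abs is a ufunc that returns element-wise absolute values and gives the magnitude of complex input:

import numpy as np

print(np.abs(-3.5))                    # 3.5
print(np.abs(np.array([-1, 2, -3])))   # [1 2 3]
print(np.abs(3 + 4j))                  # 5.0 (complex magnitude)
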
def roll_zeropad(a, shift, axis=None):
    a = np.asanyarray(a)
    if shift == 0: return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift, n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift, n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res

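A minimal usage sketch (our addition, assuming the roll_zeropad above is in scope): it behaves like np.roll except that shifted-in elements are zeros instead of wrapped values.

import numpy as np

x = np.array([1, 2, 3, 4, 5])
print(roll_zeropad(x, 2))    # [0 0 1 2 3]
print(roll_zeropad(x, -2))   # [3 4 5 0 0]
print(np.roll(x, 2))         # [4 5 1 2 3] (wrap-around, for comparison)
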
def test_quantize_from_probs2(size, resolution):
    set_random_seed(make_seed(size, resolution))
    probs = np.exp(np.random.random(size)).astype(np.float32)
    probs2 = probs.reshape((1, size))
    quantized = quantize_from_probs2(probs2, resolution)
    assert quantized.shape == probs2.shape
    assert quantized.dtype == np.int8
    assert np.all(quantized.sum(axis=1) == resolution)
    # Check that quantized result is closer to target than any other value.
    quantized = quantized.reshape((size, ))
    target = resolution * probs / probs.sum()
    distance = np.abs(quantized - target).sum()
    for combo in itertools.combinations(range(size), resolution):
        other = np.zeros(size, np.int8)
        for i in combo:
            other[i] += 1
        assert other.sum() == resolution
        other_distance = np.abs(other - target).sum()
        assert distance <= other_distance

def contest(self, b, g, r):
    """ Search for biased BGR values
    Finds closest neuron (min dist) and updates self.freq
    finds best neuron (min dist-self.bias) and returns position
    for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
    self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
    i, j = self.SPECIALS, self.NETSIZE
    dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
    bestpos = i + np.argmin(dists)
    biasdists = dists - self.bias[i:j]
    bestbiaspos = i + np.argmin(biasdists)
    self.freq[i:j] *= (1 - self.BETA)
    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
    self.freq[bestpos] += self.BETA
    self.bias[bestpos] -= self.BETAGAMMA
    return bestbiaspos

def Kdim(self, kdimParams):
    if (self.prevKdimParams is not None and np.max(np.abs(kdimParams-self.prevKdimParams)) < self.epsilon): return self.cache['Kdim']
    K = np.zeros((self.n, self.n, len(self.kernels)))
    params_ind = 0
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        kernelParams_range = np.array(xrange(params_ind, params_ind+numHyp), dtype=np.int)
        kernel_params = kdimParams[kernelParams_range]
        if ((numHyp == 0 and 'Kdim' in self.cache) or (numHyp > 0 and self.prevKdimParams is not None and np.max(np.abs(kernel_params-self.prevKdimParams[kernelParams_range])) < self.epsilon)):
            K[:, :, k_i] = self.cache['Kdim'][:, :, k_i]
        else:
            K[:, :, k_i] = k.getTrainKernel(kernel_params)
        params_ind += numHyp
    self.prevKdimParams = kdimParams.copy()
    self.cache['Kdim'] = K
    return K

def mypsd(Rates, time_range, bin_w=5., nmax=4000):
    bins = np.arange(0, len(time_range), 1)
    #print bins
    a, b = np.histogram(Rates, bins)
    ff = (1. / len(bins)) * abs(np.fft.fft(Rates - np.mean(Rates))) ** 2
    Fs = 1. / (1 * 0.001)
    freq2 = np.fft.fftfreq(len(bins))[0:len(bins) // 2 + 1]  # d= dt
    freq = np.fft.fftfreq(len(bins))[:len(ff) // 2 + 1]
    px = ff[0:len(ff) // 2 + 1]
    max_px = np.max(px[1:])
    idx = px == max_px
    corr_freq = freq[pl.find(idx)]
    new_px = px
    max_pow = new_px[pl.find(idx)]
    return new_px, freq, corr_freq[0], freq2, max_pow

def scale_variance(Theta, eps):
    """Scales a precision matrix so that its
    corresponding covariance has unit variance

    Parameters
    ----------
    Theta: ndarray
        Precision matrix
    eps: float
        values below this threshold are set to zero

    Returns
    -------
    Theta: ndarray
        Precision of the rescaled Sigma
    Sigma: ndarray
        Sigma with ones on the diagonal
    """
    Sigma = np.linalg.inv(Theta)
    V = np.diag(np.sqrt(np.diag(Sigma) ** -1))
    Sigma = V.dot(Sigma).dot(V.T)  # = VSV
    Theta = np.linalg.inv(Sigma)
    Theta[np.abs(Theta) <= eps] = 0.
    return Theta, Sigma

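A minimal usage sketch (our addition) checking that the covariance returned by scale_variance has unit variances:

import numpy as np

Theta = np.array([[2.0, 0.3],
                  [0.3, 1.5]])            # small SPD precision matrix, purely illustrative
Theta_r, Sigma_r = scale_variance(Theta, eps=1e-8)
print(np.diag(Sigma_r))                   # [1. 1.]
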
def idamax(a):
    """ Returns the index of maximum absolute value (positive or negative)
    in the input array a.

    Note: Loosely based on a subroutine in GAMESS with the same name

    Arguments:
    a -- a numpy array where we are to find the maximum
         value in (either positive or negative)

    Returns:
    the index in the array where the maximum value is.
    """
    idx = -1
    v = 0.0
    for i, value in enumerate(numpy.abs(a)):
        if value > v:
            idx = i
            v = value
    return idx

def idamin(a):
    """ Returns the index of minimum absolute value (positive or negative)
    in the input array a.

    Arguments:
    a -- a numpy array where we are to find the minimum
         value in (either positive or negative)

    Returns:
    the index in the array where the minimum value is.
    """
    idx = -1
    v = 1.0e30
    for i, value in enumerate(numpy.abs(a)):
        if value < v:
            idx = i
            v = value
    return idx

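A quick check of the two helpers above (our addition; the helpers themselves need import numpy in scope). The equivalent numpy one-liners use np.argmax / np.argmin on np.abs:

import numpy
import numpy as np

a = numpy.array([3.0, -7.5, 1.2, 0.4])
print(idamax(a), int(np.argmax(np.abs(a))))  # 1 1
print(idamin(a), int(np.argmin(np.abs(a))))  # 3 3
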
def fCauchy(ftrue, alpha, p):
    """Returns Cauchy model noisy value

    Cauchy with median 1e3*alpha and with p=0.2, zero otherwise

    P(Cauchy > 1,10,100,1000) = 0.25, 0.032, 0.0032, 0.00032
    """
    # expects ftrue to be a np.array
    popsi = np.shape(ftrue)
    fval = ftrue + alpha * np.maximum(0., 1e3 + (_rand(popsi) < p) *
                                      _randn(popsi) / (np.abs(_randn(popsi)) + 1e-199))
    tol = 1e-8
    fval = fval + 1.01 * tol
    idx = ftrue < tol
    try:
        fval[idx] = ftrue[idx]
    except IndexError:  # fval is a scalar
        if idx:
            fval = ftrue
    return fval

### CLASS DEFINITION ###
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = compute_xopt(self.rseed, dim)
        self.xopt[:min(dim, self.maxindex):2] = abs(self.xopt[:min(dim, self.maxindex):2])
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
        self.arrscales = resize(self.scales, curshape)

def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors
    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)
    # BOUNDARY HANDLING
    fadd = fadd + self.boundaryhandling(x)
    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt  # cannot be replaced with x -= arrxopt!
    x = dot(x, self.rotation)
    # COMPUTATION core
    ftrue = np.sqrt(np.sum(np.abs(x) ** self.arrexpo, -1))
    fval = self.noise(ftrue)
    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue

def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633
        self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(2 * np.abs(self.xopt), curshape)
        self.arrscales = resize(self.scales, curshape)
        self.arrsigns = resize(sign(self.xopt), curshape)

def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in range(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)

def update_measure(self):
    """updated noise level measure using two fitness lists ``self.fit`` and
    ``self.fitre``, return ``self.noiseS, all_individual_measures``.

    Assumes that ``self.idx`` contains the indices where the fitness
    lists differ.
    """
    lam = len(self.fit)
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])
    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]
    # compute measurement
    # max: 1 rankchange in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s

def rho(self, points):
    """ Computes the goodness of fit.
    """
    assert self._solved, 'you need to solve first.'
    m, n = self.A.shape
    #numer = [ np.abs(np.dot(self.c, point) - np.dot(self.dual, self.b)) / np.abs(np.dot(self.dual, self.b)) for point in points ]
    numer = [np.abs(np.dot(self.c, point) - 1) for point in points]
    numer = sum(numer)
    denom = 0
    for i in range(m):
        #denomTerm = [ np.abs(np.dot(self.A[i], point) - self.b[i]) / np.abs(self.b[i]) for point in points ]
        denomTerm = [
            np.abs(
                np.dot(self.A[i] / np.linalg.norm(
                    self.A[i].T, self.normalize_c), point) - 1)
            for point in points
        ]
        denom += sum(denomTerm)
    rho = 1 - numer / denom
    return rho[0, 0]

def rho(self, points):
    """ Computes the goodness of fit.
    """
    assert self._solved, 'you need to solve first.'
    m, n = self.A.shape
    numer = [
        np.abs(np.dot(self.c, point) - np.dot(self.dual, self.b))
        for point in points
    ]
    numer = sum(numer)
    denom = 0
    for i in range(m):
        denomTerm = [
            np.abs(np.dot(self.A[i], point) - self.b[i]) / np.linalg.norm(
                self.A[i].T, self.normalize_c) for point in points
        ]
        denom += sum(denomTerm)
    rho = 1 - numer / denom
    return rho[0, 0]

def find_outliers(data):
    absolute_normalized = np.abs(zscore(data))
    return absolute_normalized > 3

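A small usage sketch (our addition, assuming from scipy.stats import zscore as in the source project): points more than three standard deviations from the mean are flagged.

import numpy as np
from scipy.stats import zscore

data = np.concatenate([np.zeros(30), [10.0]])
mask = find_outliers(data)
print(np.where(mask)[0])  # [30] -- only the injected outlier is flagged
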
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    bin_range = (0, 360)
    a = (np.arange(*bin_range))
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size': 8})
    return f

#Function copied from openPIV pyprocess
def update(self, params, grads):
    # init
    self.iterations += 1
    a_t = self.lr / (1 - np.power(self.beta1, self.iterations))
    if self.ms is None:
        self.ms = [_zero(p.shape) for p in params]
    if self.vs is None:
        self.vs = [_zero(p.shape) for p in params]
    # update parameters
    for i, (m, v, p, g) in enumerate(zip(self.ms, self.vs, params, grads)):
        m = self.beta1 * m + (1 - self.beta1) * g
        v = np.maximum(self.beta2 * v, np.abs(g))
        p -= a_t * m / (v + self.epsilon)
        self.ms[i] = m
        self.vs[i] = v

def resize_image(image, target_shape, pad_value=0):
    assert isinstance(target_shape, list) or isinstance(target_shape, tuple)
    add_shape, subs_shape = [], []
    image_shape = image.shape
    shape_difference = np.asarray(target_shape, dtype=int) - np.asarray(image_shape, dtype=int)
    for diff in shape_difference:
        if diff < 0:
            subs_shape.append(np.s_[int(np.abs(np.ceil(diff / 2))):int(np.floor(diff / 2))])
            add_shape.append((0, 0))
        else:
            subs_shape.append(np.s_[:])
            add_shape.append((int(np.ceil(1.0 * diff / 2)), int(np.floor(1.0 * diff / 2))))
    output = np.pad(image, tuple(add_shape), 'constant', constant_values=(pad_value, pad_value))
    output = output[tuple(subs_shape)]
    return output

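A minimal sketch of the function above (our addition): pad a small array up to the target shape, or centre-crop it down.

import numpy as np

print(resize_image(np.ones((3, 3)), (5, 5), pad_value=0).shape)  # (5, 5), zero-padded
print(resize_image(np.ones((5, 5)), (3, 3)).shape)               # (3, 3), centre crop
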
def __test_ks(self, x):
    x = x[~np.isnan(x)]
    n = x.size
    x.sort()
    yCDF = np.arange(1, n + 1) / float(n)
    notdup = np.hstack([np.diff(x, 1), [1]])
    notdup = notdup > 0
    x_expcdf = x[notdup]
    y_expcdf = np.hstack([[0], yCDF[notdup]])
    zScores = (x_expcdf - np.mean(x)) / np.std(x, ddof=1)
    mu = 0
    sigma = 1
    theocdf = 0.5 * erfc(-(zScores - mu) / (np.sqrt(2) * sigma))
    delta1 = y_expcdf[:-1] - theocdf
    delta2 = y_expcdf[1:] - theocdf
    deltacdf = np.abs(np.hstack([delta1, delta2]))
    KSmax = deltacdf.max()
    return KSmax

def __test_ks(self, x):
    x = x[~np.isnan(x)]
    n = x.size
    x.sort()
    yCDF = np.arange(1, n + 1) / float(n)
    notdup = np.hstack([np.diff(x, 1), [1]])
    notdup = notdup > 0
    x_expcdf = x[notdup]
    y_expcdf = np.hstack([[0], yCDF[notdup]])
    zScores = (x_expcdf - np.mean(x)) / np.std(x, ddof=1)
    mu = 0
    sigma = 1
    theocdf = 0.5 * erfc(-(zScores - mu) / (np.sqrt(2) * sigma))
    delta1 = y_expcdf[:-1] - theocdf
    delta2 = y_expcdf[1:] - theocdf
    deltacdf = np.abs(np.hstack([delta1, delta2]))
    KSmax = deltacdf.max()
    return KSmax

def verify_result(result, expected, float_cmp, debug):
    if not isinstance(result, pd.core.series.Series):
        if result == expected:
            print('TEST OK')
            return
        else:
            print('TEST Failed.')
            return
    result = result.dropna()
    expected = expected.dropna()
    if debug:
        print('RESULT:')
        print(result)
        print('EXPECTED:')
        print(expected)
    if float_cmp:
        cmp = (np.abs(result - expected) < 2.631048e-06)
    else:
        cmp = (result == expected)
    if len(cmp[cmp == False]) > 0:
        print('TEST Failed.')
        return
    print('TEST OK.')
    return

def Saliency_map(image, model, preprocess, ground_truth, use_gpu=False, method=util.GradType.GUIDED):
    vis_param_dict['method'] = method
    img_tensor = preprocess(image)
    img_tensor.unsqueeze_(0)
    if use_gpu:
        img_tensor = img_tensor.cuda()
    input = Variable(img_tensor, requires_grad=True)
    if input.grad is not None:
        input.grad.data.zero_()
    model.zero_grad()
    output = model(input)
    ind = torch.LongTensor(1)
    if isinstance(ground_truth, np.int64):
        ground_truth = np.asscalar(ground_truth)
    ind[0] = ground_truth
    ind = Variable(ind)
    energy = output[0, ground_truth]
    energy.backward()
    grad = input.grad
    if use_gpu:
        return np.abs(grad.data.cpu().numpy()[0]).max(axis=0)
    return np.abs(grad.data.numpy()[0]).max(axis=0)

def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    calculate image space scaling factor
    '''
    # import types
    # if alpha is types.ComplexType:
    alpha = numpy.real(alpha)
    # print('complex alpha may not work, but I just let it as')
    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
    return sn

def nufft_T(N, J, K, alpha, beta):
    '''
    Equations (29) and (26) in Fessler's paper:
    create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points, and then find the pseudo-inverse of CSSC. '''
    # import scipy.linalg
    L = numpy.size(alpha) - 1
    # print('L = ', L, 'J = ', J, 'a b', alpha, beta)
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
            # if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
            # if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)
            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * numpy.conj(alf2) * tmp
    return mat_inv(cssc)

def fit(self, X, y=None):
    old_threshold = None
    threshold = None
    self.threshold_ = 0.0
    self._fit(X, y)
    count = 0
    while count < 100 and (old_threshold is None or abs(threshold - old_threshold) > 0.01):
        old_threshold = threshold
        ss = self.decision_function(X, y)
        threshold = percentile(ss, 100 * self.contamination)
        self._fit(X[ss > threshold], y[ss > threshold] if y is not None else None)
        count += 1
    self.threshold_ = threshold
    return self

def round_solution_pool(pool, constraints):
    pool.distinct().sort()
    P = pool.P
    L0_reg_ind = np.isnan(constraints['coef_set'].C_0j)
    L0_max = constraints['L0_max']
    rounded_pool = SolutionPool(P)
    for solution in pool.solutions:
        # sort from largest to smallest coefficients
        feature_order = np.argsort([-abs(x) for x in solution])
        rounded_solution = np.zeros(shape=(1, P))
        l0_norm_count = 0
        for k in range(0, P):
            j = feature_order[k]
            if not L0_reg_ind[j]:
                rounded_solution[0, j] = np.round(solution[j], 0)
            elif l0_norm_count < L0_max:
                rounded_solution[0, j] = np.round(solution[j], 0)
                l0_norm_count += L0_reg_ind[j]
        rounded_pool.add(objvals=np.nan, solutions=rounded_solution)
    rounded_pool.distinct().sort()
    return rounded_pool

def a_metric(solution, prediction, task='regression'):
    ''' 1 - Mean absolute error divided by mean absolute deviation '''
    mae = mvmean(np.abs(solution - prediction))        # mean absolute error
    mad = mvmean(np.abs(solution - mvmean(solution)))  # mean absolute deviation
    score = 1 - mae / mad
    return mvmean(score)

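A worked example of the metric above (our addition; mvmean is the project's column-wise mean, so plain np.mean stands in for this 1-D case):

import numpy as np

solution = np.array([1.0, 2.0, 3.0, 4.0])
prediction = np.array([1.1, 1.9, 3.2, 3.8])
mae = np.mean(np.abs(solution - prediction))         # 0.15
mad = np.mean(np.abs(solution - np.mean(solution)))  # 1.0
print(1 - mae / mad)                                 # ~0.85
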
### END REGRESSION METRICS
### CLASSIFICATION METRICS (work on solutions in {0, 1} and predictions in [0, 1])
# These can be computed for regression scores only after running normalize_array
def pac_metric(solution, prediction, task='binary.classification'):
    ''' Probabilistic Accuracy based on log_loss metric.
    We assume the solution is in {0, 1} and prediction in [0, 1].
    Otherwise, run normalize_array.'''
    debug_flag = False
    [sample_num, label_num] = solution.shape
    if label_num == 1: task = 'binary.classification'
    eps = 1e-15
    the_log_loss = log_loss(solution, prediction, task)
    # Compute the base log loss (using the prior probabilities)
    pos_num = 1. * sum(solution)  # float conversion!
    frac_pos = pos_num / sample_num  # prior proba of positive class
    the_base_log_loss = prior_log_loss(frac_pos, task)
    # Alternative computation of the same thing (slower)
    # Should always return the same thing except in the multi-label case
    # For which the analytic solution makes more sense
    if debug_flag:
        base_prediction = np.empty(prediction.shape)
        for k in range(sample_num): base_prediction[k, :] = frac_pos
        base_log_loss = log_loss(solution, base_prediction, task)
        diff = np.array(abs(the_base_log_loss - base_log_loss))
        if len(diff.shape) > 0: diff = max(diff)
        if diff > 1e-10:
            print('Arrggh {} != {}'.format(the_base_log_loss, base_log_loss))
    # Exponentiate to turn into an accuracy-like score.
    # In the multi-label case, we need to average AFTER taking the exp
    # because it is an NL operation
    pac = mvmean(np.exp(-the_log_loss))
    base_pac = mvmean(np.exp(-the_base_log_loss))
    # Normalize: 0 for random, 1 for perfect
    score = (pac - base_pac) / sp.maximum(eps, (1 - base_pac))
    return score

def a_score_(solution, prediction):
    mad = float(mvmean(abs(solution - mvmean(solution))))
    return 1 - metrics.mean_absolute_error(solution, prediction) / mad

def build_data_auto_encoder(data, step, win_size):
    count = int(data.shape[1] / float(step))
    docX = np.zeros((count, 3, win_size))
    for i in range(0, data.shape[1] - win_size, step):
        c = i // step
        docX[c][0] = np.abs(data[0, i:i + win_size] - data[1, i:i + win_size])
        docX[c][1] = np.power(data[0, i:i + win_size] - data[1, i:i + win_size], 2)
        docX[c][2] = np.pad(
            (data[0, i:i + win_size - 1] - data[0, i + 1:i + win_size]) *
            (data[1, i:i + win_size - 1] - data[1, i + 1:i + win_size]),
            (0, 1), 'constant', constant_values=0)
    data = np.dstack((docX[:, 0], docX[:, 1], docX[:, 2])).reshape(docX.shape[0], docX.shape[1] * docX.shape[2])
    return data

def reject_outliers(data, m=2.):
    d = np.abs(data - np.median(data))
    mdev = np.median(d)
    s = d / mdev if mdev else 0.
    return data[s < m]

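A usage sketch of the median-absolute-deviation filter above (our addition):

import numpy as np

data = np.array([1.0, 1.01, 0.99, 1.02, 0.98, 10.0])
print(reject_outliers(data, m=2.))  # the 10.0 point is dropped
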
def get_line_region(self, position, name=''):
    """Creates a line region at the given position (start_x, start_y, end_x, end_y),
    inclusive.

    Args:
        position: Position of the line region (start_x, start_y, end_x, end_y).
        name: Name of the region.

    Returns:
        Line region.
    """
    start_idx = self.get_index(position[:2])
    end_idx = self.get_index(position[2:])
    x_diff = start_idx % self.x.samples - end_idx % self.x.samples
    y_diff = int(start_idx / self.x.samples) - int(end_idx / self.x.samples)
    num_points = max(np.abs([x_diff, y_diff]))
    point_indices = []
    for ii in range(num_points + 1):
        x_position = start_idx % self.x.samples - np.round(ii / num_points * x_diff)
        y_position = int(start_idx / self.x.samples) - np.round(ii / num_points * y_diff)
        point_indices.append(int(x_position + self.x.samples * y_position))
    return reg.LineRegion(point_indices, position, name=name)

def get_index(self, value):
    """Returns the index of a given value.

    Args:
        value: Value the index is requested for.

    Returns:
        Index.
    """
    index, = np.where(np.abs(self.vector - value) <= self.snap_radius)
    assert len(index) < 2, "Multiple points found within snap radius of given value."
    assert len(index) > 0, "No point found within snap radius of given value."
    return int(index)

def alterneigh(self, alpha, rad, i, b, g, r):
    if i - rad >= self.SPECIALS - 1:
        lo = i - rad
        start = 0
    else:
        lo = self.SPECIALS - 1
        start = (self.SPECIALS - 1 - (i - rad))
    if i + rad <= self.NETSIZE:
        hi = i + rad
        end = rad * 2 - 1
    else:
        hi = self.NETSIZE
        end = (self.NETSIZE - (i + rad))
    a = self.geta(alpha, rad)[start:end]
    p = self.network[lo + 1:hi]
    p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)

#def contest(self, b, g, r):
#    """ Search for biased BGR values
#    Finds closest neuron (min dist) and updates self.freq
#    finds best neuron (min dist-self.bias) and returns position
#    for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
#    self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
#    i, j = self.SPECIALS, self.NETSIZE
#    dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
#    bestpos = i + np.argmin(dists)
#    biasdists = dists - self.bias[i:j]
#    bestbiaspos = i + np.argmin(biasdists)
#    self.freq[i:j] -= self.BETA * self.freq[i:j]
#    self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
#    self.freq[bestpos] += self.BETA
#    self.bias[bestpos] -= self.BETAGAMMA
#    return bestbiaspos

def calc_scores(self, lag):
    data = self.raw_data[:, abs(self.raw_lags) <= lag]
    control = self.raw_control
    score = self.overlap[self.pairs[:, 0], self.pairs[:, 1]]
    score2 = control - data.mean(axis=1)
    score3 = control
    return score, score2, score3

def data_tooltip(self, x, y):
    row = int(y)
    if row >= 0 and row < len(self.raw_data):
        all_raw_data = self.raw_data
        data_idx = self.sort_idcs[row]
        lag_diff = np.abs(x - self.raw_lags)
        nearest_lag_idx = np.argmin(lag_diff)
        nearest_lag = self.raw_lags[nearest_lag_idx]
        value = all_raw_data[data_idx, nearest_lag_idx]
        return ('%.2f - lag: %.2fms (template similarity: %.2f '
                'CC metric %.2f)') % (value, nearest_lag,
                                      self.score_x[data_idx],
                                      self.score_y[data_idx])
    else:
        return ''

def update_statusbar(self, event):
    # Update information about the mouse position to the status bar
    status_bar = self.statusbar
    if event.inaxes == self.electrode_ax:
        status_bar.showMessage(u'x: %.0fµm y: %.0fµm' % (event.xdata, event.ydata))
    elif event.inaxes == self.data_x:
        yspacing = numpy.max(np.abs(self.data)) * 1.05
        if yspacing != 0:
            row = int((event.ydata + 0.5 * yspacing) / yspacing)
        else:
            row = int(event.ydata)
        if row < 0 or row >= len(self.inspect_points):
            status_bar.clearMessage()
        else:
            time_idx = np.argmin(np.abs(self.time - event.xdata))
            start_idx = np.argmin(np.abs(self.time - self.t_start))
            rel_time_idx = time_idx - start_idx
            electrode_idx = self.inspect_points[row]
            electrode_x, electrode_y = self.points[electrode_idx]
            data = self.data[rel_time_idx, electrode_idx]
            msg = '%.2f' % data
            if self.show_fit:
                fit = self.curve[electrode_idx, rel_time_idx]
                msg += ' (fit: %.2f)' % fit
            msg += ' t: %.2fs ' % self.time[time_idx]
            msg += u'(electrode %d at x: %.0fµm y: %.0fµm)' % (electrode_idx, electrode_x, electrode_y)
            status_bar.showMessage(msg)

def sameParams(self, params, i=None):
    if (self.prevParams is None): return False
    if (i is None): return (np.max(np.abs(params - self.prevParams)) < self.epsilon)
    return ((np.abs(params[i] - self.prevParams[i])) < self.epsilon)

def getEE(self, EEParams):
    if (self.prevEEParams is not None):
        if (EEParams.shape[0] == 0 or np.max(np.abs(EEParams - self.prevEEParams)) < self.epsilon): return self.cache['EE']
    Kd = self.Kdim(EEParams)
    EE = elsympol(Kd, len(self.kernels))
    self.prevEEParams = EEParams.copy()
    self.cache['EE'] = EE
    return EE

def getScaledE(self, params, i, E):
    if (self.prevHyp0Params is not None and np.abs(self.prevHyp0Params[i] - params[i]) < self.epsilon): return self.cache['E_scaled'][i]
    if ('E_scaled' not in self.cache.keys()): self.cache['E_scaled'] = [None for j in xrange(len(self.kernels))]
    for j in xrange(len(self.kernels)):
        if (self.prevHyp0Params is not None and np.abs(self.prevHyp0Params[j] - params[j]) < self.epsilon): continue
        E_scaled = E[:, :, j + 1] * np.exp(2 * params[j])
        self.cache['E_scaled'][j] = E_scaled
    self.prevHyp0Params = params.copy()
    return self.cache['E_scaled'][i]

def __init__(self, X, pos):
    Kernel.__init__(self)
    self.X_scaled = X / np.sqrt(X.shape[1])
    d = pos.shape[0]
    self.D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0

def __init__(self, X, pos):
    Kernel.__init__(self)
    self.X_scaled = X / np.sqrt(X.shape[1])
    d = pos.shape[0]
    self.D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0

def __init__(self, X, pos):
    Kernel.__init__(self)
    self.X_scaled = X / np.sqrt(X.shape[1])
    d = pos.shape[0]
    self.D = np.abs(np.tile(np.column_stack(pos).T, (1, d)) - np.tile(pos, (d, 1))) / 100000.0

def AorthogonalityCheck(A, U, d):
    """
    Test the Frobenius norm of D^{-1}(U^T A U) - I_k
    """
    V = np.zeros(U.shape)
    AV = np.zeros(U.shape)
    Av = Vector()
    v = Vector()
    A.init_vector(Av, 0)
    A.init_vector(v, 1)
    nvec = U.shape[1]
    for i in range(0, nvec):
        v.set_local(U[:, i])
        v *= 1. / math.sqrt(d[i])
        A.mult(v, Av)
        AV[:, i] = Av.get_local()
        V[:, i] = v.get_local()
    VtAV = np.dot(V.T, AV)
    err = VtAV - np.eye(nvec, dtype=VtAV.dtype)
    # plt.imshow(np.abs(err))
    # plt.colorbar()
    # plt.show()
    print("i, ||Vt(i,:)AV(:,i) - I_i||_F, V[:,i] = 1/sqrt(lambda_i) U[:,i]")
    for i in range(1, nvec + 1):
        print(i, np.linalg.norm(err[0:i, 0:i], 'fro'))

def load_scan(path):
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key = lambda x: int(x.InstanceNumber))
    acquisitions = [x.AcquisitionNumber for x in slices]
    vals, counts = np.unique(acquisitions, return_counts=True)
    vals = vals[::-1]  # reverse order so the later acquisitions come first (np.unique returns them in ascending order, 1, 2, ...)
    counts = counts[::-1]
    ## take the acquisition that has more entries; if the counts are identical, take the later one
    acq_val_sel = vals[np.argmax(counts)]
    ##acquisitions = sorted(np.unique(acquisitions), reverse=True)
    if len(vals) > 1:
        print("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
    slices2 = [x for x in slices if x.AcquisitionNumber == acq_val_sel]
    slices = slices2
    ## ONE path includes 2 acquisitions (2 sets); take only the later acquisition, which typically is better than the first/previous ones.
    ## example: '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
    #slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))  # from v 8, BUG: should be float
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))  # from v 9
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices

def load_scan(path):
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key = lambda x: int(x.InstanceNumber))
    #slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))  # from v 8 - BUGGY (should be float; caused issues with segmenting and rescaling)
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))  # from v 8
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices