Python numpy module: log10() code examples
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.log10(). Each snippet is shown with its original surrounding function; imports such as numpy (np) belong to the snippets' host modules.
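As a baseline before the project snippets, here is a minimal, self-contained sketch: numpy.log10 computes the elementwise base-10 logarithm, and the decibel conversion shown last recurs in several examples below.

import numpy as np

print(np.log10(100.0))                 # 2.0
print(np.log10([1.0, 10.0, 1000.0]))   # [0. 1. 3.]
# decibel conversion, a recurring use below:
print(10 * np.log10(2.0))              # ~3.01 dB for a doubled power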
def sample_hparams():
    hparams = {}
    for k, sample_range in ranges.items():
        if isinstance(sample_range, (LogRange, LinearRange)):
            if isinstance(sample_range[0], int):
                # LogRange not valid for ints
                hparams[k] = random.randint(sample_range[0], sample_range[1])
            elif isinstance(sample_range[0], float):
                start, end = sample_range
                if isinstance(sample_range, LogRange):
                    start, end = np.log10(start), np.log10(end)
                choice = np.random.uniform(start, end)
                if isinstance(sample_range, LogRange):
                    # invert the log10 transform above (the original called
                    # np.exp here, which mismatches the base-10 logarithm)
                    choice = 10 ** choice
                hparams[k] = choice
    return hparams
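A hedged usage sketch for the function above. It assumes module-level names ranges, LogRange and LinearRange; the namedtuple definitions and the ranges dict below are hypothetical stand-ins, not the originating project's types.

import random
from collections import namedtuple
import numpy as np

# hypothetical stand-ins for the project's range types
LogRange = namedtuple('LogRange', ['start', 'end'])
LinearRange = namedtuple('LinearRange', ['start', 'end'])

ranges = {
    'learning_rate': LogRange(1e-5, 1e-1),  # floats, sampled uniformly in log10 space
    'dropout': LinearRange(0.0, 0.5),       # floats, sampled uniformly
    'batch_size': LinearRange(16, 256),     # ints fall through to randint
}

print(sample_hparams())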
def compHistDistance(h1, h2):
    def normalize(h):
        if np.sum(h) == 0:
            return h
        else:
            return h / np.sum(h)

    def smoothstep(x, x_min=0., x_max=1., k=2.):
        m = 1. / (x_max - x_min)
        b = - m * x_min
        x = m * x + b
        return betainc(k, k, np.clip(x, 0., 1.))

    def fn(X, Y, k):
        return 4. * (1. - smoothstep(Y, 0, (1 - Y) * X + Y + .1)) \
               * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) \
               + 2. * smoothstep(Y, 0, (1 - Y) * X + Y + .1) \
               * (1. - 2. * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) - 0.5)

    h1 = normalize(h1)
    h2 = normalize(h2)
    return max(0, np.sum(fn(h2, h1, len(h1))))
    # return np.sum(np.where(h2 != 0, h2 * np.log10(h2 / (h1 + 1e-10)), 0))  # KL divergence
def export(self, fileName):
    """
    Export data from the ImageView to a file, or to a stack of files if
    the data is 3D. Saving an image stack will result in index numbers
    being added to the file name. Images are saved as they would appear
    onscreen, with levels and lookup table applied.
    """
    img = self.getProcessedImage()
    if self.hasTimeAxis():
        base, ext = os.path.splitext(fileName)
        fmt = "%%s%%0%dd%%s" % int(np.log10(img.shape[0]) + 1)
        for i in range(img.shape[0]):
            self.imageItem.setImage(img[i], autoLevels=False)
            self.imageItem.save(fmt % (base, i, ext))
        self.updateImage()
    else:
        self.imageItem.save(fileName)
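The doubled percent signs deserve a note: the outer % fills in only the zero-padding width, derived from log10 of the frame count, and the result is a second format string applied per frame. A standalone sketch (n_frames is illustrative):

import numpy as np

n_frames = 250
fmt = "%%s%%0%dd%%s" % int(np.log10(n_frames) + 1)  # -> '%s%03d%s'
print(fmt % ("stack", 7, ".png"))                   # stack007.png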
def logTickValues(self, minVal, maxVal, size, stdTicks):
    ## start with the tick spacing given by tickValues().
    ## Any level whose spacing is < 1 needs to be converted to log scale
    ticks = []
    for (spacing, t) in stdTicks:
        if spacing >= 1.0:
            ticks.append((spacing, t))
    if len(ticks) < 3:
        v1 = int(np.floor(minVal))
        v2 = int(np.ceil(maxVal))
        #major = list(range(v1+1, v2))
        minor = []
        for v in range(v1, v2):
            minor.extend(v + np.log10(np.arange(1, 10)))
        minor = [x for x in minor if x > minVal and x < maxVal]
        ticks.append((None, minor))
    return ticks
def riess_sn_fit(app_mag_s, app_mag_err_s, z_s, sig_int_s):
    # helpful parameters. only fitting an intercept here
    n_s = len(app_mag_s)
    n_obs = n_s
    n_par = 1
    y_vec = np.zeros(n_obs)
    l_mat = np.zeros((n_obs, n_par))
    c_mat_inv = np.zeros((n_obs, n_obs))

    # loop through SNe
    k = 0
    for i in range(0, n_s):
        y_vec[k] = np.log10(z2d(z_s[i])) - 0.2 * app_mag_s[i]
        l_mat[k, 0] = 1.0
        c_mat_inv[k, k] = 1.0 / 0.2 ** 2 / \
                          (app_mag_err_s[i] ** 2 + sig_int_s ** 2)
        k += 1

    # fit, calculate residuals in useable form and return
    ltci = np.dot(l_mat.transpose(), c_mat_inv)
    q_hat_cov = np.linalg.inv(np.dot(ltci, l_mat))
    q_hat = np.dot(np.dot(q_hat_cov, ltci), y_vec)
    res = y_vec - np.dot(l_mat, q_hat)
    return q_hat, np.sqrt(np.diag(q_hat_cov)), res
def absolute_magnitude(parallax, m):
    """Calculate the absolute magnitude based on distance and apparent mag.

    Inputs
    ------
    parallax : float
        The parallax in mas
    m : float
        The apparent magnitude

    Output
    ------
    M : float
        The absolute magnitude
    """
    d = 1. / (parallax * 1e-3)  # Conversion to arcsecond before deriving distance
    mu = 5 * np.log10(d) - 5
    M = m - mu
    return M
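A quick sanity check of the distance-modulus arithmetic: a parallax of 10 mas puts the star at d = 1/0.01 = 100 pc, so mu = 5*log10(100) - 5 = 5 and M = m - 5.

print(absolute_magnitude(10.0, 8.0))  # 8 - 5 = 3.0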
def plot_mean_debye(sol, ax):
    x = np.log10(sol[0]["data"]["tau"])
    x = np.linspace(min(x), max(x), 100)
    list_best_rtd = [100*np.sum([a*(x**i) for (i, a) in enumerate(s["params"]["a"])], axis=0) for s in sol]
    # list_best_rtd = [s["fit"]["best"] for s in sol]
    y = np.mean(list_best_rtd, axis=0)
    y_min = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] - sol[0]["params"]["a_std"])], axis=0)
    y_max = 100*np.sum([a*(x**i) for (i, a) in enumerate(sol[0]["params"]["a"] + sol[0]["params"]["a_std"])], axis=0)
    ax.errorbar(10**x[(x > -6) & (x < 2)], y[(x > -6) & (x < 2)], None, None, "-", color='blue', linewidth=2, label="Mean RTD", zorder=10)
    plt.plot(10**x[(x > -6) & (x < 2)], y_min[(x > -6) & (x < 2)], color='lightgray', alpha=1, zorder=-1, label="RTD range")
    plt.plot(10**x[(x > -6) & (x < 2)], y_max[(x > -6) & (x < 2)], color='lightgray', alpha=1, zorder=-1)
    plt.fill_between(sol[0]["data"]["tau"], 100*(sol[0]["params"]["m_"] - sol[0]["params"]["m__std"]), 100*(sol[0]["params"]["m_"] + sol[0]["params"]["m__std"]), color='lightgray', alpha=1, zorder=-1, label="RTD SD")
    ax.set_xlabel("Relaxation time (s)", fontsize=14)
    ax.set_ylabel("Chargeability (%)", fontsize=14)
    plt.yticks(fontsize=14), plt.xticks(fontsize=14)
    plt.xscale("log")
    ax.set_xlim([1e-6, 1e1])
    ax.set_ylim([0, 5.0])
    ax.legend(loc=1, fontsize=12)
    # ax.set_title(title+" step method", fontsize=14)
def vecnorm(vec, norm, epsilon=1e-3):
    """
    Scale a vector to unit length. The only exception is the zero vector, which
    is returned back unchanged.
    """
    if norm not in ('prob', 'max1', 'logmax1'):
        raise ValueError("'%s' is not a supported norm. Currently supported norms "
                         "include 'prob', 'max1' and 'logmax1'." % norm)

    if isinstance(vec, np.ndarray):
        vec = np.asarray(vec, dtype=float)
        if norm == 'prob':
            veclen = np.sum(np.abs(vec)) + epsilon * len(vec)  # smoothing
        elif norm == 'max1':
            veclen = np.max(vec) + epsilon
        elif norm == 'logmax1':
            vec = np.log10(1. + vec)
            veclen = np.max(vec) + epsilon
        if veclen > 0.0:
            return (vec + epsilon) / veclen
        else:
            return vec
    else:
        raise ValueError('vec should be ndarray, found: %s' % type(vec))
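A usage sketch for the 'logmax1' branch, which compresses dynamic range with log10(1 + x) before scaling (assumes the function above is in scope):

import numpy as np

v = np.array([0., 9., 99.])
print(vecnorm(v, 'logmax1'))  # log10(1 + v) = [0, 1, 2], then scaled so the max is ~1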
def normalizeMTX(MTX, logScale=False):
    """ Normalizes a matrix to [0 ... 1]

    Parameters
    ----------
    MTX : array_like
        Matrix to be normalized
    logScale : bool
        Toggle conversion logScale [Default: False]

    Returns
    -------
    MTX : array_like
        Normalized Matrix
    """
    MTX -= MTX.min()
    MTX /= MTX.max()

    if logScale:
        MTX += 0.00001
        MTX = _np.log10(_np.abs(MTX))
        # values now lie in [log10(1e-5), log10(1.00001)] = [-5, ~4.3e-6];
        # shifting by 5 and dividing by 5.000004343 maps them back to [0, 1]
        MTX += 5
        MTX /= 5.000004343
        # MTX = 20 * _np.log10(_np.abs(MTX))
    return MTX
def db(data, power=False):
    '''Convenience function to calculate the 20*log10(abs(x))

    Parameters
    ----------
    data : array_like
        signals to be converted to db
    power : boolean
        data is a power signal and only needs factor 10

    Returns
    -------
    db : array_like
        20 * log10(abs(data))
    '''
    if power:
        factor = 10
    else:
        factor = 20
    return factor * np.log10(np.abs(data))
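For example, halving an amplitude is about -6 dB, while halving a power is about -3 dB:

import numpy as np

print(db(0.5))               # 20*log10(0.5) ≈ -6.02
print(db(0.5, power=True))   # 10*log10(0.5) ≈ -3.01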
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
    mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
    d1len = len(self.data1[t1])
    if self.use_hist_feats:
        assert (t1, t2) in self.hist_feats
        caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
        if d1len < data1_maxlen:
            mhist[:d1len, :] = caled_hist[:, :]
        else:
            mhist[:, :] = caled_hist[:data1_maxlen, :]
    else:
        t1_rep = self.embed[self.data1[t1]]
        t2_rep = self.embed[self.data2[t2]]
        mm = t1_rep.dot(np.transpose(t2_rep))
        for (i, j), v in np.ndenumerate(mm):
            if i >= data1_maxlen:
                break
            vid = int((v + 1.) / 2. * (hist_size - 1.))
            mhist[i][vid] += 1.
        mhist += 1.
        mhist = np.log10(mhist)
    return mhist
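The bucketing step maps a cosine-like similarity v in [-1, 1] to a bin index in [0, hist_size - 1], and the final log10(count + 1) yields the kind of log-count matching histogram used in DRMM-style retrieval models. A standalone sketch of just that mapping (hist_size is an illustrative value):

import numpy as np

hist_size = 5
for v in (-1.0, 0.0, 1.0):
    vid = int((v + 1.) / 2. * (hist_size - 1.))
    print(v, '->', vid)      # -1.0 -> 0, 0.0 -> 2, 1.0 -> 4
print(np.log10(1. + 0.))     # empty bins stay at 0 after the +1 smoothing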
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
    mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
    t1_cont = list(self.data1[t1])
    t2_cont = list(self.data2[t2])
    d1len = len(t1_cont)
    if self.use_hist_feats:
        assert (t1, t2) in self.hist_feats
        caled_hist = np.reshape(self.hist_feats[(t1, t2)], (d1len, hist_size))
        if d1len < data1_maxlen:
            mhist[:d1len, :] = caled_hist[:, :]
        else:
            mhist[:, :] = caled_hist[:data1_maxlen, :]
    else:
        t1_rep = self.embed[t1_cont]
        t2_rep = self.embed[t2_cont]
        mm = t1_rep.dot(np.transpose(t2_rep))
        for (i, j), v in np.ndenumerate(mm):
            if i >= data1_maxlen:
                break
            vid = int((v + 1.) / 2. * (hist_size - 1.))
            mhist[i][vid] += 1.
        mhist += 1.
        mhist = np.log10(mhist)
    return mhist
def cal_hist(self, t1, t2, data1_maxlen, hist_size):
    mhist = np.zeros((data1_maxlen, hist_size), dtype=np.float32)
    t1_cont = list(self.data1[t1])
    t2_cont = list(self.data2[t2])
    d1len = len(t1_cont)
    if self.use_hist_feats:
        assert (t1, t2) in self.hist_feats
        curr_pair_feats = list(self.hist_feats[(t1, t2)])
        caled_hist = np.reshape(curr_pair_feats, (d1len, hist_size))
        if d1len < data1_maxlen:
            mhist[:d1len, :] = caled_hist[:, :]
        else:
            mhist[:, :] = caled_hist[:data1_maxlen, :]
    else:
        t1_rep = self.embed[t1_cont]
        t2_rep = self.embed[t2_cont]
        mm = t1_rep.dot(np.transpose(t2_rep))
        for (i, j), v in np.ndenumerate(mm):
            if i >= data1_maxlen:
                break
            vid = int((v + 1.) / 2. * (hist_size - 1.))
            mhist[i][vid] += 1.
        mhist += 1.
        mhist = np.log10(mhist)
    return mhist
def SLcomputePSNR(X, Xnoisy):
    """
    SLcomputePSNR Compute peak signal to noise ratio (PSNR).

    Usage:
        PSNR = SLcomputePSNR(X, Xnoisy)

    Input:
        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:
        PSNR: The peak signal to noise ratio (in dB).
    """
    MSEsqrt = np.linalg.norm(X - Xnoisy) / np.sqrt(X.size)
    if MSEsqrt == 0:
        return np.inf
    else:
        return 20 * np.log10(255 / MSEsqrt)
def SLcomputeSNR(X, Xnoisy):
    """
    SLcomputeSNR Compute signal to noise ratio (SNR).

    Usage:
        SNR = SLcomputeSNR(X, Xnoisy)

    Input:
        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:
        SNR: The signal to noise ratio (in dB).
    """
    if np.linalg.norm(X - Xnoisy) == 0:
        return np.inf  # the original np.Inf alias was removed in NumPy 2.0
    else:
        return 10 * np.log10(np.sum(np.power(X, 2)) / np.sum(np.power(X - Xnoisy, 2)))
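A quick check with a known case: noise at one tenth of the signal's RMS should give an SNR of roughly 20 dB (assumes the function above is in scope):

import numpy as np

rng = np.random.default_rng(0)
X = np.ones((64, 64))
noise = 0.1 * rng.standard_normal(X.shape)
print(SLcomputeSNR(X, X + noise))   # ≈ 20 dB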
def generate(self, n=1):
    """
    Generate a sample of luminosity values within [min, max] from
    the above luminosity distribution.
    """
    results = []
    # Get the maximum value of the flux number density function,
    # which is monotonically decreasing.
    M = self.fluxDensity(self.fmin)
    for i in range(n):
        while True:
            u = np.random.uniform() * M
            y = 10 ** np.random.uniform(low=np.log10(self.fmin),
                                        high=np.log10(self.fmax))
            if u <= self.fluxDensity(y):
                results.append(y)
                break
    return results
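The 10 ** np.random.uniform(log10(a), log10(b)) idiom in the proposal draw samples log-uniformly from [a, b]: each decade receives equal probability mass, which suits quantities spanning several orders of magnitude. A standalone sketch:

import numpy as np

rng = np.random.default_rng(1)
samples = 10 ** rng.uniform(np.log10(1e-3), np.log10(1e3), size=100000)
# roughly one sixth of the samples land in each of the six decades
print(np.mean((samples >= 1e-3) & (samples < 1e-2)))  # ≈ 0.167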
def eval(self, t):
    # given a time vector t, return the design matrix column vector(s)
    if self.type is None:
        return np.array([])

    hl = np.zeros((t.shape[0],))
    ht = np.zeros((t.shape[0],))

    if self.type in (0, 2):
        hl[t >= self.year] = np.log10(1 + (t[t >= self.year] - self.year) / self.T)
    if self.type in (1, 2):
        ht[t >= self.year] = 1

    return np.append(ht, hl) if np.any(hl) else ht
def PlotAppRes3Layers_wrapper(fmin, fmax, nbdata, h1, h2, rhol1, rhol2, rhol3, mul1, mul2, mul3, epsl1, epsl2, epsl3, PlotEnvelope, F_Envelope):
    frangn = frange(np.log10(fmin), np.log10(fmax), nbdata)
    sig3 = np.array([0., 0.001, 0.1, 0.001])
    thick3 = np.array([120000., 50., 50.])
    eps3 = np.array([1., 1., 1., 1.])
    mu3 = np.array([1., 1., 1., 1.])
    chg3 = np.array([0., 0.1, 0., 0.2])
    chg3_0 = np.array([0., 0.1, 0., 0.])
    taux3 = np.array([0., 0.1, 0., 0.1])
    c3 = np.array([1., 1., 1., 1.])
    sig3[1] = 1. / rhol1
    sig3[2] = 1. / rhol2
    sig3[3] = 1. / rhol3
    mu3[1] = mul1
    mu3[2] = mul2
    mu3[3] = mul3
    eps3[1] = epsl1
    eps3[2] = epsl2
    eps3[3] = epsl3
    thick3[1] = h1
    thick3[2] = h2
    PlotAppRes(frangn, thick3, sig3, chg3_0, taux3, c3, mu3, eps3, 3, F_Envelope, PlotEnvelope)
def quenchedfrac_zfourge(M, z):
    par_q1, par_q2 = zfourgeparams(z, type='quiescent')
    x_q1 = 10.**(M - par_q1[1])
    dn_q1 = np.log10(np.log(10)*np.exp(-1.*x_q1)*x_q1*(10.**par_q1[3]*x_q1**(par_q1[2]) + 10.**(par_q1[5])*x_q1**(par_q1[4])))
    x_q2 = 10.**(M - par_q2[1])
    dn_q2 = np.log10(np.log(10)*np.exp(-1.*x_q2)*x_q2*(10.**par_q2[3]*x_q2**(par_q2[2]) + 10.**(par_q2[5])*x_q2**(par_q2[4])))
    par_sf1, par_sf2 = zfourgeparams(z, type='star-forming')
    x_sf1 = 10.**(M - par_sf1[1])
    dn_sf1 = np.log10(np.log(10)*np.exp(-1.*x_sf1)*x_sf1*(10.**par_sf1[3]*x_sf1**(par_sf1[2]) + 10.**(par_sf1[5])*x_sf1**(par_sf1[4])))
    x_sf2 = 10.**(M - par_sf2[1])
    dn_sf2 = np.log10(np.log(10)*np.exp(-1.*x_sf2)*x_sf2*(10.**par_sf2[3]*x_sf2**(par_sf2[2]) + 10.**(par_sf2[5])*x_sf2**(par_sf2[4])))
    fq1 = 10.**dn_q1/(10.**dn_q1 + 10.**dn_sf1)
    fq2 = 10.**dn_q2/(10.**dn_q2 + 10.**dn_sf2)
    return (fq1*(par_q2[0] - z) + fq2*(z - par_q1[0]))/(par_q2[0] - par_q1[0])

# ------ OBSOLETE, left in for backwards-compatibility ------ #
def __scale_coefficient(self, result, result_index, t, sum_log=False):
    """
    Normalize column t of result[result_index] so it sums to one,
    guarding against an all-zero column; optionally accumulate the
    log10 of the scaling factor.

    :param result: list of coefficient matrices (states x time steps)
    :param result_index: index of the matrix to scale
    :param t: time step (column) to scale
    :param sum_log: if True, add log10(scale) to the c coefficient
    :return: None; scales in place
    """
    sum_column = np.sum(result[result_index][:, t], axis=0)
    if sum_column == 0.:
        result[result_index][:, t] = 1. / len(self.__states)
        sum_column = 1.
    result[result_index][:, t] /= sum_column
    if sum_log:
        self.__c_coefficient += math.log10(sum_column)
def process(self, **kwargs):
    """Process module."""
    self._rest_times = kwargs['rest_times']
    self._rest_t_explosion = kwargs[self.key('resttexplosion')]

    outputs = OrderedDict()
    max_times = max(self._rest_times)
    if max_times > self._rest_t_explosion:
        outputs['dense_times'] = np.unique(
            np.concatenate(([0.0], [
                x + self._rest_t_explosion
                for x in np.logspace(
                    self.L_T_MIN,
                    np.log10(max_times - self._rest_t_explosion),
                    num=self._n_times)
            ], self._rest_times)))
    else:
        outputs['dense_times'] = np.array(self._rest_times)
    outputs['dense_indices'] = np.searchsorted(
        outputs['dense_times'], self._rest_times)
    return outputs
def volcano(data):
    if len(data.index.levels[1]) != 2:
        raise Exception('Volcano requires secondary index with two values')
    indexA, indexB = data.index.levels[1]
    dataA = data.xs(indexA, level=1)
    dataB = data.xs(indexB, level=1)
    meanA = dataA.mean(axis=0)
    meanB = dataB.mean(axis=0)
    change = meanB.div(meanA)
    statistic, pvalues = ttest_ind(dataA, dataB)
    pvalues = pd.DataFrame(
        [statistic, pvalues, -np.log10(pvalues), change, np.log2(change)],
        columns=data.columns,
        index=['t', 'p', '-log10(p)', 'foldchange', 'log2(foldchange)']).transpose()
    return pvalues
def ttest(data):
    if len(data.index.levels[1]) != 2:
        raise Exception('T-test requires secondary index with two values')
    indexA, indexB = data.index.levels[1]
    dataA = data.xs(indexA, level=1)
    dataB = data.xs(indexB, level=1)
    statistic, pvalues = ttest_ind(dataA, dataB)
    pvalues = pd.DataFrame(
        [statistic, pvalues, -np.log10(pvalues)],
        columns=data.columns,
        index=['t', 'p', '-log10(p)']).transpose()
    return pvalues
def test_branch_cuts(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log,   -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log2,  -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
    yield _check_branch_cut, np.sqrt,  -0.5, 1j, 1, -1, True

    yield _check_branch_cut, np.arcsin, [-2, 2],    [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arccos, [-2, 2],    [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1],  -1,  1, True

    yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1],  -1,  1, True
    yield _check_branch_cut, np.arccosh, [-1, 0.5],  [1j, 1j], 1, -1, True
    yield _check_branch_cut, np.arctanh, [-2, 2],    [1j, 1j], 1, -1, True

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [0-2j, 2j], [1, 1],   1, 1
    yield _check_branch_cut, np.arccos, [0-2j, 2j], [1, 1],   1, 1
    yield _check_branch_cut, np.arctan, [-2, 2],    [1j, 1j], 1, 1

    yield _check_branch_cut, np.arcsinh, [-2, 2, 0],    [1j, 1j, 1], 1, 1
    yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j],  1, 1
    yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j],  1, 1
def test_branch_cuts_complex64(self):
    # check branch cuts and continuity on them
    yield _check_branch_cut, np.log,   -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log2,  -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
    yield _check_branch_cut, np.sqrt,  -0.5, 1j, 1, -1, True, np.complex64

    yield _check_branch_cut, np.arcsin, [-2, 2],    [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arccos, [-2, 2],    [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1],  -1,  1, True, np.complex64

    yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1],  -1,  1, True, np.complex64
    yield _check_branch_cut, np.arccosh, [-1, 0.5],  [1j, 1j], 1, -1, True, np.complex64
    yield _check_branch_cut, np.arctanh, [-2, 2],    [1j, 1j], 1, -1, True, np.complex64

    # check against bogus branch cuts: assert continuity between quadrants
    yield _check_branch_cut, np.arcsin, [0-2j, 2j], [1, 1],   1, 1, False, np.complex64
    yield _check_branch_cut, np.arccos, [0-2j, 2j], [1, 1],   1, 1, False, np.complex64
    yield _check_branch_cut, np.arctan, [-2, 2],    [1j, 1j], 1, 1, False, np.complex64

    yield _check_branch_cut, np.arcsinh, [-2, 2, 0],    [1j, 1j, 1], 1, 1, False, np.complex64
    yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j],  1, 1, False, np.complex64
    yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j],  1, 1, False, np.complex64
def final_lifetime(self):
    '''
    Returns the final time of the evolution.
    '''
    age = []
    mass = []
    m = self.run_historydata
    for case in m:
        ## lifetime till first TP
        age.append(np.log10(case.get("star_age")[-1]))
        mass.append(case.get("star_mass")[0])
    #plt.plot(mass, age, "*", markersize=10, linestyle="-", label=label)
    #plt.xlabel('star mass $[M_{\odot}]$', fontsize=20)
    #plt.ylabel('Logarithmic stellar lifetime', fontsize=20)
    print('Mini', '|', 'Age')
    for k in range(len(age)):
        print(mass[k], '|', age[k])
    return mass, age
def lifetime(self, label=""):
    '''
    Calculate stellar lifetime till first TP
    as a function of initial mass
    '''
    plt.rcParams.update({'font.size': 20})
    plt.rc('xtick', labelsize=20)
    plt.rc('ytick', labelsize=20)
    t0_model = self.set_find_first_TP()
    m = self.run_historydata
    i = 0
    age = []
    mass = []
    for case in m:
        ## lifetime till first TP
        age.append(np.log10(case.get("star_age")[t0_model[i]]))
        mass.append(case.get("star_mass")[0])
        i += 1
    plt.plot(mass, age, "*", markersize=10, linestyle="-", label=label)
    plt.xlabel('star mass $[M_{\odot}]$', fontsize=20)
    plt.ylabel('Logarithmic stellar lifetime', fontsize=20)
def _padding_model_number(number, max_num):
    '''
    This method returns a zero-front padded string.

    It makes out of str(45) -> '0045' if 999 < max_num < 10000. This is
    meant to work for reasonable integers (maybe less than 10^6).

    Parameters
    ----------
    number : integer
        number that the string should represent.
    max_num : integer
        max number of cycle list, implies how many 0s have to be padded
    '''
    cnum = str(number)
    clen = len(cnum)
    cmax = int(log10(max_num)) + 1
    return (cmax - clen) * '0' + cnum
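The digit count comes from int(log10(max_num)) + 1. A quick check, assuming a bare log10 in scope as in the snippet (here taken from numpy):

from numpy import log10

print(_padding_model_number(45, 9999))  # '0045', since int(log10(9999)) + 1 == 4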
def _get_knot_spacing(self):
    """Returns a list of knot locations based on the spline parameters

    If the option `spacing` is 'lin', uses linear spacing;
    'log', uses log spacing.

    Places 'spline_N' knots between 'spline_min' and 'spline_max'
    """
    space_key = self.get_option('spacing').lower()[:3]

    if space_key == 'log':
        vol = np.logspace(np.log10(self.get_option('spline_min')),
                          np.log10(self.get_option('spline_max')),
                          self.get_option('spline_N'))
    elif space_key == 'lin':
        vol = np.linspace(self.get_option('spline_min'),
                          self.get_option('spline_max'),
                          self.get_option('spline_N'))
    else:
        raise KeyError("{:} only `lin`ear and `log` spacing are"
                       " accepted".format(self.get_inform(1)))
    # end

    return vol
def epsilon_enhance(img, g, rho, mu, tau):
    alpha0 = ridgelet(img)
    alpha = np.abs(alpha0)
    lvl, na1, na2 = np.shape(alpha)
    n1, n2 = np.shape(img)
    sigma = MAD(img)
    level = mk_thresh(n1, n2) * sigma
    alpha_enhanced = np.copy(alpha) * 0
    for i in range(lvl - 1):
        alpha_enhanced[i, np.where(alpha[i,:,:] < mu)] = (mu / (alpha[i, np.where(alpha[i,:,:].astype(float) < mu)]))**g
        plt.imshow(np.log10(alpha_enhanced[i,:,:])); plt.title('mu'); plt.colorbar(); plt.show()
        alpha_enhanced[i, np.where(alpha[i,:,:] < 2*tau*level[i])] = ((alpha[i, np.where(alpha[i,:,:] < 2*tau*level[i])].astype(float) - tau*level[i])/(tau*level[i]))*(mu/(tau*level[i]))**g + (2*tau*level[i] - alpha[i, np.where(alpha[i,:,:] < 2*tau*level[i])].astype(float))/(tau*level[i])
        plt.imshow(np.log10(alpha_enhanced[i,:,:])); plt.title('2sigma'); plt.colorbar(); plt.show()
        alpha_enhanced[i, np.where(alpha[i,:,:] < tau*level[i])] = 1.
        plt.imshow(np.log10(alpha_enhanced[i,:,:])); plt.title('1'); plt.colorbar(); plt.show()
        alpha_enhanced[i, np.where(alpha[i,:,:] >= mu)] = (mu/alpha[i, np.where(alpha[i,:,:] >= mu)].astype(float))**rho
        plt.imshow(np.log10(alpha_enhanced[i,:,:])); plt.title('final'); plt.colorbar(); plt.show()
    alpha_enhanced[-1,:,:] = 1
    return alpha0 * alpha_enhanced
def plot(self, nmin=-3.5, nmax=1.5):
    """Plots the field magnitude."""
    x, y = meshgrid(
        linspace(XMIN/ZOOM + XOFFSET, XMAX/ZOOM + XOFFSET, 200),
        linspace(YMIN/ZOOM, YMAX/ZOOM, 200))
    z = zeros_like(x)
    for i in range(x.shape[0]):
        for j in range(x.shape[1]):
            z[i, j] = log10(self.magnitude([x[i, j], y[i, j]]))
    levels = arange(nmin, nmax + 0.2, 0.2)
    cmap = pyplot.cm.get_cmap('plasma')
    pyplot.contourf(x, y, numpy.clip(z, nmin, nmax),
                    10, cmap=cmap, levels=levels, extend='both')

# pylint: disable=too-few-public-methods
def plot_volcano(logFC, p_val, sample_name, saveName, logFC_thresh):
    fig = pl.figure()
    ## To plot and save
    pl.scatter(logFC[(p_val > 0.05) | (abs(logFC) < logFC_thresh)], -np.log10(p_val[(p_val > 0.05) | (abs(logFC) < logFC_thresh)]), color='blue', alpha=0.5)
    pl.scatter(logFC[(p_val < 0.05) & (abs(logFC) > logFC_thresh)], -np.log10(p_val[(p_val < 0.05) & (abs(logFC) > logFC_thresh)]), color='red')
    pl.hlines(-np.log10(0.05), min(logFC), max(logFC))
    pl.vlines(-logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.vlines(logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.xlim(-3, 3)
    pl.xlabel('Log Fold Change')
    pl.ylabel('-log10(p-value)')
    pl.savefig(saveName)
    pl.close(fig)

# def plot_histograms(df_peaks, pntr_list):
#
#     for pntr in pntr_list:
#         colName = pntr[2] + '_Intragenic_position'
#         pl.hist(df_peaks[colName])
#         pl.xlabel(colName)
#         pl.ylabel()
#         pl.show()
def compute_spectrogram(self, sig, data_length_sec, sampling_frequency, nfreq_bands, win_length_sec, stride_sec):
    n_channels = 16
    n_timesteps = int((data_length_sec - win_length_sec) / stride_sec + 1)
    n_fbins = nfreq_bands
    sig = np.transpose(sig)
    sig2 = np.zeros((n_channels, n_fbins, n_timesteps))
    for i in range(n_channels):
        sigc = np.zeros((n_fbins, n_timesteps))
        for frame_num, w in enumerate(range(0, int(data_length_sec - win_length_sec + 1), stride_sec)):
            sigw = sig[i, w * sampling_frequency: (w + win_length_sec) * sampling_frequency]
            sigw = self.hanning(sigw)
            fft = self.log10(np.absolute(np.fft.rfft(sigw)))
            fft_freq = np.fft.rfftfreq(n=sigw.shape[-1], d=1.0 / sampling_frequency)
            sigc[:nfreq_bands, frame_num] = self.group_into_bands(fft, fft_freq, nfreq_bands)
        sig2[i, :, :] = sigc
    return np.transpose(sig2, axes=(2, 1, 0))
def find_null_offset(xpts, powers, default=0.0):
    """Finds the offset corresponding to the minimum power using a fit to the measured data"""
    def model(x, a, b, c):
        return a * (x - b)**2 + c
    powers = np.power(10, powers / 10.)
    min_idx = np.argmin(powers)
    try:
        fit = curve_fit(model, xpts, powers, p0=[1, xpts[min_idx], powers[min_idx]])
    except RuntimeError:
        logger.warning("Mixer null offset fit failed.")
        # return three values to match the arity of the success path
        # (the original returned only two here)
        return default, xpts, np.zeros(len(powers))
    best_offset = np.real(fit[0][1])
    best_offset = np.minimum(best_offset, xpts[-1])
    best_offset = np.maximum(best_offset, xpts[0])
    xpts_fine = np.linspace(xpts[0], xpts[-1], 101)
    fit_pts = np.array([np.real(model(x, *fit[0])) for x in xpts_fine])
    if min(fit_pts) < 0:
        fit_pts -= min(fit_pts) - 1e-10  # prevent log of a negative number
    return best_offset, xpts_fine, 10 * np.log10(fit_pts)
def transform(audio_data, save_image_path, nFFT=256, overlap=0.75):
    '''audio_data: signals to convert
    save_image_path: path to store the image file'''
    # spectrogram
    freq_data = stft(audio_data, nFFT, overlap)
    freq_data = np.maximum(np.abs(freq_data),
                           np.max(np.abs(freq_data)) / 10000)
    log_freq_data = 20. * np.log10(freq_data / 1e-4)
    N_samples = log_freq_data.shape[0]
    # log_freq_data = np.maximum(log_freq_data, max_m - 70)
    # print(np.max(np.max(log_freq_data)))
    # print(np.min(np.min(log_freq_data)))
    log_freq_data = np.round(log_freq_data)
    log_freq_data = np.transpose(log_freq_data)
    # ipdb.set_trace()
    assert np.max(np.max(log_freq_data)) < 256, 'spectrogram value too large'
    # save the image
    spec_imag = Image.fromarray(log_freq_data)
    spec_imag = spec_imag.convert('RGB')
    spec_imag.save(save_image_path)
    return N_samples
def Get3FGL(Cat, xdata, ydata, dydata):
    # create a spectrum for a given catalog and compute the model+butterfly
    # 3FGL CATALOG
    Cat.MakeSpectrum("3FGL", 1e-4, 0.3)
    enerbut, but, enerphi, phi = Cat.Plot("3FGL")
    # read DATA Point from 3FGL CATALOG
    em3FGL, ep3FGL, flux3FGL, dflux3FGL = Cat.GetDataPoints('3FGL')  # energy in TeV since the user asks for that in the call of Cat
    ener3FGL = numpy.sqrt(em3FGL * ep3FGL)
    dem3FGL = ener3FGL - em3FGL
    dep3FGL = ep3FGL - ener3FGL
    c = Cat.ReadPL('3FGL')[3]
    e2dnde3FGL = (-c + 1) * flux3FGL * numpy.power(ener3FGL * 1e6, -c + 2) / (numpy.power((ep3FGL * 1e6), -c + 1) - numpy.power((em3FGL * 1e6), -c + 1)) * 1.6e-6
    de2dnde3FGL = e2dnde3FGL * dflux3FGL / flux3FGL
    for i in range(len(ener3FGL)):  # range(), not the Python 2 xrange()
        xdata.append(numpy.log10(ener3FGL[i]))
        ydata.append(numpy.log10(e2dnde3FGL[i]))
        dydata.append(numpy.log10(de2dnde3FGL[i]))
    return enerbut, but, enerphi, phi, ener3FGL, e2dnde3FGL, dem3FGL, dep3FGL, de2dnde3FGL
def debias_mse(zhat, ztrue):
    """
    If zhat and ztrue are 1D vectors, the function computes the *debiased normalized MSE* defined as:

        dmse_lin = min_c ||ztrue - c*zhat||^2 / ||ztrue||^2
                 = 1 - |zhat'*ztrue|^2 / (||ztrue||^2 * ||zhat||^2)

    The function returns the value in dB: dmse = 10*log10(dmse_lin).
    If zhat and ztrue are matrices, dmse_lin is computed for each column and then averaged over the columns.
    """
    zcorr = np.abs(np.sum(zhat.conj() * ztrue, axis=0))**2
    zhatpow = np.sum(np.abs(zhat)**2, axis=0)
    zpow = np.sum(np.abs(ztrue)**2, axis=0)
    tol = 1e-8
    if np.any(zhatpow < tol) or np.any(zpow < tol):
        dmse = 0
    else:
        dmse = 10 * np.log10(np.mean(1 - zcorr / zhatpow / zpow))
    return dmse
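Because the bias c is optimized out, the metric is invariant to rescaling of zhat; only shape mismatch contributes. A quick check (assumes the function above is in scope):

import numpy as np

rng = np.random.default_rng(2)
z = rng.standard_normal(1000)
noisy = z + 0.1 * rng.standard_normal(1000)
print(debias_mse(noisy, z))        # ≈ -20 dB
print(debias_mse(5.0 * noisy, z))  # identical: the scale c is fit away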
def saturation_index_countour(lab, elem1, elem2, Ks, labels=False):
    plt.figure()
    plt.title('Saturation index %s%s' % (elem1, elem2))
    resolution = 100
    n = math.ceil(lab.time.size / resolution)
    plt.xlabel('Time')
    z = np.log10((lab.species[elem1]['concentration'][:, ::n] + 1e-8) * (
        lab.species[elem2]['concentration'][:, ::n] + 1e-8) / lab.constants[Ks])
    lim = np.max(abs(z))
    lim = np.linspace(-lim - 0.1, +lim + 0.1, 51)
    X, Y = np.meshgrid(lab.time[::n], -lab.x)
    CS = plt.contourf(X, Y, z, 20, cmap=ListedColormap(sns.color_palette(
        "RdBu_r", 101)), origin='lower', levels=lim, extend='both')
    if labels:
        plt.clabel(CS, inline=1, fontsize=10, colors='w')
    cbar = plt.colorbar(CS)
    plt.ylabel('Depth')
    ax = plt.gca()
    ax.ticklabel_format(useOffset=False)
    cbar.ax.set_ylabel('Saturation index %s%s' % (elem1, elem2))
    return ax
def make_spectrum(self, filename, use_normalize):
    sr, y = wav.read(filename)
    if sr != 16000:
        raise ValueError('Sampling rate is expected to be 16kHz!')
    if y.dtype != 'float32':
        y = np.float32(y / 32767.)

    D = librosa.stft(y, n_fft=512, hop_length=256, win_length=512, window=scipy.signal.hamming)
    Sxx = np.log10(abs(D)**2)
    if use_normalize:
        mean = np.mean(Sxx, axis=1).reshape((257, 1))
        std = np.std(Sxx, axis=1).reshape((257, 1)) + 1e-12
        Sxx = (Sxx - mean) / std

    slices = []
    for i in range(0, Sxx.shape[1] - self.FRAMELENGTH, self.OVERLAP):
        slices.append(Sxx[:, i:i + self.FRAMELENGTH])
    return np.array(slices)
def fit_koff(nmax=523, NN=4e8, **params):
    tbind = params.pop("tbind")
    params["kd"] = 1e9 / tbind
    dx = params.pop("dx")
    rw = randomwalk.get_rw(NAME, params, setup=setup_rw, calc=True)
    rw.domains[1].dx = dx
    times = draw_empirically(rw, N=NN, nmax=nmax, success=False)
    bins = np.logspace(np.log10(min(times)), np.log10(max(times)), 35)
    #bins = np.logspace(-3., 2., 35)
    hist, _ = np.histogram(times, bins=bins)
    cfd = np.cumsum(hist) / float(np.sum(hist))
    t = 0.5 * (bins[:-1] + bins[1:])
    tmean = times.mean()
    toff = NLS(t, cfd, t0=tmean)
    koff = 1. / toff
    return dict(t=t, cfd=cfd, toff=toff, tmean=tmean, koff=koff)

##### run rw in collect mode and draw bindings from empirical distributions
def _update_data_x(self):
    if self.is_zero_span():
        self._data_x = np.zeros(self.points)
        # data_x will be measured during first scan...
        return

    if self.logscale:
        raw_values = np.logspace(
            np.log10(self.start_freq),
            np.log10(self.stop_freq),
            self.points,
            endpoint=True)
    else:
        raw_values = np.linspace(self.start_freq,
                                 self.stop_freq,
                                 self.points,
                                 endpoint=True)
    values = np.zeros(len(raw_values))
    for index, val in enumerate(raw_values):
        values[index] = self.iq.__class__.frequency. \
            validate_and_normalize(self, val)  # retrieve the real freqs...
    self._data_x = values
def _set_widget_value(self, new_value,
                      transform_magnitude=lambda data:
                          20. * np.log10(np.abs(data) + sys.float_info.epsilon)):
    if new_value is None:
        return
    x, y = new_value
    shape = np.shape(y)
    if len(shape) > 2:
        raise ValueError("Data cannot be larger than 2 "
                         "dimensional")
    if len(shape) == 1:
        y = [y]
    self._set_real(np.isreal(y).all())
    for i, values in enumerate(y):
        self._display_curve_index(x, values, i, transform_magnitude=transform_magnitude)
    while (i + 1 < len(self.curves)):  # delete remaining curves
        i += 1
        self.curves[i].hide()