Python math module: pow() code examples
The code examples below, extracted from open-source Python projects, illustrate how math.pow() is used in practice.
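As a minimal reference before the project snippets, here is a short standalone sketch (the values are arbitrary) of what math.pow() itself does:

import math

print(math.pow(2, 10))   # 1024.0 -- math.pow() always returns a float
print(2 ** 10)           # 1024   -- the ** operator keeps int operands as ints
print(math.pow(2, 0.5))  # 1.4142135623730951 -- fractional exponents are supported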
def get_cubic_root(self):
    # We minimize the expression x^2 * D^2 + (1-x)^4 * C / h_min^2
    # over x, where x = sqrt(mu).
    # Substituting x = y + 1 into the first-order condition gives
    # y^3 + p*y = q,
    # where p = (D^2 * h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y, and x = y + 1 lies in [0, 1].
# http://mathworld.wolfram.com/VietasSubstitution.html
# eps in the numerator is to prevent momentum = 1 in case of zero gradient
p = (self._dist_to_opt + eps)**2 * (self._h_min + eps)**2 / 2 / (self._grad_var + eps)
w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0/3.0)
y = w - p / 3.0 / (w + eps)
x = y + 1
if DEBUG:
logging.debug("p %f, den %f", p, self._grad_var + eps)
logging.debug("w3 %f ", w3)
logging.debug("y %f, den %f", y, w + eps)
return x
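As a sanity check on the comment block above, here is a small standalone sketch (p is an arbitrary made-up value) applying the same Vieta substitution to y^3 + p*y + p = 0; get_cubic_root then simply shifts the root back with x = y + 1.

import math

def cubic_root_vieta(p):
    # Solve y^3 + p*y + p = 0 (i.e. y^3 + p*y = q with q = -p) via Vieta's substitution.
    w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = math.copysign(1.0, w3) * math.pow(abs(w3), 1.0 / 3.0)
    return w - p / (3.0 * w)

p = 0.5                        # arbitrary illustrative value
y = cubic_root_vieta(p)
print(abs(y**3 + p * y + p))   # residual, ~0 up to floating-point error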
def get_data_hash(self, data_bytes):
"""Calculate Merkle's root hash of the given data bytes"""
# Calculate tree parameters
data_len = len(data_bytes)
tree_populated_width = math.ceil(data_len / self._chunk_len)
tree_height = math.ceil(math.log2(tree_populated_width))
tree_width = int(math.pow(2, tree_height))
tree_bottom_layer = ['\x00'] * tree_width
with io.BytesIO(data_bytes) as b_data:
self._initial_hasher(
b_data,
tree_populated_width,
tree_bottom_layer
)
# Get Merkle's root hash
mrh = self._calculate_root_hash(tree_bottom_layer)
return mrh
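The three tree parameters above simply round the data length up to a power-of-two number of leaves. A quick standalone sketch with assumed data and chunk sizes:

import math

data_len, chunk_len = 10000, 1024             # assumed example sizes
populated = math.ceil(data_len / chunk_len)   # 10 chunks actually holding data
height = math.ceil(math.log2(populated))      # 4
width = int(math.pow(2, height))              # 16 leaves in the padded bottom layer
print(populated, height, width)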
def __nearest_pow_2(self,x):
"""
Find power of two nearest to x
>>> _nearest_pow_2(3)
2.0
>>> _nearest_pow_2(15)
16.0
:type x: float
:param x: Number
    :rtype: float
:return: Nearest power of 2 to x
"""
a = math.pow(2, math.ceil(np.log2(x)))
b = math.pow(2, math.floor(np.log2(x)))
if abs(a - x) < abs(b - x):
return a
else:
return b
# calculate spectrogram of signals
def _nearest_pow_2(x):
"""
Find power of two nearest to x
>>> _nearest_pow_2(3)
2.0
>>> _nearest_pow_2(15)
16.0
:type x: float
:param x: Number
    :rtype: float
:return: Nearest power of 2 to x
"""
a = M.pow(2, M.ceil(np.log2(x)))
b = M.pow(2, M.floor(np.log2(x)))
if abs(a - x) < abs(b - x):
return a
else:
return b
def test_length(self):
# should be able to unpack to r,g,b,a and r,g,b
c = pygame.Color(1,2,3,4)
self.assertEquals(len(c), 4)
c.set_length(3)
self.assertEquals(len(c), 3)
# it keeps the old alpha anyway...
self.assertEquals(c.a, 4)
# however you can't get the alpha in this way:
self.assertRaises (IndexError, lambda x:c[x], 4)
c.set_length(4)
self.assertEquals(len(c), 4)
self.assertEquals(len(c), 4)
self.assertRaises (ValueError, c.set_length, 5)
self.assertRaises (ValueError, c.set_length, -1)
self.assertRaises (ValueError, c.set_length, 0)
self.assertRaises (ValueError, c.set_length, pow(2,long_(33)))
def waf_get_ip_set(ip_set_id):
response = None
waf = boto3.client('waf')
for attempt in range(API_CALL_NUM_RETRIES):
try:
response = waf.get_ip_set(IPSetId=ip_set_id)
        except Exception as e:
print(e)
delay = math.pow(2, attempt)
print("[waf_get_ip_set] Retrying in %d seconds..." % (delay))
time.sleep(delay)
else:
break
else:
print("[waf_get_ip_set] Failed ALL attempts to call API")
return response
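The retry loop in these WAF helpers is plain exponential backoff (2^attempt seconds between attempts). A generic, boto3-independent sketch of the same pattern (the retry count and the wrapper name are illustrative, not part of the original project):

import math
import time

def call_with_backoff(fn, retries=5):
    """Retry fn() with exponential backoff; the retry count and name are illustrative."""
    for attempt in range(retries):
        try:
            return fn()
        except Exception as e:
            delay = math.pow(2, attempt)   # 1, 2, 4, 8, ... seconds
            print("retrying in %d seconds (%s)" % (delay, e))
            time.sleep(delay)
    raise RuntimeError("all %d attempts failed" % retries)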
def waf_update_ip_set(ip_set_id, updates_list):
response = None
if updates_list != []:
waf = boto3.client('waf')
for attempt in range(API_CALL_NUM_RETRIES):
try:
response = waf.update_ip_set(IPSetId=ip_set_id,
ChangeToken=waf.get_change_token()['ChangeToken'],
Updates=updates_list)
            except Exception as e:
delay = math.pow(2, attempt)
print("[waf_update_ip_set] Retrying in %d seconds..." % (delay))
time.sleep(delay)
else:
break
else:
print("[waf_update_ip_set] Failed ALL attempts to call API")
return response
def waf_update_ip_set(ip_set_id, source_ip):
waf = boto3.client('waf')
for attempt in range(API_CALL_NUM_RETRIES):
try:
response = waf.update_ip_set(IPSetId=ip_set_id,
ChangeToken=waf.get_change_token()['ChangeToken'],
Updates=[{
'Action': 'INSERT',
'IPSetDescriptor': {
'Type': 'IPV4',
'Value': "%s/32"%source_ip
}
}]
)
        except Exception as e:
            delay = math.pow(2, attempt)
            print("[waf_update_ip_set] Retrying in %d seconds..." % (delay))
            time.sleep(delay)
        else:
            break
    else:
        print("[waf_update_ip_set] Failed ALL attempts to call API")
def waf_get_ip_set(ip_set_id):
response = None
waf = boto3.client('waf')
for attempt in range(API_CALL_NUM_RETRIES):
try:
response = waf.get_ip_set(IPSetId=ip_set_id)
        except Exception as e:
print(e)
delay = math.pow(2, attempt)
print("[waf_get_ip_set] Retrying in %d seconds..." % (delay))
time.sleep(delay)
else:
break
else:
print("[waf_get_ip_set] Failed ALL attempts to call API")
return response
def can_delete_rule(stack_name, rule_id):
result = False
for attempt in range(API_CALL_NUM_RETRIES):
try:
waf = boto3.client('waf')
rule_detail = waf.get_rule(RuleId=rule_id)
            result = (stack_name is None or (rule_detail['Rule']['Name'].startswith(stack_name + " - ") and rule_detail['Rule']['Name'] != (stack_name + " - Whitelist Rule")))
        except Exception as e:
print(e)
delay = math.pow(2, attempt)
print("[can_delete_rule] Retrying in %d seconds..." % (delay))
time.sleep(delay)
else:
break
else:
print("[can_delete_rule] Failed ALL attempts to call API")
return result
def create_bitmap_grid(bitmap, n, num_bins, level_size):
"""
Arranges a time-series bitmap into a 2-D grid for heatmap visualization
"""
assert num_bins % n == 0, 'num_bins has to be a multiple of n'
m = num_bins // n
row_count = int(math.pow(m, level_size))
col_count = int(math.pow(n, level_size))
grid = np.full((row_count, col_count), 0.0)
for feat, count in bitmap.items():
i, j = symbols2index(m, n, feat)
grid[i, j] = count
return grid
def concEval (self, vmap = {}):
retv_lhs = self.lhs().concEval(vmap)
assert((type(retv_lhs) is int) or (type(retv_lhs) is float) or (isinstance(retv_lhs, Fraction)))
retv_rhs = self.rhs().concEval(vmap)
assert((type(retv_rhs) is int) or (type(retv_rhs) is float) or (isinstance(retv_rhs, Fraction)))
if (self.operator.label == "+"):
return (retv_lhs + retv_rhs)
elif (self.operator.label == "-"):
return (retv_lhs - retv_rhs)
elif (self.operator.label == "*"):
return (retv_lhs * retv_rhs)
elif (self.operator.label == "/"):
return (retv_lhs / retv_rhs)
elif (self.operator.label == "^"):
assert(type(retv_rhs) is int)
return math.pow(retv_lhs, retv_rhs)
else:
sys.exit("ERROR: unknown operator found in function \"similar\" of a BinaryExpr")
def search_nn_dist(self, point, distance, best=None):
"""
Search the n nearest nodes of the given point which are within given
distance
point must be a location, not a node. A list containing the n nearest
nodes to the point within the distance will be returned.
"""
if best is None:
best = []
# consider the current node
if self.dist(point) < distance:
best.append(self)
    # sort the children, nearer one first (is this really necessary?)
children = sorted(self.children, key=lambda c_p1: c_p1[0].dist(point))
for child, p in children:
# check if child node needs to be recursed
if self.axis_dist(point, self.axis) < math.pow(distance, 2):
child.search_nn_dist(point, distance, best)
return best
def Rstr(self):
array2=[]
prixe = math.log(0.03637 / float(252) + 1)
ret = self.sharedf
ret['change']=ret['change']-prixe
rstr = []
    print(1)
if len(ret) > 525:
for z in range(0, 504):
array2.append(math.pow(math.pow(float(1) / 2, float(1 / float(126))), (503 - z)))
for h in range(0,525):
rstr.append(numpy.NaN)
for c in range(525, len(ret)):
rett=0
for f in range(0,len(duan)-21):
rett=rett+duan.iloc[f, 16]*array2[f]
rstr.append(rett)
    print(rstr)
ret['rstr'] = rstr
return ret[['date','rstr']]
def Dastd(self):
dastd=[]
for x in range(0,251):
dastd.append(numpy.NaN)
dfgg = self.sharedf
weight=[]
all=0
num=0
for x in range(0,252):
weight.append(math.pow(math.pow(float(1) / 2, float(1 / float(63))), (252 - x - 1)))
all=all+math.pow(math.pow(float(1) / 2, float(1 / float(63))), (252 - x - 1))
for x in range(252,len(dfgg['change'])+1):
dd=0
mean=dfgg['change'][x-252:x].mean()
for y in dfgg['change'][x-252:x]:
dd= dd+math.sqrt(math.pow((y-mean),2)*weight[num]/all)
num=num+1
dastd.append(dd)
num=0
dfgg['dastd'] = dastd
return dfgg[['date','dastd']]
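Both Rstr and Dastd above build exponentially decaying weights of the form (1/2)^((N-1-i)/half_life), so an observation loses half its weight every half_life trading days. A compact sketch of that weighting (the 252-day window and 63-day half-life mirror Dastd; the helper name is made up):

import math

def half_life_weights(window, half_life):
    # weight of the i-th observation (0 = oldest): (1/2) ** ((window - 1 - i) / half_life)
    return [math.pow(math.pow(0.5, 1.0 / half_life), window - 1 - i) for i in range(window)]

w = half_life_weights(window=252, half_life=63)
print(w[-1], w[-64])   # 1.0 for the newest point, ~0.5 one half-life (63 days) earlier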
def setPolygon(self):
'''Calculate position and rotation of the arc arrow head.'''
rotDeg = 0
xlength = self.pos1.x() - self.pos2.x()
ylength = self.pos1.y() - self.pos2.y()
d = math.sqrt( math.pow( xlength , 2) + math.pow( ylength , 2) )
if d > 0:
beta = math.acos( xlength / d )
rotDeg = math.degrees( beta )
self.arrowPolygonObject.setPolygon( QtGui.QPolygonF( [
QtCore.QPointF( (self.pos2.x() -10), (self.pos2.y() +5)),
QtCore.QPointF( (self.pos2.x() -10) , (self.pos2.y() -5)),
QtCore.QPointF( self.pos2.x() , self.pos2.y())
] ) )
self.arrowPolygonObject.setBrush( QtGui.QBrush(QtCore.Qt.black) )
""" self.angle()!!!!!!!!!"""
# self.arcLinePolygon.angle()
# self.arcLinePolygon.rotate(rotDeg)
# self.arcLinePolygon.setPos( self.pos2 )
#------------------------------------------------------------------------------------------------
def setPolygon(self):
rotDeg = 0
xlength = self.pos1.x() - self.pos2.x()
ylength = self.pos1.y() - self.pos2.y()
d = math.sqrt( math.pow( xlength , 2) + math.pow( ylength , 2) )
if d > 0:
beta = math.acos( xlength / d )
rotDeg = math.degrees( beta )
self.arcLinePolygon.setPolygon( QtGui.QPolygonF( [
QtCore.QPointF( (self.pos2.x() -10), (self.pos2.y() +5)),
QtCore.QPointF( (self.pos2.x() -10) , (self.pos2.y() -5)),
QtCore.QPointF( self.pos2.x() , self.pos2.y())
] ) )
self.arcLinePolygon.setBrush( QtGui.QBrush(QtCore.Qt.black) )
""" self.angle()!!!!!!!!!"""
# self.arcLinePolygon.angle()
# self.arcLinePolygon.rotate(rotDeg)
# self.arcLinePolygon.setPos( self.pos2 )
#------------------------------------------------------------------------------------------------
def compute_circle_intersection(x0, y0, x1, y1, r0, r1):
d = compute_distance(x0, y0, x1, y1)
if d < math.fabs(r0 - r1) or r0 +r1 < d:
return None
a = (math.pow(r0, 2) - math.pow(r1, 2) + math.pow(d, 2))/float(2 * d)
h = math.sqrt(math.pow(r0, 2) - math.pow(a, 2))
x2 = x0 + a * (x1 - x0)/float(d)
y2 = y0 + a * (y1 - y0)/float(d)
x3 = x2 + h * (y1 - y0)/ d
y3 = y2 - h * (x1 - x0)/ d
x3_prime = x2 - h * (y1 - y0)/ d
y3_prime = y2 + h * (x1 - x0)/ d
return (x3, y3), (x3_prime, y3_prime)
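A quick numeric check of compute_circle_intersection, assuming the compute_distance helper it relies on is a plain Euclidean distance: two radius-5 circles centred at (0, 0) and (6, 0) should intersect at (3, -4) and (3, 4).

import math

def compute_distance(x0, y0, x1, y1):
    # assumed helper: Euclidean distance between the two centres
    return math.sqrt(math.pow(x1 - x0, 2) + math.pow(y1 - y0, 2))

print(compute_circle_intersection(0.0, 0.0, 6.0, 0.0, 5.0, 5.0))
# ((3.0, -4.0), (3.0, 4.0))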
def get_heat_index(temperature, humidity):
temperature = cf.s2f(temperature)
humidity = cf.s2f(humidity)
if humidity > 0.0 and temperature >= 77.0:
# temperature ºF
# humidity over 100
c1 = -42.379
c2 = 2.04901523
c3 = 10.14333127
c4 = -0.22475541
c5 = -0.00683783
c6 = -0.05481717
c7 = 0.00122874
c8 = 0.00085282
c9 = -0.00000199
hi = c1 + c2 * temperature + c3 * humidity + c4 * temperature *\
humidity + c5 * math.pow(temperature, 2.0) + c6 *\
math.pow(humidity, 2.0) + c7 * math.pow(temperature, 2.0) *\
humidity + c8 * temperature * math.pow(humidity, 2.0) + c9 *\
math.pow(temperature, 2.0) * math.pow(humidity, 2.0)
return hi - temperature
return 0
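The coefficients above are the Rothfusz heat-index regression in degrees Fahrenheit, and the function returns the excess hi - temperature rather than the heat index itself. A self-contained worked check at an assumed 90 F and 70 % relative humidity:

T, H = 90.0, 70.0   # assumed air temperature (F) and relative humidity (%)
hi = (-42.379 + 2.04901523 * T + 10.14333127 * H - 0.22475541 * T * H
      - 0.00683783 * T**2 - 0.05481717 * H**2 + 0.00122874 * T**2 * H
      + 0.00085282 * T * H**2 - 0.00000199 * T**2 * H**2)
print(hi)   # ~105.9, so get_heat_index would return hi - T, roughly 16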
def luminance(color: ColorType) -> float:
"""
Calculate the relative luminance (as defined by WCAG 2.0) of
the given color.
:param color: a color
    :return: the calculated relative luminance between 0.0 and 1.0
"""
rgb = color.rgb
vals = []
for c in rgb:
if c <= 0.03928:
c /= 12.92
else:
c = math.pow((c + 0.055) / 1.055, 2.4)
vals.append(c)
L = 0.2126 * vals[0] + 0.7152 * vals[1] + 0.0722 * vals[2]
return L
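This is the WCAG 2.0 relative-luminance formula: each sRGB channel is linearised and then weighted. A quick check with plain stand-in objects (assuming color.rgb yields channels in the 0.0-1.0 range): pure white should give 1.0 and pure black 0.0.

from types import SimpleNamespace

white = SimpleNamespace(rgb=(1.0, 1.0, 1.0))
black = SimpleNamespace(rgb=(0.0, 0.0, 0.0))
print(luminance(white))   # 1.0
print(luminance(black))   # 0.0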
def run(self):
log.debug('[ Start TweetThread ]')
i = 1
a = float(1.5)
    # wait until the other worker threads (GetInfoThread, GetCancelThread, GetNewsThread) have finished
while active_count() >= 3:
time.sleep(1)
else:
while True:
try:
t = self.queue.get(block=False, timeout=None)
except Exception:
                # the queue is empty, so stop tweeting
log.debug('[ End TweetThread ]\n')
break
if i < 12:
i += 1
                # wait 1.5^(number of tweets so far) seconds
w = pow(a, i)
time.sleep(w)
lib.tweeter.tweet(t)
def ndcg(self, y_true, y_pred, k = 20):
s = 0.
c = self.zipped(y_true, y_pred)
c_g = sorted(c, key=lambda x:x[0], reverse=True)
c_p = sorted(c, key=lambda x:x[1], reverse=True)
#idcg = [0. for i in range(k)]
idcg = np.zeros([k], dtype=np.float32)
dcg = np.zeros([k], dtype=np.float32)
#dcg = [0. for i in range(k)]
for i, (g,p) in enumerate(c_g):
if g > self.rel_threshold:
idcg[i:] += (math.pow(2., g) - 1.) / math.log(2. + i)
if i >= k:
break
for i, (g,p) in enumerate(c_p):
if g > self.rel_threshold:
dcg[i:] += (math.pow(2., g) - 1.) / math.log(2. + i)
if i >= k:
break
for idx, v in enumerate(idcg):
if v == 0.:
dcg[idx] = 0.
else:
dcg[idx] /= v
return dcg
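The gain and discount used above are (2^g - 1) and ln(i + 2) respectively. A stripped-down, hedged sketch of the same NDCG@k idea on plain lists, returning a single scalar instead of the per-position vector above (no relevance threshold):

import math

def dcg_at_k(gains, k):
    # same gain and discount as above: (2^g - 1) / ln(i + 2)
    return sum((math.pow(2.0, g) - 1.0) / math.log(i + 2.0)
               for i, g in enumerate(gains[:k]))

def ndcg_at_k(y_true, y_pred, k=20):
    ranked = [g for g, _ in sorted(zip(y_true, y_pred), key=lambda t: t[1], reverse=True)]
    idcg = dcg_at_k(sorted(y_true, reverse=True), k)
    return dcg_at_k(ranked, k) / idcg if idcg > 0 else 0.0

print(ndcg_at_k([3, 2, 0, 1], [0.2, 0.9, 0.5, 0.1], k=4))   # ~0.74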
def choose_location(self):
# initialize variables
S = len(self.location2visits) # number of already visited locations
if S == 0:
self.home = self.__preferential_exploration(self.home)
return self.home
    ## decide at random whether to return to a visited location or explore a new one
p_new = uniform(0, 1)
if p_new <= self.rho * pow(S, -self.gamma): # choose to return or explore
# PREFERENTIAL EXPLORATION
current_location = self.trajectory[-1] # the last visited location
return self.__preferential_exploration(current_location)
else:
# PREFERENTIAL RETURN
return self.__preferential_return()
def __get_waiting_time(self):
"""
Extract a waiting time from a power law with exponential cut-off distribution.
The parameters of the distribution are taken from the paper:
C. Song et al., Modelling the scaling properties of human mobility, Nature Physics 6, 818-823 (2010).
---
    To simulate a power law with exponential cut-off x^(-alpha) * exp(-lambda * x), we can generate an exponentially
    distributed random number x and then accept or reject it with probability p or 1-p respectively (i.e. accept if
    U < p and reject otherwise, where U is a uniform [0, 1] random variable), where p = (x/x_min)^(-alpha) and x_min=1.
http://www.santafe.edu/aaronc/powerlaws/
---
:return: float
a waiting time chosen from the waiting time distribution
"""
x = expon.rvs(1.0/self.tau)
while pow(x, -(1 + self.beta)) < uniform(0.0, 1.0):
x = expon.rvs(1.0/self.tau)
return x
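A minimal standard-library sketch of the rejection scheme the docstring describes; the tau and beta values are assumed example parameters, not necessarily the ones the class uses:

import math
import random

def waiting_time(tau=17.0, beta=0.8):
    # exponential proposal with mean tau, accepted with probability min(1, x**-(1 + beta))
    x = random.expovariate(1.0 / tau)
    while math.pow(x, -(1.0 + beta)) < random.uniform(0.0, 1.0):
        x = random.expovariate(1.0 / tau)
    return x

print(waiting_time())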
def generateStandardization(source_attr, target_attr, source_index):
tempSum = 0.0
anoSum = 0.0
for each in source_attr:
tempSum += each[source_index]
avg = tempSum/float(len(source_attr))
for each in source_attr:
anoSum += float(math.pow((each[source_index] - avg), 2))
standardV = anoSum/float(len(source_attr))
standardV = math.sqrt(standardV)
for i in range(len(source_attr)):
temp = source_attr[i][source_index]
res = (temp-avg) / standardV
target_attr[i].append(res)
# Directly copy attr without modification
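generateStandardization above is a per-column z-score: subtract the column mean and divide by the population standard deviation. The same transform on a plain list of made-up values:

from statistics import mean, pstdev

values = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]   # made-up column
mu, sigma = mean(values), pstdev(values)            # population std, matching the loop above
print(mu, sigma)                                    # 5.0 2.0
print([(v - mu) / sigma for v in values])           # [-1.5, -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 2.0]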
def distance(self,pos):
for i in range(self.numSpeakers):
SpkPos = vars.getVars("Speakers")[i].getCenter()
SpkRad = vars.getVars("Speakers")[i].getZoneRad()
dist = math.sqrt(math.pow((pos[0]-SpkPos[0]),2) + math.pow((pos[1]-SpkPos[1]),2))
amp = max(0, 1. - dist / float(SpkRad))
if self.isAList:
self.audio.setBlueAmp(i,amp)
self.audio.setRedAmp(i,amp)
elif self.currentCircle == self.blueCircle:
self.audio.setBlueAmp(i,amp)
elif self.currentCircle == self.redCircle:
self.audio.setRedAmp(i,amp)
# FL START 23/05/2017
    # This function was adapted to work with a PlayStation 3 controller.
    # The code will probably need to be adjusted if a different type of OSC controller is used.
def calc_gauss_amp(node_xyz, center=(0.0, 0.0, -2.0), sigma=(1.0, 1.0, 1.0),
amp=1.0, amp_cut=0.05, sym="qsym"):
"""calculated the Gaussian amplitude at the node
:param node_xyz: list of x,y,z node coordinates
:param center: list of x,y,z for Gaussian center
:param sigma: list of x,y,z Guassian width
:param amp: peak Gaussian source amplitude
:param amp_cut: lower threshold (pct of max) for amplitude creating a
point load
:param qsym: mesh symemetry (qsym, hsym, none)
:returns: nodeGaussAmp - point load amplitude at the specified node
"""
from math import pow, exp
exp1 = pow((node_xyz[1] - center[0]) / sigma[0], 2)
exp2 = pow((node_xyz[2] - center[1]) / sigma[1], 2)
exp3 = pow((node_xyz[3] - center[2]) / sigma[2], 2)
nodeGaussAmp = amp * exp(-(exp1 + exp2 + exp3))
if (nodeGaussAmp / amp) < amp_cut:
nodeGaussAmp = None
else:
nodeGaussAmp = sym_scale_amp(node_xyz, nodeGaussAmp, sym)
return nodeGaussAmp
def score_match(query_rep, text, length_penalty, dictionary=None, debug=False):
if text == "":
return 0
if not dictionary:
words = text.lower().split(' ')
else:
words = [w for w in dictionary.tokenize(text.lower())]
score = 0
rw = query_rep['words']
used = {}
for w in words:
if w in rw and w not in used:
score += rw[w]
if debug:
print("match: " + w)
used[w] = True
norm = math.sqrt(len(used))
score = score / math.pow(norm * query_rep['norm'], length_penalty)
return score
def get_num_samples(self, idx):
"""
Number of samples needed to estimate the population variance within the tolerance limit
Sample variance is normally distributed http://stats.stackexchange.com/a/105338/71884
(see warning below).
    Var(s^2) \approx (1/n) * (\mu_4 - \sigma^4)
Adjust n as per the tolerance needed to estimate the sample variance
warning: does not work for some distributions like bernoulli - https://stats.stackexchange.com/a/104911
use the min_samples for explicitly controlling the number of samples to be drawn
"""
if self.min_samples:
return self.min_samples
min_samples = 1000
tol = 10.0
required_precision = self.prec / tol
if not self.scipy_dist:
return min_samples
args, kwargs = self.scipy_arg_fn(**self.get_dist_params(idx, wrap_tensor=False))
try:
fourth_moment = np.max(self.scipy_dist.moment(4, *args, **kwargs))
var = np.max(self.scipy_dist.var(*args, **kwargs))
min_computed_samples = int(math.ceil((fourth_moment - math.pow(var, 2)) / required_precision))
except (AttributeError, ValueError):
return min_samples
return max(min_samples, min_computed_samples)
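For example, for a standard normal the fourth moment is 3 and the variance is 1, so the bound reduces to n = ceil((3 - 1) / required_precision). A small sketch using scipy.stats directly (the tolerance value is an assumption):

import math
from scipy.stats import norm

required_precision = 0.01                  # assumed tolerance on Var(s^2)
fourth_moment = norm.moment(4)             # 3.0 for the standard normal
var = norm.var()                           # 1.0
print(int(math.ceil((fourth_moment - math.pow(var, 2)) / required_precision)))   # 200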
def get_tolerance(zoom_level):
# pixels squared factor
tolerance_square_pixels = 7
# default Google/Bing map tile scales
metres_per_pixel = 156543.03390625 / math.pow(2.0, float(zoom_level + 1))
# the tolerance (metres) for vector simplification using the VW algorithm
square_metres_per_pixel = math.pow(metres_per_pixel, 2.0)
# tolerance to use
tolerance = square_metres_per_pixel * tolerance_square_pixels
return tolerance
# maximum number of decimal places for boundary coordinates - improves display performance
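For a feel of the scale of get_tolerance: at an assumed zoom level of 12 this works out to roughly 19.1 metres per pixel, giving a simplification tolerance of about 2.6e3 square metres.

print(get_tolerance(12))   # ~2556 square metres (about 19.1 m per pixel at zoom 12)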
def quadraticFactorisation(N=4):
    (p,q,pn) = primeFactorisation(N)  # prime factorisation of N
    for ptr0 in range(len(q)):  # make every exponent even
        if (q[ptr0] % 2): q[ptr0] += 1
    if len(p):  # append one more square factor so the product can be split into two parts
        if p[0] == 2: p.append(3); q.append(2)  # 2 is already a base, so append 3^2
        else: p.append(2); q.append(2)  # otherwise append 2^2
    x = y = 1
    slc = len(p) // 2  # split point between the two halves of the factor list
    for ptr1 in range(slc):  # build x from the first half of the factors
        x *= int(math.pow(p[ptr1],q[ptr1]))
    for ptr2 in range(slc,len(p)):  # build y from the second half of the factors
        y *= int(math.pow(p[ptr2],q[ptr2]))
    if (x % 2): x *= 4  # if x is odd, multiply by 4 (i.e. 2^2)
    if (y % 2): y *= 4  # if y is odd, multiply by 4 (i.e. 2^2)
    return solve(x,y)  # solve for a and b
def get_price_for_trade(prediction, trade):
"""Returns the price of a trade for a prediction."""
if trade.contract == 'CONTRACT_ONE':
old_quantity = prediction.contract_one
old_quantity_other = prediction.contract_two
else:
old_quantity = prediction.contract_two
old_quantity_other = prediction.contract_one
if trade.direction == 'BUY':
new_quantity = old_quantity + trade.quantity
else:
new_quantity = old_quantity - trade.quantity
price = (prediction.liquidity * math.log(
math.pow(math.e, (new_quantity / prediction.liquidity)) +
math.pow(math.e, (old_quantity_other / prediction.liquidity)))) - (
prediction.liquidity * math.log(
math.pow(math.e, (old_quantity / prediction.liquidity)) +
math.pow(math.e, (old_quantity_other / prediction.liquidity))))
return price
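This appears to be the cost-function difference of a logarithmic market scoring rule, C(q) = b * ln(e^(q1/b) + e^(q2/b)), so the trade price is C(q_new) - C(q_old). A worked check with made-up numbers (liquidity b = 100, starting from zero outstanding shares):

import math

def lmsr_cost(q1, q2, b=100.0):
    # assumed liquidity b; C(q) = b * ln(e^(q1/b) + e^(q2/b))
    return b * math.log(math.exp(q1 / b) + math.exp(q2 / b))

print(lmsr_cost(10.0, 0.0) - lmsr_cost(0.0, 0.0))   # ~5.12, the price of buying 10 shares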
def advance(self, dt, padding):
bodies = self.bodies
def calc_vel(i):
b1 = bodies[i]
for b2 in bodies:
d_pos = b1.pos.sub(b2.pos)
distance = d_pos.length() + padding
mag = dt / math.pow(distance, 3)
b1.vel = b1.vel.sub(d_pos.scale(b2.mass).scale(mag))
gpumap(calc_vel, self.indices)
def update(body):
body.pos = body.pos.add(body.vel.scale(dt))
gpumap(update, bodies)
def advance(self, dt, padding):
bodies = self.bodies
def calc_vel(i):
b1 = bodies[i]
for b2 in bodies:
d_pos = b1.pos.sub(b2.pos)
distance = d_pos.length() + padding
mag = dt / math.pow(distance, 3)
b1.vel = b1.vel.sub(d_pos.scale(b2.mass).scale(mag))
list(map(calc_vel, self.indices))
def update(body):
body.pos = body.pos.add(body.vel.scale(dt))
list(map(update, bodies))
def E_MurnV(V,a0,a1,a2,a3):
"""
This function implements the Murnaghan EOS (in a form which is best for fitting).
Returns the energy at the volume *V* using the coefficients *a0,a1,a2,a3*
from the equation:
    .. math::
       E(V) = a_0 - \frac{a_1 a_2}{a_3 - 1} + \frac{a_2 V}{a_3}\left(\frac{(a_1/V)^{a_3}}{a_3 - 1} + 1\right)
"""
res=np.zeros(len(V))
for i in range(0,len(V)):
res[i]=a0 - a2*a1/(a3-1.0) + V[i]*a2/a3*( pow(a1/V[i],a3)/(a3-1.0)+1.0 )
return res
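One handy property of the form above: at V = a1 the bracket reduces to a3/(a3 - 1), so E(a1) = a0; a1 therefore plays the role of the equilibrium volume and a0 of the minimum energy. A quick check with made-up coefficients:

import numpy as np

a0, a1, a2, a3 = -100.0, 50.0, 0.5, 4.0          # made-up coefficients
print(E_MurnV(np.array([a1]), a0, a1, a2, a3))   # [-100.] (up to rounding)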
# Other functions
def c_qv2(T,omega):
x = omega * kb1 / T
expx = math.exp(-x) # exponential term
x2 = math.pow(x,2)
return x2*K_BOLTZMANN_RY*expx/math.pow(expx-1.0,2)
################################################################################
#
# This function computes the thermal expansion alpha using the Gruneisen
# parameters
# more comments to be added
# First with min0, freq and grun T-independent
#
# More ibrav types to be implemented
def _get_max_sigma(self, R):
"""Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates.
"""
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
return max_sigma
def get_epsilon_k(self):
'''
Get $\epsilon_k$ according to the exploration schedule
'''
trial = self.env.count_trials - 2 # ?
if self.decayfun == 'tpower':
        # e = a^t, where 0 < a < 1
# self.f_epsilon = math.pow(0.9675, trial) # for 100 trials
self.f_epsilon = math.pow(0.9333, trial) # for 50 trials
elif self.decayfun == 'trig':
        # e = cos(a*t), with a > 0
# self.f_epsilon = math.cos(0.0168 * trial) # for 100 trials
self.f_epsilon = math.cos(0.03457 * trial) # for 50 trials
else:
# self.f_epsilon = max(0., 1. - (1./45. * trial)) # for 50 trials
self.f_epsilon = max(0., 1. - (1./95. * trial)) # for 100 trials
return self.f_epsilon
def getlearningrate(epoch, opt):
# update lr
lr = opt.LR
if opt.lrPolicy == "multistep":
if epoch + 1.0 > opt.nEpochs * opt.ratio[1]: # 0.6 or 0.8
lr = opt.LR * 0.01
elif epoch + 1.0 > opt.nEpochs * opt.ratio[0]: # 0.4 or 0.6
lr = opt.LR * 0.1
elif opt.lrPolicy == "linear":
k = (0.001-opt.LR)/math.ceil(opt.nEpochs/2.0)
lr = k*math.ceil((epoch+1)/opt.step)+opt.LR
elif opt.lrPolicy == "exp":
power = math.floor((epoch+1)/opt.step)
lr = lr*math.pow(opt.gamma, power)
elif opt.lrPolicy == "fixed":
lr = opt.LR
else:
assert False, "invalid lr policy"
return lr
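With the "exp" policy the learning rate is opt.LR * gamma^floor((epoch + 1) / step). A one-line check with assumed hyperparameters:

import math

LR, gamma, step, epoch = 0.1, 0.5, 30, 59   # assumed hyperparameters
print(LR * math.pow(gamma, math.floor((epoch + 1) / step)))   # 0.025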
def get_inputs_values(self, enemes, input_size=4):
inputs = []
for i in range(input_size):
inputs.append(0.0)
inputs[0] = (self.x * 1.0 / SCREEN_SIZE[0])
index = 1
for eneme in enemes:
inputs[index] = eneme.x * 1.0 / SCREEN_SIZE[0]
index += 1
inputs[index] = eneme.y * 1.0 / SCREEN_SIZE[1]
index += 1
# if len(enemes) > 0:
# distance = math.sqrt(math.pow(enemes[0].x + enemes[0].width/2 - self.x + self.width/2, 2) + math.pow(enemes[0].y + enemes[0].height/2 - self.y + self.height/2, 2));
if len(enemes) > 0 and self.x < enemes[0].x:
inputs[index] = -1.0
index += 1
else:
inputs[index] = 1.0
return inputs
def hex_to_rgb(value, alpha=True):
"""Convets a Hex code to a Blender RGB Value"""
gamma = 2.2
value = value.lstrip('#')
lv = len(value)
fin = list(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
r = pow(fin[0] / 255, gamma)
g = pow(fin[1] / 255, gamma)
b = pow(fin[2] / 255, gamma)
fin.clear()
fin.append(r)
fin.append(g)
fin.append(b)
if alpha == True:
fin.append(1.0)
return tuple(fin)
def rgb_to_hex(rgb):
"""Converts Blender RGB Value to Hex code"""
gamma = 1/2.2
fin = list(rgb)
r = fin[0]*255
g = fin[1]*255
b = fin[2]*255
r = int(255*pow(r / 255, gamma))
g = int(255*pow(g / 255, gamma))
b = int(255*pow(b / 255, gamma))
fin.clear()
fin.append(r)
fin.append(g)
fin.append(b)
fin = tuple(fin)
return '#%02x%02x%02x' % fin
def convertSize(size):
if (size == 0):
return '0 Bytes'
size_name = ("Bytes", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size,1024)))
p = math.pow(1024,i)
s = round(size/p,2)
return '{} {}'.format(s,size_name[i])
#http://stackoverflow.com/questions/1392413/calculating-a-directory-size-using-python
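A couple of example calls for convertSize (the expected strings follow directly from the formula above):

print(convertSize(1536))      # '1.5 KB'
print(convertSize(10 ** 9))   # '953.67 MB'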
def weibull(t1, t2, lam, k):
return 1 - exp(pow(t1 / lam,k) - pow(t2 / lam, k))
def calc_stem_radius(self, stem):
"""Calculate radius of this stem as defined in paper"""
if stem.depth == 0: # trunk
result = stem.length * self.param.ratio * self.param.radius_mod[0]
else: # other
result = self.param.radius_mod[stem.depth] * stem.parent.radius * pow((
stem.length / stem.parent.length), self.param.ratio_power)
result = max(0.005, result)
result = min(stem.radius_limit, result)
return result
def shape_ratio(self, shape, ratio):
"""Calculate shape ratio as defined in paper"""
if shape == 1: # spherical
result = 0.2 + 0.8 * sin(pi * ratio)
elif shape == 2: # hemispherical
result = 0.2 + 0.8 * sin(0.5 * pi * ratio)
elif shape == 3: # cylindrical
result = 1.0
elif shape == 4: # tapered cylindrical
result = 0.5 + 0.5 * ratio
elif shape == 5: # flame
if ratio <= 0.7:
result = ratio / 0.7
else:
result = (1.0 - ratio) / 0.3
elif shape == 6: # inverse conical
result = 1.0 - 0.8 * ratio
elif shape == 7: # tend flame
if ratio <= 0.7:
result = 0.5 + 0.5 * ratio / 0.7
else:
result = 0.5 + 0.5 * (1.0 - ratio) / 0.3
elif shape == 8: # envelope
if ratio < 0 or ratio > 1:
result = 0.0
elif ratio < 1 - self.param.prune_width_peak:
result = pow(ratio / (1 - self.param.prune_width_peak),
self.param.prune_power_high)
else:
result = pow((1 - ratio) / (1 - self.param.prune_width_peak),
self.param.prune_power_low)
else: # conical (0)
result = 0.2 + 0.8 * ratio
return result