Python numpy module: max() examples
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.max().
def pad_batch(mini_batch):
    mini_batch_size = len(mini_batch)
    max_sent_len1 = int(np.max([len(x[0]) for x in mini_batch]))
    max_sent_len2 = int(np.max([len(x[1]) for x in mini_batch]))
    # np.int was removed from NumPy; use an explicit integer dtype
    main_matrix1 = np.zeros((mini_batch_size, max_sent_len1), dtype=np.int64)
    main_matrix2 = np.zeros((mini_batch_size, max_sent_len2), dtype=np.int64)
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[0]):
            try:
                # index with the loop counters, not the loop values
                main_matrix1[idx1, idx2] = j
            except IndexError:
                pass
    for idx1, i in enumerate(mini_batch):
        for idx2, j in enumerate(i[1]):
            try:
                main_matrix2[idx1, idx2] = j
            except IndexError:
                pass
    main_matrix1_t = Variable(torch.from_numpy(main_matrix1))
    main_matrix2_t = Variable(torch.from_numpy(main_matrix2))
    return [main_matrix1_t, main_matrix2_t]
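A quick sanity run of pad_batch on toy token ids (a sketch; torch.autograd.Variable is deprecated in current PyTorch but matches the snippet above):

    import numpy as np
    import torch
    from torch.autograd import Variable

    mini_batch = [([1, 2, 3], [4, 5]), ([6], [7, 8, 9])]
    m1, m2 = pad_batch(mini_batch)
    # m1.data -> [[1, 2, 3], [6, 0, 0]]   (padded to max_sent_len1 = 3)
    # m2.data -> [[4, 5, 0], [7, 8, 9]]   (padded to max_sent_len2 = 3)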
def _zero_one_normalize(predictions, epsilon=1e-7):
    """Normalize the predictions to the range between 0.0 and 1.0.

    For some predictions like SVM predictions, we need to normalize them before
    calculating the interpolated average precision. The normalization will not
    change the rank in the original list and thus won't change the average
    precision.

    Args:
        predictions: a numpy 1-D array storing the sparse prediction scores.
        epsilon: a small constant to avoid the denominator being zero.

    Returns:
        The normalized predictions.
    """
    denominator = numpy.max(predictions) - numpy.min(predictions)
    # numpy.max(denominator, epsilon) would treat epsilon as an axis argument;
    # the builtin max() compares the two scalars as intended
    ret = (predictions - numpy.min(predictions)) / max(denominator, epsilon)
    return ret
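The fix above matters because np.max(a, b) treats its second positional argument as an axis, not a second value. A minimal sketch of the intended scalar comparison:

    import numpy as np

    scores = np.array([0.2, -1.5, 3.0])
    denominator = np.max(scores) - np.min(scores)  # 4.5
    safe_denominator = max(denominator, 1e-7)      # builtin max: scalar vs scalar
    normalized = (scores - np.min(scores)) / safe_denominator
    # array([0.3778, 0.0, 1.0]) (approximately)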
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
    qv = self.agent.q(states)
    q_t = self.target(next_states)  # Q(s', *)
    max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)
    target = cuda.to_cpu(qv.data.copy())
    for i in range(self.replay_size):
        # truthiness instead of `is True`, which is unreliable for numpy bools
        if episode_ends[i][0]:
            _r = np.sign(rewards[i])
        else:
            _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]
        target[i, actions[i]] = _r
    td = Variable(self.target.arr_to_gpu(target)) - qv
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # avoid division by zero
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)
    zeros = Variable(self.target.arr_to_gpu(np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
    loss = F.mean_squared_error(td_clip, zeros)
    self._loss = loss.data
    self._qv = np.max(qv.data)
    return loss
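The per-row maximum max_a Q(s', a) above can also be computed in a single vectorized call; a minimal sketch with made-up Q-values:

    import numpy as np

    q_t = np.array([[0.1, 0.9, 0.3],
                    [0.5, 0.2, 0.8]], dtype=np.float32)  # Q(s', a) per batch row
    max_q_prime = np.max(q_t, axis=1)                    # array([0.9, 0.8])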
def getLargestFaceBoundingBox(self, rgbImg, skipMulti=False):
    """
    Find the largest face bounding box in an image.

    :param rgbImg: RGB image to process. Shape: (height, width, 3)
    :type rgbImg: numpy.ndarray
    :param skipMulti: Skip image if more than one face detected.
    :type skipMulti: bool
    :return: The largest face bounding box in an image, or None.
    :rtype: dlib.rectangle
    """
    assert rgbImg is not None

    faces = self.getAllFaceBoundingBoxes(rgbImg)
    if (not skipMulti and len(faces) > 0) or len(faces) == 1:
        return max(faces, key=lambda rect: rect.width() * rect.height())
    else:
        return None
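Note that this example uses Python's builtin max(key=...) rather than numpy.max, since dlib rectangles are not arrays. The same pattern with plain (width, height) tuples, purely for illustration:

    boxes = [(40, 30), (64, 48), (20, 20)]           # hypothetical (width, height) pairs
    largest = max(boxes, key=lambda b: b[0] * b[1])  # (64, 48)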
def test_accuracy_full_batch(tokens, features, mini_batch_size, word_attn, sent_attn, th=0.5):
    p = []
    cnt = 0
    g = gen_minibatch1(tokens, features, mini_batch_size, False)
    for token, feature in g:
        if cnt % 100 == 0:
            print(cnt)
        cnt += 1
        y_pred = get_predictions(token, feature, word_attn, sent_attn)
        p.append(np.ndarray.flatten(y_pred.data.cpu().numpy()))
    p = [item for sublist in p for item in sublist]
    p = np.array(p)
    return p
def getRectArrangements(n):
    p = prime.Prime()
    f = p.getPrimeFactors(n)
    f_count = len(f)
    ma = multiplyArray(f)
    arrangements = set([(1, ma)])
    if f_count > 1:
        perms = set(p.getPermutations(f))
        for perm in perms:
            for i in range(1, f_count):
                v1 = multiplyArray(perm[0:i])
                v2 = multiplyArray(perm[i:])
                arrangements.add((min(v1, v2), max(v1, v2)))
    # Python 3: sorted() no longer accepts cmp=; wrap the comparator instead
    # (requires: import functools)
    return sorted(arrangements, key=functools.cmp_to_key(proportion_sort), reverse=True)
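How functools.cmp_to_key replaces the removed cmp= argument, shown with a stand-in comparator (proportion_sort is defined elsewhere in that project; compare_proportion below is hypothetical but follows the same negative/zero/positive contract):

    import functools

    def compare_proportion(a, b):
        # negative -> a sorts first, 0 -> equal, positive -> b sorts first
        return (a[0] * b[1]) - (b[0] * a[1])

    rects = [(1, 12), (3, 4), (2, 6)]
    print(sorted(rects, key=functools.cmp_to_key(compare_proportion), reverse=True))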
def update_sort_idcs(self):
    # The selected points are sorted before all the other points -- an easy
    # way to achieve this is to add the maximum score to their score
    if self.current_order == 0:
        score = self.score_x
    elif self.current_order == 1:
        score = self.score_y
    elif self.current_order == 2:
        score = self.score_z
    else:
        raise AssertionError(self.current_order)
    score = score.copy()
    if len(self.selected_points):
        score[np.array(sorted(self.selected_points))] += score.max()
    self.sort_idcs = np.argsort(score)
def update_data_sort_order(self, new_sort_order=None):
    if new_sort_order is not None:
        self.current_order = new_sort_order
    self.update_sort_idcs()
    self.data_image.set_extent((self.raw_lags[0], self.raw_lags[-1],
                                0, len(self.sort_idcs)))
    self.data_ax.set_ylim(0, len(self.sort_idcs))
    all_raw_data = self.raw_data
    all_raw_data /= (1 + self.raw_data.mean(1)[:, np.newaxis])
    if len(all_raw_data) > 0:
        cmax = 0.5 * all_raw_data.max()
        cmin = 0.5 * all_raw_data.min()
        all_raw_data = all_raw_data[self.sort_idcs, :]
    else:
        cmin = 0
        cmax = 1
    self.data_image.set_data(all_raw_data)
    self.data_image.set_clim(cmin, cmax)
    self.data_selection.set_y(len(self.sort_idcs) - len(self.selected_points))
    self.data_selection.set_height(len(self.selected_points))
    self.update_data_plot()
def __init__(self, top_n=None):
    """Construct an AveragePrecisionCalculator to calculate average precision.

    This class is used to calculate the average precision for a single label.

    Args:
        top_n: A positive Integer specifying the average precision at n, or
            None to use all provided data points.

    Raises:
        ValueError: An error occurred when top_n is not a positive integer.
    """
    # the docstring and the error message require a *positive* integer,
    # so reject zero as well
    if not ((isinstance(top_n, int) and top_n > 0) or top_n is None):
        raise ValueError("top_n must be a positive integer or None.")

    self._top_n = top_n  # average precision at n
    self._total_positives = 0  # total number of positives seen so far
    self._heap = []  # max heap of (prediction, actual)
def getTrainKernel(self, params):
    self.checkParams(params)
    if self.sameParams(params):
        return self.cache['getTrainKernel']
    ell = np.exp(params[0])
    if self.K_sq is None:
        K = sq_dist(self.X_scaled.T / ell)  # precompute squared distances
    else:
        K = self.K_sq / ell**2
    self.cache['K_sq_scaled'] = K
    K_exp = np.exp(-K / 2.0)
    self.cache['getTrainKernel'] = K_exp
    self.saveParams(params)
    return K_exp
def Kdim(self, kdimParams):
    if (self.prevKdimParams is not None
            and np.max(np.abs(kdimParams - self.prevKdimParams)) < self.epsilon):
        return self.cache['Kdim']
    K = np.zeros((self.n, self.n, len(self.kernels)))
    params_ind = 0
    for k_i, k in enumerate(self.kernels):
        numHyp = k.getNumParams()
        # Python 3: xrange is gone and np.int was removed; use range/np.int64
        kernelParams_range = np.array(range(params_ind, params_ind + numHyp), dtype=np.int64)
        kernel_params = kdimParams[kernelParams_range]
        if ((numHyp == 0 and 'Kdim' in self.cache)
                or (numHyp > 0 and self.prevKdimParams is not None
                    and np.max(np.abs(kernel_params - self.prevKdimParams[kernelParams_range])) < self.epsilon)):
            K[:, :, k_i] = self.cache['Kdim'][:, :, k_i]
        else:
            K[:, :, k_i] = k.getTrainKernel(kernel_params)
        params_ind += numHyp
    self.prevKdimParams = kdimParams.copy()
    self.cache['Kdim'] = K
    return K
def removeTopPCs(X, numRemovePCs):
    t0 = time.time()
    X_mean = X.mean(axis=0)
    X -= X_mean
    XXT = symmetrize(blas.dsyrk(1.0, X, lower=0))
    s, U = la.eigh(XXT)
    if np.min(s) < -1e-4:
        raise Exception('Negative eigenvalues found')
    s[s < 0] = 0
    ind = np.argsort(s)[::-1]
    U = U[:, ind]
    s = s[ind]
    s = np.sqrt(s)

    # remove null PCs
    ind = (s > 1e-6)
    U = U[:, ind]
    s = s[ind]

    V = X.T.dot(U / s)
    # print('max diff:', np.max(((U*s).dot(V.T) - X)**2))
    X = (U[:, numRemovePCs:] * s[numRemovePCs:]).dot((V.T)[numRemovePCs:, :])
    X += X_mean
    return X
def resample(image, scan, new_spacing=[1, 1, 1]):
    # Determine current pixel spacing
    spacing = map(float, ([scan[0].SliceThickness] + scan[0].PixelSpacing))
    spacing = np.array(list(spacing))
    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor
    # Neither mode='wrap' nor cval=-1024 guarantees that the min and max values
    # survive the interpolation; mode='nearest' behaved best here.
    # (order=1 bilinear would also preserve min/max and is faster than the
    # default spline.)
    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
    return image, new_spacing
def mypsd(Rates, time_range, bin_w=5., nmax=4000):
    bins = np.arange(0, len(time_range), 1)
    a, b = np.histogram(Rates, bins)
    ff = (1. / len(bins)) * abs(np.fft.fft(Rates - np.mean(Rates)))**2
    Fs = 1. / (1 * 0.001)
    # integer division for slice bounds; len(bins/2) was a bug (it equals len(bins))
    freq2 = np.fft.fftfreq(len(bins))[0:len(bins) // 2 + 1]  # d=dt
    freq = np.fft.fftfreq(len(bins))[:len(ff) // 2 + 1]
    px = ff[0:len(ff) // 2 + 1]
    max_px = np.max(px[1:])
    idx = px == max_px
    # pylab.find was removed from matplotlib; np.where gives the same indices
    corr_freq = freq[np.where(idx)[0]]
    new_px = px
    max_pow = new_px[np.where(idx)[0]]
    return new_px, freq, corr_freq[0], freq2, max_pow
def spec_entropy(Rates, time_range=[], bin_w=5., freq_range=[]):
    '''Function to calculate the spectral entropy'''
    power, freq, dfreq, dummy1, dummy2 = mypsd(Rates, time_range, bin_w=bin_w)
    if freq_range != []:
        power = power[(freq >= freq_range[0]) & (freq <= freq_range[1])]
        freq = freq[(freq >= freq_range[0]) & (freq <= freq_range[1])]
    maxFreq = freq[np.where(power == np.max(power))] * 1000 * 100
    perMax = (np.max(power) / np.sum(power)) * 100
    k = len(freq)
    power = power / sum(power)
    sum_power = 0
    for ii in range(k):
        sum_power += (power[ii] * np.log(power[ii]))
    spec_ent = -(sum_power / np.log(k))
    return spec_ent, dfreq, maxFreq, perMax
def testStartStopModulation(self):
    radiusInMilliRad = 12.4
    frequencyInHz = 100.
    centerInMilliRad = [-10, 15]
    self._tt.setTargetPosition(centerInMilliRad)
    self._tt.startModulation(radiusInMilliRad,
                             frequencyInHz,
                             centerInMilliRad)
    self.assertTrue(
        np.allclose(
            [1, 1, 0],
            self._ctrl.getWaveGeneratorStartStopMode()))
    waveform = self._ctrl.getWaveform(1)
    wants = self._tt._milliRadToGcsUnitsOneAxis(-10, self._tt.AXIS_A)
    got = np.mean(waveform)
    self.assertAlmostEqual(
        wants, got, msg="wants %g, got %g" % (wants, got))
    wants = self._tt._milliRadToGcsUnitsOneAxis(-10 + 12.4, self._tt.AXIS_A)
    got = np.max(waveform)
    self.assertAlmostEqual(
        wants, got, msg="wants %g, got %g" % (wants, got))
    self._tt.stopModulation()
    self.assertTrue(
        np.allclose(centerInMilliRad, self._tt.getTargetPosition()))
def getLatLonRange(pbo_info, station_list):
    '''
    Retrieve the range of latitude and longitude occupied by a set of stations

    @param pbo_info: PBO Metadata
    @param station_list: List of stations

    @return list containing two tuples, lat_range and lon_range
    '''
    coord_list = getStationCoords(pbo_info, station_list)
    lat_list = []
    lon_list = []
    for coord in coord_list:
        lat_list.append(coord[0])
        lon_list.append(coord[1])
    lat_range = (np.min(lat_list), np.max(lat_list))
    lon_range = (np.min(lon_list), np.max(lon_list))
    return [lat_range, lon_range]
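np.min and np.max accept plain Python lists, which is what makes the range computation above a one-liner; a toy run with made-up coordinates:

    import numpy as np

    lat_list = [34.1, 36.7, 35.2]
    lon_list = [-118.2, -121.9, -120.7]
    lat_range = (np.min(lat_list), np.max(lat_list))  # (34.1, 36.7)
    lon_range = (np.min(lon_list), np.max(lon_list))  # (-121.9, -118.2)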
def conv1(model):
    n1, n2, x, y, z = model.conv1.W.shape
    fig = plt.figure()
    for nn in range(0, n1):
        ax = fig.add_subplot(4, 5, nn + 1, projection='3d')
        ax.set_xlim(0.0, x)
        ax.set_ylim(0.0, y)
        ax.set_zlim(0.0, z)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        # the filter-wise extrema are loop-invariant, so hoist them out of the
        # inner loops (and avoid shadowing the builtins max/min)
        w_max = np.max(model.conv1.W.data[nn, :])
        w_min = np.min(model.conv1.W.data[nn, :])
        step = (w_max - w_min) / 1.0
        for xx in range(0, x):
            for yy in range(0, y):
                for zz in range(0, z):
                    C = (model.conv1.W.data[nn, 0, xx, yy, zz] - w_min) / step
                    color = cm.cool(C)
                    C = abs(1.0 - C)
                    ax.plot(np.array([xx]), np.array([yy]), np.array([zz]),
                            "o", color=color, ms=7.0 * C, mew=0.1)
    plt.savefig("result/graph_conv1.png")
def create_graph():
    logfile = 'result/log'
    xs = []
    ys = []
    ls = []
    f = open(logfile, 'r')
    data = json.load(f)
    print(data)
    for d in data:
        xs.append(d["iteration"])
        ys.append(d["main/accuracy"])
        ls.append(d["main/loss"])
    plt.clf()
    plt.cla()
    # dashed reference line at y=1, spanning the full iteration range
    plt.hlines(1, 0, np.max(xs), colors='r', linestyles="dashed")
    plt.title(r"loss/accuracy")
    plt.plot(xs, ys, label="accuracy")
    plt.plot(xs, ls, label="loss")
    plt.legend()
    plt.savefig("result/log.png")
def reshapeWeights(self, weights, normalize=True, modifier=None):
    # reshape the weights matrix to a grid for visualization
    n_rows = int(np.sqrt(weights.shape[1]))
    n_cols = int(np.sqrt(weights.shape[1]))
    kernel_size = int(np.sqrt(weights.shape[0] / 3))
    weights_grid = np.zeros((int((np.sqrt(weights.shape[0] / 3) + 1) * n_rows),
                             int((np.sqrt(weights.shape[0] / 3) + 1) * n_cols), 3),
                            dtype=np.float32)
    # integer division (//) keeps range() happy under Python 3
    for i in range(weights_grid.shape[0] // (kernel_size + 1)):
        for j in range(weights_grid.shape[1] // (kernel_size + 1)):
            index = i * (weights_grid.shape[0] // (kernel_size + 1)) + j
            if not np.isclose(np.sum(weights[:, index]), 0):
                if normalize:
                    weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] = \
                        (weights[:, index].reshape(kernel_size, kernel_size, 3) - np.min(weights[:, index])) / \
                        ((np.max(weights[:, index]) - np.min(weights[:, index])) + 1.e-6)
                else:
                    weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                                 j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] = \
                        weights[:, index].reshape(kernel_size, kernel_size, 3)
            if modifier is not None:
                weights_grid[i * (kernel_size + 1):i * (kernel_size + 1) + kernel_size,
                             j * (kernel_size + 1):j * (kernel_size + 1) + kernel_size] *= modifier[index]
    return weights_grid
def extract_features_from_roi(roi):
    roi_width = roi.shape[1]
    roi_height = roi.shape[2]
    new_width = roi_width // feature_size
    new_height = roi_height // feature_size
    pooled_values = np.zeros([feature_size, feature_size, 512])
    for j in range(512):
        for i in range(feature_size):
            for k in range(feature_size):
                # 'and' instead of '&': the bitwise operator binds tighter than
                # '==' and silently changed the comparison
                if k == (feature_size - 1) and i == (feature_size - 1):
                    patch = roi[j, i * new_width:roi_width, k * new_height:roi_height]
                elif k == (feature_size - 1):
                    patch = roi[j, i * new_width:(i + 1) * new_width, k * new_height:roi_height]
                elif i == (feature_size - 1):
                    patch = roi[j, i * new_width:roi_width, k * new_height:(k + 1) * new_height]
                else:
                    patch = roi[j, i * new_width:(i + 1) * new_width, k * new_height:(k + 1) * new_height]
                pooled_values[i, k, j] = np.max(patch)
    return pooled_values
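When the region divides evenly into the pooling grid, the same max-pooling can be done without the triple loop by reshaping into blocks and reducing with np.max over the block axes. A sketch under that assumption (shapes are illustrative; note the output here is channel-first, unlike pooled_values above):

    import numpy as np

    feature_size = 7
    roi = np.random.rand(512, 14, 14)  # (channels, H, W), hypothetical shape
    c, h, w = roi.shape
    blocks = roi.reshape(c, feature_size, h // feature_size,
                         feature_size, w // feature_size)
    pooled = np.max(blocks, axis=(2, 4))  # shape (512, 7, 7)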
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                random_sample, sample_num, sample_aug):
    """ room2block, with input filename and RGB preprocessing.
        For each block, centralize XYZ and append normalized XYZ as channels 6-8.
    """
    data = data_label[:, 0:6]
    data[:, 3:6] /= 255.0
    label = data_label[:, -1].astype(np.uint8)
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])

    data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
                                          random_sample, sample_num, sample_aug)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0] / max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1] / max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2] / max_room_z
        minx = min(data_batch[b, :, 0])
        miny = min(data_batch[b, :, 1])
        data_batch[b, :, 0] -= (minx + block_size / 2)
        data_batch[b, :, 1] -= (miny + block_size / 2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
def room2samples_plus_normalized(data_label, num_point):
    """ room2sample, with input filename and RGB preprocessing.
        For each sample, append normalized XYZ as channels 6-8
        (no per-block centering here, unlike room2blocks_plus_normalized).
    """
    data = data_label[:, 0:6]
    data[:, 3:6] /= 255.0
    label = data_label[:, -1].astype(np.uint8)
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])

    data_batch, label_batch = room2samples(data, label, num_point)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        new_data_batch[b, :, 6] = data_batch[b, :, 0] / max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1] / max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2] / max_room_z
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
def mine(self, im, gt_bboxes):
    """
    Propose bounding boxes using proposer, and
    augment non-overlapping boxes with IoU < 0.1
    to the ground truth set.
    (up to a maximum of num_proposals)
    """
    bboxes = self.proposer_.process(im)

    if len(gt_bboxes):
        # Determine bboxes that have low IoU with ground truth
        # iou = [N x GT]
        iou = brute_force_match(bboxes, gt_bboxes,
                                match_func=lambda x, y: intersection_over_union(x, y))
        # keep proposals whose best ground-truth overlap is below 0.1
        overlap_inds, = np.where(np.max(iou, axis=1) < 0.1)
        bboxes = bboxes[overlap_inds]

    bboxes = bboxes[:self.num_proposals_]
    targets = self.generate_targets(len(bboxes))
    return bboxes, targets
def inc_region(self, dst, y, x, h, w):
    '''Increments dst in the specified region. Runs fastest on np.int8, but not
    much slower on np.int16.'''
    dh, dw = dst.shape
    h2 = h // 2
    w2 = w // 2
    py = y - h2
    px = x - w2
    y_min = max(0, py)
    y_max = min(dh, y + h2)
    x_min = max(0, px)
    x_max = min(dw, x + w2)
    if y_max - y_min <= 0 or x_max - x_min <= 0:
        return
    dst[y_min:y_max, x_min:x_max] += 1
def compHistDistance(h1, h2):
    def normalize(h):
        if np.sum(h) == 0:
            return h
        else:
            return h / np.sum(h)

    def smoothstep(x, x_min=0., x_max=1., k=2.):
        m = 1. / (x_max - x_min)
        b = -m * x_min
        x = m * x + b
        return betainc(k, k, np.clip(x, 0., 1.))

    def fn(X, Y, k):
        return 4. * (1. - smoothstep(Y, 0, (1 - Y) * X + Y + .1)) \
               * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) \
               + 2. * smoothstep(Y, 0, (1 - Y) * X + Y + .1) \
               * (1. - 2. * np.sqrt(2 * X) * smoothstep(X, 0., 1. / k, 2) - 0.5)

    h1 = normalize(h1)
    h2 = normalize(h2)
    return max(0, np.sum(fn(h2, h1, len(h1))))
    # Alternative: KL divergence
    # return np.sum(np.where(h2 != 0, h2 * np.log10(h2 / (h1 + 1e-10)), 0))
def effective_sample_size(x, mu, var, logger):
    """
    Calculate the effective sample size of a sequence generated by MCMC.
    :param x: samples, shaped (batch, time, dimension)
    :param mu: mean of the variable
    :param var: variance of the variable
    :param logger: logger
    :return: effective sample size of the sequence
    Make sure that `mu` and `var` are correct!
    """
    # batch size, time, dimension
    b, t, d = x.shape
    ess_ = np.ones([d])
    for s in range(1, t):
        p = auto_correlation_time(x, s, mu, var)
        if np.sum(p > 0.05) == 0:
            break
        else:
            for j in range(0, d):
                if p[j] > 0.05:
                    ess_[j] += 2.0 * p[j] * (1.0 - float(s) / t)
    logger.info('ESS: max [%f] min [%f] / [%d]' % (t / np.min(ess_), t / np.max(ess_), t))
    return t / ess_
def score_fun_first_term(vals_hist, a_mid):
    total = 0.0  # avoid shadowing the builtin sum()
    # dict views aren't array-like in Python 3; materialize before np.max
    lim = int(np.max(list(vals_hist.keys())))
    for i in range(0, lim + 1):
        if vals_hist[i] > 0:
            inner_sum = 0.0
            for j in range(0, i):
                inner_sum += j / (1.0 + a_mid * j)
            total += vals_hist[i] * inner_sum
    return total
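The list() call matters on Python 3, where dict views are not array-like and np.max over them no longer yields a usable maximum key; a tiny repro:

    import numpy as np

    vals_hist = {0: 2, 3: 1, 7: 4}
    lim = int(np.max(list(vals_hist.keys())))  # 7
    # int(np.max(vals_hist.keys())) fails: the view is wrapped as a 0-d object array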
##############################
## in-line functions
##############################
def estimate_clipping_rect(projector, size):
    """
    Return:
    rect -- NSRect style 2d-tuple.
    flipped (bool) -- Whether y-axis is flipped.
    """
    # lt -> rt -> lb -> rb
    image_corners = [(0, 0), (size[0], 0), (0, size[1]), size]
    x_points = []
    y_points = []
    for corner in image_corners:
        x, y = map(int, projector.project_point(*corner))
        x_points.append(x)
        y_points.append(y)
    min_x = min(x_points)
    min_y = min(y_points)
    max_x = max(x_points)
    max_y = max(y_points)
    rect = ((min_x, min_y), (max_x - min_x, max_y - min_y))
    flipped = y_points[3] < 0
    return rect, flipped
def to_dim_times_two(self, bounds):
    """return boundaries in format ``[[lb0, ub0], [lb1, ub1], ...]``,
    as used by ``BoxConstraints...`` class.
    """
    if not bounds:
        b = [[None, None]]
    else:
        l = [None, None]  # figure out lengths
        for i in [0, 1]:
            try:
                l[i] = len(bounds[i])
            except TypeError:
                bounds[i] = [bounds[i]]
                l[i] = 1
        b = []  # bounds in different format
        try:
            for i in range(max(l)):
                b.append([bounds[0][i] if i < l[0] else None,
                          bounds[1][i] if i < l[1] else None])
        except (TypeError, IndexError):
            print("boundaries must be provided in the form " +
                  "[scalar_or_vector, scalar_or_vector]")
            raise
    return b
def alleviate_conditioning_in_coordinates(self, condition=1e8):
    """pass scaling from `C` to `sigma_vec`.
    As a result, `C` is a correlation matrix, i.e., all diagonal
    entries of `C` are `1`.
    """
    if max(self.dC) / min(self.dC) > condition:
        # allows for much larger condition numbers, if axis-parallel
        if hasattr(self, 'sm') and isinstance(self.sm, sampler.GaussFullSampler):
            old_coordinate_condition = max(self.dC) / min(self.dC)
            old_condition = self.sm.condition_number
            factors = self.sm.to_correlation_matrix()
            self.sigma_vec *= factors
            self.pc /= factors
            self._updateBDfromSM(self.sm)
            utils.print_message('\ncondition in coordinate system exceeded'
                                ' %.1e, rescaled to %.1e, '
                                '\ncondition changed from %.1e to %.1e'
                                % (old_coordinate_condition, max(self.dC) / min(self.dC),
                                   old_condition, self.sm.condition_number),
                                iteration=self.countiter)
def plot_axes_scaling(self, iabscissa=1):
    from matplotlib import pyplot
    if not hasattr(self, 'D'):
        self.load()
    dat = self
    if np.max(dat.D[:, 5:]) == np.min(dat.D[:, 5:]):
        pyplot.text(0, dat.D[-1, 5],
                    'all axes scaling values equal to %s'
                    % str(dat.D[-1, 5]),
                    verticalalignment='center')
        return self  # nothing interesting to plot
    self._enter_plotting()
    pyplot.semilogy(dat.D[:, iabscissa], dat.D[:, 5:], '-b')
    pyplot.grid(True)
    ax = array(pyplot.axis())
    pyplot.axis(ax)
    pyplot.title('Principal Axes Lengths')
    self._xlabel(iabscissa)
    self._finalize_plotting()
    return self
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        scale = max(1, dim ** .5 / 8.)
        self.linearTF = scale * compute_rotation(self.rseed, dim)
        # if self.zerox:
        #     self.xopt = zeros(dim)  # does not work here
        # else:
        # TODO: clean this line
        self.xopt = np.hstack(dot(self.linearTF, 0.5 * np.ones((dim, 1)) / scale ** 2))
    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
def update_measure(self):
    """updated noise level measure using two fitness lists ``self.fit`` and
    ``self.fitre``, return ``self.noiseS, all_individual_measures``.
    Assumes that ``self.idx`` contains the indices where the fitness
    lists differ.
    """
    lam = len(self.fit)
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])

    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]
    # compute measurement
    # max: 1 rank change in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
def logscale_img(img_array,
                 cap=255.0,
                 coeff=1000.0):
    '''
    This scales the image according to the relation:

    logscale_img = np.log(coeff*(img/max(img))+1)/np.log(coeff)

    Taken from the DS9 scaling algorithms page at:
    http://hea-www.harvard.edu/RD/ds9/ref/how.html

    According to that page:
    coeff = 1000.0 works well for optical images
    coeff = 100.0 works well for IR images
    '''
    logscaled_img = np.log(coeff * img_array / np.nanmax(img_array) + 1) / np.log(coeff)
    return cap * logscaled_img
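np.nanmax is used instead of np.max so that NaN pixels do not poison the normalization; a quick check of the difference:

    import numpy as np

    img = np.array([[10.0, 200.0],
                    [np.nan, 120.0]])
    np.max(img)     # nan   -- NaN propagates through np.max
    np.nanmax(img)  # 200.0 -- NaNs are ignored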
def genplot(x, y, fit, xdata=None, ydata=None, maxpts=10000):
    bin_range = (0, 360)
    a = np.arange(*bin_range)
    f_a = nuth_func(a, fit[0], fit[1], fit[2])
    nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
    if xdata.size > maxpts:
        import random
        # subsample to maxpts (was hard-coded to 10000)
        idx = random.sample(list(range(xdata.size)), maxpts)
    else:
        idx = np.arange(xdata.size)
    f, ax = plt.subplots()
    ax.set_xlabel('Aspect (deg)')
    ax.set_ylabel('dh/tan(slope) (m)')
    ax.plot(xdata[idx], ydata[idx], 'k.', label='Orig pixels')
    ax.plot(x, y, 'ro', label='Bin median')
    ax.axhline(color='k')
    ax.plot(a, f_a, 'b', label=nuth_func_str)
    ax.set_xlim(*bin_range)
    pad = 0.2 * np.max([np.abs(y.min()), np.abs(y.max())])
    ax.set_ylim(y.min() - pad, y.max() + pad)
    ax.legend(prop={'size': 8})
    return f
# Function copied from openPIV pyprocess
def center_clipping(x, percent=30):
    """
    Performs center clipping, a spectral whitening process.
    We need some type of spectrum flattening so that the
    speech signal more closely approximates a periodic impulse train.

    Args:
        x (array): signal data
        percent (float): percent threshold to clip

    Returns:
        cc (array): center clipped signal
        clip_level (float): value of clipping
    """
    max_amp = np.max(np.abs(x))
    clip_level = max_amp * (percent / 100)
    positive_mask = x > clip_level
    negative_mask = x < -clip_level
    cc = np.zeros(x.shape)
    cc[positive_mask] = x[positive_mask] - clip_level
    cc[negative_mask] = x[negative_mask] + clip_level
    return cc, clip_level
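A short run of the clipper on a hand-picked toy signal:

    import numpy as np

    x = np.array([0.1, -0.8, 0.5, -0.2, 1.0])
    cc, clip_level = center_clipping(x, percent=30)
    # clip_level = 0.3 (30% of the max absolute amplitude 1.0)
    # cc -> [0.0, -0.5, 0.2, 0.0, 0.7]: samples inside [-0.3, 0.3] are zeroed,
    # the rest are shifted toward zero by the clip level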
def calc_tvd(sess, Generator, Data, N=50000, nbins=10):
    Xd = sess.run(Data.X, {Data.N: N})
    step, Xg = sess.run([Generator.step, Generator.X], {Generator.N: N})
    # histogramdd's `normed` argument is deprecated in favor of `density`
    p_gen, _ = np.histogramdd(Xg, bins=nbins, range=[[0, 1], [0, 1], [0, 1]], density=True)
    p_dat, _ = np.histogramdd(Xd, bins=nbins, range=[[0, 1], [0, 1], [0, 1]], density=True)
    p_gen /= nbins**3
    p_dat /= nbins**3
    tvd = 0.5 * np.sum(np.abs(p_gen - p_dat))  # total variation distance
    mvd = np.max(np.abs(p_gen - p_dat))        # maximum variation distance
    return step, tvd, mvd
def scatter2d(x, y, title='2dscatterplot', xlabel=None, ylabel=None):
    fig = plt.figure()
    plt.scatter(x, y)
    plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if not 0 <= np.min(x) <= np.max(x) <= 1:
        raise ValueError('summary_scatter2d title:', title,
                         ' input x exceeded [0,1] range. min:', np.min(x),
                         ' max:', np.max(x))
    if not 0 <= np.min(y) <= np.max(y) <= 1:
        raise ValueError('summary_scatter2d title:', title,
                         ' input y exceeded [0,1] range. min:', np.min(y),
                         ' max:', np.max(y))
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    return fig
def get_next_batch(experience, model, num_actions, gamma, batch_size):
    batch_indices = np.random.randint(low=0, high=len(experience),
                                      size=batch_size)
    batch = [experience[i] for i in batch_indices]
    X = np.zeros((batch_size, 80, 80, 4))
    Y = np.zeros((batch_size, num_actions))
    for i in range(len(batch)):
        s_t, a_t, r_t, s_tp1, game_over = batch[i]
        X[i] = s_t
        Y[i] = model.predict(s_t)[0]
        Q_sa = np.max(model.predict(s_tp1)[0])
        if game_over:
            Y[i, a_t] = r_t
        else:
            Y[i, a_t] = r_t + gamma * Q_sa
    return X, Y
############################# main ###############################
# initialize parameters
def forward(self, input):
    """During the forward pass, it inhibits all activations below some
    threshold :math:`\\theta`, typically :math:`0`. In other words, it
    computes point-wise

    .. math:: y = max(0, x)

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32
        The output of the rectify function applied to the activation.
    """
    self.last_forward = input
    return np.maximum(0.0, input)
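Worth noting: the rectifier relies on np.maximum (element-wise, broadcasting the scalar 0.0) rather than np.max (a reduction over an array); a minimal illustration:

    import numpy as np

    x = np.array([-2.0, 0.5, 3.0])
    np.maximum(0.0, x)  # array([0. , 0.5, 3. ]) -- element-wise rectify
    np.max(x)           # 3.0 -- collapses the array to its single maximum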
def forward(self, input):
    r""":math:`\varphi(\mathbf{x})_j =
    \frac{e^{\mathbf{x}_j}}{\sum_{k=1}^K e^{\mathbf{x}_k}}`
    where :math:`K` is the total number of neurons in the layer. This
    activation function gets applied row-wise.

    Parameters
    ----------
    x : float32
        The activation (the summed, weighted input of a neuron).

    Returns
    -------
    float32 where the sum of the row is 1 and each single value is in [0, 1]
        The output of the softmax function applied to the activation.
    """
    assert np.ndim(input) == 2
    self.last_forward = input
    # subtract the row-wise max for numerical stability: softmax is
    # shift-invariant, but exp() of large inputs would overflow
    x = input - np.max(input, axis=1, keepdims=True)
    exp_x = np.exp(x)
    s = exp_x / np.sum(exp_x, axis=1, keepdims=True)
    return s
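The row-wise max subtraction is the standard numerical-stability trick: softmax is shift-invariant, but exp() of large logits overflows. A quick sketch of why it matters:

    import numpy as np

    logits = np.array([[1000.0, 1001.0]])
    # np.exp(logits) overflows to [[inf, inf]] and the ratio becomes nan
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
    # array([[0.26894142, 0.73105858]]) -- same softmax, no overflow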
def main(max_iter):
    # prepare
    npdl.utils.random.set_seed(1234)

    # data
    digits = load_digits()
    X_train = digits.data
    X_train /= np.max(X_train)  # scale pixel values into [0, 1]
    Y_train = digits.target
    n_classes = np.unique(Y_train).size

    # model
    model = npdl.model.Model()
    model.add(npdl.layers.Dense(n_out=500, n_in=64, activation=npdl.activations.ReLU()))
    model.add(npdl.layers.Dense(n_out=n_classes, activation=npdl.activations.Softmax()))
    model.compile(loss=npdl.objectives.SCCE(), optimizer=npdl.optimizers.SGD(lr=0.005))

    # train
    model.fit(X_train, npdl.utils.data.one_hot(Y_train), max_iter=max_iter, validation_split=0.1)
def _updateMaxTextSize(self, x):
    ## Informs that the maximum tick size orthogonal to the axis has
    ## changed; we use this to decide whether the item needs to be resized
    ## to accommodate.
    if self.orientation in ['left', 'right']:
        mx = max(self.textWidth, x)
        if mx > self.textWidth or mx < self.textWidth - 10:
            self.textWidth = mx
            if self.style['autoExpandTextSpace'] is True:
                self._updateWidth()
                #return True  ## size has changed
    else:
        mx = max(self.textHeight, x)
        if mx > self.textHeight or mx < self.textHeight - 10:
            self.textHeight = mx
            if self.style['autoExpandTextSpace'] is True:
                self._updateHeight()
                #return True  ## size has changed
def _updateHeight(self):
    if not self.isVisible():
        h = 0
    else:
        if self.fixedHeight is None:
            if not self.style['showValues']:
                h = 0
            elif self.style['autoExpandTextSpace'] is True:
                h = self.textHeight
            else:
                h = self.style['tickTextHeight']
            h += self.style['tickTextOffset'][1] if self.style['showValues'] else 0
            h += max(0, self.style['tickLength'])
            if self.label.isVisible():
                h += self.label.boundingRect().height() * 0.8
        else:
            h = self.fixedHeight
    self.setMaximumHeight(h)
    self.setMinimumHeight(h)
    self.picture = None