Python numpy module: amin() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.amin().
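
Before the project examples, a minimal sketch of the basic call patterns: numpy.amin returns the minimum of an array, optionally per axis, and propagates NaNs (numpy.nanmin is the NaN-ignoring variant).

import numpy as np

a = np.array([[3.0, 1.0],
              [2.0, 5.0]])
print(np.amin(a))           # 1.0 -- minimum of the flattened array
print(np.amin(a, axis=0))   # [2. 1.] -- column-wise minima
print(np.amin(a, axis=1))   # [1. 2.] -- row-wise minima
print(np.nanmin(np.array([np.nan, 4.0, 2.0])))  # 2.0 -- ignores the NaN
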
def min_side(_, pos):
    """
    Given an object pixels' positions, return the minimum side length of its
    bounding box
    :param _: pixel values (unused)
    :param pos: pixel position (1-D)
    :return: minimum bounding box side length
    """
    xs = np.array([i // SSIZE for i in pos])  # integer division: row indices
    ys = np.array([i % SSIZE for i in pos])   # column indices
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    ct1 = compute_line(np.array([minx, miny]), np.array([minx, maxy]))
    ct2 = compute_line(np.array([minx, miny]), np.array([maxx, miny]))
    return min(ct1, ct2)
def generate_patch_probs(path, patch_locations, patch_size, im_size):
    x, y, z = patch_locations
    seg = nib.load(glob.glob(os.path.join(path, '*_seg.nii.gz'))[0]).get_data().astype(np.float32)
    p = []
    for i in range(len(x)):
        for j in range(len(y)):
            for k in range(len(z)):
                patch = seg[int(x[i] - patch_size / 2) : int(x[i] + patch_size / 2),
                            int(y[j] - patch_size / 2) : int(y[j] + patch_size / 2),
                            int(z[k] - patch_size / 2) : int(z[k] + patch_size / 2)]
                patch = (patch > 0).astype(np.float32)
                percent = np.sum(patch) / (patch_size * patch_size * patch_size)
                p.append((1 - np.abs(percent - 0.5)) * percent)
    p = np.asarray(p, dtype=np.float32)
    p[p == 0] = np.amin(p[np.nonzero(p)])
    p = p / np.sum(p)
    return p
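
The vector returned above is a sampling distribution over candidate patch positions (it is normalized to sum to 1). A minimal sketch of how such a distribution is typically consumed, using a toy stand-in for p:

import numpy as np

p = np.array([0.1, 0.4, 0.3, 0.2])
p = p / np.sum(p)                            # np.random.choice requires sum(p) == 1
idx = np.random.choice(len(p), size=8, p=p)  # indices of sampled patches
print(idx)
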
def view_trigger_snippets_bis(trigger_snippets, elec_index, save=None):
    fig = pylab.figure()
    ax = fig.add_subplot(1, 1, 1)
    for n in range(0, trigger_snippets.shape[2]):
        y = trigger_snippets[:, elec_index, n]
        x = numpy.arange(-(y.size - 1) // 2, (y.size - 1) // 2 + 1)
        b = 0.5 + 0.5 * numpy.random.rand()
        ax.plot(x, y, color=(0.0, 0.0, b), linestyle='solid')
    ax.grid(True)
    ax.set_xlim([numpy.amin(x), numpy.amax(x)])
    ax.set_xlabel("time")
    ax.set_ylabel("amplitude")
    if save is None:
        pylab.show()
    else:
        pylab.savefig(save)
        pylab.close(fig)
    return
def to_rgb(img):
    """
    Converts the given array into a RGB image. If the number of channels is not
    3 the array is tiled such that it has 3 channels. Finally, the values are
    rescaled to [0,255)
    :param img: the array to convert [nx, ny, channels]
    :returns img: the rgb image [nx, ny, 3]
    """
    img = np.atleast_3d(img)
    channels = img.shape[2]
    if channels < 3:
        img = np.tile(img, 3)
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    img *= 255
    return img
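
Note that the in-place operators above require floating-point input; on an integer array, img /= np.amax(img) raises a casting error. A minimal usage sketch with an explicit cast (the cast is an addition for safety, not part of the original):

import numpy as np

raw = np.random.randint(0, 256, size=(32, 32, 1))  # e.g. an integer grayscale image
rgb = to_rgb(raw.astype(np.float64))               # cast first so the in-place /= works
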
def add(self, x, y=None):
    self.X = np.memmap(
        self.path + "/X.npy", self.X.dtype,
        shape=(self.nrows + x.shape[0], x.shape[1])
    )
    self.X[self.nrows:self.nrows + x.shape[0], :] = x
    if y is not None:
        if x.shape != y.shape:
            raise ValueError("x and y should have the same shape")
        self.Y = np.memmap(
            self.path + "/Y.npy", self.Y.dtype,
            shape=(self.nrows + y.shape[0], y.shape[1])
        )
        self.Y[self.nrows:self.nrows + y.shape[0], :] = y
    delta = x - self.running_mean
    n = self.X.shape[0] + np.arange(x.shape[0]) + 1
    self.running_dev += np.sum(delta * (x - self.running_mean), 0)
    self.running_mean += np.sum(delta / n[:, np.newaxis], 0)
    self.running_max = np.amax(np.vstack((self.running_max, x)), 0)
    self.running_min = np.amin(np.vstack((self.running_min, x)), 0)
    self.nrows += x.shape[0]
def _make_grid(dim=(11, 4)):
    """
    this function generates the structure for an asymmetrical circle grid
    domain (0-1)
    """
    x, y = range(dim[0]), range(dim[1])
    p = np.array([[[s, i] for s in x] for i in y], dtype=np.float32)
    p[:, 1::2, 1] += 0.5
    p = np.reshape(p, (-1, 2), 'F')
    # scale height = 1
    x_scale = 1. / (np.amax(p[:, 0]) - np.amin(p[:, 0]))
    y_scale = 1. / (np.amax(p[:, 1]) - np.amin(p[:, 1]))
    p *= x_scale, x_scale / .5
    return p
def analytic_infinite_wire(obsloc, wireloc, orientation, I=1.):
    """
    Compute the response of an infinite wire with orientation 'orientation'
    and current I at the observation locations obsloc
    Output:
    B: magnetic field [Bx,By,Bz]
    """
    n, d = obsloc.shape
    t, d = wireloc.shape
    d = np.sqrt(np.dot(obsloc**2., np.ones([d, t])) + np.dot(np.ones([n, d]), (wireloc.T)**2.)
                - 2. * np.dot(obsloc, wireloc.T))
    distr = np.amin(d, axis=1, keepdims=True)
    idxmind = d.argmin(axis=1)
    r = obsloc - wireloc[idxmind]
    orient = np.c_[[orientation for i in range(obsloc.shape[0])]]
    B = (mu_0 * I) / (2 * np.pi * (distr**2.)) * np.cross(orientation, r)
    return B
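
As a sanity check on the underlying formula |B| = mu_0 * I / (2 * pi * rho): at rho = 1 m from a wire carrying I = 1 A, the field magnitude is 2e-7 T. A short sketch, assuming mu_0 comes from scipy.constants as the function above implies:

import numpy as np
from scipy.constants import mu_0

rho, I = 1.0, 1.0
print(mu_0 * I / (2 * np.pi * rho))  # ~2e-07 tesla
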
def calculate_feature_statistics(feature_id):
    feature = Feature.objects.get(pk=feature_id)
    dataframe = _get_dataframe(feature.dataset.id)
    feature_col = dataframe[feature.name]
    feature.min = np.amin(feature_col).item()
    feature.max = np.amax(feature_col).item()
    feature.mean = np.mean(feature_col).item()
    feature.variance = np.nanvar(feature_col).item()
    unique_values = np.unique(feature_col)
    integer_check = (np.mod(unique_values, 1) == 0).all()
    feature.is_categorical = integer_check and (unique_values.size < 10)
    if feature.is_categorical:
        feature.categories = list(unique_values)
    feature.save(update_fields=['min', 'max', 'variance', 'mean', 'is_categorical', 'categories'])
    del unique_values, feature
def find_min_max(scp_file):
    minimum = float("inf")
    maximum = -float("inf")
    uid = 0
    offset = 0
    ark_dict, uid = read_mats(uid, offset, scp_file)
    while ark_dict:
        for key in ark_dict.keys():
            mat_max = np.amax(ark_dict[key])
            mat_min = np.amin(ark_dict[key])
            if mat_max > maximum:
                maximum = mat_max
            if mat_min < minimum:
                minimum = mat_min
        ark_dict, uid = read_mats(uid, offset, scp_file)
    print("min:", minimum, "max:", maximum)
    return minimum, maximum
def _gini(self, array):
    """Calculate the Gini coefficient of a numpy array."""
    # https://github.com/oliviaguest/gini
    # based on bottom eq:
    # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
    # from:
    # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    # All values are treated equally, arrays must be 1d
    # (float, so the in-place shifts below are valid on integer input):
    array = array.flatten().astype(float)
    if np.amin(array) < 0:
        # Values cannot be negative:
        array -= np.amin(array)
    # Values cannot be 0:
    array += 0.0000001
    # Values must be sorted:
    array = np.sort(array)
    # Index per array element:
    index = np.arange(1, array.shape[0] + 1)
    # Number of array elements:
    n = array.shape[0]
    # Gini coefficient:
    return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
def displayDataset(self, dataset):
    eps = 0.00001
    linewidth = dataset.linewidth
    if np.var(dataset.values) < eps:
        linewidth += 2
        mean = np.mean(dataset.values)
        x = np.arange(0, 1, 0.1)
        x = np.sort(np.append(x, [mean, mean - eps, mean + eps]))
        density = [1 if v == mean else 0 for v in x]
    else:
        self.kde.fit(np.asarray([[x] for x in dataset.values]))
        ## Computes the x axis
        x_max = np.amax(dataset.values)
        x_min = np.amin(dataset.values)
        delta = x_max - x_min
        density_delta = 1.1 * delta
        x = np.arange(x_min, x_max, density_delta / self.num_points)
        x_density = [[y] for y in x]
        ## kde.score_samples returns the 'log' of the density
        log_density = self.kde.score_samples(x_density).tolist()
        density = list(map(math.exp, log_density))
    self.ax.plot(x, density, label=dataset.label, color=dataset.color,
                 linewidth=linewidth, linestyle=dataset.linestyle)
def display(self, output_filename):
    fig, (ax) = plt.subplots(1, 1)
    data = [d.values for d in self.datasets]
    labels = [d.label for d in self.datasets]
    bp = ax.boxplot(data, labels=labels, notch=0, sym='+', vert=True, whis=1.5)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='black', marker='+')
    for i in range(len(self.datasets)):
        box = bp['boxes'][i]
        box_x = []
        box_y = []
        for j in range(5):
            box_x.append(box.get_xdata()[j])
            box_y.append(box.get_ydata()[j])
        box_coords = list(zip(box_x, box_y))
        box_polygon = Polygon(box_coords, facecolor=self.datasets[i].color)
        ax.add_patch(box_polygon)
    if self.title is not None:
        ax.set_title(self.title)
    x_min = np.amin([np.amin(d.values) for d in self.datasets])
    x_max = np.amax([np.amax(d.values) for d in self.datasets])
    ax.set_ylim(x_min - 0.05 * (x_max - x_min), x_max + 0.05 * (x_max - x_min))
    fig.savefig(output_filename)
    plt.close(fig)
def build_img_pair(img_batch):
    input_cast = img_batch[:, :, :, 0:6].astype(dtype=np.float32)
    input_min = np.amin(input_cast, axis=(1, 2, 3))
    input_max = np.amax(input_cast, axis=(1, 2, 3))
    for i in range(3):
        input_min = np.expand_dims(input_min, i + 1)
        input_max = np.expand_dims(input_max, i + 1)
    input_norm = (input_cast - input_min) / (input_max - input_min)
    gt_cast = img_batch[:, :, :, 6].astype(dtype=np.float32)
    gt_cast = np.expand_dims(gt_cast, 3)
    gt_min = np.amin(gt_cast, axis=(1, 2, 3))
    gt_max = np.amax(gt_cast, axis=(1, 2, 3))
    for i in range(3):
        gt_min = np.expand_dims(gt_min, i + 1)
        gt_max = np.expand_dims(gt_max, i + 1)
    gt_norm = (gt_cast - gt_min) / (gt_max - gt_min)
    return input_norm, gt_norm
def gini(array):
    """Calculate the Gini coefficient of a numpy array."""
    # based on bottom eq:
    # http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
    # from:
    # http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
    # All values are treated equally, arrays must be 1d
    # (float, so the in-place shifts below are valid on integer input):
    array = array.flatten().astype(float)
    if np.amin(array) < 0:
        # Values cannot be negative:
        array -= np.amin(array)
    # Values cannot be 0:
    array += 0.0000001
    # Values must be sorted:
    array = np.sort(array)
    # Index per array element:
    index = np.arange(1, array.shape[0] + 1)
    # Number of array elements:
    n = array.shape[0]
    # Gini coefficient:
    return ((np.sum((2 * index - n - 1) * array)) / (n * np.sum(array)))
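
A quick check of the two limiting cases (equal values give a coefficient near 0, total concentration approaches 1):

import numpy as np

print(gini(np.ones(1000)))                # ~0.0: perfect equality
print(gini(np.array([0.0]*999 + [1.0])))  # ~0.999: one holder owns everything
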
def Get_Batch_Chrominance():
    '''Convert every image in the batch to LAB colorspace and normalize each value of it between [0,1]
    Return:
    AbColores_values array [batch_size,224,224,2] 0-> A value, 1-> B value color
    '''
    global AbColores_values
    global ColorImages_Batch
    AbColores_values = np.empty((Batch_size, 224, 224, 2), "float32")
    for indx in range(Batch_size):
        lab = color.rgb2lab(ColorImages_Batch[indx])
        Min_valueA = np.amin(lab[:, :, 1])
        Max_valueA = np.amax(lab[:, :, 1])
        Min_valueB = np.amin(lab[:, :, 2])
        Max_valueB = np.amax(lab[:, :, 2])
        AbColores_values[indx, :, :, 0] = Normalize(lab[:, :, 1], -128, 127)
        AbColores_values[indx, :, :, 1] = Normalize(lab[:, :, 2], -128, 127)
def match_set_with_pts(db_set_feats, query_set_feats, dist_type,
                       pt_set_dist_mode):
    print('start matching sets using points...')
    if query_set_feats is None:
        query_set_feats = db_set_feats
    dist_mat = np.empty(
        (len(query_set_feats), len(db_set_feats)), dtype=float)
    for i in range(len(query_set_feats)):
        for j in range(len(db_set_feats)):
            if dist_type == DistType.Hamming:
                tmp_dist_mat = scipy.spatial.distance.cdist(query_set_feats[i],
                                                            db_set_feats[j], 'hamming')
            if dist_type == DistType.L2:
                tmp_dist_mat = scipy.spatial.distance.cdist(
                    query_set_feats[i], db_set_feats[j], 'euclidean')
            if pt_set_dist_mode == PtSetDist.Min:
                dist_mat[i, j] = np.amin(tmp_dist_mat)
            if pt_set_dist_mode == PtSetDist.Avg:
                dist_mat[i, j] = np.mean(tmp_dist_mat)
            if pt_set_dist_mode == PtSetDist.MeanMin:
                dist_mat[i, j] = np.mean(np.amin(tmp_dist_mat, axis=1))
    return dist_mat
def __init__(self, data, leafsize=10):
    """Construct a kd-tree.
    Parameters:
    ===========
    data : array-like, shape (n,k)
        The data points to be indexed. This array is not copied, and
        so modifying this data will result in bogus results.
    leafsize : positive integer
        The number of points at which the algorithm switches over to
        brute-force.
    """
    self.data = np.asarray(data)
    self.n, self.m = np.shape(self.data)
    self.leafsize = int(leafsize)
    if self.leafsize < 1:
        raise ValueError("leafsize must be at least 1")
    self.maxes = np.amax(self.data, axis=0)
    self.mins = np.amin(self.data, axis=0)
    self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
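
The amin/amax pair computes the axis-aligned bounding box of the point set, which becomes the root cell of the tree. That step in isolation:

import numpy as np

pts = np.random.rand(100, 3)    # 100 points in 3-D
mins = np.amin(pts, axis=0)     # per-dimension lower corner
maxes = np.amax(pts, axis=0)    # per-dimension upper corner
assert np.all(mins <= maxes)
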
def get_classification(self, idx):
    """Docs"""
    if self.Sxx is None or self.active_song.classification is None:
        raise TypeError('No active song from which to get data')
    img_rows = self.params.get('img_rows', self.Sxx.shape[0])
    img_cols = self.params.get('img_cols', 1)
    if np.amax(idx) > self.Sxx.shape[1]:
        raise IndexError('Data index of sample out of bounds, only {0} '
                         'samples in the dataset'.format(self.Sxx.shape[1] - img_cols))
    if np.amin(idx) < 0:
        raise IndexError('Data index of sample out of bounds, '
                         'negative index requested')
    # index out the data
    classification = self.active_song.classification[idx]
    return classification
def standard_resize(image, max_side):
    if image is None:
        return None, None, None
    original_h, original_w, _ = image.shape
    if all(side < max_side for side in [original_h, original_w]):
        return image, original_h, original_w
    aspect_ratio = float(np.amax((original_w, original_h)) / float(np.amin((original_h, original_w))))
    if original_w >= original_h:
        new_w = max_side
        new_h = max_side / aspect_ratio
    else:
        new_h = max_side
        new_w = max_side / aspect_ratio
    new_h = int(new_h)
    new_w = int(new_w)
    resized_image = cv2.resize(image, (new_w, new_h))
    return resized_image, new_w, new_h
def update_time_to_recover_map(self):
    max_distance = 30
    self.recover_map = numpy.zeros((max_distance + 1, self.width, self.height))
    self.recover_map_smooth = numpy.zeros((max_distance + 1, self.width, self.height))
    self.recover_map[0] = numpy.divide(self.strength_map, numpy.maximum(self.production_map, 0.001))
    self.recover_map[0] = numpy.multiply(self.recover_map[0], 1 - (self.is_owner_map[self.game_map.my_id] + self.is_enemy_map))
    self.recover_map[0] += (self.is_owner_map[self.game_map.my_id] + self.is_enemy_map) * 999
    distance = 1
    while distance <= max_distance:
        dir_map = numpy.zeros((4, self.width, self.height))
        dir_map[0] = roll_xy(self.recover_map[distance - 1], 0, 1)
        dir_map[1] = roll_xy(self.recover_map[distance - 1], 0, -1)
        dir_map[2] = roll_xy(self.recover_map[distance - 1], 1, 0)
        dir_map[3] = roll_xy(self.recover_map[distance - 1], -1, 0)
        self.recover_map[distance] = numpy.add(self.recover_map[distance - 1], numpy.amin(dir_map, 0))
        distance += 1
    for d in range(2, max_distance):
        self.recover_map[d] = self.recover_map[d] / d
        self.recover_map_smooth[d] = spread_n(self.recover_map[d], 4)
def update_recover_map(self):
    max_distance = 50
    self.recover_map = numpy.zeros((max_distance + 1, self.width, self.height))
    self.recover_map[0] = numpy.divide(self.strength_map, numpy.maximum(self.production_map, 0.01))
    self.recover_map[0] = numpy.multiply(self.recover_map[0], self.is_neutral_map)
    self.recover_map[0] += (self.is_owned_map + self.is_enemy_map) * 999
    for distance in range(1, max_distance + 1):
        dir_map = numpy.zeros((4, self.width, self.height))
        dir_map[0] = roll_xy(self.recover_map[distance - 1], 0, 1)
        dir_map[1] = roll_xy(self.recover_map[distance - 1], 0, -1)
        dir_map[2] = roll_xy(self.recover_map[distance - 1], 1, 0)
        dir_map[3] = roll_xy(self.recover_map[distance - 1], -1, 0)
        self.recover_map[distance] = numpy.add(self.recover_map[distance - 1], numpy.amin(dir_map, 0))
    for d in range(2, max_distance + 1):
        self.recover_map[d] = self.recover_map[d] / d
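
Both methods above relax a cost grid by repeatedly adding the cheapest of the four neighbouring cells, a shortest-path-style dynamic program over the wrapped map. A minimal sketch of one relaxation step, with numpy.roll standing in for the roll_xy helper (an assumption; roll_xy is not shown here):

import numpy as np

cost = np.random.rand(8, 8)                      # per-cell cost at distance d-1
neighbors = np.stack([np.roll(cost, 1, axis=0),  # shifted copies of the grid,
                      np.roll(cost, -1, axis=0), # one per cardinal direction
                      np.roll(cost, 1, axis=1),
                      np.roll(cost, -1, axis=1)])
next_cost = cost + np.amin(neighbors, axis=0)    # add the cheapest neighbour in
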
def diagonal(_, pos):
    """
    Given an object pixels' positions, return the diagonal length of its
    bounding box
    :param _: pixel values (unused)
    :param pos: pixel position (1-D)
    :return: diagonal of bounding box
    """
    xs = np.array([i // SSIZE for i in pos])  # integer division: row indices
    ys = np.array([i % SSIZE for i in pos])   # column indices
    minx = np.amin(xs)
    miny = np.amin(ys)
    maxx = np.amax(xs)
    maxy = np.amax(ys)
    return compute_line(np.array([minx, miny]), np.array([maxx, maxy]))
def binarization(array):
    '''Takes a binary-class datafile and turns the max value (positive class) into 1 and the min into 0'''
    array = np.array(array, dtype=float)  # conversion needed to use np.inf below
    if len(np.unique(array)) > 2:
        raise ValueError("The argument must be a binary-class datafile. {} classes detected".format(len(np.unique(array))))
    # manipulation which aims at avoiding errors in data with, for example, classes '1' and '2'.
    array[array == np.amax(array)] = np.inf
    array[array == np.amin(array)] = 0
    array[array == np.inf] = 1
    return np.array(array, dtype=int)
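
For instance, labels coded as 1/2 come out as 0/1:

print(binarization([1, 2, 2, 1]))  # [0 1 1 0]
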
def bench_on(runner, sym, Ns, trials, dtype=None):
    global args, kernel, out, mkl_layer
    prepare = globals().get("prepare_" + sym, prepare_default)
    kernel = globals().get("kernel_" + sym, None)
    if not kernel:
        kernel = getattr(np.linalg, sym)
    out_lvl = runner.__doc__.split('.')[0].strip()
    func_s = kernel.__doc__.split('.')[0].strip()
    log.debug('Preparing input data for %s (%s).. ' % (sym, func_s))
    args = [prepare(int(i)) for i in Ns]
    it = range(len(Ns))
    # pprint(Ns)
    out = np.empty(shape=(len(Ns), trials))
    b = body(trials)
    tic, toc = (0, 0)
    log.debug('Warming up %s (%s).. ' % (sym, func_s))
    runner(range(1000), empty_work)
    kernel(*args[0])
    runner(range(1000), empty_work)
    log.debug('Benchmarking %s on %s: ' % (func_s, out_lvl))
    gc_old = gc.isenabled()
    # gc.disable()
    tic = time.time()
    runner(it, b)
    toc = time.time() - tic
    if gc_old:
        gc.enable()
    if 'reused_pool' in globals():
        del globals()['reused_pool']
    # calculate average time and min time and also keep track of outliers (max time in the loop)
    min_time = np.amin(out)
    max_time = np.amax(out)
    mean_time = np.mean(out)
    stdev_time = np.std(out)
    #print("Min = %.5f, Max = %.5f, Mean = %.5f, stdev = %.5f " % (min_time, max_time, mean_time, stdev_time))
    #final_times = [min_time, max_time, mean_time, stdev_time]
    print('## %s: Outer:%s, Inner:%s, Wall seconds:%f\n' % (sym, out_lvl, mkl_layer, float(toc)))
    return out
def view_trigger_snippets(trigger_snippets, chans, save=None):
    # Create output directory if necessary.
    if save is not None:
        if os.path.exists(save):
            for f in os.listdir(save):
                p = os.path.join(save, f)
                os.remove(p)
            os.removedirs(save)
        os.makedirs(save)
    # Plot figures.
    fig = pylab.figure()
    for (c, chan) in enumerate(chans):
        ax = fig.add_subplot(1, 1, 1)
        for n in range(0, trigger_snippets.shape[2]):
            y = trigger_snippets[:, c, n]
            x = numpy.arange(-(y.size - 1) // 2, (y.size - 1) // 2 + 1)
            b = 0.5 + 0.5 * numpy.random.rand()
            ax.plot(x, y, color=(0.0, 0.0, b), linestyle='solid')
        y = numpy.mean(trigger_snippets[:, c, :], axis=1)
        x = numpy.arange(-(y.size - 1) // 2, (y.size - 1) // 2 + 1)
        ax.plot(x, y, color=(1.0, 0.0, 0.0), linestyle='solid')
        ax.grid(True)
        ax.set_xlim([numpy.amin(x), numpy.amax(x)])
        ax.set_title("Channel %d" % chan)
        ax.set_xlabel("time")
        ax.set_ylabel("amplitude")
        if save is not None:
            # Save plot.
            filename = "channel-%d.png" % chan
            path = os.path.join(save, filename)
            pylab.savefig(path)
            fig.clf()
    if save is None:
        pylab.show()
    else:
        pylab.close(fig)
    return
def imgSeg_logo(approx, himg, wimg):
    w = np.amax(approx[:, :, 0]) - np.amin(approx[:, :, 0])
    h = np.amax(approx[:, :, 1]) - np.amin(approx[:, :, 1])
    if float(w) / float(h + 0.001) > 4.5:
        h = int(float(w) / 3.5)
    w0 = np.amin(approx[:, :, 0])
    h0 = np.amin(approx[:, :, 1])
    h1 = h0 - int(3.5 * h)
    h2 = h0
    w1 = max(w0 + w // 2 - int(0.5 * (h2 - h1)), 0)
    w2 = min(w0 + w // 2 + int(0.5 * (h2 - h1)), wimg - 1)
    return h1, h2, w1, w2
def imgSeg_rect(approx, himg, wimg):
    w = np.amax(approx[:, :, 0]) - np.amin(approx[:, :, 0])
    h = np.amax(approx[:, :, 1]) - np.amin(approx[:, :, 1])
    if float(w) / float(h + 0.001) > 4.5:
        h = int(float(w) / 3.5)
    w0 = np.amin(approx[:, :, 0])
    h0 = np.amin(approx[:, :, 1])
    h1 = h0 - int(3.6 * h)
    h2 = min(h0 + int(3 * h), himg - 1)
    w1 = max(w0 + w // 2 - (h2 - h1), 0)
    w2 = min(w0 + w // 2 + (h2 - h1), wimg - 1)
    return h1, h2, w1, w2
def collect_point_label(anno_path, out_filename, file_format='txt'):
    """ Convert original dataset files to data_label file (each line is XYZRGBL).
    We aggregated all the points from each instance in the room.
    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save collected points and labels (each line is XYZRGBL)
        file_format: txt or numpy, determines what file format to save.
    Returns:
        None
    Note:
        the points are shifted before save, the most negative point is now at origin.
    """
    points_list = []
    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        labels = np.ones((points.shape[0], 1)) * g_class2label[cls]
        points_list.append(np.concatenate([points, labels], 1)) # Nx7
    data_label = np.concatenate(points_list, 0)
    xyz_min = np.amin(data_label, axis=0)[0:3]
    data_label[:, 0:3] -= xyz_min
    if file_format == 'txt':
        fout = open(out_filename, 'w')
        for i in range(data_label.shape[0]):
            fout.write('%f %f %f %d %d %d %d\n' % \
                       (data_label[i, 0], data_label[i, 1], data_label[i, 2],
                        data_label[i, 3], data_label[i, 4], data_label[i, 5],
                        data_label[i, 6]))
        fout.close()
    elif file_format == 'numpy':
        np.save(out_filename, data_label)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \
              (file_format))
        exit()
def collect_bounding_box(anno_path, out_filename):
    """ Compute bounding boxes from each instance in original dataset files on
    one room. **We assume the bbox is aligned with XYZ coordinate.**
    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save instance bounding boxes for that room.
            each line is x1 y1 z1 x2 y2 z2 label,
            where (x1,y1,z1) is the point on the diagonal closer to origin
    Returns:
        None
    Note:
        room points are shifted, the most negative point is now at origin.
    """
    bbox_label_list = []
    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes: # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        label = g_class2label[cls]
        # Compute tightest axis aligned bounding box
        xyz_min = np.amin(points[:, 0:3], axis=0)
        xyz_max = np.amax(points[:, 0:3], axis=0)
        ins_bbox_label = np.expand_dims(
            np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
        bbox_label_list.append(ins_bbox_label)
    bbox_label = np.concatenate(bbox_label_list, 0)
    room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
    bbox_label[:, 0:3] -= room_xyz_min
    bbox_label[:, 3:6] -= room_xyz_min
    fout = open(out_filename, 'w')
    for i in range(bbox_label.shape[0]):
        fout.write('%f %f %f %f %f %f %d\n' % \
                   (bbox_label[i, 0], bbox_label[i, 1], bbox_label[i, 2],
                    bbox_label[i, 3], bbox_label[i, 4], bbox_label[i, 5],
                    bbox_label[i, 6]))
    fout.close()
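
Both collectors rely on the same amin idiom: subtracting the per-axis minimum shifts the cloud so its most negative point sits at the origin. In isolation:

import numpy as np

pts = np.array([[2.0, -1.0, 5.0],
                [4.0,  3.0, 1.0]])
pts -= np.amin(pts, axis=0)  # per-column minima: [2., -1., 1.]
print(pts)                   # [[0. 0. 4.]
                             #  [2. 4. 0.]]
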
def read_testing_inputs(file, roi, im_size, output_path=None):
    f_h5 = h5py.File(file, 'r')
    if roi == -1:
        images = np.asarray(f_h5['resized_images'], dtype=np.float32)
        read_info = {}
        read_info['shape'] = np.asarray(f_h5['images'], dtype=np.float32).shape
    else:
        images = np.asarray(f_h5['images'], dtype=np.float32)
        output = h5py.File(os.path.join(output_path, 'All_' + os.path.basename(file)), 'r')
        predictions = np.asarray(output['predictions'], dtype=np.float32)
        output.close()
        # Select the roi
        roi_labels = (predictions == roi + 1).astype(np.float32)
        nz = np.nonzero(roi_labels)
        extract = []
        for c in range(3):
            start = np.amin(nz[c])
            end = np.amax(nz[c])
            r = end - start
            extract.append((np.maximum(int(np.rint(start - r * 0.1)), 0),
                            np.minimum(int(np.rint(end + r * 0.1)), images.shape[c])))
        extract_images = images[extract[0][0] : extract[0][1], extract[1][0] : extract[1][1], extract[2][0] : extract[2][1]]
        read_info = {}
        read_info['shape'] = images.shape
        read_info['extract_shape'] = extract_images.shape
        read_info['extract'] = extract
        images = resize(extract_images, im_size, mode='constant')
    f_h5.close()
    return images, read_info
def normalize(im):
    mini = np.amin(im)
    maxi = np.amax(im)
    rng = maxi - mini
    im -= mini
    if rng > 0:
        im /= rng
    return im
# ----- Type transformations --------------------------------------------------
def to_rgb(img):
    img = img.reshape(img.shape[0], img.shape[1])
    img[np.isnan(img)] = 0
    img -= np.amin(img)
    img /= np.amax(img)
    blue = np.clip(4 * (0.75 - img), 0, 1)
    red = np.clip(4 * (img - 0.25), 0, 1)
    green = np.clip(44 * np.fabs(img - 0.5) - 1., 0, 1)
    rgb = np.stack((red, green, blue), axis=2)
    return rgb
def _process_data(self, data):
    # normalization
    data = np.clip(np.fabs(data), self.a_min, self.a_max)
    data -= np.amin(data)
    data /= np.amax(data)
    return data
def plot_prediction(x_test, y_test, prediction, save=False):
    import matplotlib
    import matplotlib.pyplot as plt
    test_size = x_test.shape[0]
    fig, ax = plt.subplots(test_size, 3, figsize=(12, 12), sharey=True, sharex=True)
    x_test = crop_to_shape(x_test, prediction.shape)
    y_test = crop_to_shape(y_test, prediction.shape)
    ax = np.atleast_2d(ax)
    for i in range(test_size):
        cax = ax[i, 0].imshow(x_test[i])
        plt.colorbar(cax, ax=ax[i, 0])
        cax = ax[i, 1].imshow(y_test[i, ..., 1])
        plt.colorbar(cax, ax=ax[i, 1])
        pred = prediction[i, ..., 1]
        pred -= np.amin(pred)
        pred /= np.amax(pred)
        cax = ax[i, 2].imshow(pred)
        plt.colorbar(cax, ax=ax[i, 2])
        if i == 0:
            ax[i, 0].set_title("x")
            ax[i, 1].set_title("y")
            ax[i, 2].set_title("pred")
    fig.tight_layout()
    if save:
        fig.savefig(save)
    else:
        fig.show()
        plt.show()
def computeallcpus(self):
    """ overall stats for all cores on the nodes """
    ratios = numpy.empty((self._ncpumetrics, self._totalcores), numpy.double)
    coreindex = 0
    for host, last in self._last.items():
        try:
            elapsed = last - self._first[host]
            if numpy.amin(numpy.sum(elapsed, 0)) < 1.0:
                # typically happens if the job was very short and the datapoints are too close together
                return {"error": ProcessingError.JOB_TOO_SHORT}
            coresperhost = len(last[0, :])
            ratios[:, coreindex:(coreindex + coresperhost)] = 1.0 * elapsed / numpy.sum(elapsed, 0)
            coreindex += coresperhost
        except ValueError:
            # typically happens if the linux pmda crashes during the job
            return {"error": ProcessingError.INSUFFICIENT_DATA}
    results = {}
    for i, name in enumerate(self._outnames):
        results[name] = calculate_stats(ratios[i, :])
    results['all'] = {"cnt": self._totalcores}
    return results
def plot_slice_3d_2_patch(ct_scan, mask, pid, img_dir=None, idx=None):
    # to convert cuda arrays to numpy array
    ct_scan = np.asarray(ct_scan)
    mask = np.asarray(mask)
    fig, ax = plt.subplots(2, 3, figsize=[8, 8])
    fig.canvas.set_window_title(pid)
    if idx is None:
        # just plot in the middle of the cube
        in_sh = ct_scan.shape
        idx = [in_sh[0] // 2, in_sh[1] // 2, in_sh[2] // 2]
    print(np.amin(ct_scan), np.amax(ct_scan))
    print(np.amin(mask), np.amax(mask))
    ax[0, 0].imshow(ct_scan[idx[0], :, :], cmap=plt.cm.gray)
    ax[0, 1].imshow(ct_scan[:, idx[1], :], cmap=plt.cm.gray)
    ax[0, 2].imshow(ct_scan[:, :, idx[2]], cmap=plt.cm.gray)
    ax[1, 0].imshow(mask[idx[0], :, :], cmap=plt.cm.gray)
    ax[1, 1].imshow(mask[:, idx[1], :], cmap=plt.cm.gray)
    ax[1, 2].imshow(mask[:, :, idx[2]], cmap=plt.cm.gray)
    if img_dir is not None:
        fig.savefig(img_dir + '/%s.png' % pid, bbox_inches='tight')
    else:
        plt.show()
    fig.clf()
    plt.close('all')
def calculate_likelihoods(ScoreList, NumInfoSites):
    num_lines = len(ScoreList)
    LikeLiHoods = [likeliTest(NumInfoSites[i], int(ScoreList[i])) for i in range(num_lines)]
    LikeLiHoods = np.array(LikeLiHoods).astype("float")
    TopHit = np.amin(LikeLiHoods)
    LikeLiHoodRatios = [LikeLiHoods[i] / TopHit for i in range(num_lines)]
    LikeLiHoodRatios = np.array(LikeLiHoodRatios).astype("float")
    return (LikeLiHoods, LikeLiHoodRatios)
def normalize(self):
    self.X = (self.X - np.amin(self.X, 0)) \
             / (np.amax(self.X, 0) - np.amin(self.X, 0))
def normalize_points(self, x):
    return np.divide(x - np.amin(self.X, 0),
                     np.amax(self.X, 0) - np.amin(self.X, 0),
                     out=np.empty_like(x))
def compute_patches_at_scale(self, scale_idx, scale, p_id_base):
    debug("Processing {} scale_idx:{} scale:{}".format(self.file_name, scale_idx, scale))
    shape = np.array(self.shape)
    size = (np.amin(shape) - 1) / scale
    num_samples = np.ceil((shape - 1) / size)
    num_samples = [int(n * 2) if n > 1 else int(n) for n in num_samples]
    patches = []
    sample_locs = [self.sample_locs_for_dim(self.shape[0], size, num_samples[0]),
                   self.sample_locs_for_dim(self.shape[1], size, num_samples[1])]
    p_id = p_id_base
    for sample_loc_0 in sample_locs[0]:
        for sample_loc_1 in sample_locs[1]:
            patch = ImagePatch(p_id, self, (sample_loc_0, sample_loc_1), size, scale)
            patch.label, patch.matched_roi_idx = \
                self.get_label_for_patch(patch)
            if patch.label != PASCAL_VOC_BACKGROUND_CLASS:
                self.non_background_patches.append(patch)
            else:
                self.background_patches.append(patch)
            patches.append(patch)
            p_id += 1
    debug("Compute {} patches".format(p_id - p_id_base))
    return p_id
# Sample the Pascal VOC dataset
def lower_bounds(self):
    return Vector2f(*numpy.amin(self.particles['position'], axis=0))
def test_all(self):
    a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
    for i in range(a.ndim):
        amin = a.min(i)
        aargmin = a.argmin(i)
        axes = list(range(a.ndim))
        axes.remove(i)
        assert_(np.all(amin == aargmin.choose(*a.transpose(i, *axes))))
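
The assertion exercises the identity behind argmin: selecting along the reduced axis with the argmin indices reproduces the amin values. A smaller self-contained version of the same check:

import numpy as np

a = np.random.normal(0, 1, (3, 4))
# choose() treats a as 3 candidate rows and picks a[argmin[j], j] for each column j
assert np.all(a.min(0) == a.argmin(0).choose(a))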