Python numpy module: flipud() code examples
The following 47 code examples, collected from open-source Python projects, illustrate how to use numpy.flipud().
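As a quick reference before the examples: np.flipud(m) reverses an array along its first axis (rows, for a 2-D array) and is equivalent to m[::-1, ...]. A minimal sketch:

import numpy as np

a = np.arange(6).reshape(3, 2)     # [[0, 1], [2, 3], [4, 5]]
flipped = np.flipud(a)             # rows reversed: [[4, 5], [2, 3], [0, 1]]

assert np.array_equal(flipped, a[::-1])        # flipud(a) is the same as a[::-1]
assert np.array_equal(np.flipud(flipped), a)   # flipping twice restores the original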
def plot_spectra(results):
    plt.figure(figsize=(10, 4))
    plt.imshow(
        np.concatenate(
            [np.flipud(results['x'].T),
             np.flipud(results['xh'].T),
             np.flipud(results['x_conv'].T)],
            0),
        aspect='auto',
        cmap='jet',
    )
    plt.colorbar()
    plt.title('Upper: Real input; Mid: Reconstruction; Lower: Conversion to target.')
    plt.savefig(
        os.path.join(
            args.logdir,
            '{}.png'.format(
                os.path.split(str(results['f'], 'utf-8'))[-1]
            )
        )
    )
def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
fcoefs, f = make_erb_filters(sr, channel_number, 50)
fcoefs = np.flipud(fcoefs)
xf = erb_frilter_bank(xx, fcoefs)
if win_type == 'hanning':
window = np.hanning(channel_number)
elif win_type == 'hamming':
window = np.hamming(channel_number)
elif win_type == 'triangle':
window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
else:
window = np.ones(channel_number)
window = window.reshape((channel_number, 1))
xe = np.power(xf, 2.0)
frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
cochleagram = np.zeros((channel_number, frames))
for i in range(frames):
one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))
cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
return cochleagram
def ssh():
from random import randint, seed
import pandas as pd
import matplotlib.pyplot as plt
seed(1)
df = pd.DataFrame(pd.read_csv('ssh.csv', sep=';'))[:20000]
y = df.value.as_matrix()
y_raw = numpy.flipud(y)
y = numpy.append(y_raw, y_raw)
y = numpy.append(y, y_raw)
for i in range(len(y)):
y[i] += randint(-10, 10)
for i in range(46100, 46120):
y[i] += 100
y[i] *= 10
x = [i for i in range(0, len(y) * 2, 2)]
series = list(zip(x, y))
result = StddevAnomaly().search_anomaly({}, len(series), series)
print(result)
plt.plot(*zip(*series))
plt.plot(*zip(*result), 'x')
plt.show()
def _process(self, img, key=None):
if self.p.fast:
return self._fast_process(img, key)
proj = self.p.projection
if proj == img.crs:
return img
x0, x1 = img.range(0)
y0, y1 = img.range(1)
xn, yn = img.interface.shape(img, gridded=True)[:2]
px0, py0, px1, py1 = project_extents((x0, y0, x1, y1),
img.crs, proj)
src_ext, trgt_ext = (x0, x1, y0, y1), (px0, px1, py0, py1)
arrays = []
for vd in img.vdims:
arr = img.dimension_values(vd, flat=False)
projected, extents = warp_array(arr, proj, img.crs, (xn, yn),
src_ext, trgt_ext)
arrays.append(projected)
projected = np.dstack(arrays) if len(arrays) > 1 else arrays[0]
data = np.flipud(projected)
bounds = (extents[0], extents[2], extents[1], extents[3])
return img.clone(data, bounds=bounds, kdims=img.kdims,
vdims=img.vdims, crs=proj)
def n_even_fcn(f, o, w, l):
"""Even case."""
# Variables :
k = np.array(range(0, int(l) + 1, 1)) + 0.5
b = np.zeros(k.shape)
# # Run Loop :
for s in range(0, len(f), 2):
m = (o[s + 1] - o[s]) / (f[s + 1] - f[s])
b1 = o[s] - m * f[s]
b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * f[
s + 1]) - np.cos(2 * np.pi * k * f[s])) / (
k * k)) * abs(np.square(w[round((s + 1) / 2)]))
b = b + (f[s + 1] * (m * f[s + 1] + b1) * np.sinc(2 * k * f[
s + 1]) - f[s] * (m * f[s] + b1) * np.sinc(2 * k * f[s])) * abs(
np.square(w[round((s + 1) / 2)]))
a = (np.square(w[0])) * 4 * b
h = 0.5 * np.concatenate((np.flipud(a), a))
return h
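The last two lines of n_even_fcn build a linear-phase (symmetric) impulse response by mirroring the half-response with flipud and concatenating it with itself. A minimal sketch of that pattern with a hypothetical half-response a:

import numpy as np

a = np.array([0.1, 0.3, 0.6, 1.0])               # hypothetical half-response
h = 0.5 * np.concatenate((np.flipud(a), a))      # mirror and join, as in n_even_fcn

assert np.array_equal(h, np.flipud(h))           # the full response is symmetric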
def NevenFcn(F, M, W, L): # N is even
# Variables :
k = np.array(range(0, int(L) + 1, 1)) + 0.5
b = np.zeros(k.shape)
# # Run Loop :
for s in range(0, len(F), 2):
m = (M[s + 1] - M[s]) / (F[s + 1] - F[s])
b1 = M[s] - m * F[s]
b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * F[
s + 1]) - np.cos(2 * np.pi * k * F[s])) / (
k * k)) * abs(np.square(W[round((s + 1) / 2)]))
b = b + (F[s + 1] * (m * F[s + 1] + b1) * np.sinc(2 * k * F[
s + 1]) - F[s] * (m * F[s] + b1) * np.sinc(2 * k * F[s])) * abs(
np.square(W[round((s + 1) / 2)]))
a = (np.square(W[0])) * 4 * b
h = 0.5 * np.concatenate((np.flipud(a), a))
return h
####################################################################
# - Filter the signal :
####################################################################
def plot_feature_importances(feature_importances, title, feature_names):
# Normalize the importance values
feature_importances = 100.0 * (feature_importances / max(feature_importances))
# Sort the values and flip them
index_sorted = np.flipud(np.argsort(feature_importances))
# Arrange the X ticks
pos = np.arange(index_sorted.shape[0]) + 0.5
# Plot the bar graph
plt.figure()
plt.bar(pos, feature_importances[index_sorted], align='center')
plt.xticks(pos, feature_names[index_sorted])
plt.ylabel('Relative Importance')
plt.title(title)
plt.show()
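The np.flipud(np.argsort(...)) idiom above turns argsort's ascending order into descending order; a small check with made-up importance values:

import numpy as np

importances = np.array([0.2, 0.5, 0.1, 0.9])       # hypothetical values
descending = np.flipud(np.argsort(importances))     # indices from largest to smallest

assert np.array_equal(descending, np.array([3, 1, 0, 2]))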
def generate_plot(self, filename, title='', xlabel='', ylabel=''):
data_keys = list(self.data.keys())
key_num = len(data_keys)
self.plot = plt.figure()
if key_num == 1:
splt = self.plot.add_subplot(1, 1, 1)
im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
splt.set_title(title)
else: ## plotting multiple images in one figure still has problems; the visualization is not good
logger.error('not supported yet')
self.plot.colorbar(im_data)
self.plot.savefig(filename) #, bbox_inches='tight'
#class MultipleLinesPlot(PlotWithData):
# def generate_plot(self, filename, title='', xlabel='', ylabel=''):
def update(self):
self._display_init()
x1, x2 = self.update_x1, self.update_x2
y1, y2 = self.update_y1, self.update_y2
region = self.buffer[y1:y2, x1:x2]
if self.v_flip:
region = numpy.fliplr(region)
if self.h_flip:
region = numpy.flipud(region)
buf_red = numpy.packbits(numpy.where(region == RED, 1, 0)).tolist()
if self.inky_version == 1:
buf_black = numpy.packbits(numpy.where(region == 0, 0, 1)).tolist()
else:
buf_black = numpy.packbits(numpy.where(region == BLACK, 0, 1)).tolist()
self._display_update(buf_black, buf_red)
self._display_fini()
def plot_dZ_contours(x, y, dZ, axes=None, dZ_interval=0.5, verbose=False,
                     fig_kwargs={}):
    r"""For plotting seafloor deformation dZ"""
    import matplotlib.pyplot as plt
    dZ_max = max(dZ.max(), -dZ.min()) + dZ_interval
    clines1 = numpy.arange(dZ_interval, dZ_max, dZ_interval)
    clines = list(-numpy.flipud(clines1)) + list(clines1)
    # Create axes if needed
    if axes is None:
        fig = plt.figure(**fig_kwargs)
        axes = fig.add_subplot(111)
    if len(clines) > 0:
        if verbose:
            print("Plotting contour lines at:", clines)
        axes.contour(x, y, dZ, clines, colors='k')
    else:
        print("No contours to plot")
    return axes
def from_catmaid_stack(stack_info, tile_source_parameters):
# See https://catmaid.readthedocs.io/en/stable/tile_sources.html
format_url = {
1: '{source_base_url}{{z}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}',
4: '{source_base_url}{{z}}/{{zoom_level}}/{{row}}_{{col}}.{file_extension}',
5: '{source_base_url}{{zoom_level}}/{{z}}/{{row}}/{{col}}.{file_extension}',
7: '{source_base_url}largeDataTileSource/{tile_width}/{tile_height}/'
'{{zoom_level}}/{{z}}/{{row}}/{{col}}.{file_extension}',
9: '{source_base_url}{{z}}/{{row}}_{{col}}_{{zoom_level}}.{file_extension}',
}[tile_source_parameters['tile_source_type']].format(**tile_source_parameters)
bounds = np.flipud(np.array(stack_info['bounds'], dtype=np.int64))
resolution = np.flipud(np.array(stack_info['resolution']))
tile_width = int(tile_source_parameters['tile_width'])
tile_height = int(tile_source_parameters['tile_height'])
return ImageStackVolume(bounds, resolution, tile_width, tile_height, format_url,
missing_z=stack_info['broken_slices'])
def _apply_transformations(plot_config, data_slice):
    """Rotate, flip and zoom the data slice.

    Depending on the plot configuration, this will apply some transformations to the given data slice.

    Args:
        plot_config (mdt.visualization.maps.base.MapPlotConfig): the plot configuration
        data_slice (ndarray): the 2d slice of data to transform

    Returns:
        ndarray: the transformed 2d slice of data
    """
    if plot_config.rotate:
        data_slice = np.rot90(data_slice, plot_config.rotate // 90)
    if not plot_config.flipud:
        # By default we flipud to correct for matplotlib's lower origin. If the user
        # sets flipud, we do not need to do it.
        data_slice = np.flipud(data_slice)
    data_slice = plot_config.zoom.apply(data_slice)
    return data_slice
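As the comment above notes, the flip compensates for matplotlib's lower origin; a minimal sketch of that equivalence with purely illustrative data:

import numpy as np
import matplotlib.pyplot as plt

data = np.arange(12).reshape(3, 4)

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(data, origin='lower')              # row 0 drawn at the bottom
ax2.imshow(np.flipud(data), origin='upper')   # same picture with the default (upper) origin
plt.show()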
def __model_form(self, tri_array):
w = np.nan_to_num(self.weights/tri_array[:,:,:-1]**(2-self.alpha))
x = np.nan_to_num(tri_array[:,:,:-1]*(tri_array[:,:,1:]*0+1))
y = np.nan_to_num(tri_array[:,:,1:])
LDF = np.sum(w*x*y,axis=1)/np.sum(w*x*x,axis=1)
#Chainladder (alpha=1/delta=1)
#LDF = np.sum(np.nan_to_num(tri_array[:,:,1:]),axis=1) / np.sum(np.nan_to_num((tri_array[:,:,1:]*0+1)*tri_array[:,:,:-1]),axis=1)
#print(LDF.shape)
# assumes no tail
CDF = np.append(np.cumprod(LDF[:,::-1],axis=1)[:,::-1],np.array([1]*tri_array.shape[0]).reshape(tri_array.shape[0],1),axis=1)
latest = np.flip(tri_array,axis=1).diagonal(axis1=1,axis2=2)
ults = latest*CDF
lu = list(ults)
lc = list(CDF)
exp_cum_triangle = np.array([np.flipud(lu[num].reshape(tri_array.shape[2],1).dot(1/lc[num].reshape(1,tri_array.shape[2]))) for num in range(tri_array.shape[0])])
exp_incr_triangle = np.append(exp_cum_triangle[:,:,0,np.newaxis],np.diff(exp_cum_triangle),axis=2)
return LDF, CDF, ults, exp_incr_triangle
def ensurebuf(self, invalidate=True):
if self.dbuf is None:
if self.dpil is not None:
self.dbuf = self.dpil.tostring("raw", "RGBX", 0, 1)
elif self.darr is not None:
data = self.scaledpixelarray(0,255.999)
self.dbuf = np.dstack(( np.flipud(np.rollaxis(data,1)).astype(np.uint8),
np.zeros(self.shape[::-1],np.uint8) )).tostring()
else:
raise ValueError("No source data for conversion to buffer")
if invalidate:
self.dpil = None
self.darr = None
self.rangearr = None
## This private function ensures that there is a valid numpy array representation, converting from
# one of the other representations if necessary, and invalidating the other representations if requested.
def ensurearr(self, invalidate=True):
if self.darr is None:
if self.dpil is not None:
self.darr = np.fromstring(self.dpil.tostring("raw", "RGB", 0, -1), np.uint8).astype(np.float64)
self.darr = np.rollaxis(np.reshape(self.darr, (self.shape[1], self.shape[0], 3) ), 1)
elif self.dbuf is not None:
self.darr = np.fromstring(self.dbuf, np.uint8).astype(np.float64)
self.darr = np.delete(np.reshape(self.darr, (self.shape[1], self.shape[0], 4) ), 3, 2)
self.darr = np.rollaxis(np.flipud(self.darr), 1)
else:
raise ValueError("No source data for conversion to array")
self.rangearr = ( 0, 255.999 )
if invalidate:
self.dpil = None
self.dbuf = None
# -----------------------------------------------------------------
## This private helper function returns a 2-tuple containing the least and most significant 16-bit portion
# of the specified unsigned 32-bit integer value.
def get_historical_data(self, num_periods=200):
gdax_client = gdax.PublicClient()
end = datetime.datetime.utcnow()
end_iso = end.isoformat()
start = end - datetime.timedelta(seconds=(self.period_size * num_periods))
start_iso = start.isoformat()
ret = gdax_client.get_product_historic_rates(self.product, granularity=self.period_size, start=start_iso, end=end_iso)
# Check if we got rate limited, which will return a JSON message
while not isinstance(ret, list):
time.sleep(3)
ret = gdax_client.get_product_historic_rates(self.product, granularity=self.period_size, start=start_iso, end=end_iso)
hist_data = np.array(ret, dtype='object')
for row in hist_data:
row[0] = datetime.datetime.fromtimestamp(row[0], pytz.utc)
return np.flipud(hist_data)
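The trailing np.flipud presumably reverses the row order of the returned candles (e.g. from newest-first to oldest-first) without touching the columns; a small illustration with made-up rows:

import numpy as np

# Hypothetical candle rows, newest first: [timestamp, low, high, open, close, volume]
rows = np.array([[300, 9.0, 9.5, 9.1, 9.4, 12.0],
                 [200, 8.8, 9.2, 8.9, 9.1, 10.0],
                 [100, 8.5, 9.0, 8.6, 8.9, 11.0]])

oldest_first = np.flipud(rows)    # reverses only the row order
assert oldest_first[0, 0] == 100 and oldest_first[-1, 0] == 300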
def sliceImages(inputImage, targetImage):
inputSlices = []
targetSlices = []
sliceSize = 32
for y in range(0,inputImage.shape[1]//sliceSize):
for x in range(0,inputImage.shape[0]//sliceSize):
inputSlice = inputImage[x*sliceSize:(x+1)*sliceSize,y*sliceSize:(y+1)*sliceSize]
targetSlice = targetImage[x*sliceSize//2:(x+1)*sliceSize//2,y*sliceSize//2:(y+1)*sliceSize//2]
# only add slices if they're not just empty space
# if (np.any(targetSlice)):
# Reweight smaller sizes
# for i in range(0,max(1,128//inputImage.shape[1])**2):
inputSlices.append(inputSlice)
targetSlices.append(targetSlice)
# inputSlices.append(np.fliplr(inputSlice))
# targetSlices.append(np.fliplr(targetSlice))
# inputSlices.append(np.flipud(inputSlice))
# targetSlices.append(np.flipud(targetSlice))
# naiveSlice = imresize(inputSlice, 0.5)
# deltaSlice = targetSlice - naiveSlice
# targetSlices.append(deltaSlice)
# return two arrays of images in a tuple
return (inputSlices, targetSlices)
def transform(patch, flip=False, mirror=False, rotations=[]):
    """Perform data augmentation on a patch.

    Args:
        patch (numpy array): The patch to be processed.
        flip (bool, optional): up/down symmetry.
        mirror (bool, optional): left/right symmetry.
        rotations (int list, optional): rotations to perform (angles in deg).

    Returns:
        array list: list of augmented patches
    """
    transformed_patches = [patch]
    for angle in rotations:
        transformed_patches.append(skimage.img_as_ubyte(skimage.transform.rotate(patch, angle)))
    if flip:
        transformed_patches.append(np.flipud(patch))
    if mirror:
        transformed_patches.append(np.fliplr(patch))
    return transformed_patches
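Together, flipud and fliplr above cover the vertical and horizontal flips; combined they amount to a 180-degree rotation, which this sketch checks:

import numpy as np

patch = np.arange(9).reshape(3, 3)
up_down = np.flipud(patch)        # vertical flip, same as patch[::-1, :]
left_right = np.fliplr(patch)     # horizontal flip, same as patch[:, ::-1]

assert np.array_equal(up_down, patch[::-1, :])
assert np.array_equal(left_right, patch[:, ::-1])
assert np.array_equal(np.flipud(np.fliplr(patch)), np.rot90(patch, 2))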
# In[4]:
def transformData(self, data):
    if 'newdims' in self.opts:
        (H, W) = self.opts['newdims']
        data = misc.imresize(data, (H, W), interp='bilinear')
    if 'zeromean' in self.opts and self.opts['zeromean']:
        mean = self.opts['dataset_mean']  # provided by bmanager
        data = data - mean
    if 'rangescale' in self.opts and self.opts['rangescale']:
        min_ = self.opts['dataset_min']  # provided by bmanager
        min_ = np.abs(min_.min())
        max_ = self.opts['dataset_max']  # provided by bmanager
        max_ = np.abs(max_.max())
        data = 127 * data / max(min_, max_)
    else:
        data = data - 127.0
    if 'randomflip' in self.opts and self.opts['randomflip']:
        if np.random.rand() <= self.opts['randomflip_prob']:
            data = np.flipud(data)
            self.dataflip_state = True
    return data
def write_pfm(data, fpath, scale=1, file_identifier="Pf", dtype="float32"):
# PFM format definition: http://netpbm.sourceforge.net/doc/pfm.html
data = np.flipud(data)
height, width = np.shape(data)[:2]
values = np.ndarray.flatten(np.asarray(data, dtype=dtype))
endianess = data.dtype.byteorder
if endianess == '<' or (endianess == '=' and sys.byteorder == 'little'):
scale *= -1
with open(fpath, 'wb') as ff:
ff.write(file_identifier + '\n')
ff.write('%d %d\n' % (width, height))
ff.write('%d\n' % scale)
ff.write(values)
def Gaussian2D(image, sigma, padding=0):
n, m = image.shape[0], image.shape[1]
tmp = np.zeros((n + padding, m + padding))
if tmp.shape[0] < 4:
raise ValueError('Image and padding too small')
if tmp.shape[1] < 4:
raise ValueError('Image and padding too small')
B, A = __gausscoeff(sigma)
tmp[:n, :m] = image
tmp = lfilter(B, A, tmp, axis=0)
tmp = np.flipud(tmp)
tmp = lfilter(B, A, tmp, axis=0)
tmp = np.flipud(tmp)
tmp = lfilter(B, A, tmp, axis=1)
tmp = np.fliplr(tmp)
tmp = lfilter(B, A, tmp, axis=1)
tmp = np.fliplr(tmp)
return tmp[:n, :m]
#-----------------------------------------------------------------------------
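Gaussian2D above uses the flip-filter-flip pattern: filtering once introduces a phase delay, so flipping, filtering again and flipping back runs the filter in the reverse direction and cancels that delay. A minimal 1-D sketch of the same pattern (simple moving-average filter, illustrative only):

import numpy as np
from scipy.signal import lfilter

x = np.random.randn(200, 1)                    # noisy column signal
b, a = np.ones(5) / 5.0, np.array([1.0])       # simple moving-average filter

y = lfilter(b, a, x, axis=0)                            # forward pass (introduces delay)
y = np.flipud(lfilter(b, a, np.flipud(y), axis=0))      # backward pass (cancels the delay)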
def generate_plot(self, filename, title='', xlabel='', ylabel=''):
data_keys = list(self.data.keys())
key_num = len(data_keys)
self.plot = plt.figure()
if key_num == 1:
splt = self.plot.add_subplot(1, 1, 1)
im_data = splt.imshow(numpy.flipud(self.data[data_keys[0]][0]), origin='lower')
splt.set_xlabel(xlabel)
splt.set_ylabel(ylabel)
splt.set_title(title)
else: ## plotting multiple images in one figure still has problems; the visualization is not good
logger.error('not supported yet')
self.plot.colorbar(im_data)
self.plot.savefig(filename) #, bbox_inches='tight'
#class MultipleLinesPlot(PlotWithData):
# def generate_plot(self, filename, title='', xlabel='', ylabel=''):
def make_G_matrix(T,g):
''' create matrix of autoregression to enforce indicator dynamics
Inputs:
T: positive integer
number of time-bins
g: nd.array, vector p x 1
Discrete time constants
Output:
G: sparse diagonal matrix
Matrix of autoregression
'''
if type(g) is np.ndarray:
if len(g) == 1 and g < 0:
g=0
# gs=np.matrix(np.hstack((-np.flipud(g[:]).T,1)))
gs=np.matrix(np.hstack((1,-(g[:]).T)))
ones_=np.matrix(np.ones((T,1)))
G = spdiags((ones_*gs).T,range(0,-len(g)-1,-1),T,T)
return G
else:
raise Exception('g must be an array')
#%%
def fits2jpg(fname):
hdu_list = fits.open(fname)
image = hdu_list[0].data
image = np.squeeze(image)
img = np.copy(image)
idx = np.isnan(img)
img[idx] = 0
img_clip = np.flipud(img)
sigma = 3.0
# Estimate stats
mean, median, std = sigma_clipped_stats(img_clip, sigma=sigma, iters=10)
# Clip off n sigma points
img_clip = clip(img_clip,std*sigma)
if img_clip.shape[0] !=150 or img_clip.shape[1] !=150:
img_clip = resize(img_clip, (150,150))
#img_clip = rgb2gray(img_clip)
outfile = fname[0:-5] +'.png'
imsave(outfile, img_clip)
return img_clip,outfile
# Do the fusion classification
def photonsToFrame(photonposframe,imagesize,background):
pixels = imagesize
edges = range(0, pixels+1)
# HANDLE CASE FOR NO PHOTONS DETECTED AT ALL IN FRAME
if photonposframe.size == 0:
simframe = _np.zeros((pixels, pixels))
else:
xx = photonposframe[:, 0]
yy = photonposframe[:, 1]
simframe, xedges, yedges = _np.histogram2d(yy, xx, bins=(edges, edges))
simframe = _np.flipud(simframe) # to be consistent with render
#simframenoise = noisy(simframe,background,noise)
simframenoise = noisy_p(simframe, background)
simframenoise[simframenoise > 2**16-1] = 2**16-1
simframeout = _np.round(simframenoise).astype('<u2')
return simframeout
def compute_tbl_properties(y, uMean, nu, flip):
"""Compute various parameters of a TBL."""
y = y[np.nonzero(y)]
uMean = uMean[np.nonzero(uMean)]
if flip:
y = np.flipud(y)
uMean = np.flipud(uMean)
theta = momentum_thickness(y, uMean)
delta = delta_99(y, uMean)
deltaStar = delta_star(y, uMean)
uTau = np.sqrt(nu*uMean[1]/y[1])
u0 = uMean[-1]
yPlus1 = y[1]*uTau/nu
return theta, deltaStar, delta, uTau, u0, yPlus1
def DST4(samples):
"""
Method to create DST4 transformation using DST3
Arguments :
samples : (1D Array) Input samples to be transformed
Returns :
y : (1D Array) Transformed output samples
"""
# Initialize
samplesup=np.zeros(2*N, dtype = np.float32)
# Upsample signal
# Reverse order to obtain DST4 out of DCT4:
#samplesup[1::2]=np.flipud(samples)
samplesup[0::2] = samples
y = spfft.dst(samplesup,type=3,norm='ortho')*np.sqrt(2)#/2
# Flip sign of every 2nd subband to obtain DST4 out of DCT4
#y=(y[0:N])*(((-1)*np.ones(N, dtype = np.float32))**range(N))
return y[0: N]
def _get_wordcloud(img, patch, words, word_to_frequency=None, **wordcloud_kwargs):
# get the boolean mask corresponding to each patch
path = patch.get_path()
mask = path.contains_points(img.pixel_coordinates).reshape((img.y_resolution, img.x_resolution))
# make mask matplotlib-venn compatible
mask = (~mask * 255).astype(np.uint8) # black indicates mask position
mask = np.flipud(mask) # origin is in upper left
# create wordcloud
wc = WordCloud(mask=mask,
background_color=None,
mode="RGBA",
**wordcloud_kwargs)
if not word_to_frequency:
text = " ".join(words)
wc.generate(text)
else:
wc.generate_from_frequencies({word: word_to_frequency[word] for word in words})
return wc
def autocorrelation(self):
"Autocorrelation as a function of time"
if self.__autocorrelation is not None:
return self.__autocorrelationTimeSeries, self.__autocorrelation
negT = -np.flipud(self.timeSeries[1:])
autocorrelationTime = np.hstack((negT, self.timeSeries))
self.__autocorrelationTimeSeries = autocorrelationTime
initialWF = self[0]
ACF = []
for WF in self:
ACF.append(WF.overlap(initialWF))
ACF = np.array(ACF)
negACF = np.conj(np.flipud(ACF[1:]))
totalACF = np.hstack((negACF, ACF))
self.__autocorrelation = totalACF
return self.__autocorrelationTimeSeries, self.__autocorrelation
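The method above extends the positive-time correlation to negative times via np.conj(np.flipud(...)); a small sketch of the resulting Hermitian symmetry with made-up values:

import numpy as np

t = np.array([0.0, 0.5, 1.0, 1.5])
acf_pos = np.array([1.0, 0.8 + 0.1j, 0.5 - 0.2j, 0.2])      # hypothetical positive-time ACF

t_full = np.hstack((-np.flipud(t[1:]), t))                   # symmetric time axis
acf_full = np.hstack((np.conj(np.flipud(acf_pos[1:])), acf_pos))

assert np.allclose(acf_full, np.conj(acf_full[::-1]))        # C(-t) == conj(C(t))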
def _applyImageFlips(image, flips):
'''
Apply left-right and up-down flips to an image
Args:
image (numpy array 2D/3D): image to be flipped
flips (tuple):
[0]: Boolean to flip horizontally
[1]: Boolean to flip vertically
Returns:
Flipped image
'''
image = np.fliplr(image) if flips[0] else image
image = np.flipud(image) if flips[1] else image
return image
def convolve1d_2D_numpy(a, b, mode='full'):
nwords, ndim = a.shape
filter_width, ndim = b.shape
b = np.flipud(b) # flip the kernel
if mode == 'full':
pad = np.zeros((filter_width-1, ndim))
a = np.vstack([pad, a, pad])
shape = (nwords+filter_width-1, filter_width, ndim)
elif mode == 'valid':
shape = (nwords-filter_width+1, filter_width, ndim)
strides = (a.strides[0],) + a.strides
view = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
conv_out = np.einsum('kij,ij->kj', view, b)
return conv_out
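Flipping the kernel with np.flipud is what turns the sliding-window correlation above into a true convolution; assuming the function above is in scope, the result should match a column-wise np.convolve:

import numpy as np

a = np.random.randn(7, 3)     # 7 positions, 3 dimensions
b = np.random.randn(4, 3)     # filter of width 4

out = convolve1d_2D_numpy(a, b, mode='full')
ref = np.stack([np.convolve(a[:, d], b[:, d], mode='full') for d in range(3)], axis=1)
assert np.allclose(out, ref)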
def captureLabelImage(self, filename):
    view = self.view
    self.disableLighting()
    im = sgp.saveScreenshot(view, filename, shouldRender=False, shouldWrite=False)
    if filename is not None:
        img = vnp.getNumpyFromVtk(im, 'ImageScalars')
        assert img.dtype == np.uint8
        img.shape = (im.GetDimensions()[1], im.GetDimensions()[0], 3)
        img = np.flipud(img)
        img = img[:,:,0]
        print('writing:', filename)
        scipy.misc.imsave(filename, img)
    return im
def _plot_displacement(ms):
if not plot:
ms.down
return
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
fig = plt.figure()
ax = fig.gca()
ms.processSources()
ax.imshow(num.flipud(ms.down), aspect='equal',
extent=[0, ms.frame.E.max(), 0, ms.frame.N.max()])
for src in ms.sources:
for seg in src.segments:
p = Polygon(seg.outline(), alpha=.8, fill=False)
ax.add_artist(p)
if isinstance(src, OkadaPath):
nodes = num.array(src.nodes)
ax.scatter(nodes[:, 0], nodes[:, 1], color='r')
plt.show()
fig.clear()
def _plot_displacement(ms):
if not plot:
ms.down
return
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon # noqa
fig = plt.figure()
ax = fig.gca()
ms.processSources()
ax.imshow(num.flipud(ms.north), aspect='equal',
extent=[0, ms.frame.E.max(), 0, ms.frame.N.max()])
# for src in ms.sources:
# for seg in src.segments:
# p = Polygon(seg.outline(), alpha=.8, fill=False)
# ax.add_artist(p)
plt.show()
fig.clear()
def img_reshape(input_img):
""" (3, 64, 64) --> (64, 64, 3) """
_img = np.transpose(input_img, (1, 2, 0))
_img = np.flipud(_img)
_img = np.reshape(_img, (1, img_dim[0], img_dim[1], img_dim[2]))
return _img
def flip_plane(array,plane=0):
# Flip axial plane LR, i.e. change left/right hemispheres. 3D tensors-only, batch_size=1.
# n_slices = array.shape[2]
# for i in range(n_slices):
# array[:,:,i] = np.flipud(array[:,:,i])
# return array
n_x = array.shape[plane]
for i in range(n_x):
if plane == 0:
array[i,:,:] = np.flipud(array[i,:,:])
if plane == 1:
array[:,i,:] = np.flipud(array[:,i,:])
if plane == 2:
array[:,:,i] = np.flipud(array[:,:,i])
return array
def _trim_silence(self, audio: ndarray) -> ndarray:
def trim_start(sound: ndarray) -> ndarray:
return numpy.array(list(dropwhile(lambda x: x < self.silence_threshold_for_not_normalized_sound, sound)))
def trim_end(sound: ndarray) -> ndarray:
return flipud(trim_start(flipud(sound)))
return trim_start(trim_end(audio))
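The helper above trims trailing silence by flipping the signal, trimming its start, and flipping back; a self-contained sketch of that trick with a hypothetical threshold:

import numpy as np
from itertools import dropwhile

threshold = 0.1                                            # hypothetical silence threshold
sound = np.array([0.0, 0.02, 0.5, 0.8, 0.3, 0.01, 0.0])

def trim_start(x):
    return np.array(list(dropwhile(lambda v: v < threshold, x)))

trimmed = trim_start(np.flipud(trim_start(np.flipud(sound))))   # trim the end, then the start
assert np.allclose(trimmed, [0.5, 0.8, 0.3])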
def write_SSR_IRs(filename, time_data_l, time_data_r, wavformat="float"):
"""Takes two time signals and writes out the horizontal plane as HRIRs for the SoundScapeRenderer.
Ideally both hold 360 IRs, but smaller sets are scaled up by repetition where possible.
Parameters
----------
filename : string
filename to write to
time_data_l, time_data_r : io.ArraySignal
ArraySignals for left/right ear
wavformat : string
wav file format to write. Either "float" or "int16"
"""
equator_IDX_left = utils.nearest_to_value_logical_IDX(time_data_l.grid.colatitude, _np.pi / 2)
equator_IDX_right = utils.nearest_to_value_logical_IDX(time_data_r.grid.colatitude, _np.pi / 2)
IRs_left = time_data_l.signal.signal[equator_IDX_left]
IRs_right = time_data_r.signal.signal[equator_IDX_right]
if _np.mod(360 / IRs_left.shape[0], 1) == 0:
IRs_left = _np.repeat(IRs_left, 360 / IRs_left.shape[0], axis=0)
else:
raise ValueError('Number of channels for left ear cannot be fit into 360.')
if _np.mod(360 / IRs_right.shape[0], 1) == 0:
IRs_right = _np.repeat(IRs_right, 360 / IRs_right.shape[0], axis=0)
else:
raise ValueError('Number of channels for right ear cannot be fit into 360.')
IRs_to_write = utils.interleave_channels(IRs_left, IRs_right, style="SSR")
data_to_write = utils.simple_resample(IRs_to_write, original_fs=time_data_l.signal.fs, target_fs=44100)
# Fix SSR IR alignment stuff: left<>right flipped and 90 degree rotation
data_to_write = _np.flipud(data_to_write)
data_to_write = _np.roll(data_to_write, -90, axis=0)
if wavformat == "float":
sio.wavfile.write(filename, 44100, data_to_write.astype(_np.float32).T)
elif wavformat == "int16":
sio.wavfile.write(filename, 44100, (data_to_write * 32767).astype(_np.int16).T)
else:
raise TypeError("Format " + wavformat + " not known. Should be either 'float' or 'int16'.")
def parsedata(self, package):
"""
This function parses the data package sent by MuLES to obtain all the available
data as a matrix of size [n_samples, n_columns], so the total number of elements
in the matrix is n_samples * n_columns. Each column represents one channel.
Argument:
package: Data package sent by MuLES.
"""
size_element = 4 # Size of each one of the elements is 4 bytes
n_columns = len(self.params['data format'])
n_bytes = len(package)
n_samples = (n_bytes/size_element) / n_columns
####mesData = np.uint8(mesData) # Convert from binary to integers (not necessary in Python)
bytes_per_element = np.flipud(np.reshape(list(bytearray(package)), [size_element,-1],order='F'))
# Changes "package" to a list with size (n_bytes,1)
# Reshapes the list into a matrix bytes_per_element which has the size: (4,n_bytes/4)
# Flips Up-Down the matrix of size (4,n_bytes/4) to correct the swap in bytes
package_correct_order = np.uint8(np.reshape(bytes_per_element,[n_bytes,-1],order='F' ))
# Unrolls the matrix bytes_per_element, in "package_correct_order"
# that has a size (n_bytes,1)
data_format_tags = self.params['data format']*n_samples
# Tags used to map the elements into their corresponding representation
package_correct_order_char = "".join(map(chr,package_correct_order))
elements = struct.unpack(data_format_tags,package_correct_order_char)
# Elements are cast in their corresponding representation
data = np.reshape(np.array(elements),[n_samples,n_columns],order='C')
# Elements are reshaped into data [n_samples, n_columns]
return data
def display_current_results(self, visuals, epoch):
if self.display_id > 0: # show images in the browser
idx = 1
for label, image_numpy in visuals.items():
#image_numpy = np.flipud(image_numpy)
self.vis.image(image_numpy.transpose([2,0,1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
if self.use_html: # save images to a html file
for label, image_numpy in visuals.items():
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
# errors: dictionary of error labels and values
def mirrorArray(self, x, direction="x"):
X = x.reshape((self.nx_core, self.ny_core), order="F")
if direction == "x" or direction == "y" :
X2 = np.vstack((-np.flipud(X), X))
else:
X2 = np.vstack((np.flipud(X), X))
return X2
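mirrorArray builds a sign-flipped mirror of the core grid by stacking -np.flipud(X) on top of X; a small sketch of that construction (the "x"/"y" branch above):

import numpy as np

X = np.array([[1, 2],
              [3, 4]])
mirrored = np.vstack((-np.flipud(X), X))    # sign-flipped mirror stacked above the original

assert np.array_equal(mirrored, np.array([[-3, -4],
                                          [-1, -2],
                                          [ 1,  2],
                                          [ 3,  4]]))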