Python numpy module – vsplit() example source code
The following 11 code examples, extracted from open-source Python projects, illustrate how numpy.vsplit() is used in practice.
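Before the project examples, here is a minimal, self-contained sketch of what numpy.vsplit() does: it splits an array along its first (row) axis, either into N equal parts or at explicit row indices. The array below is made up purely for illustration.

import numpy as np

a = np.arange(12).reshape(4, 3)    # 4 rows, 3 columns
top, bottom = np.vsplit(a, 2)      # two equal blocks, each of shape (2, 3)
head, tail = np.vsplit(a, [1])     # split before row 1: shapes (1, 3) and (3, 3)
print(top.shape, bottom.shape, head.shape, tail.shape)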
def rev(self, lng, lat, z=None, _type=np.int32):
    if z is None:
        z = self._default_z
    if all(isinstance(var, (int, float, tuple)) for var in [lng, lat]):
        lng, lat = (np.array([lng]), np.array([lat]))
    if not all(isinstance(var, np.ndarray) for var in [lng, lat]):
        raise ValueError("lng, lat inputs must be of type int, float, tuple or numpy.ndarray")
    if not isinstance(z, np.ndarray):
        z = np.zeros_like(lng) + z
    coord = np.dstack([lng, lat, z])
    offset, scale = np.vsplit(self._offscl, 2)
    normed = coord * scale + offset
    X = self._rpc(normed)
    result = np.rollaxis(np.inner(self._A, X) / np.inner(self._B, X), 0, 3)
    rev_offset, rev_scale = np.vsplit(self._px_offscl_rev, 2)
    # needs to return x/y
    return np.rollaxis(result * rev_scale + rev_offset, 2).squeeze().astype(_type)[::-1]
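In rev() above, np.vsplit unpacks a stacked two-row offset/scale matrix into separate one-row arrays that broadcast against the coordinate stack. The pattern in isolation, with a hypothetical offscl array standing in for self._offscl (the values are invented):

import numpy as np

# hypothetical normalization parameters: row 0 = offsets, row 1 = scales
offscl = np.array([[-105.0, 40.0, 1600.0],
                   [0.1, 0.1, 500.0]])
offset, scale = np.vsplit(offscl, 2)      # each has shape (1, 3)
coord = np.dstack([np.array([-104.9]), np.array([40.1]), np.array([1605.0])])
normed = coord * scale + offset           # broadcasts to shape (1, 1, 3)
print(normed.shape)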
def paint_latents( event ):
    global r, Z, output, painted_rects, MASK, USER_MASK, RECON
    # Get extent of latent paintbrush
    x1, y1 = ( event.x - d.get() ), ( event.y - d.get() )
    x2, y2 = ( event.x + d.get() ), ( event.y + d.get() )
    selected_widget = event.widget
    # Paint in latent space and update Z
    painted_rects.append(event.widget.create_rectangle( x1, y1, x2, y2, fill = rb(color.get()), outline = rb(color.get()) ))
    r[max((y1-bd), 0):min((y2-bd), r.shape[0]), max((x1-bd), 0):min((x2-bd), r.shape[1])] = color.get()/255.0
    Z = np.asarray([np.mean(o) for v in [np.hsplit(h, Z.shape[0])
                                         for h in np.vsplit(r, Z.shape[1])]
                    for o in v]).reshape(Z.shape[0], Z.shape[1])
    if SAMPLE_FLAG:
        update_photo(None, output)
        update_canvas(w)  # Remove this if you wish to see a more free-form paintbrush
    else:
        DELTA = model.sample_at(np.float32([Z.flatten()]))[0] - to_tanh(np.float32(RECON))
        MASK = scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA), axis=0), np.ones((64, 64))], axis=0), 0.7)
        # D = dampen(to_tanh(np.float32(RECON)), MASK*DELTA + (1-MASK)*ERROR)
        D = MASK*DELTA + (1-MASK)*ERROR
        IM = np.uint8(from_tanh(to_tanh(RECON) + D))
        update_canvas(w)  # Remove this if you wish to see a more free-form paintbrush
        update_photo(IM, output)

# Scroll to lighten or darken an image patch
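The Z update above is a block-averaging idiom: vsplit cuts the painted canvas r into horizontal strips, hsplit cuts each strip into cells, and every cell is reduced to its mean. A self-contained sketch of that idiom (the canvas and grid sizes are made up):

import numpy as np

r = np.random.rand(64, 64)                # painted canvas
rows, cols = 8, 8                         # coarse latent grid
Z = np.asarray([[cell.mean()
                 for cell in np.hsplit(strip, cols)]
                for strip in np.vsplit(r, rows)])
print(Z.shape)                            # (8, 8)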
def test_nested_model_void(self):
    from l1l2py import tools
    data, test_data = np.vsplit(self.X, 2)
    labels, test_labels = np.hsplit(self.Y, 2)
    tau_opt, lambda_opt = (50.0, 0.1)
    mu_range = np.linspace(0.1, 1.0, 10)
    assert_raises(
        ValueError, nested_models,
        data, labels, test_data, test_labels,
        mu_range, tau_opt, lambda_opt,
        error_function=tools.regression_error,
        data_normalizer=tools.standardize,
        labels_normalizer=tools.center)
def get_next_sample(self):
    if self.incomplete_samples_ is not None:
        assert self.incomplete_samples_.size > 0
        split = np.vsplit(self.incomplete_samples_, [1])
        assert len(split) == 2
        next_sample = split[0]
        if split[1].size == 0:
            self.incomplete_samples_ = None
        else:
            self.incomplete_samples_ = split[1]
        return next_sample.ravel()
    else:
        return None
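get_next_sample() uses np.vsplit(arr, [1]) to pop the first row off a 2-D buffer while the remainder stays 2-D; a minimal sketch of that queue-like pattern:

import numpy as np

samples = np.arange(6).reshape(3, 2)      # pending samples, one per row
head, rest = np.vsplit(samples, [1])      # head: (1, 2), rest: (2, 2)
next_sample = head.ravel()                # array([0, 1])
print(next_sample, rest.shape)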
def deleteRow(self, row_number, data):
    first, deleted, second = np.vsplit(data, [row_number, row_number + 1])
    return np.vstack((first, second))
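deleteRow() splits the array into the rows before the target row, the row itself, and the rows after it, then stacks the outer two pieces back together. Deleting row 1 of a 4x2 array, for example:

import numpy as np

data = np.arange(8).reshape(4, 2)
first, deleted, second = np.vsplit(data, [1, 2])   # pieces: rows [0:1], [1:2], [2:4]
result = np.vstack((first, second))                # shape (3, 2), row 1 removed
print(result)

np.delete(data, 1, axis=0) produces the same result in a single call.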
def load_csv(self):
    file_name = "data/jra_race_resultNN.csv"
    df = pd.read_csv(file_name)
    # Encode the categorical (string) columns as integer labels
    labelEncoder = preprocessing.LabelEncoder()
    df['area_name'] = labelEncoder.fit_transform(df['area_name'])
    df['race_name'] = labelEncoder.fit_transform(df['race_name'])
    df['track'] = labelEncoder.fit_transform(df['track'])
    df['run_direction'] = labelEncoder.fit_transform(df['run_direction'])
    df['track_condition'] = labelEncoder.fit_transform(df['track_condition'])
    df['horse_name'] = labelEncoder.fit_transform(df['horse_name'])
    df['horse_sex'] = labelEncoder.fit_transform(df['horse_sex'])
    df['jockey_name'] = labelEncoder.fit_transform(df['jockey_name'])
    df['margin'] = labelEncoder.fit_transform(df['margin'])
    df['is_blinkers'] = labelEncoder.fit_transform(df['is_blinkers'])
    df['trainer_name'] = labelEncoder.fit_transform(df['trainer_name'])
    df['comments_by_trainer'] = labelEncoder.fit_transform(df['comments_by_trainer'])
    df['evaluation_by_trainer'] = labelEncoder.fit_transform(df['evaluation_by_trainer'])
    df['dhorse_weight'] = labelEncoder.fit_transform(df['dhorse_weight'])
    x_np = np.array(df[['area_name', 'race_number', 'race_name', 'track', 'run_direction',
                        'distance', 'track_condition', 'purse', 'heads_count',
                        'post_position', 'horse_number', 'horse_name', 'horse_sex', 'horse_age',
                        'jockey_name', 'time', 'margin', 'time3F',
                        'load_weight', 'horse_weight', 'dhorse_weight', 'odds_order',
                        'odds', 'is_blinkers', 'trainer_name', 'comments_by_trainer',
                        'evaluation_by_trainer'
                        ]].fillna(0))
    # One-hot encode the target column (finish_order)
    d = df[['finish_order']].to_dict('records')
    self.vectorizer = DictVectorizer(sparse=False)
    y_np = self.vectorizer.fit_transform(d)
    self.n_classes = len(self.vectorizer.get_feature_names())
    self.train_size = int(len(df[['finish_order']]) / 5)
    self.batch_size = self.train_size
    # Split the features and labels into training and test sets
    [self.x_train, self.x_test] = np.vsplit(x_np, [self.train_size])
    [self.y_train, self.y_test] = np.vsplit(y_np, [self.train_size])
# Create model
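The last two lines of load_csv() show the most common vsplit idiom in these examples: splitting a feature matrix and its label matrix at the same row index to obtain aligned training and test sets. In isolation (the array sizes are invented):

import numpy as np

x_np = np.random.rand(100, 27)            # features
y_np = np.random.rand(100, 10)            # one-hot labels
train_size = 20
x_train, x_test = np.vsplit(x_np, [train_size])
y_train, y_test = np.vsplit(y_np, [train_size])
print(x_train.shape, x_test.shape)        # (20, 27) (80, 27)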
def omega_output(self, fname, parameters):
    """This method writes the dispersion relation to the file fname.

    :param fname: Filename
    :type fname: string
    :param parameters: Array containing the parameters.
    :type parameters: numpy.ndarray
    """
    omega = self.omega(parameters)
    kappa = self.kappamesh
    kmesh = self.kmesh
    k = self.k
    #print(omega, self.dks)
    vgs = np.gradient(omega, *self.dks, edge_order=2)
    vg = np.sqrt(sum(vgc**2 for vgc in vgs))
    if self.dim == 2:
        vg[:3, :3] = 1
    elif self.dim == 3:
        vg[:3, :3, :3] = 1
    vph = omega/k
    vph.ravel()[0] = 1.
    data = [*np.broadcast_arrays(*kappa), k, omega, vph, vg, *vgs]
    data = np.broadcast_arrays(*data)
    blocks = zip(*map(lambda x: np.vsplit(x, x.shape[0]), data))
    with open(fname, 'wb') as f:
        kappanames = ' '.join(['kx', 'ky', 'kz'][:len(kappa)])
        vgsnames = ' '.join(['vgx', 'vgy', 'vgz'][:len(vgs)])
        f.write(('#' + kappanames + ' k omega vph vg ' + vgsnames + '\n').encode('us-ascii'))
        for block_columns in blocks:
            np.savetxt(f, np.vstack(map(np.ravel, block_columns)).T)
            f.write(b'\n')
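The blocks = zip(*map(...)) line above uses np.vsplit(x, x.shape[0]) to turn every 2-D field into a list of single-row slices, so each zip step yields the matching row from every field for block-wise text output. A small sketch with two invented fields:

import numpy as np

k = np.arange(1, 7, dtype=float).reshape(2, 3)   # wavenumbers (illustrative)
omega = np.sqrt(1.0 + k**2)                      # made-up dispersion relation
data = [k, omega]
blocks = zip(*[np.vsplit(x, x.shape[0]) for x in data])
for block_columns in blocks:                     # one (1, 3) row per field at a time
    print(np.vstack([c.ravel() for c in block_columns]).T)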
def gen_task_mbs(self, style, test_fold_id):
    num_task_iterations = int(''.join([c for c in self.regime if c.isdigit()]))
    # make blocks
    if style == 'train':
        task_lines = list(chain(*[fold for n, fold in enumerate(self.task_folds)
                                  if n != test_fold_id]))
        windows_x, windows_y = self.make_windows(task_lines)
        block_x = np.tile(windows_x, [num_task_iterations, 1])
        block_y = np.tile(windows_y, [num_task_iterations, 1])
    elif style == 'test':
        task_lines = list(chain(*[fold for n, fold in enumerate(self.task_folds)
                                  if n == test_fold_id]))
        windows_x, windows_y = self.make_windows(task_lines)
        block_x = windows_x
        block_y = windows_y
    elif style == 'train1':
        task_lines = list(chain(*[fold for n, fold in enumerate(self.task_folds)
                                  if n != test_fold_id]))
        windows_x, windows_y = self.make_windows(task_lines)
        block_x = windows_x
        block_y = windows_y
    else:
        raise AttributeError('rnnlab: Invalid arg to "style"')
    # split to mbs
    if not gcd(self.mb_size, len(block_x)) == self.mb_size:
        raise Exception(
            'rnnlab: Number of task_lines must be divisible by mb_size')
    num_splits = len(block_x) // self.mb_size
    # generate
    for x, y in zip(np.vsplit(block_x, num_splits),
                    np.vsplit(block_y, num_splits)):
        yield x, y
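The generator above depends on np.vsplit(block, num_splits) producing equally sized minibatches, which is why it first checks that the number of rows is an exact multiple of mb_size. A stripped-down version of that loop (sizes are made up):

import numpy as np

mb_size = 4
block_x = np.random.rand(12, 7)           # 12 windows; divisible by mb_size
block_y = np.random.rand(12, 7)
num_splits = len(block_x) // mb_size
for x, y in zip(np.vsplit(block_x, num_splits), np.vsplit(block_y, num_splits)):
    print(x.shape, y.shape)               # (4, 7) (4, 7), three times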
def batch_gen(self, num_iterations=None):
    if not num_iterations:
        num_iterations = self.num_iterations
    # batch
    for n, term_id_doc in enumerate(self.docs):
        windows_mat = self.make_windows_mat(term_id_doc)
        windows_mat_x, windows_mat_y = np.split(windows_mat, [self.bptt_steps], axis=1)
        for _ in range(num_iterations):
            for x, y in zip(np.vsplit(windows_mat_x, self.num_mbs_in_doc),
                            np.vsplit(windows_mat_y, self.num_mbs_in_doc)):
                yield x, y
def _split_array(image):
    """Splits an image into 16x16 sized tiles.

    Returns a list of arrays.
    """
    tiles = []
    dims = image.shape
    split_image = np.vsplit(image, int(dims[0]/16))
    for tile in split_image:
        tiles.extend(np.hsplit(tile, int(dims[1]/16)))
    return tiles

# Currently for 16x16 tiles only
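A quick check of the tiling logic above: vsplit cuts the image into strips 16 rows tall and hsplit cuts each strip into 16-column tiles, so a 64x48 image (made up here) yields 4 * 3 = 12 tiles.

import numpy as np

image = np.zeros((64, 48))
tiles = []
for strip in np.vsplit(image, image.shape[0] // 16):
    tiles.extend(np.hsplit(strip, image.shape[1] // 16))
print(len(tiles), tiles[0].shape)         # 12 (16, 16)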
def _least_square_evoked(data, events, event_id, tmin, tmax, sfreq):
    """Least square estimation of evoked response from data.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times)
        The data from which to estimate the evoked responses.
    events : ndarray, shape (n_events, 3)
        The events typically returned by the read_events function.
        If some events don't match the events of interest as specified
        by event_id, they will be ignored.
    event_id : dict
        The id of the events to consider.
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    sfreq : float
        Sampling frequency.

    Returns
    -------
    evokeds_data : dict of ndarray
        A dict of evoked data for each event type in event_id.
    toeplitz : dict of ndarray
        A dict of toeplitz matrices for each event type in event_id.
    """
    nmin = int(tmin * sfreq)
    nmax = int(tmax * sfreq)
    window = nmax - nmin
    n_samples = data.shape[1]
    toeplitz_mat = dict()
    full_toep = list()
    for eid in event_id:
        # select events by type
        ix_ev = events[:, -1] == event_id[eid]
        # build toeplitz matrix
        trig = np.zeros((n_samples, 1))
        ix_trig = (events[ix_ev, 0]) + nmin
        trig[ix_trig] = 1
        toep_mat = linalg.toeplitz(trig[0:window], trig)
        toeplitz_mat[eid] = toep_mat
        full_toep.append(toep_mat)
    # Concatenate toeplitz
    full_toep = np.concatenate(full_toep)
    # least square estimation
    predictor = np.dot(linalg.pinv(np.dot(full_toep, full_toep.T)), full_toep)
    all_evokeds = np.dot(predictor, data.T)
    all_evokeds = np.vsplit(all_evokeds, len(event_id))
    # parse evoked response
    evoked_data = dict()
    for idx, eid in enumerate(event_id):
        evoked_data[eid] = all_evokeds[idx].T
    return evoked_data, toeplitz_mat
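After solving the joint least-squares problem, the stacked solution contains one block of window rows per event type, and np.vsplit(all_evokeds, len(event_id)) separates those blocks again. The shape bookkeeping in isolation (all numbers and event names invented):

import numpy as np

n_channels, window, n_event_types = 3, 5, 2
all_evokeds = np.random.rand(n_event_types * window, n_channels)    # stacked solution
per_event = np.vsplit(all_evokeds, n_event_types)                   # list of (window, n_channels)
evoked_data = {eid: block.T for eid, block in zip(['aud', 'vis'], per_event)}
print(evoked_data['aud'].shape)                                     # (3, 5): channels x samples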