Python numpy module: prod() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use numpy.prod().
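numpy.prod() multiplies array elements together, either over the whole array or along a given axis. A recurring idiom in the examples below is numpy.prod(a.shape), the total element count of a. A minimal illustration:

import numpy

a = numpy.arange(6).reshape(2, 3)
print(numpy.prod([2, 3, 4]))   # 24: product of a plain Python sequence
print(numpy.prod(a, axis=0))   # [ 0  4 10]: column-wise product
print(numpy.prod(a.shape))     # 6: total number of elements, same as a.size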
def _read_ctr_ticks(
        task_handle, high_tick, low_tick, num_samps_per_chan, timeout,
        interleaved=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCtrTicks
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint32, flags=('C', 'W')),
                    wrapped_ndpointer(dtype=numpy.uint32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, interleaved.value,
        high_tick, low_tick, numpy.prod(high_tick.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

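All of the read wrappers in this group follow the same pattern: the caller preallocates a NumPy array, and the wrapper reports the array's total capacity to the C library as numpy.prod(array.shape). A minimal sketch of that size computation (the names and shape here are illustrative, not taken from nidaqmx):

import numpy

num_channels, samps_per_chan = 2, 100
high_tick = numpy.zeros((num_channels, samps_per_chan), dtype=numpy.uint32)

# Total buffer capacity in samples, as handed to the C call above.
array_size_in_samps = numpy.prod(high_tick.shape)
assert array_size_in_samps == high_tick.size  # .size is the equivalent shortcut
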
def _read_analog_f_64(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadAnalogF64
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    c_bool32,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_binary_i_16(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryI16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int16, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_binary_u_16(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryU16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint16, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_binary_i_32(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryI32
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_binary_u_32(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryU32
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_digital_u_16(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadDigitalU16
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint16, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_digital_u_32(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadDigitalU32
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_counter_f_64(task_handle, read_array, num_samps_per_chan, timeout):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCounterF64
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_counter_u_32(task_handle, read_array, num_samps_per_chan, timeout):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCounterU32
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    wrapped_ndpointer(dtype=numpy.uint32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_counter_u_32_ex(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCounterU32Ex
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.uint32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_ctr_freq(
        task_handle, freq, duty_cycle, num_samps_per_chan, timeout,
        interleaved=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCtrFreq
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, interleaved.value,
        freq, duty_cycle, numpy.prod(freq.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_ctr_time(
        task_handle, high_time, low_time, num_samps_per_chan, timeout,
        interleaved=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCtrTime
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, interleaved.value,
        high_time, low_time, numpy.prod(high_time.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _pooling_layer(
        self, layer_name, inputs, size, stride, padding='SAME'):
    """Pooling layer operation constructor.

    Args:
      layer_name: layer name.
      inputs: input tensor
      size: kernel size.
      stride: stride
      padding: 'SAME' or 'VALID'. See tensorflow doc for detailed description.
    Returns:
      A pooling layer operation.
    """
    with tf.variable_scope(layer_name) as scope:
        out = tf.nn.max_pool(inputs,
                             ksize=[1, size, size, 1],
                             strides=[1, stride, stride, 1],
                             padding=padding)
        activation_size = np.prod(out.get_shape().as_list()[1:])
        self.activation_counter.append((layer_name, activation_size))
        return out

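The activation_size bookkeeping above multiplies all non-batch dimensions of the pooled output. As a standalone illustration (the shape below is made up):

import numpy as np

out_shape = [None, 16, 16, 64]            # [batch, height, width, channels]
activation_size = np.prod(out_shape[1:])  # 16 * 16 * 64 = 16384 activations per example
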
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        # pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b

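The fan-in/fan-out comments above describe Glorot-style uniform initialization. Worked through with concrete numbers (a 3x3 filter with 32 input and 64 output channels, chosen here purely for illustration):

import numpy as np

filter_shape = [3, 3, 32, 64]  # [filter_h, filter_w, in_channels, out_channels]
fan_in = np.prod(filter_shape[:3])                     # 3 * 3 * 32 = 288
fan_out = np.prod(filter_shape[:2]) * filter_shape[3]  # 3 * 3 * 64 = 576
w_bound = np.sqrt(6. / (fan_in + fan_out))             # sqrt(6 / 864) ~= 0.083
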
def load_raw(filename, volsize):
    """ inspired by mhd_utils from github"""
    dim = 3
    element_channels = 1
    np_type = np.ubyte

    arr = list(volsize)
    volume = np.prod(arr[0:dim - 1])
    shape = (arr[dim - 1], volume, element_channels)
    with open(filename, 'rb') as fid:
        data = np.fromfile(fid, count=np.prod(shape), dtype=np_type)
        data.shape = shape

    arr.reverse()
    data = data.reshape(arr)
    return data

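A hedged usage sketch for load_raw; the filename is hypothetical, and the file is expected to contain width * height * depth unsigned bytes:

# volsize is (width, height, depth); the data comes back as (depth, height, width).
volume = load_raw('scan.raw', volsize=(256, 256, 128))  # hypothetical file
print(volume.shape)  # (128, 256, 256)
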
def discriminator_labeler(image, output_dim, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dl_h0_conv'))  # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim * 2, name='dl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim * 4, name='dl_h2_conv')))  # 16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim * 8, name='dl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits, variables

def discriminator_gen_labeler(image, output_dim, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_gen_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dgl_h0_conv'))  # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim * 2, name='dgl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim * 4, name='dgl_h2_conv')))  # 16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim * 8, name='dgl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        D_labels_logits = linear(h3_flat, output_dim, 'dgl_h3_Label')
        D_labels = tf.nn.sigmoid(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, D_labels_logits, variables

def discriminator_on_z(image, config, reuse=None):
    batch_size = tf.shape(image)[0]
    with tf.variable_scope("disc_z_labeler", reuse=reuse) as vs:
        dl_bn1 = batch_norm(name='dl_bn1')
        dl_bn2 = batch_norm(name='dl_bn2')
        dl_bn3 = batch_norm(name='dl_bn3')

        h0 = lrelu(conv2d(image, config.df_dim, name='dzl_h0_conv'))  # 16,32,32,64
        h1 = lrelu(dl_bn1(conv2d(h0, config.df_dim * 2, name='dzl_h1_conv')))  # 16,16,16,128
        h2 = lrelu(dl_bn2(conv2d(h1, config.df_dim * 4, name='dzl_h2_conv')))  # 16,16,16,248
        h3 = lrelu(dl_bn3(conv2d(h2, config.df_dim * 8, name='dzl_h3_conv')))
        dim3 = np.prod(h3.get_shape().as_list()[1:])
        h3_flat = tf.reshape(h3, [-1, dim3])
        D_labels_logits = linear(h3_flat, config.z_dim, 'dzl_h3_Label')
        D_labels = tf.nn.tanh(D_labels_logits)
        variables = tf.contrib.framework.get_variables(vs)
    return D_labels, variables

def decompose_size(size):
    """Computes the number of input and output units for a weight shape.

    Parameters
    ----------
    size
        Integer shape tuple.

    Returns
    -------
    A tuple of scalars, `(fan_in, fan_out)`.
    """
    if len(size) == 2:
        fan_in = size[0]
        fan_out = size[1]
    elif len(size) == 4 or len(size) == 5:
        respective_field_size = np.prod(size[2:])
        fan_in = size[1] * respective_field_size
        fan_out = size[0] * respective_field_size
    else:
        fan_in = fan_out = int(np.sqrt(np.prod(size)))

    return fan_in, fan_out

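Quick sanity checks of decompose_size on the three shape families it distinguishes:

print(decompose_size((100, 50)))       # dense weight: (100, 50)
print(decompose_size((64, 32, 3, 3)))  # conv kernel (out, in, h, w): (32 * 9, 64 * 9) == (288, 576)
print(decompose_size((7,)))            # fallback: fan_in = fan_out = int(sqrt(7)) == 2
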
def _random_overlay(self, static_hidden=False):
    """Construct random max pool locations."""
    s = self.shapes[2]

    if static_hidden:
        args = np.random.randint(s[2], size=np.prod(s) / s[2] / s[4])
        overlay = np.zeros(np.prod(s) / s[4], np.bool)
        overlay[args + np.arange(len(args)) * s[2]] = True
        overlay = overlay.reshape([s[0], s[1], s[3], s[2]])
        overlay = np.rollaxis(overlay, -1, 2)
        return arrays.extend(overlay, s[4])
    else:
        args = np.random.randint(s[2], size=np.prod(s) / s[2])
        overlay = np.zeros(np.prod(s), np.bool)
        overlay[args + np.arange(len(args)) * s[2]] = True
        overlay = overlay.reshape([s[0], s[1], s[3], s[4], s[2]])
        return np.rollaxis(overlay, -1, 2)

def finalization(self):
    '''
    Add sparse matrix multiplication on GPU.
    Note: use the "python-cuda-cffi" generated interface to access cusparse.
    '''
    self.gpu_flag = 0
    self.CSR = cuda_cffi.cusparse.CSR.to_CSR(self.st['p'].astype(dtype))
    self.CSRH = cuda_cffi.cusparse.CSR.to_CSR(
        self.st['p'].getH().tocsr().astype(dtype))
    self.scikit_plan = cu_fft.Plan(self.st['Kd'], dtype, dtype)
    # self.pHp = cuda_cffi.cusparse.CSR.to_CSR(
    #     self.st['pHp'].astype(dtype))
    self.gpu_flag = 1
    self.sn_gpu = pycuda.gpuarray.to_gpu(self.sn.astype(dtype))
    # tmp_array = skcuda.misc.ones((numpy.prod(self.st['Kd']), 1), dtype=dtype)
    # tmp = cuda_cffi.cusolver.csrlsvqr(self.CSR, tmp_array)

def plan(self, om, Nd, Kd, Jd):
    self.debug = 0  # debug
    n_shift = tuple(0 * x for x in Nd)
    self.st = plan(om, Nd, Kd, Jd)
    self.Nd = self.st['Nd']  # backup
    self.sn = self.st['sn']  # backup
    self.ndims = len(self.st['Nd'])  # dimension
    self.linear_phase(n_shift)  # calculate the linear phase thing

    self.st['pH'] = self.st['p'].getH().tocsr()
    self.st['pHp'] = self.st['pH'].dot(self.st['p'])
    self.NdCPUorder, self.KdCPUorder, self.nelem = preindex_copy(
        self.st['Nd'], self.st['Kd'])
    # self.st['W'] = self.pipe_density()
    self.shape = (self.st['M'], numpy.prod(self.st['Nd']))
    # print('untrimmed', self.st['pHp'].nnz)
    # self.truncate_selfadjoint(1e-1)
    # print('trimmed', self.st['pHp'].nnz)

def __call__(self, input_layer, output_size, scope=None, in_dim=None, stddev=0.02, bias_start=0.0):
    shape = input_layer.shape
    input_ = input_layer.tensor
    try:
        if len(shape) == 4:
            input_ = tf.reshape(input_, tf.pack([tf.shape(input_)[0], np.prod(shape[1:])]))
            input_.set_shape([None, np.prod(shape[1:])])
            shape = input_.get_shape().as_list()

        with tf.variable_scope(scope or "Linear"):
            matrix = self.variable("Matrix", [in_dim or shape[1], output_size], dt=tf.float32,
                                   init=tf.random_normal_initializer(stddev=stddev))
            bias = self.variable("bias", [output_size], init=tf.constant_initializer(bias_start))
            return input_layer.with_tensor(tf.matmul(input_, matrix) + bias, parameters=self.vars)
    except Exception:
        import ipdb; ipdb.set_trace()

def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.pack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
        return grid

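The commented-out NumPy equivalent inside _meshgrid runs as-is; this sketch reproduces the grid the TensorFlow ops build, as a quick way to inspect it:

import numpy as np

height, width = 4, 5
x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
                       np.linspace(-1, 1, height))
ones = np.ones(np.prod(x_t.shape))
grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
print(grid.shape)  # (3, 20) == (3, height * width)
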
def build_model():
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir, patch_class_config.__name__.split('.')[-1])
    metadata = utils.load_pkl(metadata_path)

    print 'Build model'
    model = patch_class_config.build_model()
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print ' number of parameters: %d' % num_params
    print string.ljust(' layer output shapes:', 36),
    print string.ljust('#params:', 10),
    print 'output shape:'
    for layer in all_layers:
        name = string.ljust(layer.__class__.__name__, 32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = string.ljust(num_param.__str__(), 10)
        print ' %s %s %s' % (name, num_param, layer.output_shape)

    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model

def build_model():
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir, patch_config.__name__.split('.')[-1])
    metadata = utils.load_pkl(metadata_path)

    print 'Build model'
    model = patch_config.build_model(patch_size=(window_size, window_size, window_size))
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print ' number of parameters: %d' % num_params
    print string.ljust(' layer output shapes:', 36),
    print string.ljust('#params:', 10),
    print 'output shape:'
    for layer in all_layers:
        name = string.ljust(layer.__class__.__name__, 32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = string.ljust(num_param.__str__(), 10)
        print ' %s %s %s' % (name, num_param, layer.output_shape)

    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model

def build_model():
    print 'Build model'
    model = patch_config.build_model(patch_size=(window_size, window_size, window_size))
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print ' number of parameters: %d' % num_params
    print string.ljust(' layer output shapes:', 36),
    print string.ljust('#params:', 10),
    print 'output shape:'
    for layer in all_layers:
        name = string.ljust(layer.__class__.__name__, 32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = string.ljust(num_param.__str__(), 10)
        print ' %s %s %s' % (name, num_param, layer.output_shape)

    return model

def build_model():
    metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
    metadata_path = utils.find_model_metadata(metadata_dir, patch_config.__name__.split('.')[-1])
    metadata = utils.load_pkl(metadata_path)

    print 'Build model'
    model = patch_config.build_model()
    all_layers = nn.layers.get_all_layers(model.l_out)
    num_params = nn.layers.count_params(model.l_out)
    print ' number of parameters: %d' % num_params
    print string.ljust(' layer output shapes:', 36),
    print string.ljust('#params:', 10),
    print 'output shape:'
    for layer in all_layers:
        name = string.ljust(layer.__class__.__name__, 32)
        num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
        num_param = string.ljust(num_param.__str__(), 10)
        print ' %s %s %s' % (name, num_param, layer.output_shape)

    nn.layers.set_all_param_values(model.l_out, metadata['param_values'])
    return model

def adjust_prediction(self, probability, image):
    crf = dcrf.DenseCRF(np.prod(probability.shape), 2)
    # crf = dcrf.DenseCRF(np.prod(probability.shape), 1)

    binary_prob = np.stack((1 - probability, probability), axis=0)
    unary = unary_from_softmax(binary_prob)
    # unary = unary_from_softmax(np.expand_dims(probability, axis=0))
    crf.setUnaryEnergy(unary)

    # per dimension scale factors
    sdims = [self.sdims] * 3
    smooth = create_pairwise_gaussian(sdims=sdims, shape=probability.shape)
    crf.addPairwiseEnergy(smooth, compat=2)

    if self.schan:
        # per channel scale factors
        schan = [self.schan] * 6
        appearance = create_pairwise_bilateral(sdims=sdims, schan=schan, img=image, chdim=3)
        crf.addPairwiseEnergy(appearance, compat=2)

    result = crf.inference(self.iter)
    crf_prediction = np.argmax(result, axis=0).reshape(probability.shape).astype(np.float32)

    return crf_prediction

def _sample_cond_single(rng, marginal_pmf, n_group, out, eps):
    """Single sample from conditional probab. (call :func:`self.sample`)"""
    n_sites = len(marginal_pmf[-1])

    # Probability of the incomplete output. Empty output has unit probab.
    out_p = 1.0

    # `n_out` sites of the output have been sampled. We will add
    # at most `n_group` sites to the output at a time.
    for n_out in range(0, n_sites, n_group):
        # Select marginal probability distribution on (at most)
        # `n_out + n_group` sites.
        p = marginal_pmf[min(n_sites, n_out + n_group)]
        # Obtain conditional probab. from joint `p` and marginal `out_p`
        p = p.get(tuple(out[:n_out]) + (slice(None),) * (len(p) - n_out))
        p = project_pmf(mp.prune(p).to_array() / out_p, eps, eps)
        # Sample from conditional probab. for next `n_group` sites
        choice = rng.choice(p.size, p=p.flat)
        out[n_out:n_out + n_group] = np.unravel_index(choice, p.shape)
        # Update probability of the partial output
        out_p *= np.prod(p.flat[choice])

    # Verify we have the correct partial output probability
    p = marginal_pmf[-1].get(tuple(out)).to_array()
    assert abs(p - out_p) <= eps

def _rcanonicalize(self, to_site):
    """Left-canonicalizes all local tensors _ltens[:to_site] in place

    :param to_site: Index of the site up to which canonicalization is to be
        performed

    """
    assert 0 <= to_site < len(self), 'to_site={!r}'.format(to_site)

    lcanon, rcanon = self._lt.canonical_form
    for site in range(lcanon, to_site):
        ltens = self._lt[site]
        q, r = qr(ltens.reshape((-1, ltens.shape[-1])))
        # if ltens.shape[-1] > prod(ltens.phys_shape) --> trivial comp.
        # can be accounted by adapting rank here
        newtens = (q.reshape(ltens.shape[:-1] + (-1,)),
                   matdot(r, self._lt[site + 1]))
        self._lt.update(slice(site, site + 2), newtens,
                        canonicalization=('left', None))