Python numpy module: set_printoptions() example source code
The code examples below were extracted from open-source Python projects and illustrate how numpy.set_printoptions() is used in practice.
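Before the project examples, a minimal sketch of the options that recur throughout them. numpy.set_printoptions() configures how arrays are rendered by repr() and str(); the settings are process-global and stay in effect until changed again:

import numpy as np

a = np.array([1.5e-9, 2.0, 3.14159265])

np.set_printoptions(precision=3)        # digits after the decimal point
print(a)                                # [1.500e-09 2.000e+00 3.142e+00]

np.set_printoptions(suppress=True)      # suppress scientific notation
print(a)                                # [0.    2.    3.142]

# Options are global; save and restore them explicitly when needed.
defaults = np.get_printoptions()
np.set_printoptions(threshold=np.inf)   # never summarize long arrays with '...'
np.set_printoptions(**defaults)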
def print_resul(sol):
    # ==========================================================================
    # Print the results
    pm, model, filename = sol.pm, sol.model, sol.filename
    print('\n\nInversion success!')
    print('Name of file:', filename)
    print('Model used:', model)
    try:
        pm.pop("cond_std")
        pm.pop("tau_i_std")
        pm.pop("m_i_std")
    except KeyError:
        pass
    e_keys = sorted([s for s in list(pm.keys()) if "_std" in s])
    v_keys = [e.replace("_std", "") for e in e_keys]
    labels = ["{:<8}".format(x + ":") for x in v_keys]
    np.set_printoptions(formatter={'float': lambda x: format(x, '6.3E')})
    for l, v, e in zip(labels, v_keys, e_keys):
        if "noise" not in l:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]),
                  np.char.mod('(%.2f%%)', abs(100 * pm[e] / pm[v])))
        else:
            print(l, np.atleast_1d(pm[v]), '+/-', np.atleast_1d(pm[e]))
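For reference, the '6.3E' spec passed to format() in the formatter above renders each float in scientific notation with three decimals:

format(0.0123, '6.3E')       # '1.230E-02'
format(1234.5678, '6.3E')    # '1.235E+03'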
def test_timeseries_bootstrap():
    """
    Tests the timeseries_bootstrap method of the BASC workflow.
    """
    np.random.seed(27)
    # np.set_printoptions(threshold=np.nan)

    # Create a 10x5 matrix that counts up column-wise
    x = np.arange(50).reshape((5, 10)).T
    actual = timeseries_bootstrap(x, 3)
    desired = np.array([[ 4, 14, 24, 34, 44],
                        [ 5, 15, 25, 35, 45],
                        [ 6, 16, 26, 36, 46],
                        [ 8, 18, 28, 38, 48],
                        [ 9, 19, 29, 39, 49],
                        [ 0, 10, 20, 30, 40],
                        [ 7, 17, 27, 37, 47],
                        [ 8, 18, 28, 38, 48],
                        [ 9, 19, 29, 39, 49],
                        [ 8, 18, 28, 38, 48]])
    np.testing.assert_equal(actual, desired)
def to_tfrecord(data, file_dir):
    for key, values in data.items():
        writer = tf.python_io.TFRecordWriter(os.path.join(file_dir, key + '.tfrecord'))
        image = values['image']
        ground_truth = values['ground_truth']
        shape = np.array(image.shape).astype(np.int32)
        # use a high precision when arrays are converted to strings
        np.set_printoptions(precision=32)
        example = tf.train.Example(features=tf.train.Features(feature={
            'example_name': _bytes_feature(key),
            'shape': _bytes_feature(shape.tostring()),
            'img_raw': _bytes_feature(image.tostring()),
            'gt_raw': _bytes_feature(ground_truth.tostring())}))
        writer.write(example.SerializeToString())
        writer.close()
def test_formatter_reset(self):
    x = np.arange(3)
    np.set_printoptions(formatter={'all': lambda x: str(x - 1)})
    assert_equal(repr(x), "array([-1, 0, 1])")
    np.set_printoptions(formatter={'int': None})
    assert_equal(repr(x), "array([0, 1, 2])")

    np.set_printoptions(formatter={'all': lambda x: str(x - 1)})
    assert_equal(repr(x), "array([-1, 0, 1])")
    np.set_printoptions(formatter={'all': None})
    assert_equal(repr(x), "array([0, 1, 2])")

    np.set_printoptions(formatter={'int': lambda x: str(x - 1)})
    assert_equal(repr(x), "array([-1, 0, 1])")
    np.set_printoptions(formatter={'int_kind': None})
    assert_equal(repr(x), "array([0, 1, 2])")

    x = np.arange(3.)
    np.set_printoptions(formatter={'float': lambda x: str(x - 1)})
    assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
    np.set_printoptions(formatter={'float_kind': None})
    assert_equal(repr(x), "array([ 0.,  1.,  2.])")
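The test above exercises the formatter option: each call to set_printoptions(formatter=...) replaces the whole formatter dict, and keys can target a single type ('int', 'float'), a kind of types ('int_kind', 'float_kind'), or every element ('all'). A minimal sketch:

import numpy as np

x = np.arange(3)
np.set_printoptions(formatter={'all': lambda v: '<%d>' % v})
print(repr(x))                              # array([<0>, <1>, <2>])

# A later call replaces the dict wholesale, so this restores the default:
np.set_printoptions(formatter={'all': None})
print(repr(x))                              # array([0, 1, 2])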
def main():
    # don't use scientific notation when printing
    numpy.set_printoptions(suppress=True)

    filename = sys.argv[1]
    with open(filename, 'rb') as f:  # pickle needs a binary-mode file
        c = pickle.load(f)
    c.reset()

    print('Matrix')
    print(c.matrix)
    print()
    print('Constant')
    print(c.constant)

    # env = Environment(show=True, z1=5, z2=3)
    # env = Environment(show=True, z1=5, z2=8)
    env = Environment(z0=100 + 3, z1=100 + 10, total_t=4, show=True)
    fitness = env.run(c)
    print('fitness:', fitness)
def plot_normalized_confusion_matrix_at_depth(self):
    """Return a normalized confusion matrix.

    :returns: normalized confusion matrix
    :rtype: matplotlib figure
    """
    cm = metrics.confusion_matrix(self.predictions['label'], self.y_pred)
    np.set_printoptions(precision=2)

    fig = plt.figure()
    cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Normalized Confusion Matrix")
    plt.colorbar()
    tick_marks = np.arange(len(self.labels))
    plt.xticks(tick_marks, self.labels, rotation=45)
    plt.yticks(tick_marks, self.labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    return fig
def execute_calculation(operands, first_operand, const_executor):
    iterations = len(operands) != 1
    for i in operands:
        _operands, expected_result, description = unpack_list(*i)
        if description:
            print("Description: ", description)
        print("Operands: ", _operands)
        print("Expected result: ", expected_result)
        flex_result = const_executor(*_operands)
        try:
            print("flex_result: {0:.30}".format(float(flex_result)))
        except TypeError:
            # fall back for arrays, which cannot be cast to a single float
            np.set_printoptions(precision=30)
            print("flex_result: {}".format(flex_result))
        print("difference: ", flex_result - expected_result)
        if iterations:
            assert_allclose(flex_result, expected_result)
        elif not isinstance(first_operand, np.ndarray):
            assert flex_result == expected_result
        else:
            assert np.array_equal(flex_result, expected_result)
def compute_mean(self, file_list):
    logger = logging.getLogger("acoustic_norm")

    mean_vector = numpy.zeros((1, self.feature_dimension))
    all_frame_number = 0

    io_funcs = BinaryIOCollection()
    for file_name in file_list:
        features = io_funcs.load_binary_file(file_name, self.feature_dimension)
        current_frame_number = features.size // self.feature_dimension
        mean_vector += numpy.reshape(numpy.sum(features, axis=0), (1, self.feature_dimension))
        all_frame_number += current_frame_number

    mean_vector /= float(all_frame_number)

    # po = numpy.get_printoptions()
    # numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
    logger.info('computed mean vector of length %d :' % mean_vector.shape[1])
    logger.info(' mean: %s' % mean_vector)
    # restore the print options
    # numpy.set_printoptions(po)

    return mean_vector
def compute_mean(self, file_list, start_index, end_index):
    local_feature_dimension = end_index - start_index

    mean_vector = numpy.zeros((1, local_feature_dimension))
    all_frame_number = 0

    io_funcs = BinaryIOCollection()
    for file_name in file_list:
        features, current_frame_number = io_funcs.load_binary_file_frame(file_name, self.feature_dimension)
        mean_vector += numpy.reshape(numpy.sum(features[:, start_index:end_index], axis=0),
                                     (1, local_feature_dimension))
        all_frame_number += current_frame_number

    mean_vector /= float(all_frame_number)

    # po = numpy.get_printoptions()
    # numpy.set_printoptions(precision=2, threshold=20, linewidth=1000, edgeitems=4)
    self.logger.info('computed mean vector of length %d :' % mean_vector.shape[1])
    self.logger.info(' mean: %s' % mean_vector)
    # restore the print options
    # numpy.set_printoptions(po)

    return mean_vector
def test_propagate_information_filter():
    np.set_printoptions(precision=2, linewidth=132)
    M_matrix = np.eye(7)
    sigma = np.array([0.12, 0.7, 0.0959, 0.15, 1.5, 0.2, 0.5])
    x_analysis = np.array([0.17, 1.0, 0.1, 0.7, 2.0, 0.18, np.exp(-0.5 * 1.5)])
    Pd = np.diag(sigma ** 2).astype(np.float32)
    Pd[5, 2] = 0.8862 * 0.0959 * 0.2
    Pd[2, 5] = 0.8862 * 0.0959 * 0.2
    Pi = np.linalg.inv(Pd)
    Q_matrix = np.eye(7) * 0.1
    x_forecast, P_forecast, P_forecast_inverse = propagate_information_filter(
        x_analysis, None, Pi, M_matrix, Q_matrix)
    assert np.allclose(
        np.array(P_forecast_inverse.todense()).squeeze().diagonal(),
        np.array([8.74, 1.69, 9.81, 8.16, 0.43, 9.21, 2.86]), atol=0.01)
    # In reality, the matrix ought to be
    # [[ 8.74  0.    0.    0.    0.    0.    0.  ]
    #  [ 0.    1.69  0.    0.    0.    0.    0.  ]
    #  [ 0.    0.    9.33  0.    0.   -1.13  0.  ]
    #  [ 0.    0.    0.    8.16  0.    0.    0.  ]
    #  [ 0.    0.    0.    0.    0.43  0.    0.  ]
    #  [ 0.    0.   -1.13  0.    0.    7.28  0.  ]
    #  [ 0.    0.    0.    0.    0.    0.    2.86]]
def main():
    args = parse_args()
    print('Called with args:')
    print(args)
    lang_db = get_language_model(args.lang_name)
    imdb = get_imdb(args.imdb_name)

    # Get words in space
    vocabulary = imdb.get_labels(args.space)
    # Get features for words
    wv = [lang_db.word_vector(w) for w in vocabulary]

    from sklearn.metrics.pairwise import cosine_similarity
    from scipy import spatial
    # spatial.distance.cosine(dataSetI, dataSetII)

    tsne = TSNE(n_components=2, random_state=0)
    np.set_printoptions(suppress=True)
    Y = tsne.fit_transform(wv)
    plt.scatter(Y[:, 0], Y[:, 1])
    for label, x, y in zip(vocabulary, Y[:, 0], Y[:, 1]):
        plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
    plt.show()
def detect_nan(i, node, fn):
    '''
    x = theano.tensor.dscalar('x')
    f = theano.function([x], [theano.tensor.log(x) * x],
                        mode=theano.compile.MonitorMode(post_func=detect_nan))
    '''
    nan_detected = False
    for output in fn.outputs:
        if np.isnan(output[0]).any():
            nan_detected = True
            # print whole arrays (np.nan is no longer a valid threshold)
            np.set_printoptions(threshold=np.inf)
            print('*** NaN detected ***')
            print('--------------------------NODE DESCRIPTION:')
            theano.printing.debugprint(node)
            print('--------------------------Variables:')
            print('Inputs : %s' % [input[0] for input in fn.inputs])
            print('Outputs: %s' % [output[0] for output in fn.outputs])
            break
    if nan_detected:
        exit()
def __init__(self, environment):
    self.env = environment
    self.algorithm = MGAIL(environment=self.env)
    self.init_graph = tf.global_variables_initializer()
    self.saver = tf.train.Saver()
    self.sess = tf.Session()
    if self.env.trained_model:
        self.saver.restore(self.sess, self.env.trained_model)
    else:
        self.sess.run(self.init_graph)
    self.run_dir = self.env.run_dir
    self.loss = 999. * np.ones(3)
    self.reward_mean = 0
    self.reward_std = 0
    self.run_avg = 0.001
    self.discriminator_policy_switch = 0
    self.policy_loop_time = 0
    self.disc_acc = 0
    self.er_count = 0
    self.itr = 0
    self.best_reward = 0
    self.mode = 'Prep'
    np.set_printoptions(precision=2)
    np.set_printoptions(linewidth=220)
def update_target_text(self):
    np.set_printoptions(precision=3, suppress=True)
    text = (
        'target number = %s\n' % str(self._target_number) +
        'actuator name = %s\n' % str(self._actuator_name) +
        '\ninitial position\n%s' % self.position_to_str(self._initial_position) +
        '\ntarget position\n%s' % self.position_to_str(self._target_position) +
        '\ninitial image (left) =\n%s\n' % str(self._initial_image) +
        '\ntarget image (right) =\n%s\n' % str(self._target_image)
    )
    self._target_output.set_text(text)
    if config['image_on']:
        self._initial_image_visualizer.update(self._initial_image)
        self._target_image_visualizer.update(self._target_image)
        self._image_visualizer.set_initial_image(self._initial_image, alpha=0.3)
        self._image_visualizer.set_target_image(self._target_image, alpha=0.3)
def encode(content, word_delimiter="|", tag_delimiter="/", num_step=60):
    # Create corpus instance
    corpus = Corpus(word_delimiter=word_delimiter, tag_delimiter=tag_delimiter)

    # Add text to corpus
    corpus.add_text(content)

    # Create index for character and tag
    char_index = index_builder(constant.CHARACTER_LIST,
                               constant.CHAR_START_INDEX)
    tag_index = index_builder(constant.TAG_LIST, constant.TAG_START_INDEX)

    # Generate input
    inb = InputBuilder(corpus, char_index, tag_index, num_step, y_one_hot=False)

    # Display encoded content
    np.set_printoptions(threshold=np.inf)
    print("[Input]")
    print(inb.x)
    print("[Label]")
    print(inb.y)
def PrintSolution(self, Filename=None):
    numpy.set_printoptions(linewidth=numpy.inf)
    out = sys.stdout if Filename is None else open(Filename, 'w+')
    out.write('Status: %s\n' % self.status)
    out.write('Total Objective: %f\n' % self.value)
    for ni in self.Nodes():
        nid = ni.GetId()
        s = 'Node %d:\n' % nid
        out.write(s)
        for (varID, varName, var, offset) in self.node_variables[nid]:
            val = numpy.transpose(self.GetNodeValue(nid, varName))
            s = '  %s %s\n' % (varName, str(val))
            out.write(s)

# Helper method to verify existence of an NId.
def pformat(obj, indent=0, depth=3):
    if 'numpy' in sys.modules:
        import numpy as np
        print_options = np.get_printoptions()
        np.set_printoptions(precision=6, threshold=64, edgeitems=1)
    else:
        print_options = None
    out = pprint.pformat(obj, depth=depth, indent=indent)
    if print_options:
        np.set_printoptions(**print_options)
    return out
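pformat saves the global print options, tightens them for compact logging, and restores them on the way out. On numpy >= 1.15 the same save/restore pattern is available as the built-in np.printoptions context manager; a minimal sketch:

import numpy as np

a = np.linspace(0, 1, 500)
with np.printoptions(precision=6, threshold=64, edgeitems=1):
    print(a)    # summarized output, e.g. [0. ... 1.]
print(a)        # previous options are restored on exit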
###############################################################################
# class `Logger`
###############################################################################
def test_fcc_conv(self):
    # np.set_printoptions(threshold=2304, linewidth=145)  # 48 * 48
    filename = "../poscars/POSCAR_fcc"
    atoms = read_vasp(filename)
    symmetry = UnfolderSymmetry(atoms)

    rotations = symmetry.get_pointgroup_operations()
    check_irreps(rotations)

    rotations = symmetry.get_group_of_wave_vector([0.00, 0.25, 0.25])[0]
    check_irreps(rotations)

    rotations = symmetry.get_group_of_wave_vector([0.25, 0.00, 0.25])[0]
    check_irreps(rotations)

    rotations = symmetry.get_group_of_wave_vector([0.25, 0.25, 0.00])[0]
    check_irreps(rotations)
def main():
    np.set_printoptions(suppress=True, precision=5, linewidth=1000)

    phases = {
        '0_sampletrajs': phase0_sampletrajs,
        '1_train': phase1_train,
        '2_eval': phase2_eval,
    }
    parser = argparse.ArgumentParser()
    parser.add_argument('spec', type=str)
    parser.add_argument('phase', choices=sorted(phases.keys()))
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = yaml.safe_load(f)  # yaml.load() without a Loader is rejected by recent PyYAML
    phases[args.phase](spec, args.spec)
def export_collada(mesh):
    '''
    Export a mesh as a COLLADA file.
    '''
    from ..templates import get_template
    from string import Template

    template_string = get_template('collada.dae.template')
    template = Template(template_string)

    # we bother setting this because np.array2string uses these printoptions
    np.set_printoptions(threshold=np.inf, precision=5, linewidth=np.inf)

    replacement = dict()
    replacement['VERTEX'] = np.array2string(mesh.vertices.reshape(-1))[1:-1]
    replacement['FACES'] = np.array2string(mesh.faces.reshape(-1))[1:-1]
    replacement['NORMALS'] = np.array2string(mesh.vertex_normals.reshape(-1))[1:-1]
    replacement['VCOUNT'] = str(len(mesh.vertices))
    replacement['VCOUNTX3'] = str(len(mesh.vertices) * 3)
    replacement['FCOUNT'] = str(len(mesh.faces))

    export = template.substitute(replacement)
    return export
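The export works because np.array2string falls back to the global print options for any argument not passed explicitly: threshold=np.inf keeps anything from being elided with '...', linewidth=np.inf keeps everything on one line, and the [1:-1] slice strips the enclosing brackets. A minimal sketch of the same idea:

import numpy as np

np.set_printoptions(threshold=np.inf, precision=5, linewidth=np.inf)
v = np.arange(6, dtype=float)
flat = np.array2string(v)[1:-1]   # "0. 1. 2. 3. 4. 5." without the brackets
print(flat)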
def ACO(self, df):
    """
    Helper indicator
    :param df:
    :return:
    """
    df_mid_points = (df['High'] + df['Low']) / 2
    mid_points = Data.toFloatArray(df_mid_points)
    longav = tl.SMA(np.array(mid_points), timeperiod=40)
    shortav = tl.SMA(np.array(mid_points), timeperiod=15)
    A0 = longav - shortav
    Mavg = tl.SMA(A0, timeperiod=15)
    AcResult = tl.SMA(Mavg - A0, timeperiod=15)
    signals = np.diff(AcResult)
    return signals

# if __name__ == "__main__":
#     np.set_printoptions(threshold=np.nan)
#     pd.set_option("display.max_rows", 280)
#     dt = Data()
#     df = dt.getCSVData()
#     # ACOscillator(df)
#     ACOscillator(df)
def main():
    # model_file = "../data/word2vec/character.model"
    model_file = "../data/word2vec_new/word.model"
    checkSimilarity(model_file, "?")

    # character_wv_file = '../data/word2vec/character_model.txt'
    # word_wv_file = '../data/word2vec/word_model.txt'
    #
    # embeddings_file = word_wv_file
    # wv, vocabulary = load_embeddings(embeddings_file)
    #
    # tsne = TSNE(n_components=2, random_state=0)
    # np.set_printoptions(suppress=True)
    # Y = tsne.fit_transform(wv[:1000, :])
    #
    # plt.scatter(Y[:, 0], Y[:, 1])
    # for label, x, y in zip(vocabulary, Y[:, 0], Y[:, 1]):
    #     plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points')
    # plt.show()
def grad_nan_report(grads, tparams):
    numpy.set_printoptions(precision=3)
    D = OrderedDict()
    i = 0
    NaN_keys = []
    magnitude = []
    assert len(grads) == len(tparams)
    for k, v in tparams.items():
        grad = grads[i]
        magnitude.append(numpy.abs(grad).mean())
        if numpy.isnan(grad.sum()):
            NaN_keys.append(k)
        # assert v.get_value().shape == grad.shape
        D[k] = grad
        i += 1
    # norm = [numpy.sqrt(numpy.sum(grad**2)) for grad in grads]
    # print('\tgrad mean(abs(x))', numpy.array(magnitude))
    return D, NaN_keys
def plot_swap_subject_cm(swappy, title, name):
    from sklearn.metrics import confusion_matrix
    import numpy as np

    subs = swappy.exportSubjectData()
    labs = getLabelReal(subs)

    # Compute confusion matrix
    cnf_matrix = confusion_matrix(labs['actual'], labs['predicted'])
    np.set_printoptions(precision=2)

    plt.figure()
    plot_confusion_matrix(cnf_matrix, classes=['Bogus', 'Real'],
                          normalize=False,
                          title=title)
    plt.savefig(name)
    plt.show()

    # import pandas as pd
    # ps = pd.Series([(labs['actual'][x], labs['predicted'][x]) for x in range(0, len(labs['actual']))])
    # counts = ps.value_counts()
    # counts
@contextlib.contextmanager  # requires: import contextlib
def np_printoptions(**kwargs):
    """Context manager to temporarily set numpy print options."""
    old = np.get_printoptions()
    np.set_printoptions(**kwargs)
    try:
        yield
    finally:
        # restore the previous options even if the body raises
        np.set_printoptions(**old)
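A usage sketch for this helper (the printed values are illustrative):

import numpy as np

a = np.random.rand(4)
with np_printoptions(precision=2, suppress=True):
    print(a)    # e.g. [0.51 0.4  0.82 0.05]
print(a)        # the default 8-digit precision applies again here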
def _nn_pose_fill(valid):
    """
    Looks up the closest True for each False and returns
    indices for fill-in-lookup.
    In:  [True, False, True, ..., False, True]
    Out: [0, 0, 2, ..., 212, 212]
    """
    valid_inds, = np.where(valid)
    invalid_inds, = np.where(~valid)

    all_inds = np.arange(len(valid))
    all_inds[invalid_inds] = -1

    for j in range(10):
        fwd_inds = valid_inds + j
        bwd_inds = valid_inds - j

        # Forward fill
        invalid_inds, = np.where(all_inds < 0)
        fwd_fill_inds = np.intersect1d(fwd_inds, invalid_inds)
        all_inds[fwd_fill_inds] = all_inds[fwd_fill_inds - j]

        # Backward fill
        invalid_inds, = np.where(all_inds < 0)
        if not len(invalid_inds):
            break
        bwd_fill_inds = np.intersect1d(bwd_inds, invalid_inds)
        all_inds[bwd_fill_inds] = all_inds[bwd_fill_inds + j]

        # Check if any missing
        invalid_inds, = np.where(all_inds < 0)
        if not len(invalid_inds):
            break

    # np.set_printoptions(threshold=np.inf)
    # print(valid.astype(np.int))
    # print(np.array_str(all_inds))
    # print(np.where(all_inds < 0))
    return all_inds
def save_results(save_dict=None, **kwargs):
    # np.nan is no longer a valid threshold; np.inf prints arrays in full
    np.set_printoptions(precision=2, threshold=np.inf)
    if save_dict is None:
        save_dict = kwargs
    for key in save_dict.keys():
        save_dict[key] = str(save_dict[key])
    np.set_printoptions(precision=2, threshold=1000)
    append_json('experiments.json', save_dict)
    jsondict2csv('experiments.json', 'experiments.csv')
def run_model(model):
    '''Train model'''
    # Call global variables
    x_train, x_test, y_train, y_test = X_TRAIN, X_TEST, Y_TRAIN, Y_TEST

    model.fit(x_train, y_train)

    # make predictions for test data
    y_pred = model.predict(x_test)

    # Accuracy
    acc = metrics.accuracy_score(y_test, y_pred)
    print('Accuracy: %.2f%%' % (acc * 100.0))

    # F1_score
    # f1_score = metrics.f1_score(y_test, y_pred)
    # print("F1_score: %.2f%%" % (f1_score * 100.0))

    # AUC of ROC
    fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
    auc = metrics.auc(fpr, tpr)
    print('AUC: %.3f' % auc)

    # Logs for each fold
    crossvalidation_acc.append(acc)
    crossvalidation_auc.append(auc)

    if ARGS.m:
        cnf_matrix = confusion_matrix(y_test, y_pred)
        print(cnf_matrix)
        np.set_printoptions(precision=2)
        if ARGS.t == '2':
            classes = np.asarray(['Spliced', 'Non-spliced'])
            plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True)
        elif ARGS.t == '3':
            classes = np.asarray(['Low', 'Medium', 'High'])
            plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True)
        plt.show()
    if ARGS.f:
        feature_selection(imp=IMP, model=model)
    print()
def __init__(self, model, filename, mcmc=default_mcmc, headers=1,
             ph_units="mrad", cc_modes=2, decomp_poly=4, c_exp=1.0,
             log_min_tau=-3, guess_noise=False, keep_traces=False,
             ccdt_priors='auto', ccdt_cfg=None):
    self.model = model
    self.filename = filename
    self.mcmc = mcmc
    self.headers = headers
    self.ph_units = ph_units
    self.cc_modes = cc_modes
    self.decomp_poly = decomp_poly
    self.c_exp = c_exp
    self.log_min_tau = log_min_tau
    self.guess_noise = guess_noise
    self.keep_traces = keep_traces
    self.ccd_priors = ccdt_priors
    self.ccdtools_config = ccdt_cfg
    if model == "CCD":
        if self.ccd_priors == 'auto':
            self.ccd_priors = self.get_ccd_priors(config=self.ccdtools_config)
            print("\nUpdated CCD priors with new data")
    self.start()

# def print_resul(self):
#     # ======================================================================
#     # Print the results
#     pm, model, filename = self.pm, self.model, self.filename
#     print('\n\nInversion success!')
#     print('Name of file:', filename)
#     print('Model used:', model)
#     e_keys = sorted([s for s in list(pm.keys()) if "_std" in s])
#     v_keys = [e.replace("_std", "") for e in e_keys]
#     labels = ["{:<8}".format(x + ":") for x in v_keys]
#     np.set_printoptions(formatter={'float': lambda x: format(x, '6.3E')})
#     for l, v, e in zip(labels, v_keys, e_keys):
#         print(l, pm[v], '+/-', pm[e], np.char.mod('(%.2f%%)', abs(100 * pm[e] / pm[v])))
def plot_tsne(doc_codes, doc_labels, classes_to_visual, save_file):
    # markers = ["D", "p", "*", "s", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"]
    plt.rc('legend', **{'fontsize': 30})
    classes_to_visual = list(set(classes_to_visual))
    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    class_ids = dict(zip(classes_to_visual, range(C)))

    if isinstance(doc_codes, dict) and isinstance(doc_labels, dict):
        codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items()
                              if doc_labels[doc] in classes_to_visual])
    else:
        codes, labels = doc_codes, doc_labels

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    plt.figure(figsize=(10, 10), facecolor='white')
    for c in classes_to_visual:
        idx = np.array(labels) == c
        # idx = get_indices(labels, c)
        plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=1,
                 marker=markers[class_ids[c]], markersize=10, label=c)
    legend = plt.legend(loc='upper right', shadow=True)
    # plt.title("tsne")
    # plt.savefig(save_file)
    plt.savefig(save_file, format='eps', dpi=2000)
    plt.show()
def plot_tsne_3d(doc_codes, doc_labels, classes_to_visual, save_file, maker_size=None, opaque=None):
    markers = ["D", "p", "*", "s", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    plt.rc('legend', **{'fontsize': 20})
    colors = ['r', 'b', 'g', 'c', 'm', 'y', 'k']
    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    while True:
        if C <= len(colors):
            break
        colors += colors
    class_ids = dict(zip(classes_to_visual, range(C)))

    if isinstance(doc_codes, dict) and isinstance(doc_labels, dict):
        codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items()
                              if doc_labels[doc] in classes_to_visual])
    else:
        codes, labels = doc_codes, doc_labels

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=3, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    fig = plt.figure(figsize=(10, 10), facecolor='white')
    ax = fig.add_subplot(111, projection='3d')

    # The legend function doesn't support the type returned by a 3D scatter,
    # so create "dummy plots" with the same style and put those in the legend.
    scatter_proxy = []
    for i in range(C):
        cls = classes_to_visual[i]
        idx = np.array(labels) == cls
        ax.scatter(X[idx, 0], X[idx, 1], X[idx, 2], c=colors[i],
                   alpha=opaque[i] if opaque else 1,
                   s=maker_size[i] if maker_size else 20,
                   marker=markers[i], label=cls)
        scatter_proxy.append(mpl.lines.Line2D([0], [0], linestyle="none",
                                              c=colors[i], marker=markers[i], label=cls))
    ax.legend(scatter_proxy, classes_to_visual, numpoints=1)
    plt.savefig(save_file)
    plt.show()
def DBN_plot_tsne(doc_codes, doc_labels, classes_to_visual, save_file):
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"]
    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    class_ids = dict(zip(classes_to_visual.keys(), range(C)))

    codes, labels = doc_codes, doc_labels

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    plt.figure(figsize=(10, 10), facecolor='white')
    for c in classes_to_visual.keys():
        idx = np.array(labels) == c
        # idx = get_indices(labels, c)
        plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=0.6,
                 marker=markers[class_ids[c]], markersize=6, label=classes_to_visual[c])
    legend = plt.legend(loc='upper center', shadow=True)
    plt.title("tsne")
    plt.savefig(save_file)
    plt.show()
def reuters_visualize_tsne(doc_codes, doc_labels, classes_to_visual, save_file):
    """
    Visualize the document codes on a 2D t-SNE plot.
    @param doc_codes: mapping from document to its code vector
    @param doc_labels: mapping from document to its label(s)
    @param classes_to_visual: classes to show, mapped to display names
    @param save_file: path of the output figure
    """
    # markers = ["p", "s", "h", "H", "+", "x", "D"]
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"]
    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    class_names = classes_to_visual.keys()
    class_ids = dict(zip(class_names, range(C)))
    class_names = set(class_names)

    codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items()
                          if class_names.intersection(set(doc_labels[doc]))])

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    plt.figure(figsize=(10, 10), facecolor='white')
    for c in classes_to_visual.keys():
        idx = get_indices(labels, c)
        plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=0.6,
                 marker=markers[class_ids[c]], markersize=6, label=classes_to_visual[c])
    legend = plt.legend(loc='upper center', shadow=True)
    plt.title("tsne")
    plt.savefig(save_file)
    plt.show()
def main():
    # Collect the user arguments and hyper parameters
    args, hyper_params = get_args_and_hyperparameters()
    np.set_printoptions(precision=8, suppress=True, edgeitems=6, threshold=2048)

    # setup the CPU or GPU backend
    be = gen_backend(**extract_valid_args(args, gen_backend))

    # load the training dataset. This will download the dataset from the web and cache it
    # locally for subsequent use.
    train_set = MultiscaleSampler('trainval', '2007',
                                  samples_per_img=hyper_params.samples_per_img,
                                  sample_height=224, path=args.data_dir,
                                  samples_per_batch=hyper_params.samples_per_batch,
                                  max_imgs=hyper_params.max_train_imgs,
                                  shuffle=hyper_params.shuffle)

    # create the model by replacing the classification layer of AlexNet with
    # new adaptation layers
    model, opt = create_model(args, hyper_params)

    # Seed the AlexNet conv layers with pre-trained weights
    if args.model_file is None and hyper_params.use_pre_trained_weights:
        load_imagenet_weights(model, args.data_dir)

    train(args, hyper_params, model, opt, train_set)

    # Load the test dataset. This will download the dataset from the web and cache it
    # locally for subsequent use.
    test_set = MultiscaleSampler('test', '2007',
                                 samples_per_img=hyper_params.samples_per_img,
                                 sample_height=224, path=args.data_dir,
                                 samples_per_batch=hyper_params.samples_per_batch,
                                 max_imgs=hyper_params.max_test_imgs,
                                 shuffle=hyper_params.shuffle)
    test(args, hyper_params, model, test_set)
    return

# parse the command line arguments
def trainer(model_params):
    """Train a sketch-rnn model."""
    np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)

    tf.logging.info('sketch-rnn')
    tf.logging.info('Hyperparams:')
    for key, val in model_params.values().items():
        tf.logging.info('%s = %s', key, str(val))
    tf.logging.info('Loading data files.')
    datasets = load_dataset(FLAGS.data_dir, model_params)

    train_set = datasets[0]
    valid_set = datasets[1]
    test_set = datasets[2]
    model_params = datasets[3]
    eval_model_params = datasets[4]

    reset_graph()
    model = sketch_rnn_model.Model(model_params)
    eval_model = sketch_rnn_model.Model(eval_model_params, reuse=True)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    if FLAGS.resume_training:
        load_checkpoint(sess, FLAGS.log_root)

    # Write config file to json file.
    tf.gfile.MakeDirs(FLAGS.log_root)
    with tf.gfile.Open(
            os.path.join(FLAGS.log_root, 'model_config.json'), 'w') as f:
        json.dump(model_params.values(), f, indent=True)

    train(sess, model, eval_model, train_set, valid_set, test_set)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    np.set_printoptions(precision=2)

    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        logger.info("Normalized confusion matrix")
    else:
        logger.info('Confusion matrix, without normalization')
    logger.info(cm)

    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
def __init__(self, n_hidden=10, max_iter=10000, tol=1e-5, anneal=True, missing_values=None,
             discourage_overlap=True, gaussianize='standard', gpu=False,
             verbose=False, seed=None):
    self.m = n_hidden          # Number of latent factors to learn
    self.max_iter = max_iter   # Number of iterations to try
    self.tol = tol             # Threshold for convergence
    self.anneal = anneal
    self.eps = 0               # If anneal is True, it's adjusted during optimization to avoid local minima
    self.missing_values = missing_values
    self.discourage_overlap = discourage_overlap  # Whether or not to discourage overlapping latent factors
    self.gaussianize = gaussianize  # Preprocess data: 'standard' scales to zero mean and unit variance
    self.gpu = gpu             # Enable GPU support for some large matrix multiplications.
    if self.gpu:
        cm.cublas_init()
    self.yscale = 1.           # Can be arbitrary, but sets the scale of Y
    np.random.seed(seed)       # Set seed for deterministic results
    self.verbose = verbose
    if verbose:
        np.set_printoptions(precision=3, suppress=True, linewidth=160)
        print('Linear CorEx with {:d} latent factors'.format(n_hidden))

    # Initialize these when we fit on data
    self.n_samples, self.nv = 0, 0  # Number of samples/variables in input data
    self.ws = np.zeros((0, 0))      # m by nv array of weights
    self.moments = {}               # Dictionary of moments
    self.theta = None               # Parameters for preprocessing each variable
    self.history = {}               # Keep track of values for each iteration
    self.last_update = 0            # Used for momentum methods