Python tensorflow.python.framework.ops module: get_default_session() code examples
The following 6 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.ops.get_default_session().
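Before the examples, a minimal sketch of the function's contract (assuming TensorFlow 1.x graph mode): get_default_session() returns the innermost session installed on the default-session stack, for example by "with sess.as_default():" or "with tf.Session() as sess:", and returns None when no session is installed.

import tensorflow as tf
from tensorflow.python.framework import ops

assert ops.get_default_session() is None  # nothing installed yet

with tf.Session() as sess:
  # The context manager pushes sess onto the default-session stack.
  assert ops.get_default_session() is sess

assert ops.get_default_session() is None  # popped again on exit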
def get_session():
  """Returns the TF session to be used by the backend.

  If a default TensorFlow session is available, we will return it.
  Else, we will return the global Keras session.
  If no global Keras session exists at this point,
  we will create a new global session.

  Note that you can manually set the global session
  via `K.set_session(sess)`.

  Returns:
      A TensorFlow session.
  """
  # From the Keras TensorFlow backend. Assumes module-level imports of os,
  # ops (tensorflow.python.framework), config_pb2 (tensorflow.core.protobuf)
  # and session_module (tensorflow.python.client), plus the backend globals
  # _SESSION, _MANUAL_VAR_INIT and the helper _initialize_variables().
  global _SESSION
  if ops.get_default_session() is not None:
    session = ops.get_default_session()
  else:
    if _SESSION is None:
      if not os.environ.get('OMP_NUM_THREADS'):
        config = config_pb2.ConfigProto(allow_soft_placement=True)
      else:
        # Respect OMP_NUM_THREADS when sizing the intra-op thread pool.
        num_thread = int(os.environ.get('OMP_NUM_THREADS'))
        config = config_pb2.ConfigProto(
            intra_op_parallelism_threads=num_thread, allow_soft_placement=True)
      _SESSION = session_module.Session(config=config)
    session = _SESSION
  if not _MANUAL_VAR_INIT:
    with session.graph.as_default():
      _initialize_variables()
  return session

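A hedged usage sketch of the precedence get_session() implements (assuming the tf.contrib.keras backend of the TF 1.x era; the exact import path varies across versions): an explicitly installed default session wins over the lazily created global Keras session.

import tensorflow as tf
from tensorflow.contrib.keras import backend as K

sess = tf.Session()
with sess.as_default():
  assert K.get_session() is sess  # the default session takes precedence

global_sess = K.get_session()  # no default installed: global Keras session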
def _run_monitor(self,
                 monitor,
                 num_epochs=3,
                 num_steps_per_epoch=10,
                 pass_max_steps=True):
  # Test helper from tf.contrib.learn's monitor tests: drives the full
  # begin/epoch/step/end monitor lifecycle, evaluating whatever tensors
  # step_begin() requests in the current default session. In the original
  # test file, xrange comes from six.moves.
  if pass_max_steps:
    max_steps = num_epochs * num_steps_per_epoch - 1
  else:
    max_steps = None
  monitor.begin(max_steps=max_steps)
  for epoch in xrange(num_epochs):
    monitor.epoch_begin(epoch)
    should_stop = False
    step = epoch * num_steps_per_epoch
    next_epoch_step = step + num_steps_per_epoch
    while (not should_stop) and (step < next_epoch_step):
      tensors = monitor.step_begin(step)
      output = ops.get_default_session().run(tensors) if tensors else {}
      output = dict(
          zip([t.name if isinstance(t, ops.Tensor) else t for t in tensors],
              output))
      should_stop = monitor.step_end(step=step, output=output)
      monitor.post_step(step=step, session=None)
      step += 1
    monitor.epoch_end(epoch)
  monitor.end()

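A toy sketch of the monitor protocol this helper drives (assuming tf.contrib.learn on TF 1.x; EchoMonitor is hypothetical): step_begin() names the tensors to evaluate in the default session, and step_end() receives their values.

import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.python.framework import ops

class EchoMonitor(learn.monitors.BaseMonitor):
  """Hypothetical monitor: request tensor 'c:0' each step and print it."""

  def step_begin(self, step):
    super(EchoMonitor, self).step_begin(step)
    return ['c:0']

  def step_end(self, step, output):
    super(EchoMonitor, self).step_end(step, output)
    print('step %d -> %s' % (step, output['c:0']))
    return False  # never request early stopping

with tf.Graph().as_default(), tf.Session().as_default():
  tf.constant(7.0, name='c')
  m = EchoMonitor()
  m.begin(max_steps=None)
  tensors = m.step_begin(0)
  values = ops.get_default_session().run(tensors)  # same call as the helper
  m.step_end(step=0, output=dict(zip(tensors, values)))
  m.end()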
def test_logging_trainable(self):
  # Companion test: self.test_session(g) installs a default session, which is
  # what both var.initializer.run() and ops.get_default_session() pick up.
  with ops.Graph().as_default() as g, self.test_session(g):
    var = variables.Variable(constant_op.constant(42.0), name='foo')
    var.initializer.run()
    cof = constant_op.constant(1.0)
    loss = math_ops.subtract(
        math_ops.multiply(var, cof), constant_op.constant(1.0))
    train_step = gradient_descent.GradientDescentOptimizer(0.5).minimize(loss)
    ops.get_default_session().run(train_step)
    self._run_monitor(learn.monitors.LoggingTrainable('foo'))
    self.assertRegexpMatches(str(self.logged_message), var.name)

def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
  # Test helper: builds a LinearOperatorComposition, the equivalent dense
  # matrix, and (when placeholders are used) the feed_dict that ties the
  # placeholders to concrete values evaluated in the default session.
  sess = ops.get_default_session()
  shape = list(shape)

  # Either 1 or 2 matrices, chosen at random.
  num_operators = rng.randint(low=1, high=3)
  matrices = [
      linear_operator_test_util.random_positive_definite_matrix(
          shape, dtype, force_well_conditioned=True)
      for _ in range(num_operators)
  ]

  if use_placeholder:
    matrices_ph = [
        array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
    ]
    # Evaluate here because (i) you cannot feed a tensor, and (ii) values
    # are random and we want the same value used for both mat and feed_dict.
    matrices = sess.run(matrices)
    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorMatrix(m_ph) for m_ph in matrices_ph])
    feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
  else:
    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorMatrix(m) for m in matrices])
    feed_dict = None

  # Convert back to Tensor. Needed if use_placeholder, since then we have
  # already evaluated each matrix to a numpy array.
  apply_order_list = list(reversed(matrices))
  mat = ops.convert_to_tensor(apply_order_list[0])
  for other_mat in apply_order_list[1:]:
    mat = math_ops.matmul(other_mat, mat)

  return operator, mat, feed_dict

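A hedged sketch of the property this helper relies on (assuming the tf.contrib.linalg API of this era, in which LinearOperatorMatrix was later renamed LinearOperatorFullMatrix): the composition [A1, A2] acts as the matrix product A1 @ A2, which is exactly what the matmul chain above builds.

import numpy as np
import tensorflow as tf
from tensorflow.contrib import linalg

a1 = np.array([[1., 2.], [3., 4.]])
a2 = np.array([[0., 1.], [1., 0.]])
op = linalg.LinearOperatorComposition(
    [linalg.LinearOperatorMatrix(a1), linalg.LinearOperatorMatrix(a2)])
with tf.Session() as sess:
  composed, product = sess.run([op.to_dense(), tf.matmul(a1, a2)])
np.testing.assert_allclose(composed, product)  # A = A1 A2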
def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
  # Variant of the helper above that builds the composition from exactly
  # two rectangular factors.
  sess = ops.get_default_session()
  shape = list(shape)

  # Test only the case of 2 matrices. The Square test uses either 1 or 2,
  # so we have tested the case of 1 matrix sufficiently.
  num_operators = 2

  # Create 2 matrices/operators, A1, A2, which becomes A = A1 A2.
  # Use inner dimension of 2.
  k = 2
  batch_shape = shape[:-2]
  shape_1 = batch_shape + [shape[-2], k]
  shape_2 = batch_shape + [k, shape[-1]]
  matrices = [
      linear_operator_test_util.random_normal(shape_1, dtype=dtype),
      linear_operator_test_util.random_normal(shape_2, dtype=dtype)
  ]

  if use_placeholder:
    matrices_ph = [
        array_ops.placeholder(dtype=dtype) for _ in range(num_operators)
    ]
    # Evaluate here because (i) you cannot feed a tensor, and (ii) values
    # are random and we want the same value used for both mat and feed_dict.
    matrices = sess.run(matrices)
    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorMatrix(m_ph) for m_ph in matrices_ph])
    feed_dict = {m_ph: m for (m_ph, m) in zip(matrices_ph, matrices)}
  else:
    operator = linalg.LinearOperatorComposition(
        [linalg.LinearOperatorMatrix(m) for m in matrices])
    feed_dict = None

  # Convert back to Tensor. Needed if use_placeholder, since then we have
  # already evaluated each matrix to a numpy array.
  apply_order_list = list(reversed(matrices))
  mat = ops.convert_to_tensor(apply_order_list[0])
  for other_mat in apply_order_list[1:]:
    mat = math_ops.matmul(other_mat, mat)

  return operator, mat, feed_dict

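A quick sanity check of the shape bookkeeping above (plain Python, with hypothetical concrete numbers): with shape = [3, 4, 4] and inner dimension k = 2, the two factors get shapes [3, 4, 2] and [3, 2, 4], whose batch matmul product is again [3, 4, 4].

shape = [3, 4, 4]          # batch of 3 matrices, each 4 x 4
k = 2                      # inner dimension of the factorization
batch_shape = shape[:-2]   # [3]
shape_1 = batch_shape + [shape[-2], k]   # [3, 4, 2]
shape_2 = batch_shape + [k, shape[-1]]   # [3, 2, 4]
assert shape_1 == [3, 4, 2] and shape_2 == [3, 2, 4]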
def assert_bijective_and_finite(bijector, x, y, atol=0, rtol=1e-5, sess=None):
  """Assert that forward/inverse (along with jacobians) are inverses and finite.

  It is recommended to use x and y values that are very close to the edge
  of the Bijector's domain.

  Args:
    bijector: A Bijector instance.
    x: np.array of values in the domain of bijector.forward.
    y: np.array of values in the domain of bijector.inverse.
    atol: Absolute tolerance.
    rtol: Relative tolerance.
    sess: TensorFlow session. Defaults to the default session.

  Raises:
    AssertionError: If tests fail.
  """
  sess = sess or ops.get_default_session()

  # These are the incoming points, but people often create a crazy range of
  # values for which these end up being bad, especially in 16bit.
  assert_finite(x)
  assert_finite(y)

  f_x = bijector.forward(x)
  g_y = bijector.inverse(y)

  (x_from_x,
   y_from_y,
   ildj_f_x,
   fldj_x,
   ildj_y,
   fldj_g_y,
   f_x_v,
   g_y_v) = sess.run([
       bijector.inverse(f_x),
       bijector.forward(g_y),
       bijector.inverse_log_det_jacobian(f_x),
       bijector.forward_log_det_jacobian(x),
       bijector.inverse_log_det_jacobian(y),
       bijector.forward_log_det_jacobian(g_y),
       f_x,
       g_y,
   ])

  assert_finite(x_from_x)
  assert_finite(y_from_y)
  assert_finite(ildj_f_x)
  assert_finite(fldj_x)
  assert_finite(ildj_y)
  assert_finite(fldj_g_y)
  assert_finite(f_x_v)
  assert_finite(g_y_v)

  np.testing.assert_allclose(x_from_x, x, atol=atol, rtol=rtol)
  np.testing.assert_allclose(y_from_y, y, atol=atol, rtol=rtol)
  np.testing.assert_allclose(-ildj_f_x, fldj_x, atol=atol, rtol=rtol)
  np.testing.assert_allclose(-ildj_y, fldj_g_y, atol=atol, rtol=rtol)
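A hedged usage sketch (assuming the Exp bijector from tf.contrib.distributions, whose module path varies across TF 1.x versions, and that assert_bijective_and_finite plus its assert_finite dependency are in scope from the test-util module above): exp and log are exact inverses and their log-det-Jacobians cancel, so all assertions should pass.

import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions import bijectors

with tf.Session() as sess:
  bijector = bijectors.Exp()
  x = np.linspace(-3., 3., num=10).astype(np.float32)   # domain of forward
  y = np.linspace(0.1, 10., num=10).astype(np.float32)  # domain of inverse
  assert_bijective_and_finite(bijector, x, y, rtol=1e-4, sess=sess)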