Python tensorflow.python.framework.ops module: Tensor() example source code
The following 50 code examples, extracted from open-source Python projects, illustrate how tensorflow.python.framework.ops.Tensor() is used.
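Most of the examples below follow the same pattern: check whether a value is already an `ops.Tensor` and, if not, normalise it with `ops.convert_to_tensor`. A minimal, self-contained sketch of that pattern (TensorFlow 1.x assumed; the function name `as_float_tensor` is illustrative, not from the examples):

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

def as_float_tensor(x):
  # Return x unchanged if it is already a Tensor, otherwise convert it.
  if not isinstance(x, ops.Tensor):
    x = ops.convert_to_tensor(x, dtype=tf.float32)
  return x

with tf.Session() as sess:
  t = as_float_tensor(np.ones((2, 3)))  # numpy array -> float32 Tensor
  print(isinstance(t, ops.Tensor))      # True
  print(sess.run(t).shape)              # (2, 3)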
def dense(inputs, units, bias_shape, w_i, b_i=None, activation=tf.nn.relu):
  # Alternative using tf.layers / tf.contrib.layers.flatten (kept below for reference):
  # dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
  if not isinstance(inputs, ops.Tensor):
    inputs = ops.convert_to_tensor(inputs, dtype='float')
  # dim_list = inputs.get_shape().as_list()
  # flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
  # reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
  if len(inputs.shape) > 2:
    inputs = tf.contrib.layers.flatten(inputs)
  flatten_shape = inputs.shape[1]
  weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
  dense = tf.matmul(inputs, weights)
  if bias_shape is not None:
    assert bias_shape[0] == units
    biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
    return activation(dense + biases) if activation is not None else dense + biases
  return activation(dense) if activation is not None else dense
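A hypothetical usage sketch for the `dense` helper above (TensorFlow 1.x with `tf.contrib` assumed); each call is wrapped in its own `tf.variable_scope` because the helper creates variables with `tf.get_variable`:

x = tf.placeholder(tf.float32, [None, 8, 8, 4])   # e.g. a stack of feature maps
with tf.variable_scope('fc1'):
  h = dense(x, units=64, bias_shape=[64],
            w_i=tf.truncated_normal_initializer(stddev=0.1),
            b_i=tf.constant_initializer(0.0))      # input is flattened to [None, 256]
with tf.variable_scope('q'):
  q = dense(h, units=4, bias_shape=[4],
            w_i=tf.truncated_normal_initializer(stddev=0.1),
            b_i=tf.constant_initializer(0.0), activation=None)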
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.
  Retains as much of the static shape information as possible.
  Args:
    x: A tensor of rank 2 or higher.
  Returns:
    x transposed along the first two dimensions.
  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
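A small sketch of what `_transpose_batch_time` does to a `[batch, time, depth]` tensor (TensorFlow 1.x assumed, with the framework imports used by the helper above):

x = tf.placeholder(tf.float32, [32, 10, 128])   # [batch, time, depth]
x_t = _transpose_batch_time(x)
print(x_t.get_shape().as_list())                # [10, 32, 128]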
def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None,
                                             as_ref=False):
  _ = as_ref
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  s_list = s.as_list()
  int64_value = 0
  for dim in s_list:
    if dim >= 2**31:
      int64_value = dim
      break
  if dtype is not None:
    if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
    if dtype == dtypes.int32 and int64_value:
      raise ValueError("Cannot convert a TensorShape to dtype int32; "
                       "a dimension is too large (%s)" % int64_value)
  else:
    dtype = dtypes.int64 if int64_value else dtypes.int32
  if name is None:
    name = "shape_as_tensor"
  return constant(s_list, dtype=dtype, name=name)
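For context, a sketch of what this conversion function enables once it is registered as the converter for `TensorShape` (as it is in TensorFlow 1.x): a fully defined shape becomes an integer constant tensor, a partially defined one raises:

shape_t = tf.convert_to_tensor(tf.TensorShape([2, 3, 5]))   # int32 constant [2, 3, 5]
# tf.convert_to_tensor(tf.TensorShape([None, 3]))           # ValueError: partially known shape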
def _ImageDimensions(image):
  """Returns the dimensions of an image tensor.
  Args:
    image: A 3-D Tensor of shape `[height, width, channels]`.
  Returns:
    A list of `[height, width, channels]` corresponding to the dimensions of the
    input image. Dimensions that are statically known are python integers,
    otherwise they are integer scalar tensors.
  """
  if image.get_shape().is_fully_defined():
    return image.get_shape().as_list()
  else:
    static_shape = image.get_shape().with_rank(3).as_list()
    dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
    return [s if s is not None else d
            for s, d in zip(static_shape, dynamic_shape)]
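A usage sketch (TensorFlow 1.x assumed): with a partially known static shape, the helper mixes Python ints and scalar tensors:

image = tf.placeholder(tf.float32, [None, None, 3])
height, width, channels = _ImageDimensions(image)
# height and width are scalar int32 tensors; channels is the Python int 3.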
def assert_scalar_int(tensor):
  """Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
  Args:
    tensor: Tensor to test.
  Returns:
    `tensor`, for chaining.
  Raises:
    ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
  """
  data_type = tensor.dtype
  if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
    raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
  shape = tensor.get_shape()
  if shape.ndims != 0:
    raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
  return tensor
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.
  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.
  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.
  Returns:
    Total sum tensor.
  Raises:
    ValueError: if `tensors` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
  if len(tensors) == 1:
    return tensors[0]
  with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
    return math_ops.add_n(tensors, name=scope)
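The same computation with only public TensorFlow 1.x ops, as a sketch: reduce each tensor to a scalar, then add the scalars:

a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([5., 6.])
total = tf.add_n([tf.reduce_sum(t) for t in [a, b]])   # evaluates to 21.0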
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.
  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    A boolean scalar `Tensor` that is True if `actual_tensor` has the expected
    shape.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.
  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor)):
    raise TypeError('Existing "global_step" must be a Variable or Tensor.')
  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError(
        'Existing "global_step" does not have integer type: %s' %
        global_step_tensor.dtype)
  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError(
        'Existing "global_step" is not scalar: %s' %
        global_step_tensor.get_shape())
def filter_ts_from_regex(ops, regex):
  r"""Get all the tensors linked to ops that match the given regex.
  Args:
    ops: an object convertible to a list of tf.Operation.
    regex: a regular expression matching the tensors' name.
      For example, "^foo(/.*)?:\d+$" will match all the tensors in the "foo"
      scope.
  Returns:
    A list of tf.Tensor.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  ops = util.make_list_of_op(ops)
  regex_obj = make_regex(regex)
  return filter_ts(ops, positive_filter=lambda op: regex_obj.search(op.name))
def select_ops_and_ts(*args, **kwargs):
  """Helper to select operations and tensors.
  Args:
    *args: list of 1) regular expressions (compiled or not) or 2) (array of)
      tf.Operation 3) (array of) tf.Tensor. Regular expressions matching tensors
      must start with the comment "(?#ts)", for instance: "(?#ts)^foo/.*".
    **kwargs: 'graph': tf.Graph in which to perform the regex query. This is
      required when using regex.
      'positive_filter': an elem is selected only if positive_filter(elem) is
      True. This is optional.
  Returns:
    A tuple `(ops, ts)` where:
      `ops` is a list of tf.Operation
      `ts` is a list of tf.Tensor
  Raises:
    TypeError: if the optional keyword argument graph is not a tf.Graph
      or if an argument in args is not an (array of) tf.Tensor
      or an (array of) tf.Operation or a string or a regular expression.
    ValueError: if one of the keyword arguments is unexpected or if a regular
      expression is used without passing a graph as a keyword argument.
  """
  ops = select_ops(*args, restrict_ops_regex=False, **kwargs)
  ts = select_ts(*args, restrict_ts_regex=True, **kwargs)
  return ops, ts
def get_tensors(graph):
  """get all the tensors which are input or output of an op in the graph.
  Args:
    graph: a tf.Graph.
  Returns:
    A list of tf.Tensor.
  Raises:
    TypeError: if graph is not a tf.Graph.
  """
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError("Expected a graph, got: {}".format(type(graph)))
  ts = []
  for op in graph.get_operations():
    ts += op.outputs
  return ts
def get_consuming_ops(ts):
  """Return all the consuming ops of the tensors in ts.
  Args:
    ts: a list of tf.Tensor
  Returns:
    A list of all the consuming tf.Operation of the tensors in ts.
  Raises:
    TypeError: if ts cannot be converted to a list of tf.Tensor.
  """
  ts = make_list_of_t(ts, allow_graph=False)
  ops = []
  for t in ts:
    for op in t.consumers():
      if op not in ops:
        ops.append(op)
  return ops
def make_placeholder_from_tensor(t, scope=None):
  """Create a tf.placeholder for the Graph Editor.
  Note that the correct graph scope must be set by the calling function.
  Args:
    t: a tf.Tensor whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of t is preserved. "" means the root scope.
  Returns:
    A newly created tf.placeholder.
  Raises:
    TypeError: if t is not None or a tf.Tensor.
  """
  return tf_array_ops.placeholder(dtype=t.dtype, shape=t.get_shape(),
                                  name=placeholder_name(t, scope=scope))
def _store_index_maps(self, sequences, context, states):
  """Prepares the internal dictionaries _name_to_index and _index_to_name.
  These dictionaries are used to keep track of indices into the barrier.
  Args:
    sequences: `OrderedDict` of string, `Tensor` pairs.
    context: `OrderedDict` of string, `Tensor` pairs.
    states: `OrderedDict` of string, `Tensor` pairs.
  """
  assert isinstance(sequences, dict)
  assert isinstance(context, dict)
  assert isinstance(states, dict)
  self._name_to_index = dict((name, ix) for (ix, name) in enumerate(
      ["__length", "__total_length", "__next_key",
       "__sequence", "__sequence_count"]
      + ["__sequence__%s" % k for k in sequences.keys()]
      + ["__context__%s" % k for k in context.keys()]
      + ["__state__%s" % k for k in states.keys()]))
  self._index_to_name = [
      name for (name, _) in sorted(
          self._name_to_index.items(), key=lambda n_ix: n_ix[1])]
def classification_signature(input_tensor,
                             classes_tensor=None,
                             scores_tensor=None):
  """Creates a classification signature.
  Args:
    input_tensor: Tensor specifying the input to a graph.
    classes_tensor: Tensor specifying the output classes of a graph.
    scores_tensor: Tensor specifying the scores of the output classes.
  Returns:
    A Signature message.
  """
  signature = manifest_pb2.Signature()
  signature.classification_signature.input.tensor_name = input_tensor.name
  if classes_tensor is not None:
    signature.classification_signature.classes.tensor_name = classes_tensor.name
  if scores_tensor is not None:
    signature.classification_signature.scores.tensor_name = scores_tensor.name
  return signature
def _padding_mask(sequence_lengths, padded_length):
  """Creates a mask used for calculating losses with padded input.
  Args:
    sequence_lengths: a `Tensor` of shape `[batch_size]` containing the unpadded
      length of each sequence.
    padded_length: a scalar `Tensor` indicating the length of the sequences
      after padding.
  Returns:
    A boolean `Tensor` M of shape `[batch_size, padded_length]` where
    `M[i, j] == True` when `lengths[i] > j`.
  """
  range_tensor = math_ops.range(padded_length)
  return math_ops.less(array_ops.expand_dims(range_tensor, 0),
                       array_ops.expand_dims(sequence_lengths, 1))
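The mask built here matches what `tf.sequence_mask` produces in TensorFlow 1.x; a short sketch for comparison:

lengths = tf.constant([2, 4, 1])
mask = tf.sequence_mask(lengths, maxlen=4)   # [3, 4] boolean, mask[i, j] == (j < lengths[i])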
def _activations_to_loss(self, features, activations, targets):
  """Map `activations` and `targets` to a loss `Tensor`.
  `activations` has shape `[batch_size, padded_length,
  self._target_column.num_label_columns]`. It is the output of
  `_construct_rnn`.
  `targets` is a `Tensor` of shape `[batch_size, padded_length]`. The type
  of `targets` depends on what type of `TargetColumn` is being used.
  Args:
    features: a `dict` containing the input and (optionally) sequence length
      information and initial state. This is the same `features` passed to
      `_construct_rnn`.
    activations: a `Tensor` of activations representing the output of the RNN.
    targets: a `Tensor` of target values.
  Returns:
    loss: A scalar `Tensor` representing the aggregated loss for the batch.
  """
  raise NotImplementedError()
def _activations_to_eval_ops(self, features, activations, targets, metrics):
  """Map `activations` to eval operations.
  `activations` has shape [batch_size, time, num_labels]. `TargetColumn`s
  require shape [n, num_labels]. `activations` is flattened before being
  converted to labels. Afterwards, its shape is reconstituted.
  Args:
    features: a `dict` containing the input and (optionally) sequence length
      information and initial state.
    activations: logit values returned by `_construct_rnn`.
    targets: a `Tensor` of target values.
    metrics: a list of `Metric`s to evaluate. Possibly `None`.
  Returns:
    A dict of named eval ops.
  """
  raise NotImplementedError()
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
  """Runs session for given tensors with monitor callbacks."""
  for monitor in monitors:
    tensors += monitor.step_begin(step)
  tensors = list(set(tensors))
  outputs = session.run(tensors, feed_dict=feed_dict)
  outputs = dict(zip(
      [t.name if isinstance(t, ops.Tensor) else t for t in tensors],
      outputs))
  should_stop = False
  for monitor in monitors:
    induce_stop = monitor.step_end(step, outputs)
    should_stop = should_stop or induce_stop
  return outputs, should_stop
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
  """Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
  If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
  init all variables.
  Args:
    restore_checkpoint_path: A string containing the path to a checkpoint to
      restore.
    output_dict: A `dict` mapping string names to `Tensor` objects to run.
      Tensors must all be from the same graph.
    feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
  Returns:
    Dict of values read from `output_dict` tensors. Keys are the same as
    `output_dict`, values are the results read from the corresponding `Tensor`
    in `output_dict`.
  Raises:
    ValueError: if `output_dict` or `feed_dict` is None or empty.
  """
  return run_feeds(output_dict=output_dict,
                   feed_dicts=[feed_dict] if feed_dict is not None else [None],
                   restore_checkpoint_path=restore_checkpoint_path)[0]
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.
  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).
  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.
  Returns:
    a `Tensor` with same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
    logits_2d = array_ops.reshape(logits, [-1, num_logits])
    predictions = nn.softmax(logits_2d)
    predictions = array_ops.reshape(predictions, array_ops.shape(logits))
    predictions.set_shape(logits.get_shape())
    return predictions
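A usage sketch for the `softmax` helper above (the last dimension must be statically known):

logits = tf.placeholder(tf.float32, [None, 10, 5])   # e.g. [batch, time, num_classes]
probs = softmax(logits)                              # same shape; each length-5 row sums to 1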
def collect_named_outputs(collections, alias, outputs):
  """Add `Tensor` outputs tagged with alias to collections.
  It is useful to collect end-points or tags for summaries. Example of usage:
    logits = collect_named_outputs('end_points', 'inception_v3/logits', logits)
    assert logits.alias == 'inception_v3/logits'
  Args:
    collections: A collection or list of collections. If None skip collection.
    alias: String, alias to name the outputs, ex. 'inception_v3/conv1'
    outputs: Tensor, an output tensor to collect
  Returns:
    The outputs Tensor to allow inline call.
  """
  # Remove ending '/' if present.
  if alias[-1] == '/':
    alias = alias[:-1]
  outputs.alias = alias
  if collections:
    ops.add_to_collections(collections, outputs)
  return outputs
def get_tensor_alias(tensor):
  """Given a tensor gather its alias, its op.name or its name.
  If the tensor does not have an alias it would default to its name.
  Args:
    tensor: A `Tensor`.
  Returns:
    A string with the alias of the tensor.
  """
  if hasattr(tensor, 'alias'):
    alias = tensor.alias
  else:
    if tensor.name[-2:] == ':0':
      # Use op.name for tensor ending in :0
      alias = tensor.op.name
    else:
      alias = tensor.name
  return alias
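A hypothetical sketch combining the two helpers above (TensorFlow 1.x assumed, where an attribute such as `.alias` can be attached to a `Tensor` object):

logits = tf.zeros([1, 10], name='logits')
logits = collect_named_outputs('end_points', 'net/logits', logits)
assert get_tensor_alias(logits) == 'net/logits'
assert get_tensor_alias(tf.zeros([2], name='z')) == 'z'   # ':0' tensors fall back to op.name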
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
  with ops.name_scope(name, self._name) as name:
    update_op = self._opt.apply_gradients(
        grads_and_vars, global_step=global_step)
    clip_update_ops = []
    with ops.control_dependencies([update_op]):
      for grad, var in grads_and_vars:
        if grad is None or var not in self._vars_to_clip_dims:
          continue
        with ops.name_scope("clip_" + var.op.name):
          if isinstance(grad, ops.Tensor):
            clip_update_ops.append(self._clip_dense(var))
          else:
            clip_update_ops.append(self._clip_sparse(grad, var))
    # In case no var was clipped, still need to run the update_op.
    return control_flow_ops.group(*([update_op] + clip_update_ops), name=name)
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.
  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.
  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.
  Returns:
    Total sum tensor.
  Raises:
    ValueError: if `tensors` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  tensors = [math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
  if len(tensors) == 1:
    return tensors[0]
  with ops.name_scope(name, 'reduce_sum_n', tensors) as scope:
    return math_ops.add_n(tensors, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.
  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    A boolean scalar `Tensor` that is True if `actual_tensor` has the expected
    shape.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
  """Asserts actual_tensor's shape is expected_shape.
  Args:
    expected_shape: List of integers defining the expected shape, or tensor of
      same.
    actual_tensor: Tensor to test.
  Returns:
    New assert tensor.
  """
  with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
    actual_shape = array_ops.shape(actual_tensor, name='actual')
    is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
    return control_flow_ops.Assert(
        is_shape, [
            'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
            expected_shape,
            actual_shape
        ], name=scope)
def filter_ts_from_regex(ops, regex):
  r"""Get all the tensors linked to ops that match the given regex.
  Args:
    ops: an object convertible to a list of tf.Operation.
    regex: a regular expression matching the tensors' name.
      For example, "^foo(/.*)?:\d+$" will match all the tensors in the "foo"
      scope.
  Returns:
    A list of tf.Tensor.
  Raises:
    TypeError: if ops cannot be converted to a list of tf.Operation.
  """
  ops = util.make_list_of_op(ops)
  regex_obj = make_regex(regex)
  return filter_ts(ops, positive_filter=lambda op: regex_obj.search(op.name))
def get_tensors(graph):
  """get all the tensors which are input or output of an op in the graph.
  Args:
    graph: a `tf.Graph`.
  Returns:
    A list of `tf.Tensor`.
  Raises:
    TypeError: if graph is not a `tf.Graph`.
  """
  if not isinstance(graph, tf_ops.Graph):
    raise TypeError("Expected a graph, got: {}".format(type(graph)))
  ts = []
  for op in graph.get_operations():
    ts += op.outputs
  return ts
def get_consuming_ops(ts):
  """Return all the consuming ops of the tensors in ts.
  Args:
    ts: a list of `tf.Tensor`
  Returns:
    A list of all the consuming `tf.Operation` of the tensors in `ts`.
  Raises:
    TypeError: if ts cannot be converted to a list of `tf.Tensor`.
  """
  ts = make_list_of_t(ts, allow_graph=False)
  ops = []
  for t in ts:
    for op in t.consumers():
      if op not in ops:
        ops.append(op)
  return ops
def make_placeholder_from_tensor(t, scope=None):
  """Create a `tf.placeholder` for the Graph Editor.
  Note that the correct graph scope must be set by the calling function.
  Args:
    t: a `tf.Tensor` whose name will be used to create the placeholder
      (see function placeholder_name).
    scope: absolute scope within which to create the placeholder. None
      means that the scope of `t` is preserved. `""` means the root scope.
  Returns:
    A newly created `tf.placeholder`.
  Raises:
    TypeError: if `t` is not `None` or a `tf.Tensor`.
  """
  return tf_array_ops.placeholder(
      dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(
          t, scope=scope))
def _store_index_maps(self, sequences, context, states):
  """Prepares the internal dictionaries _name_to_index and _index_to_name.
  These dictionaries are used to keep track of indices into the barrier.
  Args:
    sequences: `OrderedDict` of string, `Tensor` pairs.
    context: `OrderedDict` of string, `Tensor` pairs.
    states: `OrderedDict` of string, `Tensor` pairs.
  """
  assert isinstance(sequences, dict)
  assert isinstance(context, dict)
  assert isinstance(states, dict)
  self._name_to_index = dict((name, ix) for (ix, name) in enumerate(
      ["__length", "__total_length", "__next_key",
       "__sequence", "__sequence_count"]
      + ["__sequence__%s" % k for k in sequences.keys()]
      + ["__context__%s" % k for k in context.keys()]
      + ["__state__%s" % k for k in states.keys()]))
  self._index_to_name = [
      name for (name, _) in sorted(
          self._name_to_index.items(), key=lambda n_ix: n_ix[1])]
def classification_signature(input_tensor,
                             classes_tensor=None,
                             scores_tensor=None):
  """Creates a classification signature.
  Args:
    input_tensor: Tensor specifying the input to a graph.
    classes_tensor: Tensor specifying the output classes of a graph.
    scores_tensor: Tensor specifying the scores of the output classes.
  Returns:
    A Signature message.
  """
  signature = manifest_pb2.Signature()
  signature.classification_signature.input.tensor_name = input_tensor.name
  if classes_tensor is not None:
    signature.classification_signature.classes.tensor_name = classes_tensor.name
  if scores_tensor is not None:
    signature.classification_signature.scores.tensor_name = scores_tensor.name
  return signature
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
                  parse_fn):
  with ops.name_scope('read'):
    example_list = []
    for _ in range(num_threads):
      if read_batch_size > 1:
        keys, examples_proto = reader().read_up_to(file_name_queue,
                                                   read_batch_size)
      else:
        keys, examples_proto = reader().read(file_name_queue)
      if parse_fn:
        parsed_examples = parse_fn(examples_proto)
        # Map keys into example map because batch_join doesn't support
        # tuple of Tensor + dict.
        if isinstance(parsed_examples, dict):
          parsed_examples[KEY_FEATURE_NAME] = keys
          example_list.append(parsed_examples)
        else:
          example_list.append((keys, parsed_examples))
      else:
        example_list.append((keys, examples_proto))
    return example_list
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
  """Runs session for given tensors with monitor callbacks."""
  for monitor in monitors:
    tensors += monitor.step_begin(step)
  tensors = list(set(tensors))
  outputs = session.run(tensors, feed_dict=feed_dict)
  outputs = dict(zip(
      [t.name if isinstance(t, ops.Tensor) else t for t in tensors],
      outputs))
  should_stop = False
  for monitor in monitors:
    induce_stop = monitor.step_end(step, outputs)
    should_stop = should_stop or induce_stop
  return outputs, should_stop
def collect_named_outputs(collections, alias, outputs):
  """Add `Tensor` outputs tagged with alias to collections.
  It is useful to collect end-points or tags for summaries. Example of usage:
    logits = collect_named_outputs('end_points', 'inception_v3/logits', logits)
    assert logits.alias == 'inception_v3/logits'
  Args:
    collections: A collection or list of collections. If None skip collection.
    alias: String, alias to name the outputs, ex. 'inception_v3/conv1'
    outputs: Tensor, an output tensor to collect
  Returns:
    The outputs Tensor to allow inline call.
  """
  # Remove ending '/' if present.
  if alias[-1] == '/':
    alias = alias[:-1]
  outputs.alias = alias
  if collections:
    ops.add_to_collections(collections, outputs)
  return outputs
def get_tensor_alias(tensor):
  """Given a tensor gather its alias, its op.name or its name.
  If the tensor does not have an alias it would default to its name.
  Args:
    tensor: A `Tensor`.
  Returns:
    A string with the alias of the tensor.
  """
  if hasattr(tensor, 'alias'):
    alias = tensor.alias
  else:
    if tensor.name[-2:] == ':0':
      # Use op.name for tensor ending in :0
      alias = tensor.op.name
    else:
      alias = tensor.name
  return alias
def _ImageDimensions(image):
  """Returns the dimensions of an image tensor.
  Args:
    image: A 3-D Tensor of shape `[height, width, channels]`.
  Returns:
    A list of `[height, width, channels]` corresponding to the dimensions of the
    input image. Dimensions that are statically known are python integers,
    otherwise they are integer scalar tensors.
  """
  if image.get_shape().is_fully_defined():
    return image.get_shape().as_list()
  else:
    static_shape = image.get_shape().with_rank(3).as_list()
    dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
    return [s if s is not None else d
            for s, d in zip(static_shape, dynamic_shape)]
def make_input(self, x, name=""):
  """Returns Tensor of the same type/device as x which can be used
  as input to native TensorFlow ops, and substituted later with an ITensor,
  using callable created with env.make_function(). The user must ensure
  that future ITensor is on the same device as x, otherwise you will see
  memcpy/CUDA sync errors.
  Args:
    x: ITensor used to initialize input tensor. It is used only to determine
      dtype and device placement.
  Returns:
    A Tensor that can be used in TensorFlow ops.
  """
  op_name = "custom_input_%s" % name
  input_holder, input_ = session_ops.get_session_tensor(x.tf_handle,
                                                         x.dtype,
                                                         name=op_name)
  self.input_dict[input_] = input_holder
  return input_
def save_op(self, filename_tensor, vars_to_save):
  """Create an Op to save 'vars_to_save'.
  This is intended to be overridden by subclasses that want to generate
  different Ops.
  Args:
    filename_tensor: String Tensor.
    vars_to_save: A list of BaseSaverBuilder.VarToSave objects.
  Returns:
    An Operation that saves the variables.
  """
  # pylint: disable=protected-access
  return io_ops._save(
      filename=filename_tensor,
      tensor_names=[vs.name for vs in vars_to_save],
      tensors=[vs.var for vs in vars_to_save],
      tensor_slices=[vs.slice_spec for vs in vars_to_save])
def restore_op(self, filename_tensor, var_to_save, preferred_shard):
  """Create an Op to read the variable 'var_to_save'.
  This is intended to be overridden by subclasses that want to generate
  different Ops.
  Args:
    filename_tensor: String Tensor.
    var_to_save: A BaseSaverBuilder.VarToSave object.
    preferred_shard: Int. Shard to open first when loading a sharded file.
  Returns:
    A Tensor resulting from reading 'var_to_save' from 'filename'.
  """
  # pylint: disable=protected-access
  return io_ops._restore_slice(
      filename_tensor,
      var_to_save.name,
      var_to_save.slice_spec,
      var_to_save.var.dtype,
      preferred_shard=preferred_shard)
def _AddShardedSaveOps(self, filename_tensor, per_device):
  """Add ops to save the params per shard.
  Args:
    filename_tensor: String Tensor.
    per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as
      returned by _GroupByDevices().
  Returns:
    An op to save the variables.
  """
  num_shards = len(per_device)
  sharded_saves = []
  num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
  for shard, (device, vars_to_save) in enumerate(per_device):
    with ops.device(device):
      sharded_filename = self.sharded_filename(
          filename_tensor, shard, num_shards_tensor)
      sharded_saves.append(self._AddSaveOps(sharded_filename, vars_to_save))
  # Return the sharded name for the save path.
  with ops.control_dependencies([x.op for x in sharded_saves]):
    # pylint: disable=protected-access
    return gen_io_ops._sharded_filespec(filename_tensor, num_shards_tensor)
def _RunOp(operator, a, b):
  """Run the operator 'operator' for 'a'.
  Args:
    operator: string. The operator name.
    a: A Variable.
    b: Second argument to the operator. None if unary.
  Returns:
    The result of the operator.
  """
  # pylint: disable=protected-access
  if b is not None:
    return getattr(ops.Tensor, operator)(a._AsTensor(), b)
  else:
    return getattr(ops.Tensor, operator)(a._AsTensor())
  # pylint: enable=protected-access
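A sketch of how `_RunOp` dispatches (TensorFlow 1.x assumed; it relies on the private `Variable._AsTensor` method and on operator methods such as `ops.Tensor.__add__`):

v = tf.Variable([1.0, 2.0])
result = _RunOp('__add__', v, tf.constant([10.0, 10.0]))   # behaves like v + [10.0, 10.0]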
def _ImageDimensions(image):
  """Returns the dimensions of an image tensor.
  Args:
    image: A 3-D Tensor of shape `[height, width, channels]`.
  Returns:
    A list of `[height, width, channels]` corresponding to the dimensions of the
    input image. Dimensions that are statically known are python integers,
    otherwise they are integer scalar tensors.
  """
  if image.get_shape().is_fully_defined():
    return image.get_shape().as_list()
  else:
    static_shape = image.get_shape().with_rank(3).as_list()
    dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
    return [s if s is not None else d
            for s, d in zip(static_shape, dynamic_shape)]
def noisy_dense(inputs, units, bias_shape, c_names, w_i, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'):
  def f(e_list):
    return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5))
  # Alternative using tf.layers / tf.contrib.layers.flatten (kept below for reference):
  # dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
  if not isinstance(inputs, ops.Tensor):
    inputs = ops.convert_to_tensor(inputs, dtype='float')
  # dim_list = inputs.get_shape().as_list()
  # flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
  # reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
  if len(inputs.shape) > 2:
    inputs = tf.contrib.layers.flatten(inputs)
  flatten_shape = inputs.shape[1]
  weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
  w_noise = tf.get_variable('w_noise', [flatten_shape, units], initializer=w_i, collections=c_names)
  if noisy_distribution == 'independent':
    weights += tf.multiply(tf.random_normal(shape=w_noise.shape), w_noise)
  elif noisy_distribution == 'factorised':
    noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32))
    noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32))
    weights += tf.multiply(noise_1 * noise_2, w_noise)
  dense = tf.matmul(inputs, weights)
  if bias_shape is not None:
    assert bias_shape[0] == units
    biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
    b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i, collections=c_names)
    if noisy_distribution == 'independent':
      biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise)
    elif noisy_distribution == 'factorised':
      biases += tf.multiply(noise_2, b_noise)
    return activation(dense + biases) if activation is not None else dense + biases
  return activation(dense) if activation is not None else dense
# If no bias is used, the activation (e.g. relu) is applied directly to the matmul output.
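A sketch of the factorised noise used above, following the NoisyNet-style transform f(x) = sign(x) * sqrt(|x|); the sizes 256 and 4 are illustrative:

f = lambda e: tf.multiply(tf.sign(e), tf.pow(tf.abs(e), 0.5))
eps_in = f(tf.random_normal([256, 1]))    # one noise value per input unit
eps_out = f(tf.random_normal([1, 4]))     # one noise value per output unit
weight_noise = eps_in * eps_out           # [256, 4]: one value per weight via the outer product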
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.
  Retains as much of the static shape information as possible.
  Args:
    x: A tensor of rank 2 or higher.
  Returns:
    x transposed along the first two dimensions.
  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""
  def _t(s):
    return (s if isinstance(s, ops.Tensor) else constant_op.constant(
        tensor_shape.TensorShape(s).as_list(),
        dtype=dtypes.int32,
        name="zero_suffix_shape"))
  def _create(s, d):
    return array_ops.zeros(
        array_ops.concat(
            ([batch_size], _t(s)), axis=0), dtype=d)
  return nest.map_structure(_create, size, dtype)
def sample(self, time, outputs, state, name=None):
  """sample for GreedyEmbeddingHelper."""
  del time, state  # unused by sample_fn
  # Outputs are logits, use argmax to get the most probable id
  if not isinstance(outputs, ops.Tensor):
    raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                    type(outputs))
  sample_ids = math_ops.cast(
      math_ops.argmax(outputs, axis=-1), dtypes.int32)
  return sample_ids
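A minimal sketch of the greedy id selection performed by `sample` above, using public TensorFlow 1.x ops:

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
ids = tf.cast(tf.argmax(logits, axis=-1), tf.int32)   # -> [1, 0]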
def _transpose_batch_time(x):
  """Transpose the batch and time dimensions of a Tensor.
  Retains as much of the static shape information as possible.
  Args:
    x: A tensor of rank 2 or higher.
  Returns:
    x transposed along the first two dimensions.
  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, x_static_shape))
  x_rank = array_ops.rank(x)
  x_t = array_ops.transpose(
      x, array_ops.concat(
          ([1, 0], math_ops.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tensor_shape.TensorShape([
          x_static_shape[1].value, x_static_shape[0].value
      ]).concatenate(x_static_shape[2:]))
  return x_t
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""
  def _t(s):
    return (s if isinstance(s, ops.Tensor) else constant_op.constant(
        tensor_shape.TensorShape(s).as_list(),
        dtype=dtypes.int32,
        name="zero_suffix_shape"))
  def _create(s, d):
    return array_ops.zeros(
        array_ops.concat(
            ([batch_size], _t(s)), axis=0), dtype=d)
  return nest.map_structure(_create, size, dtype)