Python tensorflow.python.framework.ops module: convert_to_tensor() example source code
The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.ops.convert_to_tensor().
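The snippets below are excerpts and omit their imports. A minimal sketch of the TF 1.x imports they collectively assume (exact modules vary per project):

import tensorflow as tf
from tensorflow.python.framework import dtypes, ops, tensor_shape, tensor_util
from tensorflow.python.ops import array_ops, check_ops, control_flow_ops, math_ops, nn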
def dense(inputs, units, bias_shape, w_i, b_i=None, activation=tf.nn.relu):
# An alternative would be tf.layers.dense on a flattened input, e.g.:
# dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
if not isinstance(inputs, ops.Tensor):
inputs = ops.convert_to_tensor(inputs, dtype='float')
# dim_list = inputs.get_shape().as_list()
# flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
# reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
if len(inputs.shape) > 2:
inputs = tf.contrib.layers.flatten(inputs)
flatten_shape = inputs.shape[1]
weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
dense = tf.matmul(inputs, weights)
if bias_shape is not None:
assert bias_shape[0] == units
biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
return activation(dense + biases) if activation is not None else dense + biases
return activation(dense) if activation is not None else dense
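A hypothetical usage sketch for dense() above (all names and shapes here are illustrative): wrap the call in a variable scope so tf.get_variable can create fresh 'weights'/'biases'.

x = tf.placeholder(tf.float32, [None, 8, 8, 3])
with tf.variable_scope('fc1'):
    out = dense(x, units=50, bias_shape=[50],
                w_i=tf.truncated_normal_initializer(stddev=0.1),
                b_i=tf.zeros_initializer())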
def binary_cross_entropy(preds, targets, name=None):
"""Computes binary cross entropy given `preds`.
For brevity, let `x = preds`, `z = targets`. The logistic loss is
loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
"""
eps = 1e-12
with ops.op_scope([preds, targets], name, "bce_loss") as name:
preds = ops.convert_to_tensor(preds, name="preds")
targets = ops.convert_to_tensor(targets, name="targets")
return tf.reduce_mean(-(targets * tf.log(preds + eps) +
(1. - targets) * tf.log(1. - preds + eps)))
def binary_cross_entropy(preds, targets, name=None):
'''Computes binary cross entropy given `preds`.
Let `x = preds`, `z = targets`. The logistic loss is
loss(x, z) = - sum_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))
Args:
preds: A `Tensor` of type `float32` or `float64`.
targets: A `Tensor` of the same type and shape as `preds`.
'''
eps = 1e-12
with ops.op_scope([preds, targets], name, 'bce_loss') as name:
preds = ops.convert_to_tensor(preds, name='preds')
targets = ops.convert_to_tensor(targets, name='targets')
return tf.reduce_mean(-(targets * tf.log(preds + eps) + (1. - targets) * tf.log(1. - preds + eps)))
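A quick numeric sanity check (a sketch; note ops.op_scope is the pre-1.0 API, replaced in TF 1.0 by ops.name_scope with reordered arguments):

with tf.Session() as sess:
    loss = binary_cross_entropy(preds=tf.constant([0.9, 0.9]),
                                targets=tf.constant([1.0, 0.0]))
    print(sess.run(loss))  # ~1.204: mean of -log(0.9) and -log(0.1)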
# ==================================
# ---------- LAYER MAPS --------- #
# ==================================
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
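An illustrative sketch of hardmax() on a 2x3 batch of logits (values assumed):

logits = tf.constant([[1.0, 3.0, 2.0],
                      [0.5, 0.2, 0.1]])
one_hot = hardmax(logits)  # evaluates to [[0., 1., 0.], [1., 0., 0.]]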
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
A boolean scalar `Tensor` that is `True` if `actual_tensor`'s shape matches `expected_shape`.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _all_equal(
ops.convert_to_tensor(expected_shape, name='expected'),
actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def regularized_loss(self, examples):
"""Add operations to compute the loss with regularization loss included.
Args:
examples: Examples to compute loss on.
Returns:
An Operation that computes mean (regularized) loss for given set of
examples.
Raises:
ValueError: if examples are not well defined.
"""
self._assertSpecified(['example_labels', 'example_weights',
'sparse_features', 'dense_features'], examples)
self._assertList(['sparse_features', 'dense_features'], examples)
with name_scope('sdca/regularized_loss'):
weights = convert_to_tensor(examples['example_weights'])
return ((
self._l1_loss() +
# Note that here we are using the raw regularization
# (as specified by the user) and *not*
# self._symmetric_l2_regularization().
self._l2_loss(self._options['symmetric_l2_regularization'])) /
math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
self.unregularized_loss(examples))
def __init__(self, table_ref, default_value, initializer):
"""Construct a table object from a table reference.
It requires a table initializer object (subclass of `TableInitializerBase`).
It provides the table key and value types, as well as the op to initialize
the table. The caller is responsible to execute the initialization op.
Args:
table_ref: The table reference, i.e. the output of the lookup table ops.
default_value: The value to use if a key is missing in the table.
initializer: The table initializer to use.
"""
super(InitializableLookupTableBase, self).__init__(
initializer.key_dtype, initializer.value_dtype,
table_ref.op.name.split("/")[-1])
self._table_ref = table_ref
self._default_value = ops.convert_to_tensor(default_value,
dtype=self._value_dtype)
self._default_value.get_shape().merge_with(tensor_shape.scalar())
self._init = initializer.initialize(self)
def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
"""Constructs a table initializer object based on keys and values tensors.
Args:
keys: The tensor for the keys.
values: The tensor for the values.
key_dtype: The `keys` data type. Used when `keys` is a python array.
value_dtype: The `values` data type. Used when `values` is a python array.
name: A name for the operation (optional).
"""
with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
self._values = ops.convert_to_tensor(values,
dtype=value_dtype,
name="values")
self._name = scope
super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
self._values.dtype)
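A hedged usage sketch with the TF 1.x contrib lookup API (keys and values here are illustrative):

keys = tf.constant(["a", "b"])
values = tf.constant([0, 1], dtype=tf.int64)
table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
    default_value=-1)
with tf.Session() as sess:
    sess.run(table.init)
    print(sess.run(table.lookup(tf.constant(["b", "c"]))))  # [1, -1]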
def get_ndims(self, x, name="get_ndims"):
"""Get `Tensor` number of dimensions (rank).
Args:
x: `Tensor`.
name: `String`. The name to give this op.
Returns:
ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
ndims = x.get_shape().ndims
if ndims is None:
return array_ops.rank(x, name="ndims")
return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def _assert_non_negative_int32_scalar(self, x):
"""Helper which ensures that input is a non-negative, int32, scalar."""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype != dtypes.int32.base_dtype:
raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
x_value_static = tensor_util.constant_value(x)
if x.get_shape().ndims is not None and x_value_static is not None:
if x.get_shape().ndims != 0:
raise ValueError("%s.ndims=%d is not 0 (scalar)" %
(x.name, x.get_shape().ndims))
if x_value_static < 0:
raise ValueError("%s.value=%d cannot be negative" %
(x.name, x_value_static))
return x
if self.validate_args:
x = control_flow_ops.with_dependencies([
check_ops.assert_rank(x, 0),
check_ops.assert_non_negative(x)], x)
return x
def _log_prob(self, event):
# TODO(jaana): The current sigmoid_cross_entropy_with_logits has
# inconsistent behavior for logits = inf/-inf.
event = ops.convert_to_tensor(event, name="event")
event = math_ops.cast(event, self.logits.dtype)
logits = self.logits
# sigmoid_cross_entropy_with_logits doesn't broadcast shape,
# so we do this here.
# TODO(b/30637701): Check dynamic shape, and don't broadcast if the
# dynamic shapes are the same.
if (not event.get_shape().is_fully_defined() or
not logits.get_shape().is_fully_defined() or
event.get_shape() != logits.get_shape()):
logits = array_ops.ones_like(event) * logits
event = array_ops.ones_like(logits) * event
return -nn.sigmoid_cross_entropy_with_logits(logits, event)
def inv_quadratic_form_on_vectors(
self, x, name="inv_quadratic_form_on_vectors"):
"""Compute the quadratic form: `x^T A^{-1} x` where `x` is a batch vector.
`x` is a batch vector with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [M1,...,Mm] + [N1,...,Nn] + [k]
```
Args:
x: `Tensor` with compatible batch vector shape and same `dtype` as self.
name: A name scope to use for ops added by this method.
Returns:
`Tensor` with shape `[M1,...,Mm] + [N1,...,Nn]` and same `dtype`
as `self`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name="x")
return self._inv_quadratic_form_on_vectors(x)
def matmul(self, x, transpose_x=False, name="matmul"):
"""Left (batch) matmul `x` by this matrix: `Ax`.
`x` is a batch matrix with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [N1,...,Nn] + [k, r]
```
Args:
x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name to give this `Op`.
Returns:
A result equivalent to `tf.batch_matmul(self.to_dense(), x)`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name="x")
return self._dispatch_based_on_batch(
self._batch_matmul, self._matmul, x=x, transpose_x=transpose_x)
def sqrt_matmul(self, x, transpose_x=False, name="sqrt_matmul"):
"""Left (batch) matmul `x` by a sqrt of this matrix: `Sx` where `A = S S^T`.
`x` is a batch matrix with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [N1,...,Nn] + [k, r]
```
Args:
x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name scope to use for ops added by this method.
Returns:
A result equivalent to `tf.batch_matmul(self.sqrt_to_dense(), x)`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name="x")
return self._dispatch_based_on_batch(
self._batch_sqrt_matmul, self._sqrt_matmul, x=x,
transpose_x=transpose_x)
def extract_batch_shape(x, num_event_dims, name="extract_batch_shape"):
"""Extract the batch shape from `x`.
Assuming `x.shape = batch_shape + event_shape`, where `event_shape` has
`num_event_dims` dimensions, this `Op` returns the batch shape `Tensor`.
Args:
x: `Tensor` with rank at least `num_event_dims`. If rank is not high enough
this `Op` will fail.
num_event_dims: `int32` scalar `Tensor`. The number of trailing dimensions
in `x` to be considered as part of `event_shape`.
name: A name to prepend to created `Ops`.
Returns:
batch_shape: `1-D` `int32` `Tensor`
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
return array_ops.slice(
array_ops.shape(x), [0], [array_ops.rank(x) - num_event_dims])
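A small sketch: for a tensor of shape [3, 4, 5, 5] with two trailing event dimensions, the batch shape comes out as [3, 4].

x = tf.zeros([3, 4, 5, 5])
batch_shape = extract_batch_shape(x, num_event_dims=2)  # -> [3, 4]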
def __init__(self, shape, dtype, verify_pd=True, name="OperatorPDIdentity"):
"""Initialize an `OperatorPDIdentity`.
Args:
shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
two entries equal (since this is a square matrix).
dtype: Data type of the matrix that this operator represents.
verify_pd: `Boolean`, if `True`, asserts are added to the initialization
args to ensure they define this operator as a square (batch) matrix.
name: Name to prepend to `Ops`.
"""
# Grab static shape if available now.
with ops.name_scope(name):
with ops.name_scope("init", values=[shape]):
self._dtype = dtypes.as_dtype(dtype)
self._verify_pd = verify_pd
self._name = name
# Store the static shape (if possible) right now before adding the
# asserts, since the asserts prevent .constant_value from working.
shape = ops.convert_to_tensor(shape, name="shape")
self._get_shape = tensor_shape.TensorShape(
tensor_util.constant_value(shape))
self._shape_arg = self._check_shape(shape)
def _check_shape(self, shape):
"""Check that the init arg `shape` defines a valid operator."""
shape = ops.convert_to_tensor(shape, name="shape")
if not self._verify_pd:
return shape
# Further checks are equivalent to verification that this is positive
# definite. Why? Because the further checks simply check that this is a
# square matrix, and combining the fact that this is square (and thus maps
# a vector space R^k onto itself), with the behavior of .matmul(), this must
# be the identity operator.
rank = array_ops.size(shape)
assert_matrix = check_ops.assert_less_equal(2, rank)
with ops.control_dependencies([assert_matrix]):
last_dim = array_ops.gather(shape, rank - 1)
second_to_last_dim = array_ops.gather(shape, rank - 2)
assert_square = check_ops.assert_equal(last_dim, second_to_last_dim)
return control_flow_ops.with_dependencies([assert_matrix, assert_square],
shape)
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
self._check_hasattr(self._log_cdf)
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._log_cdf(value)
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
self._check_hasattr(self._cdf)
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._cdf(value)
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
                     = 1 - P[X <= x]
                     = 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
self._check_hasattr(self._survival_function)
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._survival_function(value)
def assert_integer_form(
x, data=None, summarize=None, message=None, name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Numeric `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to the
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if round(x) != x.
"""
message = message or "x has non-integer components"
x = ops.convert_to_tensor(x, name="x")
casted_x = math_ops.to_int64(x)
return check_ops.assert_equal(
x, math_ops.cast(math_ops.round(casted_x), x.dtype),
data=data, summarize=summarize, message=message, name=name)
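A sketch of gating a computation on this assertion (names assumed):

x = tf.constant([1.0, 2.0, 3.0])
with tf.control_dependencies([assert_integer_form(x)]):
    y = tf.identity(x)  # fails at run time with InvalidArgumentError if x had fractional parts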
def _check_chol(self, chol):
"""Verify that `chol` is proper."""
chol = ops.convert_to_tensor(chol, name="chol")
if not self.verify_pd:
return chol
shape = array_ops.shape(chol)
rank = array_ops.rank(chol)
is_matrix = check_ops.assert_rank_at_least(chol, 2)
is_square = check_ops.assert_equal(
array_ops.gather(shape, rank - 2), array_ops.gather(shape, rank - 1))
deps = [is_matrix, is_square]
diag = array_ops.matrix_diag_part(chol)
deps.append(check_ops.assert_positive(diag))
return control_flow_ops.with_dependencies(deps, chol)
def forward(self, x, name="forward"):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
AttributeError: if `_forward` is not implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
return self._forward(x)
def inverse(self, x, name="inverse"):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
x: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
Returns:
`Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
`self.dtype`.
AttributeError: if neither `_inverse` nor
`_inverse_and_inverse_log_det_jacobian` are implemented.
"""
with self._name_scope(name, [x]):
x = ops.convert_to_tensor(x, name="x")
self._maybe_assert_dtype(x)
try:
return self._inverse(x)
except AttributeError:
# Since _inverse was not implemented, try to see if it's implemented
# by the _inverse_and_inverse_log_det_jacobian member.
return self._inverse_and_inverse_log_det_jacobian(x)[0]
def matmul(self, x, transpose_x=False, name="matmul"):
"""Left (batch) matmul `x` by this matrix: `Ax`.
`x` is a batch matrix with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [N1,...,Nn] + [k, r]
```
Args:
x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name to give this `Op`.
Returns:
A result equivalent to `tf.matmul(self.to_dense(), x)`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name="x")
return self._dispatch_based_on_batch(
self._batch_matmul, self._matmul, x=x, transpose_x=transpose_x)
def sqrt_matmul(self, x, transpose_x=False, name="sqrt_matmul"):
"""Left (batch) matmul `x` by a sqrt of this matrix: `Sx` where `A = S S^T`.
`x` is a batch matrix with compatible shape if
```
self.shape = [N1,...,Nn] + [k, k]
x.shape = [N1,...,Nn] + [k, r]
```
Args:
x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name scope to use for ops added by this method.
Returns:
A result equivalent to `tf.matmul(self.sqrt_to_dense(), x)`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[x] + self.inputs):
x = ops.convert_to_tensor(x, name="x")
return self._dispatch_based_on_batch(
self._batch_sqrt_matmul, self._sqrt_matmul, x=x,
transpose_x=transpose_x)
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def noisy_dense(inputs, units, bias_shape, c_names, w_i, b_i=None, activation=tf.nn.relu, noisy_distribution='factorised'):
def f(e_list):
return tf.multiply(tf.sign(e_list), tf.pow(tf.abs(e_list), 0.5))
# An alternative would be tf.layers.dense on a flattened input, e.g.:
# dense1 = tf.layers.dense(tf.contrib.layers.flatten(relu5), activation=tf.nn.relu, units=50)
if not isinstance(inputs, ops.Tensor):
inputs = ops.convert_to_tensor(inputs, dtype='float')
# dim_list = inputs.get_shape().as_list()
# flatten_shape = dim_list[1] if len(dim_list) <= 2 else reduce(lambda x, y: x * y, dim_list[1:])
# reshaped = tf.reshape(inputs, [dim_list[0], flatten_shape])
if len(inputs.shape) > 2:
inputs = tf.contrib.layers.flatten(inputs)
flatten_shape = inputs.shape[1]
weights = tf.get_variable('weights', shape=[flatten_shape, units], initializer=w_i)
w_noise = tf.get_variable('w_noise', [flatten_shape, units], initializer=w_i, collections=c_names)
if noisy_distribution == 'independent':
weights += tf.multiply(tf.random_normal(shape=w_noise.shape), w_noise)
elif noisy_distribution == 'factorised':
noise_1 = f(tf.random_normal(tf.TensorShape([flatten_shape, 1]), dtype=tf.float32))  # factorised noise: per-input factor, combined with noise_2 as an outer product (NoisyNet)
noise_2 = f(tf.random_normal(tf.TensorShape([1, units]), dtype=tf.float32))
weights += tf.multiply(noise_1 * noise_2, w_noise)
dense = tf.matmul(inputs, weights)
if bias_shape is not None:
assert bias_shape[0] == units
biases = tf.get_variable('biases', shape=bias_shape, initializer=b_i)
b_noise = tf.get_variable('b_noise', [1, units], initializer=b_i, collections=c_names)
if noisy_distribution == 'independent':
biases += tf.multiply(tf.random_normal(shape=b_noise.shape), b_noise)
elif noisy_distribution == 'factorised':
biases += tf.multiply(noise_2, b_noise)
return activation(dense + biases) if activation is not None else dense + biases
return activation(dense) if activation is not None else dense
# As in dense() above: biases (and their noise) are added only when bias_shape is given; activation defaults to relu.
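A hypothetical usage sketch for noisy_dense() in a DQN-style head (all shapes, names, and collection choices are illustrative):

s = tf.placeholder(tf.float32, [None, 64])
with tf.variable_scope('noisy_q'):
    q_values = noisy_dense(s, units=4, bias_shape=[4],
                           c_names=[tf.GraphKeys.GLOBAL_VARIABLES],
                           w_i=tf.truncated_normal_initializer(stddev=0.1),
                           b_i=tf.constant_initializer(0.1))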
def read_and_augment_data(image_list, label_list, image_size, batch_size, max_nrof_epochs,
random_crop, random_flip, random_rotate, nrof_preprocess_threads, shuffle=True):
images = ops.convert_to_tensor(image_list, dtype=tf.string)
labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
# Makes an input queue
input_queue = tf.train.slice_input_producer([images, labels],
num_epochs=max_nrof_epochs, shuffle=shuffle)
images_and_labels = []
for _ in range(nrof_preprocess_threads):
image, label = read_images_from_disk(input_queue)
if random_rotate:
image = tf.py_func(random_rotate_image, [image], tf.uint8)
if random_crop:
image = tf.random_crop(image, [image_size, image_size, 3])
else:
image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)
if random_flip:
image = tf.image.random_flip_left_right(image)
#pylint: disable=no-member
image.set_shape((image_size, image_size, 3))
image = tf.image.per_image_standardization(image)
images_and_labels.append([image, label])
image_batch, label_batch = tf.train.batch_join(
images_and_labels, batch_size=batch_size,
capacity=4 * nrof_preprocess_threads * batch_size,
allow_smaller_final_batch=True)
return image_batch, label_batch
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
def embedding_attention_decoder(initial_state,
attention_states,
cell,
num_symbols,
time_steps,
batch_size,
embedding_size,
output_size=None,
output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
dtype=None,
scope=None):
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(
scope or "embedding_attention_decoder", dtype=dtype) as scope:
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = tf.nn.seq2seq._extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
return attention_decoder(
initial_state,
attention_states,
cell,
num_symbols,
time_steps,
batch_size,
output_size=output_size,
loop_function=loop_function)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(self._epsilon, name="epsilon")
self.clip_multiplier_t = ops.convert_to_tensor(self.clip_multiplier, name="clip_multiplier")
self.clip_epsilon_t = ops.convert_to_tensor(self.clip_epsilon, name="clip_epsilon")
def tf_ops(self, capacity=32):
images = ops.convert_to_tensor(self._image_fn_list, dtype=dtypes.string)
labels = ops.convert_to_tensor(self._label_list, dtype=dtypes.int32)
# Makes an input queue
im_fn_q, labl_q = tf.train.slice_input_producer(
[images, labels], capacity=capacity, shuffle=True)
file_contents_q = tf.read_file(im_fn_q)
im_q = self._decoder(file_contents_q, channels=3)
return im_q, labl_q
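A sketch of consuming tf_ops() with TF 1.x queue runners ('loader' is a hypothetical instance of the enclosing class):

im_q, labl_q = loader.tf_ops(capacity=32)
image_batch, label_batch = tf.train.batch([im_q, labl_q], batch_size=16)
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images, labels = sess.run([image_batch, label_batch])
    coord.request_stop()
    coord.join(threads)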