This article collects typical usage examples of the Python function tensorflow.python.layers.utils.smart_cond. If you are wondering what smart_cond is for, how to call it, or what real code using it looks like, the curated examples here should help.
Twenty code examples of smart_cond are shown below, sorted by popularity by default.
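For orientation before the examples: smart_cond(pred, true_fn, false_fn) evaluates pred at graph-construction time when it is a Python bool, an int, or a constant tensor, and calls only the selected branch function; otherwise it falls back to tf.cond and builds both branches into the graph. A minimal sketch, assuming a TF 1.x graph-mode session (variable names are illustrative):

import tensorflow as tf
from tensorflow.python.layers import utils

x = tf.constant([1., 2., 3.])

# Static predicate: resolved at graph-construction time, so only the
# chosen branch function is ever called and built.
y = utils.smart_cond(True, lambda: x * 2., lambda: x)

# Unknown predicate: both branches are built and a tf.cond op selects
# between them when the graph runs.
flag = tf.placeholder_with_default(True, shape=())
z = utils.smart_cond(flag, lambda: x * 2., lambda: x)

with tf.Session() as sess:
  print(sess.run(y))                           # [2. 4. 6.]
  print(sess.run(z, feed_dict={flag: False}))  # [1. 2. 3.]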
Example 1: _renorm_correction_and_moments
def _renorm_correction_and_moments(self, mean, variance, training):
  """Returns the correction and update values for renorm."""
  stddev = math_ops.sqrt(variance + self.epsilon)
  # Compute the average mean and standard deviation, as if they were
  # initialized with this batch's moments.
  mixed_renorm_mean = (self.renorm_mean +
                       (1. - self.renorm_mean_weight) * mean)
  mixed_renorm_stddev = (self.renorm_stddev +
                         (1. - self.renorm_stddev_weight) * stddev)
  # Compute the corrections for batch renorm.
  r = stddev / mixed_renorm_stddev
  d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
  # Ensure the corrections use pre-update moving averages.
  with ops.control_dependencies([r, d]):
    mean = array_ops.identity(mean)
    stddev = array_ops.identity(stddev)
  rmin, rmax, dmax = [self.renorm_clipping.get(key)
                      for key in ['rmin', 'rmax', 'dmax']]
  if rmin is not None:
    r = math_ops.maximum(r, rmin)
  if rmax is not None:
    r = math_ops.minimum(r, rmax)
  if dmax is not None:
    d = math_ops.maximum(d, -dmax)
    d = math_ops.minimum(d, dmax)
  # When not training, use r=1, d=0, and decay=1 meaning no updates.
  r = utils.smart_cond(training, lambda: r, lambda: array_ops.ones_like(r))
  d = utils.smart_cond(training, lambda: d, lambda: array_ops.zeros_like(d))
  decay = utils.smart_cond(training, lambda: self.renorm_momentum, lambda: 1.)

  def _update_renorm_variable(var, weight, value):
    """Updates a moving average and weight, returns the unbiased value."""
    # Update the variables without zero debiasing. The debiasing will be
    # accomplished by dividing the exponential moving average by the weight.
    # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
    # giving the value.
    # Make sure the weight is not updated until r and d have been computed.
    value = array_ops.identity(value)
    with ops.control_dependencies([value]):
      weight_value = array_ops.constant(1., dtype=weight.dtype)
    new_var = moving_averages.assign_moving_average(
        var, value, decay, zero_debias=False)
    new_weight = moving_averages.assign_moving_average(
        weight, weight_value, decay, zero_debias=False)
    return new_var / new_weight

  with ops.colocate_with(self.moving_mean):
    new_mean = _update_renorm_variable(self.renorm_mean,
                                       self.renorm_mean_weight,
                                       mean)
  with ops.colocate_with(self.moving_variance):
    new_stddev = _update_renorm_variable(self.renorm_stddev,
                                         self.renorm_stddev_weight,
                                         stddev)
  # Make sqrt(moving_variance + epsilon) = new_stddev.
  new_variance = math_ops.square(new_stddev) - self.epsilon
  return (r, d, new_mean, new_variance)
Author: DjangoPeng | Project: tensorflow | Lines of code: 59 | Source: normalization.py
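As background for Example 1 (drawn from the Batch Renormalization paper, Ioffe 2017, rather than stated in this listing): the correction pair is

$$r = \operatorname{clip}_{[r_{\min},\,r_{\max}]}\!\left(\frac{\sigma_B}{\sigma}\right), \qquad d = \operatorname{clip}_{[-d_{\max},\,d_{\max}]}\!\left(\frac{\mu_B - \mu}{\sigma}\right),$$

where $(\mu_B, \sigma_B)$ are the batch moments and $(\mu, \sigma)$ the (here, mixed) moving averages. Before clipping, $\frac{x-\mu_B}{\sigma_B}\,r + d = \frac{x-\mu}{\sigma}$, i.e. the batch-normalized output is corrected toward normalization by the moving statistics; the smart_cond calls pin $r=1$, $d=0$ at inference so the correction vanishes.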
Example 2: _fused_batch_norm
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  # TODO(reedwm): Add support for fp16 inputs.
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  mean = array_ops.reshape(mean, shape=self.moving_mean.get_shape())
  variance = array_ops.reshape(variance,
                               shape=self.moving_variance.get_shape())

  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = utils.constant_value(training)
  if training_value is None:
    one_minus_decay = utils.smart_cond(training,
                                       lambda: self._one_minus_decay,
                                       lambda: 0.)
  else:
    one_minus_decay = ops.convert_to_tensor(self._one_minus_decay)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              one_minus_decay)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                  variance, one_minus_decay)
    if context.in_graph_mode():
      # Note that in Eager mode, the updates are already executed when running
      # assign_moving_averages. So we do not need to put them into
      # collections.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)
  return output
Author: rajeev921 | Project: tensorflow | Lines of code: 59 | Source: normalization.py
Example 3: _fused_batch_norm
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)

  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = utils.constant_value(training)
  if training_value is None:
    momentum = utils.smart_cond(training, lambda: self.momentum, lambda: 1.0)
  else:
    momentum = ops.convert_to_tensor(self.momentum)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              momentum)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                  variance, momentum)
    self.add_update(mean_update, inputs=inputs)
    self.add_update(variance_update, inputs=inputs)
  return output
Author: syed-ahmed | Project: tensorflow | Lines of code: 49 | Source: normalization.py
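A note on the lambda: 1.0 branch in Examples 2 and 3 (and the lambda: 0. branch for one_minus_decay): assign_moving_average implements the standard exponential update

$$v \leftarrow \text{momentum}\cdot v + (1-\text{momentum})\cdot x,$$

so selecting momentum = 1.0 (equivalently, one_minus_decay = 0, assuming Example 2's _assign_moving_average is the same update in that parameterization) makes the assignment a no-op. This matters when training is a tensor whose value is unknown at graph-construction time: the update ops must still be built, and smart_cond neutralizes them at run time rather than omitting them.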
Example 4: _fused_batch_norm_op
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
  """Creates a fused batch normalization op."""
  # The fused batch norm expects the mean, variance, gamma and beta
  # tensors to have dimension 1, so we flatten them to remove the
  # extra dimensions.
  gamma_flatten = tf.reshape(self._gamma, shape=(-1,))
  beta_flatten = tf.reshape(self._beta, shape=(-1,))
  flatten_mean = tf.reshape(mean, shape=(-1,))
  flatten_variance = tf.reshape(variance, shape=(-1,))
  use_batch_stats = tf.convert_to_tensor(use_batch_stats)

  common_args = {
      "scale": gamma_flatten,
      "offset": beta_flatten,
      "epsilon": self._eps,
      "data_format": self._infer_fused_data_format(input_batch),
      "name": "batch_norm"
  }

  def use_batch_stats_fused_batch_norm():
    return tf.nn.fused_batch_norm(input_batch, mean=None, variance=None,
                                  is_training=True, **common_args)

  def moving_average_fused_batch_norm():
    return tf.nn.fused_batch_norm(input_batch, mean=flatten_mean,
                                  variance=flatten_variance,
                                  is_training=False, **common_args)

  batch_norm_op, mean, variance = utils.smart_cond(
      use_batch_stats, use_batch_stats_fused_batch_norm,
      moving_average_fused_batch_norm)
  return batch_norm_op, mean, variance
Author: bch-runner-1 | Project: sonnet | Lines of code: 33 | Source: batch_norm.py
Example 5: summary_writer_function
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  def record():
    with summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      with ops.control_dependencies([function(tag, scope)]):
        return constant_op.constant(True)

  if context.context().summary_writer_resource is None:
    return control_flow_ops.no_op()
  with ops.device("cpu:0"):
    op = utils.smart_cond(
        should_record_summaries(), record, _nothing, name="")
    ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)  # pylint: disable=protected-access
  return op
Author: ChengYuXiang | Project: tensorflow | Lines of code: 25 | Source: summary_ops.py
Example 6: _build_statistics
def _build_statistics(self, input_batch, axis, use_batch_stats, dtype):
  """Builds the statistics part of the graph when using moving variance.

  Args:
    input_batch: Input batch Tensor.
    axis: Indices of `input_batch` to reduce over.
    use_batch_stats: Boolean to indicate if batch statistics should be
      calculated, otherwise moving averages are returned.
    dtype: TensorFlow datatype to use for the moving mean and variance.

  Returns:
    Tuple of (mean, variance).
  """
  # Set up our moving statistics. When connecting in parallel, this is shared.
  if self.MOVING_MEAN not in self._initializers:
    self._initializers[self.MOVING_MEAN] = create_mean_initializer()
  self._moving_mean = tf.get_variable(
      "moving_mean",
      dtype=dtype,
      shape=self._mean_shape,
      collections=[
          tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
          tf.GraphKeys.GLOBAL_VARIABLES,
      ],
      initializer=self._initializers[self.MOVING_MEAN],
      trainable=False)

  if self.MOVING_VARIANCE not in self._initializers:
    self._initializers[self.MOVING_VARIANCE] = create_variance_initializer()
  self._moving_variance = tf.get_variable(
      "moving_variance",
      dtype=dtype,
      shape=self._mean_shape,
      collections=[
          tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
          tf.GraphKeys.GLOBAL_VARIABLES,
      ],
      initializer=self._initializers[self.MOVING_VARIANCE],
      trainable=False)

  def build_batch_stats():
    """Builds the batch statistics calculation ops."""
    mean, variance = tf.nn.moments(input_batch, axis,
                                   keep_dims=True, name="normalize_moments")
    return mean, variance

  def build_moving_stats():
    return (
        tf.identity(self._moving_mean),
        tf.identity(self._moving_variance),
    )

  mean, variance = utils.smart_cond(
      use_batch_stats,
      build_batch_stats,
      build_moving_stats,
  )
  return mean, variance
Author: bch-runner-1 | Project: sonnet | Lines of code: 60 | Source: batch_norm.py
Example 7: call
def call(self, inputs, training=False):
  def dropped_inputs():
    return nn.dropout(inputs, 1 - self.rate,
                      noise_shape=self.noise_shape,
                      seed=self.seed)
  return utils.smart_cond(training,
                          dropped_inputs,
                          lambda: array_ops.identity(inputs))
Author: brainwy12 | Project: tensorflow | Lines of code: 8 | Source: core.py
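A hypothetical usage sketch for Example 7, assuming the TF 1.x tf.layers.Dropout layer (whose call() follows this pattern); variable names are illustrative:

import tensorflow as tf

x = tf.ones([4, 10])
layer = tf.layers.Dropout(rate=0.5)

# Python bool: smart_cond resolves the branch at graph-construction time,
# so only the dropout path (or only the identity path) is built.
train_out = layer(x, training=True)
infer_out = layer(x, training=False)

# Tensor flag: both branches are built and tf.cond selects at run time.
is_training = tf.placeholder_with_default(False, shape=())
out = layer(x, training=is_training)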
Example 8: crf_log_norm
def crf_log_norm(inputs, sequence_lengths, transition_params):
  """Computes the normalization for a CRF.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
      to use as input to the CRF layer.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.

  Returns:
    log_norm: A [batch_size] vector of normalizers for a CRF.
  """
  # Split up the first and rest of the inputs in preparation for the forward
  # algorithm.
  first_input = array_ops.slice(inputs, [0, 0, 0], [-1, 1, -1])
  first_input = array_ops.squeeze(first_input, [1])

  # If max_seq_len is 1, we skip the algorithm and simply reduce_logsumexp over
  # the "initial state" (the unary potentials).
  def _single_seq_fn():
    log_norm = math_ops.reduce_logsumexp(first_input, [1])
    # Mask `log_norm` of the sequences with length <= zero.
    log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                               array_ops.zeros_like(log_norm),
                               log_norm)
    return log_norm

  def _multi_seq_fn():
    """Forward computation of alpha values."""
    rest_of_input = array_ops.slice(inputs, [0, 1, 0], [-1, -1, -1])
    # Compute the alpha values in the forward algorithm in order to get the
    # partition function.
    forward_cell = CrfForwardRnnCell(transition_params)
    # Sequence length is not allowed to be less than zero.
    sequence_lengths_less_one = math_ops.maximum(
        constant_op.constant(0, dtype=sequence_lengths.dtype),
        sequence_lengths - 1)
    _, alphas = rnn.dynamic_rnn(
        cell=forward_cell,
        inputs=rest_of_input,
        sequence_length=sequence_lengths_less_one,
        initial_state=first_input,
        dtype=dtypes.float32)
    log_norm = math_ops.reduce_logsumexp(alphas, [1])
    # Mask `log_norm` of the sequences with length <= zero.
    log_norm = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                               array_ops.zeros_like(log_norm),
                               log_norm)
    return log_norm

  return utils.smart_cond(
      pred=math_ops.equal(
          tensor_shape.dimension_value(
              inputs.shape[1]) or array_ops.shape(inputs)[1],
          1),
      true_fn=_single_seq_fn,
      false_fn=_multi_seq_fn)
Author: ahmedsaiduk | Project: tensorflow | Lines of code: 57 | Source: crf.py
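A note on the predicate idiom shared by Examples 8, 11, and 14: tensor_shape.dimension_value(inputs.shape[1]) or array_ops.shape(inputs)[1] yields a Python int when the sequence dimension is statically known, letting smart_cond resolve the branch at graph-construction time, and otherwise falls back to a dynamic shape tensor. An illustrative expansion (not part of the original source):

# Illustrative expansion of the predicate used above.
max_seq_len = tensor_shape.dimension_value(inputs.shape[1])  # Python int, or None
if max_seq_len is None:
  max_seq_len = array_ops.shape(inputs)[1]  # scalar int32 tensor
pred = math_ops.equal(max_seq_len, 1)  # constant-foldable, or a runtime tensor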
Example 9: testConstantValue
def testConstantValue(self):
  f1 = lambda: constant_op.constant(5)
  f2 = lambda: constant_op.constant(32)

  # Boolean pred
  self.assertEqual(5, utils.constant_value(utils.smart_cond(True, f1, f2)))
  self.assertEqual(32, utils.constant_value(utils.smart_cond(False, f1, f2)))

  # Integer pred
  self.assertEqual(5, utils.constant_value(utils.smart_cond(1, f1, f2)))
  self.assertEqual(32, utils.constant_value(utils.smart_cond(0, f1, f2)))

  # Unknown pred
  pred = array_ops.placeholder_with_default(True, shape=())
  self.assertIsNone(utils.constant_value(utils.smart_cond(pred, f1, f2)))

  # Error case
  with self.assertRaises(TypeError):
    utils.constant_value(5)
Author: aritratony | Project: tensorflow | Lines of code: 19 | Source: utils_test.py
Example 10: distort_color
def distort_color(image, batch_position=0, distort_color_in_yiq=False,
                  scope=None):
  """Distort the color of the image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather than adding that level of complication, we select a distinct ordering
  of color ops based on the position of the image in a batch.

  Args:
    image: float32 Tensor containing single image. Tensor values should be in
      range [0, 1].
    batch_position: the position of the image in a batch. NOTE: this argument
      can be an integer or a tensor
    distort_color_in_yiq: distort color of input images in YIQ space.
    scope: Optional scope for op_scope.

  Returns:
    color-distorted image
  """
  with tf.name_scope(scope or 'distort_color'):

    def distort_fn_0(image=image):
      """Variant 0 of distort function."""
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      if distort_color_in_yiq:
        image = distort_image_ops.random_hsv_in_yiq(
            image, lower_saturation=0.5, upper_saturation=1.5,
            max_delta_hue=0.2 * math.pi)
      else:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      return image

    def distort_fn_1(image=image):
      """Variant 1 of distort function."""
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      if distort_color_in_yiq:
        image = distort_image_ops.random_hsv_in_yiq(
            image, lower_saturation=0.5, upper_saturation=1.5,
            max_delta_hue=0.2 * math.pi)
      else:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
      return image

    image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
                             distort_fn_1)
    # The random_* ops do not necessarily clamp.
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image
Author: bowrian | Project: tf-imagenet | Lines of code: 52 | Source: image.py
Example 11: crf_multitag_sequence_score
def crf_multitag_sequence_score(inputs, tag_bitmap, sequence_lengths,
                                transition_params):
  """Computes the unnormalized score of all tag sequences matching tag_bitmap.

  tag_bitmap enables more than one tag to be considered correct at each time
  step. This is useful when an observed output at a given time step is
  consistent with more than one tag, and thus the log likelihood of that
  observation must take into account all possible consistent tags.

  Using one-hot vectors in tag_bitmap gives results identical to
  crf_sequence_score.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
      to use as input to the CRF layer.
    tag_bitmap: A [batch_size, max_seq_len, num_tags] boolean tensor
      representing all active tags at each index for which to calculate the
      unnormalized score.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.

  Returns:
    sequence_scores: A [batch_size] vector of unnormalized sequence scores.
  """
  # If max_seq_len is 1, we skip the score calculation and simply gather the
  # unary potentials of all active tags.
  def _single_seq_fn():
    filtered_inputs = array_ops.where(
        tag_bitmap, inputs,
        array_ops.fill(array_ops.shape(inputs), float("-inf")))
    return math_ops.reduce_logsumexp(
        filtered_inputs, axis=[1, 2], keepdims=False)

  def _multi_seq_fn():
    # Compute the logsumexp of all scores of sequences matching the given tags.
    filtered_inputs = array_ops.where(
        tag_bitmap, inputs,
        array_ops.fill(array_ops.shape(inputs), float("-inf")))
    return crf_log_norm(
        inputs=filtered_inputs,
        sequence_lengths=sequence_lengths,
        transition_params=transition_params)

  return utils.smart_cond(
      pred=math_ops.equal(
          tensor_shape.dimension_value(
              inputs.shape[1]) or array_ops.shape(inputs)[1],
          1),
      true_fn=_single_seq_fn,
      false_fn=_multi_seq_fn)
Author: ahmedsaiduk | Project: tensorflow | Lines of code: 50 | Source: crf.py
Example 12: _build_update_ops
def _build_update_ops(self, mean, variance, is_training):
  """Builds the moving average update ops when using moving variance.

  Args:
    mean: The mean value to update with.
    variance: The variance value to update with.
    is_training: Boolean Tensor to indicate if we're currently in
      training mode.

  Returns:
    Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or
    could be `True`. Returns `None` when `is_training=False`.
  """
  def build_update_ops():
    """Builds the exponential moving average update ops."""
    update_mean_op = moving_averages.assign_moving_average(
        variable=self._moving_mean,
        value=mean,
        decay=self._decay_rate,
        zero_debias=False,
        name="update_moving_mean").op

    update_variance_op = moving_averages.assign_moving_average(
        variable=self._moving_variance,
        value=variance,
        decay=self._decay_rate,
        zero_debias=False,
        name="update_moving_variance").op

    return update_mean_op, update_variance_op

  def build_no_ops():
    return (tf.no_op(), tf.no_op())

  # Only make the ops if we know that `is_training=True`, or the value of
  # `is_training` is unknown.
  is_training_const = utils.constant_value(is_training)
  if is_training_const is None or is_training_const:
    update_mean_op, update_variance_op = utils.smart_cond(
        is_training,
        build_update_ops,
        build_no_ops,
    )
    return (update_mean_op, update_variance_op)
  else:
    return None
Author: bch-runner-1 | Project: sonnet | Lines of code: 48 | Source: batch_norm.py
Example 13: _get_examples
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
                  filter_fn, parse_fn):
  """Get example filenames matching.

  Args:
    file_name_queue: A queue implementation that dequeues elements in
      first-in first-out order.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    filter_fn: Filtering function, takes both keys as well as an `Example`
      Tensors and returns a boolean mask of the same shape as the input Tensors
      to be applied for filtering. If `None`, no filtering is done.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.

  Returns:
    List of example file names matching `file_name_queue`.
  """
  with ops.name_scope('read'):
    example_list = []
    for _ in range(num_threads):
      keys, examples_proto = utils.smart_cond(
          read_batch_size > 1,
          lambda: reader().read_up_to(file_name_queue, read_batch_size),
          lambda: reader().read(file_name_queue))
      if filter_fn:
        mask = filter_fn(keys, examples_proto)
        keys = array_ops.boolean_mask(keys, mask)
        examples_proto = array_ops.boolean_mask(examples_proto, mask)
      if parse_fn:
        parsed_examples = parse_fn(examples_proto)
        # Map keys into example map because batch_join doesn't support
        # tuple of Tensor + dict.
        if isinstance(parsed_examples, dict):
          parsed_examples[KEY_FEATURE_NAME] = keys
          example_list.append(parsed_examples)
        else:
          example_list.append((keys, parsed_examples))
      else:
        example_list.append((keys, examples_proto))
    return example_list
Author: ahmedsaiduk | Project: tensorflow | Lines of code: 45 | Source: graph_io.py
Example 14: crf_sequence_score
def crf_sequence_score(inputs, tag_indices, sequence_lengths,
                       transition_params):
  """Computes the unnormalized score for a tag sequence.

  Args:
    inputs: A [batch_size, max_seq_len, num_tags] tensor of unary potentials
      to use as input to the CRF layer.
    tag_indices: A [batch_size, max_seq_len] matrix of tag indices for which we
      compute the unnormalized score.
    sequence_lengths: A [batch_size] vector of true sequence lengths.
    transition_params: A [num_tags, num_tags] transition matrix.

  Returns:
    sequence_scores: A [batch_size] vector of unnormalized sequence scores.
  """
  # If max_seq_len is 1, we skip the score calculation and simply gather the
  # unary potentials of the single tag.
  def _single_seq_fn():
    batch_size = array_ops.shape(inputs, out_type=tag_indices.dtype)[0]
    example_inds = array_ops.reshape(
        math_ops.range(batch_size, dtype=tag_indices.dtype), [-1, 1])
    sequence_scores = array_ops.gather_nd(
        array_ops.squeeze(inputs, [1]),
        array_ops.concat([example_inds, tag_indices], axis=1))
    sequence_scores = array_ops.where(math_ops.less_equal(sequence_lengths, 0),
                                      array_ops.zeros_like(sequence_scores),
                                      sequence_scores)
    return sequence_scores

  def _multi_seq_fn():
    # Compute the scores of the given tag sequence.
    unary_scores = crf_unary_score(tag_indices, sequence_lengths, inputs)
    binary_scores = crf_binary_score(tag_indices, sequence_lengths,
                                     transition_params)
    sequence_scores = unary_scores + binary_scores
    return sequence_scores

  return utils.smart_cond(
      pred=math_ops.equal(
          tensor_shape.dimension_value(
              inputs.shape[1]) or array_ops.shape(inputs)[1],
          1),
      true_fn=_single_seq_fn,
      false_fn=_multi_seq_fn)
Author: ahmedsaiduk | Project: tensorflow | Lines of code: 43 | Source: crf.py
Example 15: summary_writer_function
def summary_writer_function(name, tensor, function, family=None):
  """Helper function to write summaries.

  Args:
    name: name of the summary
    tensor: main tensor to form the summary
    function: function taking a tag and a scope which writes the summary
    family: optional, the summary's family

  Returns:
    The result of writing the summary.
  """
  def record():
    with summary_op_util.summary_scope(
        name, family, values=[tensor]) as (tag, scope):
      function(tag, scope)
      return True

  return utils.smart_cond(
      should_record_summaries(), record, _nothing, name="")
Author: benoitsteiner | Project: tensorflow-opencl | Lines of code: 20 | Source: summary_ops.py
Example 16: _update_renorm_variable
def _update_renorm_variable(var, weight, value):
  """Updates a moving average and weight, returns the unbiased value."""
  value = array_ops.identity(value)

  def _do_update():
    # Update the variables without zero debiasing. The debiasing will be
    # accomplished by dividing the exponential moving average by the weight.
    # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
    # giving the value.
    # Make sure the weight is not updated until r and d have been computed.
    with ops.control_dependencies([value]):
      weight_value = array_ops.constant(1., dtype=weight.dtype)
    new_var = moving_averages.assign_moving_average(
        var, value, self.renorm_momentum, zero_debias=False)
    new_weight = moving_averages.assign_moving_average(
        weight, weight_value, self.renorm_momentum, zero_debias=False)
    return new_var / new_weight

  def _fake_update():
    return array_ops.identity(var)

  return utils.smart_cond(training, _do_update, _fake_update)
Author: dansbecker | Project: tensorflow | Lines of code: 20 | Source: normalization.py
Example 17: dropout_selu
from tensorflow.python.framework import ops, tensor_shape, tensor_util
from tensorflow.python.ops import array_ops, random_ops, math_ops
from tensorflow.python.layers import utils
import numbers
import tensorflow as tf


def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0,
                 fixedPointVar=1.0, noise_shape=None, seed=None, name=None,
                 training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0. < keep_prob <= 1.:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")

        if tensor_util.constant_value(keep_prob) == 1:
            return x

        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)

        a = tf.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * tf.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
Author: waxz | Project: ppo_torcs | Lines of code: 41 | Source: selu.py
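The affine correction in Example 17 is a direct transcription of the alpha-dropout derivation of Klambauer et al. (2017), "Self-Normalizing Neural Networks": with keep probability $q$ = keep_prob, drop value $\alpha'$ = alpha, and target fixed point $(\mu, \sigma^2)$ = (fixedPointMean, fixedPointVar), the code computes

$$a = \sqrt{\frac{\sigma^2}{q\left((1-q)(\alpha'-\mu)^2 + \sigma^2\right)}}, \qquad b = \mu - a\left(q\mu + (1-q)\alpha'\right),$$

so that the rescaled output $a\cdot\text{ret}+b$ keeps mean $\mu$ and variance $\sigma^2$ after units are randomly set to $\alpha'$.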
Example 18: _fused_batch_norm
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)

  training_value = utils.constant_value(training)
  if training_value is not False:
    decay = _smart_select(training, lambda: self.momentum, lambda: 1.)
    mean_update = moving_averages.assign_moving_average(
        self.moving_mean, mean, decay, zero_debias=False)
    variance_update = moving_averages.assign_moving_average(
        self.moving_variance, variance, decay, zero_debias=False)
    self.add_update(mean_update, inputs=inputs)
    self.add_update(variance_update, inputs=inputs)
  return output
Author: AutumnQYN | Project: tensorflow | Lines of code: 38 | Source: normalization.py
Example 19: _update_renorm_variable
def _update_renorm_variable(var, weight, value):
  """Updates a moving average and weight, returns the unbiased value."""
  value = array_ops.identity(value)

  def _do_update():
    """Updates the var and weight, returns their updated ratio."""
    # Update the variables without zero debiasing. The debiasing will be
    # accomplished by dividing the exponential moving average by the weight.
    # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
    # giving the value.
    # Make sure the weight is not updated until r and d have been computed.
    with ops.control_dependencies([value]):
      weight_value = array_ops.constant(1., dtype=weight.dtype)
    new_var = self._assign_moving_average(var, value, self.renorm_momentum)
    new_weight = self._assign_moving_average(weight, weight_value,
                                             self.renorm_momentum)
    # TODO(yuefengz): the updates to var and weighted can not be batched
    # together if we fetch their updated values here. Consider calculating
    # new values and delaying the updates.
    return new_var / new_weight

  def _fake_update():
    return array_ops.identity(var)

  return utils.smart_cond(training, _do_update, _fake_update)
Author: syed-ahmed | Project: tensorflow | Lines of code: 24 | Source: normalization.py
Example 20: resize_method_1
def resize_method_1():
  return utils.smart_cond(batch_position % len(resize_methods) == 1,
                          lambda: lookup(1), resize_method_2)
Author: bowrian | Project: tf-imagenet | Lines of code: 3 | Source: image.py
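Example 20 is a three-line fragment of a chain of conditionals that dispatches on batch_position % len(resize_methods). A plausible reconstruction of the surrounding pattern, for readability only (hypothetical: resize_methods, lookup, batch_position, and the neighboring resize_method_* functions are assumed from the source file, not shown in this listing):

# Hypothetical reconstruction of the chained dispatch, not verbatim source.
def lookup(index):
  return resize_methods[index]

def resize_method_2():
  return utils.smart_cond(batch_position % len(resize_methods) == 2,
                          lambda: lookup(2), lambda: lookup(3))

def resize_method_1():
  return utils.smart_cond(batch_position % len(resize_methods) == 1,
                          lambda: lookup(1), resize_method_2)

def resize_method_0():
  return utils.smart_cond(batch_position % len(resize_methods) == 0,
                          lambda: lookup(0), resize_method_1)

resize_method = resize_method_0()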
Note: the tensorflow.python.layers.utils.smart_cond examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects; copyright in the source code remains with the original authors, and redistribution and use are subject to each project's license. Do not repost without permission.