This article collects typical usage examples of the tensorflow.python.ops.array_ops.tile function in Python. If you have been wondering how the Python tile function is used in practice, how to call it, or what working examples look like, the hand-picked code samples below should help.
A total of 20 code examples of the tile function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
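Before the per-project examples, here is a minimal sketch of what the op does, assuming only a standard TensorFlow installation (the shapes and values are illustrative and not taken from any example below). array_ops.tile is the internal module behind the public tf.tile: it repeats a tensor along each dimension according to a multiples vector with one entry per input dimension.

import tensorflow as tf
from tensorflow.python.ops import array_ops

# A 2x3 matrix tiled with multiples [2, 1]: two copies stacked along axis 0,
# one copy along axis 1, giving a 4x3 result.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
tiled = array_ops.tile(x, [2, 1])
# tiled == [[1, 2, 3],
#           [4, 5, 6],
#           [1, 2, 3],
#           [4, 5, 6]]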
Example 1: _mask_probs

def _mask_probs(probs, eos_token, finished):
  """Masks log probabilities.

  The result is that finished beams allocate all probability mass to eos and
  unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to.
    finished: A boolean tensor of shape `[batch_size, beam_width]` that
      specifies which elements in the beam are finished already.

  Returns:
    A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
    beams stay unchanged and finished beams are replaced with a tensor with all
    probability on the EOS token.
  """
  vocab_size = array_ops.shape(probs)[2]
  # All finished examples are replaced with a vector that has all
  # probability on EOS
  finished_row = array_ops.one_hot(
      eos_token,
      vocab_size,
      dtype=probs.dtype,
      on_value=0.,
      off_value=probs.dtype.min)
  finished_probs = array_ops.tile(
      array_ops.reshape(finished_row, [1, 1, -1]),
      array_ops.concat([array_ops.shape(finished), [1]], 0))
  finished_mask = array_ops.tile(
      array_ops.expand_dims(finished, 2), [1, 1, vocab_size])
  return array_ops.where(finished_mask, finished_probs, probs)

Developer ID: AbhinavJain13, Project: tensorflow, Lines of code: 34, Source file: beam_search_decoder.py
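As a tiny illustration of the masking idea above (made-up sizes, written against the public tf.* API rather than the internal modules): the replacement row assigns log-probability 0 to the EOS token and the dtype minimum everywhere else, and tile then broadcasts that row across the batch and beam dimensions.

import tensorflow as tf

vocab_size = 5
eos_token = 3
# Log-prob row that puts all mass on EOS: 0. at the EOS index, dtype.min elsewhere.
finished_row = tf.one_hot(eos_token, vocab_size,
                          on_value=0., off_value=tf.float32.min)
# Tile to [batch_size=2, beam_width=4, vocab_size] to obtain the replacement rows.
finished_probs = tf.tile(tf.reshape(finished_row, [1, 1, -1]), [2, 4, 1])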
Example 2: _SumGrad

def _SumGrad(op, grad):
  """Gradient for Sum."""
  # Fast path for when reducing to a scalar and ndims is known: adds only
  # Reshape and Tile ops (and possibly a Shape).
  input_0_shape = op.inputs[0]._shape_tuple()  # pylint: disable=protected-access
  if input_0_shape is not None:
    axes = tensor_util.constant_value(op.inputs[1])
    if axes is not None:
      rank = len(input_0_shape)
      if np.array_equal(axes, np.arange(rank)):  # Reduce all dims.
        grad = array_ops.reshape(grad, [1] * rank)
        # If shape is not fully defined (but rank is), we use Shape.
        if None not in input_0_shape:
          input_shape = input_0_shape
        else:
          input_shape = array_ops.shape(op.inputs[0])
        return [array_ops.tile(grad, input_shape), None]

  input_shape = array_ops.shape(op.inputs[0])
  # TODO(apassos) remove this once device placement for eager ops makes more
  # sense.
  with ops.colocate_with(input_shape):
    output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
    tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
  grad = array_ops.reshape(grad, output_shape_kept_dims)
  return [array_ops.tile(grad, tile_scaling), None]

Developer ID: neuroradiology, Project: tensorflow, Lines of code: 26, Source file: math_grad.py
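A small sketch (not part of the TensorFlow source above, and using the public tf.* API) of why the Sum gradient is "reshape then tile": every input element contributes exactly once to the reduced sum, so the upstream gradient is simply replicated back to the input's shape.

import tensorflow as tf

x = tf.ones([2, 3])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.reduce_sum(x, axis=1)        # shape [2]
grad = tape.gradient(y, x)            # shape [2, 3], all ones

# The same result by hand: reshape the upstream gradient to the kept-dims
# shape [2, 1], then tile it with multiples [1, 3] back to the input shape.
upstream = tf.ones([2])
by_hand = tf.tile(tf.reshape(upstream, [2, 1]), [1, 3])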
Example 3: testShapeFunctionEdgeCases

def testShapeFunctionEdgeCases(self):
  # Unknown multiples shape.
  inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
  tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
  self.assertEqual([None, None, None, None], tiled.get_shape().as_list())

  # Unknown input shape.
  inp = array_ops.placeholder(dtypes.float32)
  tiled = array_ops.tile(inp, [2, 2, 2, 2])
  self.assertEqual([None, None, None, None], tiled.get_shape().as_list())

  # Unknown input and multiples shape.
  inp = array_ops.placeholder(dtypes.float32)
  tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
  self.assertIs(None, tiled.get_shape().ndims)

  # Known input and partially known multiples.
  inp = constant_op.constant(0.0, shape=[1, 1])
  tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
  self.assertEqual([None, 7], tiled.get_shape().as_list())

  # Mismatched input rank and multiples length.
  inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
  with self.assertRaises(ValueError):
    tiled = array_ops.tile(
        inp, array_ops.placeholder(
            dtypes.int32, shape=[3]))

Developer ID: DjangoPeng, Project: tensorflow, Lines of code: 27, Source file: shape_ops_test.py
Example 4: frames

def frames(signal, frame_length, frame_step, name=None):
  """Frame a signal into overlapping frames.

  May be used in front of spectral functions.

  For example:

  ```python
  pcm = tf.placeholder(tf.float32, [None, 9152])
  frames = tf.contrib.signal.frames(pcm, 512, 180)
  magspec = tf.abs(tf.spectral.rfft(frames, [512]))
  image = tf.expand_dims(magspec, 3)
  ```

  Args:
    signal: A `Tensor` of shape `[batch_size, signal_length]`.
    frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
    frame_step: An `int32` or `int64` `Tensor`. The step between frames.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.

  Raises:
    ValueError: if signal does not have rank 2.
  """
  with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
    signal = ops.convert_to_tensor(signal, name="signal")
    frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
    frame_step = ops.convert_to_tensor(frame_step, name="frame_step")

    signal_rank = signal.shape.ndims
    if signal_rank != 2:
      raise ValueError("expected signal to have rank 2 but was " +
                       str(signal_rank))

    signal_length = array_ops.shape(signal)[1]

    num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
    num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)

    pad_length = (num_frames - 1) * frame_step + frame_length
    pad_signal = array_ops.pad(signal,
                               [[0, 0], [0, pad_length - signal_length]])

    indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
    indices_frames = array_ops.tile(indices_frame, [num_frames, 1])

    indices_step = array_ops.expand_dims(
        math_ops.range(num_frames) * frame_step, 1)
    indices_steps = array_ops.tile(indices_step, [1, frame_length])

    indices = indices_frames + indices_steps

    # TODO(androbin): remove `transpose` when `gather` gets `axis` support
    pad_signal = array_ops.transpose(pad_signal)
    signal_frames = array_ops.gather(pad_signal, indices)
    signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])

    return signal_frames

Developer ID: AlbertXiebnu, Project: tensorflow, Lines of code: 60, Source file: shape_ops.py
Example 5: to_weighted_sum

def to_weighted_sum(self,
                    input_tensor,
                    num_outputs=1,
                    weight_collections=None,
                    trainable=True):
  """Returns a Tensor as linear predictions and a list of created Variable."""
  dimension = self.source_column.dimension
  batch_size = array_ops.shape(input_tensor)[0]

  if dimension > 1:
    i1 = array_ops.reshape(array_ops.tile(array_ops.expand_dims(
        math_ops.range(0, batch_size), 1), [1, dimension]), [-1])
    i2 = array_ops.tile(math_ops.range(0, dimension), [batch_size])
    # Flatten the bucket indices and unique them across dimensions
    # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
    # TODO(chapelle): move that logic to insert_transformed_feature to ensure
    #   unique buckets across dimensions after crossing.
    bucket_indices = array_ops.reshape(input_tensor, [-1]) + self.length * i2
  else:
    # Simpler indices when dimension=1
    i1 = math_ops.range(0, batch_size)
    i2 = array_ops.zeros([batch_size], dtype=dtypes.int32)
    bucket_indices = array_ops.reshape(input_tensor, [-1])

  indices = math_ops.to_int64(array_ops.transpose(array_ops.pack((i1, i2))))
  shape = math_ops.to_int64(array_ops.pack([batch_size, 1]))
  sparse_id_values = ops.SparseTensor(indices, bucket_indices, shape)
  vocab_size = self.length * self.source_column.dimension

  return _create_embedding_lookup(
      sparse_id_values, vocab_size, num_outputs,
      _add_variable_collection(weight_collections), 0., "sum",
      trainable, self.name + "_weights")

Developer ID: YanLongDong, Project: tensorflow, Lines of code: 33, Source file: feature_column.py
Example 6: gather_tree_from_array

def gather_tree_from_array(t, parent_ids, sequence_length):
  """Calculates the full beams for `TensorArray`s.

  Args:
    t: A stacked `TensorArray` of size `max_time` that contains `Tensor`s of
      shape `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]`
      where `s` is the depth shape.
    parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
    sequence_length: The sequence length of shape `[batch_size, beam_width]`.

  Returns:
    A `Tensor` which is a stacked `TensorArray` of the same size and type as
    `t` and where beams are sorted in each `Tensor` according to `parent_ids`.
  """
  max_time = parent_ids.shape[0].value or array_ops.shape(parent_ids)[0]
  batch_size = parent_ids.shape[1].value or array_ops.shape(parent_ids)[1]
  beam_width = parent_ids.shape[2].value or array_ops.shape(parent_ids)[2]

  # Generate beam ids that will be reordered by gather_tree.
  beam_ids = array_ops.expand_dims(
      array_ops.expand_dims(math_ops.range(beam_width), 0), 0)
  beam_ids = array_ops.tile(beam_ids, [max_time, batch_size, 1])

  mask = array_ops.sequence_mask(
      sequence_length, maxlen=max_time, dtype=dtypes.int32)
  mask = array_ops.transpose(mask, perm=[2, 0, 1])

  # Use beam_width + 1 to mark the end of beam.
  masked_beam_ids = (beam_ids * mask) + (1 - mask) * (beam_width + 1)

  max_sequence_lengths = math_ops.to_int32(
      math_ops.reduce_max(sequence_length, axis=1))
  sorted_beam_ids = beam_search_ops.gather_tree(
      step_ids=masked_beam_ids,
      parent_ids=parent_ids,
      max_sequence_lengths=max_sequence_lengths,
      end_token=beam_width + 1)

  # For out of range steps, simply copy the same beam.
  sorted_beam_ids = array_ops.where(
      math_ops.cast(mask, dtypes.bool), x=sorted_beam_ids, y=beam_ids)

  # Generate indices for gather_nd.
  time_ind = array_ops.tile(array_ops.reshape(
      math_ops.range(max_time), [-1, 1, 1]), [1, batch_size, beam_width])
  batch_ind = array_ops.tile(array_ops.reshape(
      math_ops.range(batch_size), [-1, 1, 1]), [1, max_time, beam_width])
  batch_ind = array_ops.transpose(batch_ind, perm=[1, 0, 2])
  indices = array_ops.stack([time_ind, batch_ind, sorted_beam_ids], -1)

  # Gather from a tensor with collapsed additional dimensions.
  gather_from = t
  final_shape = array_ops.shape(gather_from)
  gather_from = array_ops.reshape(
      gather_from, [max_time, batch_size, beam_width, -1])
  ordered = array_ops.gather_nd(gather_from, indices)
  ordered = array_ops.reshape(ordered, final_shape)

  return ordered

Developer ID: BhaskarNallani, Project: tensorflow, Lines of code: 59, Source file: beam_search_decoder.py
Example 7: make_tril_ids

def make_tril_ids(n):
  """Internal helper to create vector of linear indices into y."""
  cols = array_ops.reshape(array_ops.tile(math_ops.range(n), [n]), [n, n])
  rows = array_ops.tile(
      array_ops.expand_dims(math_ops.range(n), -1), [1, n])
  pred = math_ops.greater(cols, rows)
  tril_ids = array_ops.tile(array_ops.reshape(
      math_ops.cumsum(math_ops.range(n)), [n, 1]), [1, n]) + cols
  tril_ids = math_ops.select(pred,
                             array_ops.zeros([n, n], dtype=dtypes.int32),
                             tril_ids + 1)
  tril_ids = array_ops.reshape(tril_ids, [-1])
  return tril_ids

Developer ID: DavidNemeskey, Project: tensorflow, Lines of code: 13, Source file: distribution_util.py
Example 8: testInvalidDim

def testInvalidDim(self):
  with self.test_session():
    inp = np.random.rand(4, 1).astype("f")
    a = constant_op.constant(
        [float(x) for x in inp.ravel(order="C")],
        shape=[4, 1],
        dtype=dtypes.float32)

    # Wrong length of multiples.
    with self.assertRaises(ValueError):
      array_ops.tile(a, [1, 4, 2])

    # Wrong rank for multiples.
    with self.assertRaises(ValueError):
      array_ops.tile(a, [[2, 3], [3, 4]]).eval()

Developer ID: DjangoPeng, Project: tensorflow, Lines of code: 13, Source file: shape_ops_test.py
Example 9: _initialize_variables

def _initialize_variables(self, data, initial_means=None):
  """Initializes variables.

  Args:
    data: a list of Tensors with data, each row is a new example.
    initial_means: a Tensor with a matrix of means.
  """
  first_shard = data[0]
  # Initialize means: num_classes X 1 X dimensions.
  if initial_means is not None:
    means = array_ops.expand_dims(initial_means, 1)
  else:
    # Sample data randomly
    means = array_ops.expand_dims(
        _init_clusters_random(data, self._num_classes, self._random_seed), 1)

  # Initialize covariances.
  if self._covariance_type == FULL_COVARIANCE:
    cov = _covariance(first_shard, False) + self._min_var
    # A matrix per class, num_classes X dimensions X dimensions
    covs = array_ops.tile(
        array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
  elif self._covariance_type == DIAG_COVARIANCE:
    cov = _covariance(first_shard, True) + self._min_var
    # A diagonal per row, num_classes X dimensions.
    covs = array_ops.tile(
        array_ops.expand_dims(array_ops.diag_part(cov), 0),
        [self._num_classes, 1])

  with ops.colocate_with(self._cluster_centers_initialized):
    initialized = control_flow_ops.with_dependencies(
        [means, covs],
        array_ops.identity(self._cluster_centers_initialized))

  self._init_ops = []
  with ops.colocate_with(self._means):
    init_means = state_ops.assign(self._means, means, validate_shape=False)
    init_means = control_flow_ops.with_dependencies(
        [init_means],
        state_ops.assign(self._cluster_centers_initialized, True))
    self._init_ops.append(control_flow_ops.cond(initialized,
                                                control_flow_ops.no_op,
                                                lambda: init_means).op)
  with ops.colocate_with(self._covs):
    init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
    init_covs = control_flow_ops.with_dependencies(
        [init_covs],
        state_ops.assign(self._cluster_centers_initialized, True))
    self._init_ops.append(control_flow_ops.cond(initialized,
                                                control_flow_ops.no_op,
                                                lambda: init_covs).op)

Developer ID: AndreasGocht, Project: tensorflow, Lines of code: 50, Source file: gmm_ops.py
Example 10: _ApplyLengthsToBatch

def _ApplyLengthsToBatch(sequence_lengths, tf_output):
  # TODO(drpng): just use Update so that we don't carry over the gradients?
  """Sets the output to be zero at the end of the sequence."""
  # output is batch major.
  batch_size, max_time, vector_size = tf_output.shape
  output_time = array_ops.tile(math_ops.range(0, max_time), [batch_size])
  output_time = array_ops.reshape(output_time, [batch_size, max_time])
  lengths = array_ops.tile(
      array_ops.reshape(sequence_lengths, [-1, 1]), [1, max_time])
  is_less = math_ops.cast(
      math_ops.less(output_time, lengths), dtype=dtypes.float32)
  keep_mask = array_ops.tile(
      array_ops.expand_dims(is_less, -1),
      [1, 1, vector_size])
  final_output = keep_mask * tf_output
  return final_output

Developer ID: BhaskarNallani, Project: tensorflow, Lines of code: 16, Source file: functional_rnn.py
Example 11: inverse_stft_window_fn_inner

def inverse_stft_window_fn_inner(frame_length, dtype):
  """Computes a window that can be used in `inverse_stft`.

  Args:
    frame_length: An integer scalar `Tensor`. The window length in samples.
    dtype: Data type of waveform passed to `stft`.

  Returns:
    A window suitable for reconstructing original waveform in `inverse_stft`.

  Raises:
    ValueError: If `frame_length` is not scalar, `forward_window_fn` is not a
      callable that takes a window length and a `dtype` keyword argument and
      returns a `[window_length]` `Tensor` of samples in the provided datatype,
      or `frame_step` is not scalar.
  """
  # `name`, `forward_window_fn` and `frame_step` are free variables here: this
  # is an inner helper that captures them from the enclosing
  # `inverse_stft_window_fn` closure.
  with ops.name_scope(name, 'inverse_stft_window_fn', [forward_window_fn]):
    frame_length = ops.convert_to_tensor(frame_length, name='frame_length')
    frame_length.shape.assert_has_rank(0)

    # Use equation 7 from Griffin + Lim.
    forward_window = forward_window_fn(frame_length, dtype=dtype)
    denom = math_ops.square(forward_window)
    overlaps = -(-frame_length // frame_step)  # Ceiling division.
    denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
    denom = array_ops.reshape(denom, [overlaps, frame_step])
    denom = math_ops.reduce_sum(denom, 0, keep_dims=True)
    denom = array_ops.tile(denom, [overlaps, 1])
    denom = array_ops.reshape(denom, [overlaps * frame_step])

    return forward_window / denom[:frame_length]

Developer ID: AbhinavJain13, Project: tensorflow, Lines of code: 31, Source file: spectral_ops.py
Example 12: dataset_fn

def dataset_fn():
  dataset = dataset_ops.Dataset.range(10).map(
      lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),
      num_parallel_calls=optimization.AUTOTUNE)
  options = dataset_ops.Options()
  options.experimental_autotune = True
  return dataset.with_options(options)

Developer ID: aeverall, Project: tensorflow, Lines of code: 7, Source file: stats_dataset_ops_test.py
Example 13: testPrefetchBufferUtilization

def testPrefetchBufferUtilization(self, dataset_transformation):
  aggregator = stats_aggregator.StatsAggregator()
  dataset = dataset_ops.Dataset.range(100).map(
      lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(-1)
  dataset = dataset_transformation(dataset, aggregator)
  iterator = dataset_ops.make_initializable_iterator(dataset)
  next_element = iterator.get_next()
  summary_t = aggregator.get_summary()

  with self.cached_session() as sess:
    self.evaluate(iterator.initializer)
    for i in range(100):
      self.assertAllEqual(
          np.array([i] * i, dtype=np.int64), self.evaluate(next_element))
      summary_str = self.evaluate(summary_t)
      self._assertSummaryHasCount(summary_str, "Prefetch::buffer_utilization",
                                  float(i + 1))
      self._assertSummaryContains(summary_str, "Prefetch::buffer_capacity")
      self._assertSummaryContains(summary_str, "Prefetch::buffer_size")
      self._assertSummaryHasRange(summary_str, "Prefetch::buffer_utilization",
                                  0, 1)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)
    summary_str = self.evaluate(summary_t)
    self._assertSummaryHasCount(summary_str, "Prefetch::buffer_utilization",
                                100)

Developer ID: aeverall, Project: tensorflow, Lines of code: 26, Source file: stats_dataset_ops_test.py
Example 14: runFiniteDifferences

def runFiniteDifferences(self,
                         shapes,
                         dtypes=(dtypes_lib.float32, dtypes_lib.float64),
                         scalarTest=False):
  with self.test_session(use_gpu=False):
    for shape in shapes:
      for batch in False, True:
        for dtype in dtypes:
          if not scalarTest:
            x = constant_op.constant(
                np.random.randn(shape[0], shape[1]), dtype)
            tensor = math_ops.matmul(x, array_ops.transpose(x)) / shape[0]
          else:
            # This is designed to be a faster test for larger matrices.
            x = constant_op.constant(np.random.randn(), dtype)
            R = constant_op.constant(np.random.randn(shape[0], shape[1]),
                                     dtype)
            e = math_ops.mul(R, x)
            tensor = math_ops.matmul(e, array_ops.transpose(e)) / shape[0]

          # Inner-most matrices in tensor are positive definite.
          if batch:
            tensor = array_ops.tile(
                array_ops.expand_dims(tensor, 0), [4, 1, 1])
          y = linalg_ops.cholesky(tensor)
          if scalarTest:
            y = math_ops.reduce_mean(y)
          error = gradient_checker.compute_gradient_error(
              x, x._shape_as_list(), y, y._shape_as_list())
          tf_logging.info("error = %f", error)
          if dtype == dtypes_lib.float64:
            self.assertLess(error, 1e-5)
          else:
            self.assertLess(error, 3e-3)

Developer ID: kdavis-mozilla, Project: tensorflow, Lines of code: 27, Source file: cholesky_op_test.py
Example 15: testPrefetchBufferUtilization

def testPrefetchBufferUtilization(self, dataset_transformation):
  aggregator = stats_aggregator.StatsAggregator()
  dataset = dataset_ops.Dataset.range(100).map(
      lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(-1)
  dataset = dataset_transformation(dataset, aggregator)
  next_element = self.getNext(dataset, requires_initialization=True)

  for i in range(100):
    self.assertAllEqual(
        np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))
    summary_str = self.evaluate(aggregator.get_summary())
    self._assertSummaryHasCount(
        summary_str,
        self.regexForNodeName("PrefetchDataset", "buffer_utilization"),
        float(i + 1))
    self._assertSummaryContains(
        summary_str,
        self.regexForNodeName("PrefetchDataset", "buffer_capacity"))
    self._assertSummaryContains(
        summary_str, self.regexForNodeName("PrefetchDataset", "buffer_size"))
    self._assertSummaryHasRange(
        summary_str,
        self.regexForNodeName("PrefetchDataset", "buffer_utilization"), 0, 1)
  with self.assertRaises(errors.OutOfRangeError):
    self.evaluate(next_element())
  summary_str = self.evaluate(aggregator.get_summary())
  self._assertSummaryHasCount(
      summary_str,
      self.regexForNodeName("PrefetchDataset", "buffer_utilization"), 100)

Developer ID: kylin9872, Project: tensorflow, Lines of code: 28, Source file: stats_dataset_ops_test.py
Example 16: _align_matrices

def _align_matrices(x, y):
  """Aligns x and y tensors to allow computations over pairs of their rows."""
  x_matrix = _to_matrix(x)
  y_matrix = _to_matrix(y)
  x_shape = x_matrix.shape
  y_shape = y_matrix.shape
  if y_shape[1] != x_shape[1]:  # dimensions do not match.
    raise ValueError(
        'The outermost dimensions of the input tensors should match. Given: {} '
        'vs {}.'.format(y_shape[1], x_shape[1]))

  x_tile = array_ops.tile(
      array_ops.expand_dims(x_matrix, 1), [1, y_shape[0], 1])
  y_tile = array_ops.tile(
      array_ops.expand_dims(y_matrix, 0), [x_shape[0], 1, 1])
  return x_tile, y_tile

Developer ID: rmlarsen, Project: tensorflow, Lines of code: 16, Source file: kernelized_utils.py
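A small illustrative sketch (hypothetical shapes, public tf.* API) of the pairwise pattern this helper produces: after tiling, position [i, j] pairs row i of x with row j of y, which is convenient for computing e.g. pairwise squared distances for a kernel matrix.

import tensorflow as tf

x = tf.constant([[1., 0.], [0., 1.], [2., 2.]])   # 3 rows
y = tf.constant([[0., 0.], [1., 1.]])             # 2 rows

# Shapes are known here, so the multiples can be written as literals.
x_tile = tf.tile(tf.expand_dims(x, 1), [1, 2, 1])  # [3, 2, 2]
y_tile = tf.tile(tf.expand_dims(y, 0), [3, 1, 1])  # [3, 2, 2]

# Pairwise squared Euclidean distance between every row of x and every row of y.
sq_dist = tf.reduce_sum(tf.square(x_tile - y_tile), axis=-1)  # shape [3, 2]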
Example 17: testUnknownInputShape

def testUnknownInputShape(self):
  """Importing can call _TileShape without shape of <multiples> known."""
  with self.test_session():
    inp = array_ops.placeholder(dtypes.float32)  # unknown shape
    multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
    tiled = array_ops.tile(inp, multiples)
    gdef = tiled.graph.as_graph_def()

    # Move the tile op to the start of the graph so that shapes of its inputs
    # are not available when the shape function runs on import.
    swapped = False
    for i, n in enumerate(gdef.node):
      if n.op == "Tile":
        # Swap tile op to be first in gdef.node
        assert i != 0
        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(gdef.node[i])
        gdef.node[i].CopyFrom(gdef.node[0])
        gdef.node[0].CopyFrom(new_node)
        swapped = True
    assert swapped

    tiled_imported, = importer.import_graph_def(
        gdef, return_elements=[tiled.name])
    self.assertEqual(4, tiled_imported.get_shape().ndims)

Developer ID: DjangoPeng, Project: tensorflow, Lines of code: 25, Source file: shape_ops_test.py
Example 18: testTypes

def testTypes(self):
  types_to_test = {
      "bool": (dtypes.bool, bool),
      "float32": (dtypes.float32, float),
      "float64": (dtypes.float64, float),
      "complex64": (dtypes.complex64, complex),
      "complex128": (dtypes.complex128, complex),
      "uint8": (dtypes.uint8, int),
      "int8": (dtypes.int8, int),
      "int16": (dtypes.int16, int),
      "int32": (dtypes.int32, int),
      "int64": (dtypes.int64, int),
      bytes: (dtypes.string, bytes)
  }
  for dtype_np, (dtype_tf, cast) in types_to_test.items():
    with self.cached_session(use_gpu=True):
      inp = np.random.rand(4, 1).astype(dtype_np)
      a = constant_op.constant(
          [cast(x) for x in inp.ravel(order="C")],
          shape=[4, 1],
          dtype=dtype_tf)
      tiled = array_ops.tile(a, [1, 4])
      result = self.evaluate(tiled)
    self.assertEqual(result.shape, (4, 4))
    self.assertEqual([4, 4], tiled.get_shape())
    self.assertAllEqual(result, np.tile(inp, (1, 4)))

Developer ID: adit-chandra, Project: tensorflow, Lines of code: 26, Source file: shape_ops_test.py
Example 19: _entropy

def _entropy(self):
  if (not self.distribution.is_continuous or
      not self.bijector.is_constant_jacobian):
    raise NotImplementedError("entropy is not implemented")
  # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
  # can be shown that:
  #   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
  # If is_constant_jacobian then:
  #   E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
  # where c can be anything.
  entropy = self.distribution.entropy()
  if self._is_maybe_event_override:
    # H[X] = sum_i H[X_i] if X_i are mutually independent.
    # This means that a reduce_sum is a simple rescaling.
    entropy *= math_ops.cast(
        math_ops.reduce_prod(self._override_event_shape),
        dtype=entropy.dtype.base_dtype)
  if self._is_maybe_batch_override:
    new_shape = array_ops.concat([
        _ones_like(self._override_batch_shape),
        self.distribution.batch_shape_tensor()
    ], 0)
    entropy = array_ops.reshape(entropy, new_shape)
    multiples = array_ops.concat([
        self._override_batch_shape,
        _ones_like(self.distribution.batch_shape_tensor())
    ], 0)
    entropy = array_ops.tile(entropy, multiples)
  dummy = array_ops.zeros([], self.dtype)
  entropy -= self.bijector.inverse_log_det_jacobian(dummy)
  entropy.set_shape(self.batch_shape)
  return entropy

Developer ID: arnonhongklay, Project: tensorflow, Lines of code: 31, Source file: transformed_distribution.py
Example 20: _BiasAddGradGrad

def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)

Developer ID: Jackhuang945, Project: tensorflow, Lines of code: 34, Source file: nn_grad.py
Note: The tensorflow.python.ops.array_ops.tile examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to each project's License before redistributing or using the code; do not reproduce without permission.