This article collects typical usage examples of the Python function nets.resnet_utils.subsample. If you have been wondering what exactly subsample does and how to use it, the hand-picked code examples below should help.
Below are 8 code examples of the subsample function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
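For context, subsample itself is a very small utility: it keeps every factor-th activation along the spatial dimensions. The sketch below reflects its definition as found in TF-Slim's resnet_utils.py (reproduced here for reference only; check the version in your own checkout):

def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions."""
  if factor == 1:
    return inputs
  else:
    # A 1x1 max pool with stride `factor` keeps every factor-th pixel
    # and introduces no new parameters.
    return slim.max_pool2d(inputs, [1, 1], stride=factor, scope=scope)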
Example 1: testAtrousFullyConvolutionalValues
def testAtrousFullyConvolutionalValues(self):
  """Verify dense feature extraction with atrous convolution."""
  nominal_stride = 32
  for output_stride in [4, 8, 16, 32, None]:
    with slim.arg_scope(resnet_utils.resnet_arg_scope()):
      with tf.Graph().as_default():
        with self.test_session() as sess:
          tf.set_random_seed(0)
          inputs = create_test_input(2, 81, 81, 3)
          # Dense feature extraction followed by subsampling.
          output, _ = self._resnet_small(inputs, None,
                                         is_training=False,
                                         global_pool=False,
                                         output_stride=output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          # Make the two networks use the same weights.
          tf.get_variable_scope().reuse_variables()
          # Feature extraction at the nominal network rate.
          expected, _ = self._resnet_small(inputs, None,
                                           is_training=False,
                                           global_pool=False)
          sess.run(tf.global_variables_initializer())
          self.assertAllClose(output.eval(), expected.eval(),
                              atol=1e-4, rtol=1e-4)
Author: mbossX | Project: RRPN_FPN_Tensorflow | Lines: 28 | Source: resnet_v2_test.py
Example 2: bottleneck
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the unit's output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                             normalizer_fn=None, activation_fn=None,
                             scope='shortcut')

    residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           normalizer_fn=None, activation_fn=None,
                           scope='conv3')

    output = shortcut + residual

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.original_name_scope,
                                            output)
Author: mbossX | Project: RRPN_FPN_Tensorflow | Lines: 47 | Source: resnet_v2.py
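To see the unit in isolation, here is a minimal, hypothetical usage sketch (it assumes TensorFlow 1.x with tf.contrib.slim, the resnet_utils module, and the bottleneck definition above; the shapes are illustrative only):

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.ones([1, 32, 32, 64])
with slim.arg_scope(resnet_utils.resnet_arg_scope()):
  # depth == depth_in (both 64) and stride == 2, so the shortcut branch
  # is resnet_utils.subsample(inputs, 2): a parameter-free 1x1 max pool.
  net = bottleneck(inputs, depth=64, depth_bottleneck=16, stride=2)
print(net.shape)  # (1, 16, 16, 64)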
Example 3: _atrousValues
def _atrousValues(self, bottleneck):
  """Verify the values of dense feature extraction by atrous convolution.

  Make sure that dense feature extraction by stack_blocks_dense() followed by
  subsampling gives identical results to feature extraction at the nominal
  network output stride using the simple self._stack_blocks_nondense() above.

  Args:
    bottleneck: The bottleneck function.
  """
  blocks = [
      resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]),
      resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]),
      resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]),
      resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])
  ]
  nominal_stride = 8

  # Test both odd and even input dimensions.
  height = 30
  width = 31
  with slim.arg_scope(resnet_utils.resnet_arg_scope()):
    with slim.arg_scope([slim.batch_norm], is_training=False):
      for output_stride in [1, 2, 4, 8, None]:
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(1, height, width, 3)
            # Dense feature extraction followed by subsampling.
            output = resnet_utils.stack_blocks_dense(inputs,
                                                     blocks,
                                                     output_stride)
            if output_stride is None:
              factor = 1
            else:
              factor = nominal_stride // output_stride
            output = resnet_utils.subsample(output, factor)
            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()
            # Feature extraction at the nominal network rate.
            expected = self._stack_blocks_nondense(inputs, blocks)
            sess.run(tf.global_variables_initializer())
            output, expected = sess.run([output, expected])
            self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
Author: Aravindreddy986 | Project: TensorFlowOnSpark | Lines: 45 | Source: resnet_v2_test.py
Example 4: testConv2DSameEven
def testConv2DSameEven(self):
  n, n2 = 4, 2
  # Input image.
  x = create_test_input(1, n, n, 1)
  # Convolution kernel.
  w = create_test_input(1, 3, 3, 1)
  w = tf.reshape(w, [3, 3, 1, 1])
  tf.get_variable('Conv/weights', initializer=w)
  tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
  tf.get_variable_scope().reuse_variables()
  y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
  y1_expected = tf.to_float([[14, 28, 43, 26],
                             [28, 48, 66, 37],
                             [43, 66, 84, 46],
                             [26, 37, 46, 22]])
  y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
  y2 = resnet_utils.subsample(y1, 2)
  y2_expected = tf.to_float([[14, 43],
                             [43, 84]])
  y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
  y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
  y3_expected = y2_expected
  y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
  y4_expected = tf.to_float([[48, 37],
                             [37, 22]])
  y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    self.assertAllClose(y1.eval(), y1_expected.eval())
    self.assertAllClose(y2.eval(), y2_expected.eval())
    self.assertAllClose(y3.eval(), y3_expected.eval())
    self.assertAllClose(y4.eval(), y4_expected.eval())
Author: mbossX | Project: RRPN_FPN_Tensorflow | Lines: 40 | Source: resnet_v2_test.py
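The y3 assertion above holds because conv2d_same pads the input explicitly before a VALID strided convolution, keeping the sampling grid aligned with subsample. For reference, a sketch of its definition as found in TF-Slim's resnet_utils.py (reproduced for context; not part of the original test):

def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
  """Strided 2-D convolution with 'SAME' padding independent of input size."""
  if stride == 1:
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate,
                       padding='SAME', scope=scope)
  else:
    # Pad explicitly so the output does not depend on whether the input
    # height/width is even or odd.
    kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
    pad_total = kernel_size_effective - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    inputs = tf.pad(inputs,
                    [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])
    return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride,
                       rate=rate, padding='VALID', scope=scope)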
Example 5: testSubsampleFourByFour
def testSubsampleFourByFour(self):
  x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
  x = resnet_utils.subsample(x, 2)
  expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
  with self.test_session():
    self.assertAllClose(x.eval(), expected.eval())
Author: mbossX | Project: RRPN_FPN_Tensorflow | Lines: 6 | Source: resnet_v2_test.py
Example 6: testSubsampleThreeByThree
def testSubsampleThreeByThree(self):
  x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
  x = resnet_utils.subsample(x, 2)
  expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
  with self.test_session():
    self.assertAllClose(x.eval(), expected.eval())
Author: mbossX | Project: RRPN_FPN_Tensorflow | Lines: 6 | Source: resnet_v2_test.py
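Both tests above are consistent with reading subsample(x, factor) as plain strided slicing of the spatial grid. A quick NumPy sketch of that equivalence (my own illustration based on the 1x1-max-pool implementation, not code from the original test file):

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(1, 4, 4, 1)
# A 1x1 max pool with stride 2 keeps every second row and column,
# i.e. exactly x[:, ::2, ::2, :].
sub = x[:, ::2, ::2, :]
print(sub.ravel())  # [ 0.  2.  8. 10.] -- matches testSubsampleFourByFour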
Example 7: bottleneck
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None,
               use_bounded_activations=False):
  """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the unit's output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.
    use_bounded_activations: Whether or not to use bounded activations. Bounded
      activations better lend themselves to quantized inference.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      shortcut = slim.conv2d(
          inputs,
          depth, [1, 1],
          stride=stride,
          activation_fn=tf.nn.relu6 if use_bounded_activations else None,
          scope='shortcut')

    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           activation_fn=None, scope='conv3')

    if use_bounded_activations:
      # Use clip_by_value to simulate bandpass activation.
      residual = tf.clip_by_value(residual, -6.0, 6.0)
      output = tf.nn.relu6(shortcut + residual)
    else:
      output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)
Author: codeinpeace | Project: models | Lines: 61 | Source: resnet_v1.py
Example 8: testStridingLastUnitVsSubsampleBlockEnd
def testStridingLastUnitVsSubsampleBlockEnd(self):
  """Compares subsampling at the block's last unit or block's end.

  Makes sure that the final output is the same when we use a stride at the
  last unit of a block vs. we subsample activations at the end of a block.
  """
  block = resnet_v1.resnet_v1_block
  blocks = [
      block('block1', base_depth=1, num_units=2, stride=2),
      block('block2', base_depth=2, num_units=2, stride=2),
      block('block3', base_depth=4, num_units=2, stride=2),
      block('block4', base_depth=8, num_units=2, stride=1),
  ]

  # Test both odd and even input dimensions.
  height = 30
  width = 31
  with slim.arg_scope(resnet_utils.resnet_arg_scope()):
    with slim.arg_scope([slim.batch_norm], is_training=False):
      for output_stride in [1, 2, 4, 8, None]:
        with tf.Graph().as_default():
          with self.test_session() as sess:
            tf.set_random_seed(0)
            inputs = create_test_input(1, height, width, 3)

            # Subsampling at the last unit of the block.
            output = resnet_utils.stack_blocks_dense(
                inputs, blocks, output_stride,
                store_non_strided_activations=False,
                outputs_collections='output')
            output_end_points = slim.utils.convert_collection_to_dict(
                'output')

            # Make the two networks use the same weights.
            tf.get_variable_scope().reuse_variables()

            # Subsample activations at the end of the blocks.
            expected = resnet_utils.stack_blocks_dense(
                inputs, blocks, output_stride,
                store_non_strided_activations=True,
                outputs_collections='expected')
            expected_end_points = slim.utils.convert_collection_to_dict(
                'expected')

            sess.run(tf.global_variables_initializer())

            # Make sure that the final output is the same.
            output, expected = sess.run([output, expected])
            self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)

            # Make sure that intermediate block activations in
            # output_end_points are subsampled versions of the corresponding
            # ones in expected_end_points.
            for i, block in enumerate(blocks[:-1]):
              output = output_end_points[block.scope]
              expected = expected_end_points[block.scope]
              atrous_activated = (output_stride is not None and
                                  2 ** i >= output_stride)
              if not atrous_activated:
                expected = resnet_utils.subsample(expected, 2)
              output, expected = sess.run([output, expected])
              self.assertAllClose(output, expected, atol=1e-4, rtol=1e-4)
Author: codeinpeace | Project: models | Lines: 63 | Source: resnet_v1_test.py
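The docstrings in examples 2 and 7 note that consecutive blocks should put stride = 2 in the last unit; the resnet_v1_block helper used above encodes exactly that convention. Here is a sketch of its definition, based on the TF models repository (the exact form may differ between versions):

def resnet_v1_block(scope, base_depth, num_units, stride):
  """Helper for creating a resnet_v1 bottleneck block.

  All units use stride 1 except the last, which carries the block's stride.
  """
  return resnet_utils.Block(scope, bottleneck, [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': 1
  }] * (num_units - 1) + [{
      'depth': base_depth * 4,
      'depth_bottleneck': base_depth,
      'stride': stride
  }])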
Note: The nets.resnet_utils.subsample examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce without permission.