
Python mobilenet_v1.mobilenet_v1_base function code examples


This article collects typical usage examples of the Python function nets.mobilenet_v1.mobilenet_v1_base. If you have been wondering what mobilenet_v1_base does, how to call it, or how it is used in practice, the curated code examples below should help.



Fifteen code examples of mobilenet_v1_base are shown below, sorted by popularity by default.
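
Before the collected examples, here is a minimal usage sketch for orientation. It assumes a TensorFlow 1.x environment with the slim nets package from tensorflow/models on the Python path; the printed shape and endpoint names match Example 11 below.

import tensorflow as tf
from nets import mobilenet_v1

# Build the MobileNet v1 backbone up to its final pointwise layer.
inputs = tf.random_uniform((1, 224, 224, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(
    inputs, final_endpoint='Conv2d_13_pointwise')

print(net.get_shape().as_list())  # [1, 7, 7, 1024]
print(sorted(end_points.keys()))  # 'Conv2d_0' ... 'Conv2d_13_pointwise'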

Example 1: testModelHasExpectedNumberOfParameters

 def testModelHasExpectedNumberOfParameters(self):
   batch_size = 5
   height, width = 224, 224
   inputs = tf.random_uniform((batch_size, height, width, 3))
   with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                       normalizer_fn=slim.batch_norm):
     mobilenet_v1.mobilenet_v1_base(inputs)
     total_params, _ = slim.model_analyzer.analyze_vars(
         slim.get_model_variables())
     self.assertAlmostEqual(3217920, total_params)
Developer: ALISCIFP | Project: models | Lines: 10 | Source: mobilenet_v1_test.py
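
For context on the 3,217,920 figure asserted above: MobileNet stays small because every 3x3 standard convolution (after the first layer) is replaced by a 3x3 depthwise convolution plus a 1x1 pointwise convolution. A quick sketch of that arithmetic; the 256-to-512-channel layer is an arbitrary illustrative choice, not taken from the test.

def standard_conv_params(k, c_in, c_out):
    # Weights of a k x k standard convolution (biases and batch norm ignored).
    return k * k * c_in * c_out

def depthwise_separable_params(k, c_in, c_out):
    # k x k depthwise weights plus 1 x 1 pointwise weights.
    return k * k * c_in + c_in * c_out

print(standard_conv_params(3, 256, 512))        # 1179648
print(depthwise_separable_params(3, 256, 512))  # 133376, roughly 8.8x fewer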


Example 2: testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13

  def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):
    batch_size = 5
    height, width = 224, 224
    output_stride = 8

    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
      _, end_points = mobilenet_v1.mobilenet_v1_base(
          inputs, output_stride=output_stride,
          final_endpoint='Conv2d_13_pointwise')
      _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
          inputs, output_stride=output_stride,
          final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
    endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
                        'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
                        'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
                        'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
                        'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
                        'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
                        'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
                        'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
                        'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
                        'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
                        'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
                        'Conv2d_6_depthwise': [batch_size, 28, 28, 256],
                        'Conv2d_6_pointwise': [batch_size, 28, 28, 512],
                        'Conv2d_7_depthwise': [batch_size, 28, 28, 512],
                        'Conv2d_7_pointwise': [batch_size, 28, 28, 512],
                        'Conv2d_8_depthwise': [batch_size, 28, 28, 512],
                        'Conv2d_8_pointwise': [batch_size, 28, 28, 512],
                        'Conv2d_9_depthwise': [batch_size, 28, 28, 512],
                        'Conv2d_9_pointwise': [batch_size, 28, 28, 512],
                        'Conv2d_10_depthwise': [batch_size, 28, 28, 512],
                        'Conv2d_10_pointwise': [batch_size, 28, 28, 512],
                        'Conv2d_11_depthwise': [batch_size, 28, 28, 512],
                        'Conv2d_11_pointwise': [batch_size, 28, 28, 512],
                        'Conv2d_12_depthwise': [batch_size, 28, 28, 512],
                        'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],
                        'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],
                        'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name, expected_shape in endpoints_shapes.items():
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)
    self.assertItemsEqual(endpoints_shapes.keys(),
                          explicit_padding_end_points.keys())
    for endpoint_name, expected_shape in endpoints_shapes.items():
      self.assertTrue(endpoint_name in explicit_padding_end_points)
      self.assertListEqual(
          explicit_padding_end_points[endpoint_name].get_shape().as_list(),
          expected_shape)
Developer: ALISCIFP | Project: models | Lines: 53 | Source: mobilenet_v1_test.py
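
The shapes asserted above follow a simple rule: once the nominal cumulative stride of the network reaches output_stride, subsequent layers switch to atrous convolution and the spatial size stops shrinking. A sketch of that arithmetic, under my reading of the output_stride contract (the nominal strides listed are those implied by the asserted shapes):

input_size = 224
output_stride = 8
# Nominal cumulative strides where the unmodified network would downsample.
for nominal_stride in [2, 4, 8, 16, 32]:
    effective_stride = min(nominal_stride, output_stride)
    print(nominal_stride, input_size // effective_stride)
# Prints 112, 56, 28, 28, 28 -- matching the endpoint shapes above.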


Example 3: testBuildAndCheckAllEndPointsApproximateFaceNet

  def testBuildAndCheckAllEndPointsApproximateFaceNet(self):
    batch_size = 5
    height, width = 128, 128

    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        normalizer_fn=slim.batch_norm):
      _, end_points = mobilenet_v1.mobilenet_v1_base(
          inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)
      _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
          inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,
          use_explicit_padding=True)
    # For the Conv2d_0 layer FaceNet has depth=16
    endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24],
                        'Conv2d_1_depthwise': [batch_size, 64, 64, 24],
                        'Conv2d_1_pointwise': [batch_size, 64, 64, 48],
                        'Conv2d_2_depthwise': [batch_size, 32, 32, 48],
                        'Conv2d_2_pointwise': [batch_size, 32, 32, 96],
                        'Conv2d_3_depthwise': [batch_size, 32, 32, 96],
                        'Conv2d_3_pointwise': [batch_size, 32, 32, 96],
                        'Conv2d_4_depthwise': [batch_size, 16, 16, 96],
                        'Conv2d_4_pointwise': [batch_size, 16, 16, 192],
                        'Conv2d_5_depthwise': [batch_size, 16, 16, 192],
                        'Conv2d_5_pointwise': [batch_size, 16, 16, 192],
                        'Conv2d_6_depthwise': [batch_size, 8, 8, 192],
                        'Conv2d_6_pointwise': [batch_size, 8, 8, 384],
                        'Conv2d_7_depthwise': [batch_size, 8, 8, 384],
                        'Conv2d_7_pointwise': [batch_size, 8, 8, 384],
                        'Conv2d_8_depthwise': [batch_size, 8, 8, 384],
                        'Conv2d_8_pointwise': [batch_size, 8, 8, 384],
                        'Conv2d_9_depthwise': [batch_size, 8, 8, 384],
                        'Conv2d_9_pointwise': [batch_size, 8, 8, 384],
                        'Conv2d_10_depthwise': [batch_size, 8, 8, 384],
                        'Conv2d_10_pointwise': [batch_size, 8, 8, 384],
                        'Conv2d_11_depthwise': [batch_size, 8, 8, 384],
                        'Conv2d_11_pointwise': [batch_size, 8, 8, 384],
                        'Conv2d_12_depthwise': [batch_size, 4, 4, 384],
                        'Conv2d_12_pointwise': [batch_size, 4, 4, 768],
                        'Conv2d_13_depthwise': [batch_size, 4, 4, 768],
                        'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}
    self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
    for endpoint_name, expected_shape in endpoints_shapes.items():
      self.assertTrue(endpoint_name in end_points)
      self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                           expected_shape)
    self.assertItemsEqual(endpoints_shapes.keys(),
                          explicit_padding_end_points.keys())
    for endpoint_name, expected_shape in endpoints_shapes.items():
      self.assertTrue(endpoint_name in explicit_padding_end_points)
      self.assertListEqual(
          explicit_padding_end_points[endpoint_name].get_shape().as_list(),
          expected_shape)
Developer: ALISCIFP | Project: models | Lines: 52 | Source: mobilenet_v1_test.py
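
The channel counts asserted above are the standard MobileNet depths scaled by depth_multiplier=0.75. A sketch of the scaling rule as I understand the slim implementation, max(int(d * depth_multiplier), min_depth) with min_depth defaulting to 8:

def depth(d, depth_multiplier=0.75, min_depth=8):
    return max(int(d * depth_multiplier), min_depth)

print(depth(32))    # 24  -> Conv2d_0
print(depth(64))    # 48  -> Conv2d_1_pointwise
print(depth(1024))  # 768 -> Conv2d_13_pointwise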


Example 4: testBuildOnlyUptoFinalEndpoint

 def testBuildOnlyUptoFinalEndpoint(self):
   batch_size = 5
   height, width = 224, 224
   endpoints = ['Conv2d_0',
                'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
                'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
                'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
                'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
                'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
                'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
                'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
                'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
                'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
                'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
                'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
   for index, endpoint in enumerate(endpoints):
     with tf.Graph().as_default():
       inputs = tf.random_uniform((batch_size, height, width, 3))
       out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
           inputs, final_endpoint=endpoint)
       self.assertTrue(out_tensor.op.name.startswith(
           'MobilenetV1/' + endpoint))
       self.assertItemsEqual(endpoints[:index+1], end_points.keys())
Developer: ALISCIFP | Project: models | Lines: 25 | Source: mobilenet_v1_test.py


Example 5: _extract_features

  def _extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]

    Raises:
      ValueError: if image height or width are not 256 pixels.
    """
    image_shape = preprocessed_inputs.get_shape()
    image_shape.assert_has_rank(4)
    image_height = image_shape[1].value
    image_width = image_shape[2].value

    if image_height is None or image_width is None:
      shape_assert = tf.Assert(
          tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256),
                         tf.equal(tf.shape(preprocessed_inputs)[2], 256)),
          ['image size must be 256 in both height and width.'])
      with tf.control_dependencies([shape_assert]):
        preprocessed_inputs = tf.identity(preprocessed_inputs)
    elif image_height != 256 or image_width != 256:
      raise ValueError('image size must be = 256 in both height and width;'
                       ' image dim = %d,%d' % (image_height, image_width))

    feature_map_layout = {
        'from_layer': [
            'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''
        ],
        'layer_depth': [-1, -1, 512, 256, 256],
        'conv_kernel_size': [-1, -1, 3, 3, 2],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with slim.arg_scope(self._conv_hyperparams):
      with slim.arg_scope([slim.batch_norm], fused=False):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
Developer: codeinpeace | Project: models | Lines: 58 | Source: embedded_ssd_mobilenet_v1_feature_extractor.py


Example 6: extract_features

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    feature_map_layout = {
        'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',
                       '', ''],
        'layer_depth': [-1, -1, 512, 256, 256, 128],
        'use_explicit_padding': self._use_explicit_padding,
        'use_depthwise': self._use_depthwise,
    }

    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=True, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          # TODO(skligys): Enable fused batch norm once quantization supports it.
          with slim.arg_scope([slim.batch_norm], fused=False):
            _, image_features = mobilenet_v1.mobilenet_v1_base(
                ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
                final_endpoint='Conv2d_13_pointwise',
                min_depth=self._min_depth,
                depth_multiplier=self._depth_multiplier,
                use_explicit_padding=self._use_explicit_padding,
                scope=scope)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        # TODO(skligys): Enable fused batch norm once quantization supports it.
        with slim.arg_scope([slim.batch_norm], fused=False):
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
Developer: smajida | Project: models | Lines: 50 | Source: ssd_mobilenet_v1_feature_extractor.py
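
The feature_map_layout dict used in Examples 5-7 drives how the SSD feature pyramid is assembled. On my reading of multi_resolution_feature_maps (verify against your version of the object detection API), a named from_layer entry reuses an existing MobileNet endpoint, an empty string requests a new downsampling block stacked on top, and a layer_depth of -1 keeps the endpoint's native depth:

feature_map_layout = {
    'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise',
                   '', '', '', ''],
    'layer_depth': [-1, -1, 512, 256, 256, 128],
}
for name, depth in zip(feature_map_layout['from_layer'],
                       feature_map_layout['layer_depth']):
    if name:
        print('reuse endpoint %s at its native depth' % name)
    else:
        print('append a new downsampling block with %d channels' % depth)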


Example 7: extract_features

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs.get_shape().assert_has_rank(4)
    shape_assert = tf.Assert(
        tf.logical_and(
            tf.equal(tf.shape(preprocessed_inputs)[1], 256),
            tf.equal(tf.shape(preprocessed_inputs)[2], 256)),
        ['image size must be 256 in both height and width.'])

    feature_map_layout = {
        'from_layer': [
            'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''
        ],
        'layer_depth': [-1, -1, 512, 256, 256],
        'conv_kernel_size': [-1, -1, 3, 3, 2],
    }

    with tf.control_dependencies([shape_assert]):
      with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
Developer: DaRealLazyPanda | Project: models | Lines: 44 | Source: embedded_ssd_mobilenet_v1_feature_extractor.py


Example 8: _extract_proposal_features

  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      activations: A dictionary mapping feature extractor tensor names to
        tensors

    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """

    preprocessed_inputs.get_shape().assert_has_rank(4)
    preprocessed_inputs = shape_utils.check_min_image_dim(
        min_dim=33, image_tensor=preprocessed_inputs)

    with slim.arg_scope(
        mobilenet_v1.mobilenet_v1_arg_scope(
            is_training=self._train_batch_norm,
            weight_decay=self._weight_decay)):
      with tf.variable_scope('MobilenetV1',
                             reuse=self._reuse_weights) as scope:
        params = {}
        if self._skip_last_stride:
          params['conv_defs'] = _get_mobilenet_conv_no_last_stride_defs(
              conv_depth_ratio_in_percentage=self.
              _conv_depth_ratio_in_percentage)
        _, activations = mobilenet_v1.mobilenet_v1_base(
            preprocessed_inputs,
            final_endpoint='Conv2d_11_pointwise',
            min_depth=self._min_depth,
            depth_multiplier=self._depth_multiplier,
            scope=scope,
            **params)
    return activations['Conv2d_11_pointwise'], activations
Developer: ALISCIFP | Project: models | Lines: 42 | Source: faster_rcnn_mobilenet_v1_feature_extractor.py


Example 9: testBuildCustomNetworkUsingConvDefs

  def testBuildCustomNetworkUsingConvDefs(self):
    batch_size = 5
    height, width = 224, 224
    conv_defs = [
        mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
        mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
        mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
        mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
    ]

    inputs = tf.random_uniform((batch_size, height, width, 3))
    net, end_points = mobilenet_v1.mobilenet_v1_base(
        inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
    self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
    self.assertListEqual(net.get_shape().as_list(),
                         [batch_size, 56, 56, 512])
    expected_endpoints = ['Conv2d_0',
                          'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                          'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                          'Conv2d_3_depthwise', 'Conv2d_3_pointwise']
    self.assertItemsEqual(end_points.keys(), expected_endpoints)
Developer: ALISCIFP | Project: models | Lines: 21 | Source: mobilenet_v1_test.py


Example 10: _extract_proposal_features

  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      activations: A dictionary mapping feature extractor tensor names to
        tensors

    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """

    preprocessed_inputs.get_shape().assert_has_rank(4)
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                       tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
        ['image size must at least be 33 in both height and width.'])

    with tf.control_dependencies([shape_assert]):
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=self._train_batch_norm,
              weight_decay=self._weight_decay)):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, activations = mobilenet_v1.mobilenet_v1_base(
              preprocessed_inputs,
              final_endpoint='Conv2d_11_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
    return activations['Conv2d_11_pointwise'], activations
Developer: codeinpeace | Project: models | Lines: 39 | Source: faster_rcnn_mobilenet_v1_feature_extractor.py


Example 11: testBuildBaseNetwork

  def testBuildBaseNetwork(self):
    batch_size = 5
    height, width = 224, 224

    inputs = tf.random_uniform((batch_size, height, width, 3))
    net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
    self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
    self.assertListEqual(net.get_shape().as_list(),
                         [batch_size, 7, 7, 1024])
    expected_endpoints = ['Conv2d_0',
                          'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
                          'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
                          'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
                          'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
                          'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
                          'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
                          'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
                          'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
                          'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
                          'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
                          'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
                          'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
                          'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
    self.assertItemsEqual(end_points.keys(), expected_endpoints)
Developer: ALISCIFP | Project: models | Lines: 24 | Source: mobilenet_v1_test.py


Example 12: _construct_model

def _construct_model(model_type='resnet_v1_50'):
  """Constructs model for the desired type of CNN.

  Args:
    model_type: Type of model to be used.

  Returns:
    end_points: A dictionary from components of the network to the corresponding
      activations.

  Raises:
    ValueError: If the model_type is not supported.
  """
  # Placeholder input.
  images = array_ops.placeholder(
      dtypes.float32, shape=(1, None, None, 3), name=_INPUT_NODE)

  # Construct model.
  if model_type == 'inception_resnet_v2':
    _, end_points = inception.inception_resnet_v2_base(images)
  elif model_type == 'inception_resnet_v2-same':
    _, end_points = inception.inception_resnet_v2_base(
        images, align_feature_maps=True)
  elif model_type == 'inception_v2':
    _, end_points = inception.inception_v2_base(images)
  elif model_type == 'inception_v2-no-separable-conv':
    _, end_points = inception.inception_v2_base(
        images, use_separable_conv=False)
  elif model_type == 'inception_v3':
    _, end_points = inception.inception_v3_base(images)
  elif model_type == 'inception_v4':
    _, end_points = inception.inception_v4_base(images)
  elif model_type == 'alexnet_v2':
    _, end_points = alexnet.alexnet_v2(images)
  elif model_type == 'vgg_a':
    _, end_points = vgg.vgg_a(images)
  elif model_type == 'vgg_16':
    _, end_points = vgg.vgg_16(images)
  elif model_type == 'mobilenet_v1':
    _, end_points = mobilenet_v1.mobilenet_v1_base(images)
  elif model_type == 'mobilenet_v1_075':
    _, end_points = mobilenet_v1.mobilenet_v1_base(
        images, depth_multiplier=0.75)
  elif model_type == 'resnet_v1_50':
    _, end_points = resnet_v1.resnet_v1_50(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v1_101':
    _, end_points = resnet_v1.resnet_v1_101(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v1_152':
    _, end_points = resnet_v1.resnet_v1_152(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v1_200':
    _, end_points = resnet_v1.resnet_v1_200(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v2_50':
    _, end_points = resnet_v2.resnet_v2_50(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v2_101':
    _, end_points = resnet_v2.resnet_v2_101(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v2_152':
    _, end_points = resnet_v2.resnet_v2_152(
        images, num_classes=None, is_training=False, global_pool=False)
  elif model_type == 'resnet_v2_200':
    _, end_points = resnet_v2.resnet_v2_200(
        images, num_classes=None, is_training=False, global_pool=False)
  else:
    raise ValueError('Unsupported model_type %s.' % model_type)

  return end_points
Developer: Albert-Z-Guo | Project: tensorflow | Lines: 71 | Source: rf_benchmark.py


Example 13: extract_features

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs.get_shape().assert_has_rank(4)
    shape_assert = tf.Assert(
        tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
                       tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
        ['image size must at least be 33 in both height and width.'])

    feature_map_layout = {
        'from_layer': ['east_conv1_3x3'],
        'layer_depth': [-1],
    }

    with tf.control_dependencies([shape_assert]):
      with slim.arg_scope(self._conv_hyperparams):
        with tf.variable_scope('MobilenetV1',
                               reuse=self._reuse_weights) as scope:
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              preprocessed_inputs,
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              scope=scope)
          """
          by chenx
          """
          east_conv_1 = image_features['Conv2d_3_pointwise']
          east_conv_2 = image_features['Conv2d_5_pointwise']
          east_conv_3 = image_features['Conv2d_11_pointwise']
          east_conv_4 = image_features['Conv2d_13_pointwise']

          east_deconv4 = slim.conv2d_transpose(east_conv_4, 512, [4, 4], 2, \
                                          padding='SAME', scope='east_deconv4')
          east_conv4_concat = tf.concat([east_conv_4, east_deconv4], axis=3)
          east_conv4_1x1 = slim.conv2d(east_conv4_concat, 256, [1,1],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv4_1x1')
          east_conv4_3x3 = slim.conv2d(east_conv4_1x1, 256, [3,3],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv4_3x3')
          image_features['east_conv4_3x3'] = east_conv4_3x3

          east_deconv3 = slim.conv2d_transpose(east_conv4_3x3, 256, [4, 4], 2, \
                                          padding='SAME', scope='east_deconv3')
          east_conv3_concat = tf.concat([east_conv_3, east_deconv3], axis=3)
          east_conv3_1x1 = slim.conv2d(east_conv3_concat, 128, [1,1],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv3_1x1')
          east_conv3_3x3 = slim.conv2d(east_conv3_1x1, 128, [3,3],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv3_3x3')
          image_features['east_conv3_3x3'] = east_conv3_3x3

          east_deconv2 = slim.conv2d_transpose(east_conv3_3x3, 128, [4, 4], 2, \
                                          padding='SAME', scope='east_deconv2')
          east_conv2_concat = tf.concat([east_conv_2, east_deconv2], axis=3)
          east_conv2_1x1 = slim.conv2d(east_conv2_concat, 64, [1,1],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv2_1x1')
          east_conv2_3x3 = slim.conv2d(east_conv2_1x1, 64, [3,3],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv2_3x3')
          image_features['east_conv2_3x3'] = east_conv2_3x3

          east_deconv1 = slim.conv2d_transpose(east_conv2_3x3, 64, [4, 4], 2, \
                                          padding='SAME', scope='east_deconv1')
          east_conv1_concat = tf.concat([east_conv_1, east_deconv1], axis=3)
          east_conv1_1x1 = slim.conv2d(east_conv1_concat, 32, [1,1],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv1_1x1')
          east_conv1_3x3 = slim.conv2d(east_conv1_1x1, 32, [3,3],
                                       stride=1,
                                       normalizer_fn=slim.batch_norm,
                                       scope='east_conv1_3x3')
          image_features['east_conv1_3x3'] = east_conv1_3x3

          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=feature_map_layout,
              depth_multiplier=self._depth_multiplier,
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
Developer: chenxiang204 | Project: code | Lines: 100 | Source: east_mobilenet_v1_feature_extractor.py


Example 14: extract_features

  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.

    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)

    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)

      depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
      with slim.arg_scope(self._conv_hyperparams_fn()):
        with tf.variable_scope('fpn', reuse=self._reuse_weights):
          feature_blocks = [
              'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
              'Conv2d_13_pointwise'
          ]
          base_fpn_max_level = min(self._fpn_max_level, 5)
          feature_block_list = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_block_list.append(feature_blocks[level - 2])
          fpn_features = feature_map_generators.fpn_top_down_feature_maps(
              [(key, image_features[key]) for key in feature_block_list],
              depth=depth_fn(256))
          feature_maps = []
          for level in range(self._fpn_min_level, base_fpn_max_level + 1):
            feature_maps.append(fpn_features['top_down_{}'.format(
                feature_blocks[level - 2])])
          last_feature_map = fpn_features['top_down_{}'.format(
              feature_blocks[base_fpn_max_level - 2])]
          # Construct coarse features
          for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
            last_feature_map = slim.conv2d(
                last_feature_map,
                num_outputs=depth_fn(256),
                kernel_size=[3, 3],
                stride=2,
                padding='SAME',
                scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13))
            feature_maps.append(last_feature_map)
    return feature_maps
Developer: ALISCIFP | Project: models | Lines: 61 | Source: ssd_mobilenet_v1_fpn_feature_extractor.py
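
The indexing feature_blocks[level - 2] above implies a fixed mapping from FPN pyramid levels to MobileNet endpoints, spelled out below; levels above 5 are produced by the extra stride-2 convolutions in the final loop of the example.

feature_blocks = ['Conv2d_3_pointwise', 'Conv2d_5_pointwise',
                  'Conv2d_11_pointwise', 'Conv2d_13_pointwise']
for level in range(2, 6):  # FPN levels 2 through 5
    print('level %d <- %s' % (level, feature_blocks[level - 2]))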


Example 15: extract_features

  def extract_features(self,
                       preprocessed_inputs,
                       state_saver=None,
                       state_name='lstm_state',
                       unroll_length=5,
                       scope=None):
    """Extracts features from preprocessed inputs.

    The features include the base network features, lstm features and SSD
    features, organized in the following name scope:

    <parent scope>/MobilenetV1/...
    <parent scope>/LSTM/...
    <parent scope>/FeatureMaps/...

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float tensor
        representing a batch of consecutive frames from video clips.
      state_saver: A state saver object with methods `state` and `save_state`.
      state_name: A python string for the name to use with the state_saver.
      unroll_length: The number of steps to unroll the lstm.
      scope: The scope for the base network of the feature extractor.

    Returns:
      A list of tensors where the ith tensor has shape [batch, height_i,
      width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)
    with slim.arg_scope(
        mobilenet_v1.mobilenet_v1_arg_scope(is_training=self._is_training)):
      with (slim.arg_scope(self._conv_hyperparams_fn())
            if self._override_base_feature_extractor_hyperparams else
            context_manager.IdentityContextManager()):
        with slim.arg_scope([slim.batch_norm], fused=False):
          # Base network.
          with tf.variable_scope(
              scope, self._base_network_scope,
              reuse=self._reuse_weights) as scope:
            net, image_features = mobilenet_v1.mobilenet_v1_base(
                ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
                final_endpoint='Conv2d_13_pointwise',
                min_depth=self._min_depth,
                depth_multiplier=self._depth_multiplier,
                scope=scope)

    with slim.arg_scope(self._conv_hyperparams_fn()):
      with slim.arg_scope(
          [slim.batch_norm], fused=False, is_training=self._is_training):
        # ConvLSTM layers.
        with tf.variable_scope('LSTM', reuse=self._reuse_weights) as lstm_scope:
          lstm_cell = lstm_cells.BottleneckConvLSTMCell(
              filter_size=(3, 3),
              output_size=(net.shape[1].value, net.shape[2].value),
              num_units=max(self._min_depth, self._lstm_state_depth),
              activation=tf.nn.relu6,
              visualize_gates=True)

          net_seq = list(tf.split(net, unroll_length))
          if state_saver is None:
            init_state = lstm_cell.init_state(
                state_name, net.shape[0].value // unroll_length, tf.float32)
          else:
            c = state_saver.state('%s_c' % state_name)
            h = state_saver.state('%s_h' % state_name)
            init_state = (c, h)

          # Identities added for inputing state tensors externally.
          c_ident = tf.identity(init_state[0], name='lstm_state_in_c')
          h_ident = tf.identity(init_state[1], name='lstm_state_in_h')
          init_state = (c_ident, h_ident)

          net_seq, states_out = rnn_decoder.rnn_decoder(
              net_seq, init_state, lstm_cell, scope=lstm_scope)
          batcher_ops = None
          self._states_out = states_out
          if state_saver is not None:
            self._step = state_saver.state('%s_step' % state_name)
            batcher_ops = [
                state_saver.save_state('%s_c' % state_name, states_out[-1][0]),
                state_saver.save_state('%s_h' % state_name, states_out[-1][1]),
                state_saver.save_state('%s_step' % state_name, self._step - 1)
            ]
          with tf_ops.control_dependencies(batcher_ops):
            image_features['Conv2d_13_pointwise_lstm'] = tf.concat(net_seq, 0)

          # Identities added for reading output states, to be reused externally.
          tf.identity(states_out[-1][0], name='lstm_state_out_c')
          tf.identity(states_out[-1][1], name='lstm_state_out_h')

        # SSD layers.
        with tf.variable_scope('FeatureMaps', reuse=self._reuse_weights):
          feature_maps = feature_map_generators.multi_resolution_feature_maps(
              feature_map_layout=self._feature_map_layout,
              depth_multiplier=(self._depth_multiplier),
              min_depth=self._min_depth,
              insert_1x1_conv=True,
              image_features=image_features)

    return feature_maps.values()
Developer: Exscotticus | Project: models | Lines: 100 | Source: lstm_ssd_mobilenet_v1_feature_extractor.py



Note: The nets.mobilenet_v1.mobilenet_v1_base examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

