Python file_io.file_exists Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.lib.io.file_io.file_exists. If you are wondering what file_exists does, how to call it, or what real-world uses look like, the curated code examples below should help.



The sections below present 20 code examples of the file_exists function, drawn from open-source projects and ordered roughly by popularity.
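Before turning to the collected examples, here is a minimal, self-contained sketch of the core workflow, written for this article rather than taken from any of the projects below: write a file, check that it exists, and clean it up again.

import os
import tempfile

from tensorflow.python.lib.io import file_io

def demo_file_exists():
  """Minimal sketch: write a file, confirm it exists, then delete it."""
  base_dir = tempfile.mkdtemp()
  file_path = os.path.join(base_dir, 'demo_file.txt')

  # Nothing has been written yet, so the check fails.
  assert not file_io.file_exists(file_path)

  # After writing a small string the same check succeeds.
  file_io.write_string_to_file(file_path, 'testing')
  assert file_io.file_exists(file_path)

  # Remove the file; file_exists reports False again.
  file_io.delete_file(file_path)
  assert not file_io.file_exists(file_path)

When TensorFlow's GCS filesystem is available, the same calls also accept gs:// paths, which is why the cloud-training examples below can apply file_exists to GCS output directories.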

Example 1: _run_training_transform

  def _run_training_transform(self):
    """Runs training starting with transformed tf.example files."""

    cloud = True
    if cloud:
      cmd = ['gcloud ml-engine jobs submit training test_mltoolbox_train_%s' % uuid.uuid4().hex,
             '--runtime-version=1.0',
             '--scale-tier=STANDARD_1',
             '--stream-logs']
    else:
      cmd = ['gcloud ml-engine local train']

    cmd = cmd + [
        '--module-name trainer.task',
        '--job-dir=' + self._train_output,
        '--package-path=' + os.path.join(CODE_PATH, 'trainer'),
        '--',
        '--train=' + os.path.join(self._transform_output, 'features_train*'),
        '--eval=' + os.path.join(self._transform_output, 'features_eval*'),
        '--analysis=' + self._analysis_output,
        '--model=linear_regression',
        '--train-batch-size=10',
        '--eval-batch-size=10',
        '--max-steps=' + str(self._max_steps)]

    self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
    subprocess.check_call(' '.join(cmd), shell=True)

    # Check the saved model was made.
    self.assertTrue(file_io.file_exists(
        os.path.join(self._train_output, 'model', 'saved_model.pb')))
    self.assertTrue(file_io.file_exists(
        os.path.join(self._train_output, 'evaluation_model', 'saved_model.pb')))
Developer: googledatalab | Project: pydatalab | Lines: 33 | Source: test_cloud_workflow.py


Example 2: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_source_filepath_list = _maybe_save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_source_filepath_list:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = os.path.join(
        compat.as_bytes(self._export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY))

    if not file_io.file_exists(assets_destination_dir):
      file_io.recursive_create_dir(assets_destination_dir)

    # Copy each asset from source path to destination path.
    for asset_source_filepath in asset_source_filepath_list:
      asset_source_filename = os.path.basename(asset_source_filepath)

      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_source_filename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s", assets_destination_dir)
Developer: 1000sprites | Project: tensorflow | Lines: 35 | Source: builder_impl.py
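The idiom in Example 2, create the destination directory when it is missing and then copy each asset only if a file of the same name is not already there, is easy to lift out of the SavedModel builder. A hedged, standalone sketch (copy_if_absent is a hypothetical helper name, not part of TensorFlow):

import os

from tensorflow.python.lib.io import file_io

def copy_if_absent(source_paths, destination_dir):
  """Copies each source file into destination_dir unless a file with the
  same base name already exists there (hypothetical helper mirroring the
  asset-copying idiom above)."""
  if not file_io.file_exists(destination_dir):
    file_io.recursive_create_dir(destination_dir)
  for source_path in source_paths:
    destination_path = os.path.join(destination_dir,
                                    os.path.basename(source_path))
    # Skip files that were already copied, e.g. by an earlier meta graph.
    if not file_io.file_exists(destination_path):
      file_io.copy(source_path, destination_path)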


Example 3: test_relative_path

  def test_relative_path(self):
    m = keras.Model()
    v = m.add_weight(name='v', shape=[])
    os.chdir(self.get_temp_dir())

    prefix = 'ackpt'
    self.evaluate(v.assign(42.))
    m.save_weights(prefix)
    self.assertTrue(file_io.file_exists('ackpt.index'))
    self.evaluate(v.assign(1.))
    m.load_weights(prefix)
    self.assertEqual(42., self.evaluate(v))

    prefix = 'subdir/ackpt'
    self.evaluate(v.assign(43.))
    m.save_weights(prefix)
    self.assertTrue(file_io.file_exists('subdir/ackpt.index'))
    self.evaluate(v.assign(2.))
    m.load_weights(prefix)
    self.assertEqual(43., self.evaluate(v))

    prefix = 'ackpt/'
    self.evaluate(v.assign(44.))
    m.save_weights(prefix)
    self.assertTrue(file_io.file_exists('ackpt/.index'))
    self.evaluate(v.assign(3.))
    m.load_weights(prefix)
    self.assertEqual(44., self.evaluate(v))
Developer: terrytangyuan | Project: tensorflow | Lines: 28 | Source: hdf5_format_test.py


Example 4: testRename

  def testRename(self):
    file_path = os.path.join(self._base_dir, "temp_file")
    file_io.FileIO(file_path, mode="w").write("testing")
    rename_path = os.path.join(self._base_dir, "rename_file")
    file_io.rename(file_path, rename_path)
    self.assertTrue(file_io.file_exists(rename_path))
    self.assertFalse(file_io.file_exists(file_path))
Developer: 1000sprites | Project: tensorflow | Lines: 7 | Source: file_io_test.py


Example 5: testRename

  def testRename(self):
    file_path = os.path.join(self._base_dir, "temp_file")
    file_io.write_string_to_file(file_path, "testing")
    rename_path = os.path.join(self._base_dir, "rename_file")
    file_io.rename(file_path, rename_path)
    self.assertTrue(file_io.file_exists(rename_path))
    self.assertFalse(file_io.file_exists(file_path))
Developer: AriaAsuka | Project: tensorflow | Lines: 7 | Source: file_io_test.py


Example 6: _read_config_files

  def _read_config_files(self, run_paths):
    configs = {}
    config_fpaths = {}
    for run_name, logdir in run_paths.items():
      config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
      if not file_io.file_exists(config_fpath):
        # Skip runs that have no config file.
        continue
      # Read the config file.
      file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
      config = ProjectorConfig()
      text_format.Merge(file_content, config)

      if not config.model_checkpoint_path:
        # See if you can find a checkpoint file in the logdir.
        ckpt_path = latest_checkpoint(logdir)
        if not ckpt_path:
          # Or in the parent of logdir.
          ckpt_path = latest_checkpoint(os.path.join('../', logdir))
          if not ckpt_path:
            logging.warning('Cannot find model checkpoint in %s', logdir)
            continue
        config.model_checkpoint_path = ckpt_path

      # Sanity check for the checkpoint file.
      if not file_io.file_exists(config.model_checkpoint_path):
        logging.warning('Checkpoint file %s not found',
                        config.model_checkpoint_path)
        continue
      configs[run_name] = config
      config_fpaths[run_name] = config_fpath
    return configs, config_fpaths
Developer: KalraA | Project: tensorflow | Lines: 32 | Source: plugin.py


Example 7: testCreateRecursiveDir

  def testCreateRecursiveDir(self):
    dir_path = os.path.join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
    file_io.recursive_create_dir(dir_path)
    file_path = os.path.join(dir_path, "temp_file")
    file_io.FileIO(file_path, mode="w").write("testing")
    self.assertTrue(file_io.file_exists(file_path))
    file_io.delete_recursively(os.path.join(self._base_dir, "temp_dir"))
    self.assertFalse(file_io.file_exists(file_path))
Developer: JamesFysh | Project: tensorflow | Lines: 8 | Source: file_io_test.py


Example 8: testRenameOverwriteFalse

  def testRenameOverwriteFalse(self):
    file_path = os.path.join(self._base_dir, "temp_file")
    file_io.FileIO(file_path, mode="w").write("testing")
    rename_path = os.path.join(self._base_dir, "rename_file")
    file_io.FileIO(rename_path, mode="w").write("rename")
    with self.assertRaises(errors.AlreadyExistsError):
      file_io.rename(file_path, rename_path, overwrite=False)
    self.assertTrue(file_io.file_exists(rename_path))
    self.assertTrue(file_io.file_exists(file_path))
Developer: 1000sprites | Project: tensorflow | Lines: 9 | Source: file_io_test.py


Example 9: testRenameOverwrite

  def testRenameOverwrite(self):
    file_path = os.path.join(self.get_temp_dir(), "temp_file")
    file_io.write_string_to_file(file_path, "testing")
    rename_path = os.path.join(self.get_temp_dir(), "rename_file")
    file_io.write_string_to_file(rename_path, "rename")
    file_io.rename(file_path, rename_path, overwrite=True)
    self.assertTrue(file_io.file_exists(rename_path))
    self.assertFalse(file_io.file_exists(file_path))
    file_io.delete_file(rename_path)
Developer: AI-MR-Related | Project: tensorflow | Lines: 9 | Source: file_io_test.py


Example 10: testRenameOverwriteFalse

  def testRenameOverwriteFalse(self):
    file_path = os.path.join(self.get_temp_dir(), "temp_file")
    file_io.write_string_to_file(file_path, "testing")
    rename_path = os.path.join(self.get_temp_dir(), "rename_file")
    file_io.write_string_to_file(rename_path, "rename")
    with self.assertRaises(errors.AlreadyExistsError):
      file_io.rename(file_path, rename_path, overwrite=False)
    self.assertTrue(file_io.file_exists(rename_path))
    self.assertTrue(file_io.file_exists(file_path))
    file_io.delete_file(rename_path)
    file_io.delete_file(file_path)
Developer: AI-MR-Related | Project: tensorflow | Lines: 11 | Source: file_io_test.py
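Taken together, Examples 4, 5, 8, 9 and 10 pin down the contract of file_io.rename: with overwrite=False an existing destination raises errors.AlreadyExistsError and both files are left in place, while overwrite=True replaces the destination. A small hedged sketch that uses file_exists to avoid the exception entirely (rename_unless_exists is a hypothetical name):

from tensorflow.python.lib.io import file_io

def rename_unless_exists(src, dst):
  """Renames src to dst only when dst is not already taken.

  Hypothetical helper that packages the pattern from the tests above;
  returns True when the rename happened, False when dst already existed.
  """
  if file_io.file_exists(dst):
    return False
  file_io.rename(src, dst, overwrite=False)
  return True

Note that this check-then-rename sequence is not atomic; when two writers might race for the same destination, catching errors.AlreadyExistsError as in Examples 8 and 10 is the safer pattern.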


Example 11: save_model

def save_model(model, saved_model_path):
  """Save a `tf.keras.Model` into Tensorflow SavedModel format.

  `save_model` generates such files/folders under the `saved_model_path` folder:
  1) an asset folder containing the json string of the model's
  configuration(topology).
  2) a checkpoint containing the model weights.

  Note that subclassed models can not be saved via this function, unless you
  provide an implementation for get_config() and from_config().
  Also note that `tf.keras.optimizers.Optimizer` instances can not currently be
  saved to checkpoints. Use optimizers from `tf.train`.

  Args:
    model: A `tf.keras.Model` to be saved.
    saved_model_path: a string specifying the path to the SavedModel directory.

  Raises:
    NotImplementedError: If the passed in model is a subclassed model.
  """
  if not model._is_graph_network:
    raise NotImplementedError

  # save model configuration as a json string under assets folder.
  model_json = model.to_json()
  assets_destination_dir = os.path.join(
      compat.as_bytes(saved_model_path),
      compat.as_bytes(constants.ASSETS_DIRECTORY))

  if not file_io.file_exists(assets_destination_dir):
    file_io.recursive_create_dir(assets_destination_dir)

  model_json_filepath = os.path.join(
      compat.as_bytes(assets_destination_dir),
      compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
  file_io.write_string_to_file(model_json_filepath, model_json)

  # save model weights in checkpoint format.
  checkpoint_destination_dir = os.path.join(
      compat.as_bytes(saved_model_path),
      compat.as_bytes(constants.VARIABLES_DIRECTORY))

  if not file_io.file_exists(checkpoint_destination_dir):
    file_io.recursive_create_dir(checkpoint_destination_dir)

  checkpoint_prefix = os.path.join(
      compat.as_text(checkpoint_destination_dir),
      compat.as_text(constants.VARIABLES_FILENAME))
  model.save_weights(checkpoint_prefix, save_format='tf', overwrite=True)
Developer: ZhangXinNan | Project: tensorflow | Lines: 49 | Source: keras_saved_model.py


Example 12: testCopy

  def testCopy(self):
    file_path = os.path.join(self._base_dir, "temp_file")
    file_io.FileIO(file_path, mode="w").write("testing")
    copy_path = os.path.join(self._base_dir, "copy_file")
    file_io.copy(file_path, copy_path)
    self.assertTrue(file_io.file_exists(copy_path))
    self.assertEqual(b"testing", file_io.read_file_to_string(file_path))
Developer: JamesFysh | Project: tensorflow | Lines: 7 | Source: file_io_test.py


Example 13: testAssets

  def testAssets(self):
    export_dir = self._get_export_dir("test_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)

      # Build an asset collection.
      ignored_filepath = os.path.join(
          compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
      file_io.write_string_to_file(ignored_filepath, "will be ignored")

      asset_collection = self._build_asset_collection("hello42.txt",
                                                      "foo bar baz",
                                                      "asset_file_tensor")

      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)

    # Save the SavedModel to disk.
    builder.save()

    with self.test_session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor:0")
      ignored_asset_path = os.path.join(
          compat.as_bytes(export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY),
          compat.as_bytes("ignored.txt"))
      self.assertFalse(file_io.file_exists(ignored_asset_path))
Developer: KiaraStarlab | Project: tensorflow | Lines: 32 | Source: saved_model_test.py


Example 14: cloud_batch_predict

def cloud_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size,
                        shard_files, output_format):
  """See batch_predict"""
  # from . import predict as predict_module
  from .prediction import predict as predict_module

  if mode == 'evaluation':
    model_dir = os.path.join(training_dir, 'evaluation_model')
  elif mode == 'prediction':
    model_dir = os.path.join(training_dir, 'model')
  else:
    raise ValueError('mode must be evaluation or prediction')

  if not file_io.file_exists(model_dir):
    raise ValueError('Model folder %s does not exist' % model_dir)

  _assert_gcs_files([training_dir, prediction_input_file, output_dir])

  cmd = ['predict.py',
         '--cloud',
         '--project-id=%s' % _default_project(),
         '--predict-data=%s' % prediction_input_file,
         '--trained-model-dir=%s' % model_dir,
         '--output-dir=%s' % output_dir,
         '--output-format=%s' % output_format,
         '--batch-size=%s' % str(batch_size),
         '--shard-files' if shard_files else '--no-shard-files',
         '--extra-package=%s' % _TF_GS_URL,
         '--extra-package=%s' % _PROTOBUF_GS_URL,
         '--extra-package=%s' % _package_to_staging(output_dir)
         ]

  return predict_module.main(cmd)
Developer: googledatalab | Project: pydatalab | Lines: 33 | Source: _package.py


Example 15: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_filename_map = _maybe_save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_filename_map:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = saved_model_utils.get_or_create_assets_dir(
        self._export_dir)

    # Copy each asset from source path to destination path.
    for asset_basename, asset_source_filepath in asset_filename_map.items():
      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_basename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s",
                    compat.as_text(assets_destination_dir))
Developer: AnishShah | Project: tensorflow | Lines: 30 | Source: builder_impl.py


Example 16: testFileWrite

  def testFileWrite(self):
    file_path = os.path.join(self.get_temp_dir(), "temp_file")
    file_io.write_string_to_file(file_path, "testing")
    self.assertTrue(file_io.file_exists(file_path))
    file_contents = file_io.read_file_to_string(file_path)
    self.assertEqual(b"testing", file_contents)
    file_io.delete_file(file_path)
Developer: AI-MR-Related | Project: tensorflow | Lines: 7 | Source: file_io_test.py


Example 17: _GetBaseApiMap

  def _GetBaseApiMap(self):
    """Get a map from graph op name to its base ApiDef.

    Returns:
      Dictionary mapping graph op name to corresponding ApiDef.
    """
    # Convert base ApiDef in Multiline format to Proto format.
    converted_base_api_dir = os.path.join(
        test.get_temp_dir(), 'temp_base_api_defs')
    subprocess.check_call(
        [os.path.join(resource_loader.get_root_dir_with_all_resources(),
                      _CONVERT_FROM_MULTILINE_SCRIPT),
         _BASE_API_DIR, converted_base_api_dir])

    name_to_base_api_def = {}
    base_api_files = file_io.get_matching_files(
        os.path.join(converted_base_api_dir, 'api_def_*.pbtxt'))
    for base_api_file in base_api_files:
      if file_io.file_exists(base_api_file):
        api_defs = api_def_pb2.ApiDefs()
        text_format.Merge(
            file_io.read_file_to_string(base_api_file), api_defs)
        for api_def in api_defs.op:
          name_to_base_api_def[api_def.graph_op_name] = api_def
    return name_to_base_api_def
Developer: AbhinavJain13 | Project: tensorflow | Lines: 25 | Source: api_compatibility_test.py
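Example 17 globs pbtxt files with file_io.get_matching_files and still guards each path with file_exists before reading it. The glob already returns existing paths, so the extra check mainly protects against files disappearing between the glob and the read. A hedged sketch of that pattern on its own (read_existing_matches is a hypothetical name):

from tensorflow.python.lib.io import file_io

def read_existing_matches(pattern):
  """Returns {path: contents} for every file matching pattern that still
  exists when it is read (hypothetical helper distilled from Example 17)."""
  contents = {}
  for path in file_io.get_matching_files(pattern):
    # Defensive re-check: the file may have been deleted after the glob.
    if file_io.file_exists(path):
      contents[path] = file_io.read_file_to_string(path)
  return contents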


Example 18: get_experiment

  def get_experiment(output_dir):
    # Merge schema, input features, and transforms.
    train_config = util.merge_metadata(args.preprocess_output_dir,
                                       args.transforms_file)

    # Get the model to train.
    estimator = util.get_estimator(output_dir, train_config, args)

    # Save a copy of the schema and input to the model folder.
    schema_file = os.path.join(args.preprocess_output_dir, util.SCHEMA_FILE)

    # Make list of files to save with the trained model.
    additional_assets = {'features.json': args.transforms_file,
                         util.SCHEMA_FILE: schema_file}
    if util.is_classification_model(args.model_type):
      target_name = train_config['target_column']
      vocab_file_name = util.CATEGORICAL_ANALYSIS % target_name
      vocab_file_path = os.path.join(
          args.preprocess_output_dir, vocab_file_name)
      assert file_io.file_exists(vocab_file_path)
      additional_assets[vocab_file_name] = vocab_file_path

    export_strategy_target = util.make_export_strategy(
        train_config=train_config,
        args=args,
        keep_target=True,
        assets_extra=additional_assets)
    export_strategy_notarget = util.make_export_strategy(
        train_config=train_config,
        args=args,
        keep_target=False,
        assets_extra=additional_assets)

    input_reader_for_train = get_reader_input_fn(
        train_config=train_config,
        preprocess_output_dir=args.preprocess_output_dir,
        model_type=args.model_type,
        data_paths=args.train_data_paths,
        batch_size=args.train_batch_size,
        shuffle=True,
        num_epochs=args.num_epochs)

    input_reader_for_eval = get_reader_input_fn(
        train_config=train_config,
        preprocess_output_dir=args.preprocess_output_dir,
        model_type=args.model_type,
        data_paths=args.eval_data_paths,
        batch_size=args.eval_batch_size,
        shuffle=False,
        num_epochs=1)

    return tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=input_reader_for_train,
        eval_input_fn=input_reader_for_eval,
        train_steps=args.max_steps,
        export_strategies=[export_strategy_target, export_strategy_notarget],
        min_eval_frequency=args.min_eval_frequency,
        eval_steps=None,
    )
Developer: googledatalab | Project: pydatalab | Lines: 60 | Source: task.py


Example 19: _serve_metadata

  def _serve_metadata(self, query_params):
    run = query_params.get('run')
    if run is None:
      self.handler.respond('query parameter "run" is required',
                           'text/plain', 400)
      return

    name = query_params.get('name')
    if name is None:
      self.handler.respond('query parameter "name" is required',
                           'text/plain', 400)
      return
    if run not in self.configs:
      self.handler.respond('Unknown run: %s' % run, 'text/plain', 400)
      return

    config = self.configs[run]
    fpath = self._get_metadata_file_for_tensor(name, config)
    if not fpath:
      self.handler.respond(
          'No metadata file found for tensor %s in the config file %s' %
          (name, self.config_fpaths[run]), 'text/plain', 400)
      return
    if not file_io.file_exists(fpath) or file_io.is_directory(fpath):
      self.handler.respond('%s is not a file' % fpath, 'text/plain', 400)
      return

    with file_io.FileIO(fpath, 'r') as f:
      lines = []
      for line in f:
        lines.append(line)
        if len(lines) >= LIMIT_NUM_POINTS:
          break
    self.handler.respond(''.join(lines), 'text/plain')
Developer: 821760408-sp | Project: tensorflow | Lines: 34 | Source: plugin.py


Example 20: _serve_bookmarks

  def _serve_bookmarks(self, query_params):
    run = query_params.get('run')
    if not run:
      self.handler.respond('query parameter "run" is required', 'text/plain',
                           400)
      return

    name = query_params.get('name')
    if name is None:
      self.handler.respond('query parameter "name" is required', 'text/plain',
                           400)
      return

    if run not in self.configs:
      self.handler.respond('Unknown run: %s' % run, 'text/plain', 400)
      return

    config = self.configs[run]
    fpath = self._get_bookmarks_file_for_tensor(name, config)
    if not fpath:
      self.handler.respond(
          'No bookmarks file found for tensor %s in the config file %s' %
          (name, self.config_fpaths[run]), 'text/plain', 400)
      return
    if not file_io.file_exists(fpath) or file_io.is_directory(fpath):
      self.handler.respond('%s is not a file' % fpath, 'text/plain', 400)
      return

    bookmarks_json = None
    with file_io.FileIO(fpath, 'r') as f:
      bookmarks_json = f.read()
    self.handler.respond(bookmarks_json, 'application/json')
Developer: 821760408-sp | Project: tensorflow | Lines: 32 | Source: plugin.py



Note: The tensorflow.python.lib.io.file_io.file_exists examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any further use or distribution should follow the license of the corresponding project. Please do not reproduce this article without permission.

