Python reader.ptb_raw_data Function Code Examples


This article collects typical usage examples of the ptb_raw_data function from the Python module tensorflow.models.rnn.ptb.reader. If you have been wondering what ptb_raw_data does, how to call it, and what real uses of it look like, the hand-picked code examples below should help.



Twelve code examples of the ptb_raw_data function are shown below, ordered by popularity.
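Before diving into the examples, here is a minimal sketch of the function's contract, assuming the standard PTB directory layout (ptb.train.txt, ptb.valid.txt, ptb.test.txt; the path below is a placeholder):

from tensorflow.models.rnn.ptb import reader

# ptb_raw_data returns four values: three lists of integer word ids
# (train/valid/test) and the vocabulary size (10000 for standard PTB).
train_data, valid_data, test_data, vocabulary = reader.ptb_raw_data("simple-examples/data")
print(len(train_data), len(valid_data), len(test_data), vocabulary)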

Example 1: main

def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                   verbose=True)
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      

    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
Author: Temmame | Project: web | Lines: 35 | Source: ptb_word_lm.py
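Note that this example targets the TensorFlow 0.x API. On TF 1.0 and later the initializer op was renamed; the rough equivalent of the call above is:

tf.global_variables_initializer().run()  # replaces tf.initialize_all_variables().run()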


Example 2: load_data

def load_data(seqlength=20):
    raw_data = reader.ptb_raw_data('data/ptb')
    train_data, val_data, test_data, _ = raw_data
    train_data = indices_to_seq_data(train_data, seqlength)
    val_data = indices_to_seq_data(val_data, seqlength)
    return {'train': train_data,
            'test': val_data}
Author: agajews | Project: tfbrain | Lines: 7 | Source: ptb.py
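A possible call site for load_data (hypothetical usage; indices_to_seq_data is defined elsewhere in the project). Note the quirk that the returned 'test' key actually holds the validation split:

data = load_data(seqlength=20)
train_batches = data['train']
val_batches = data['test']  # despite the key name, this is val_data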


Example 3: main

def main(unused_args):
  if not FLAGS.data_path:
    raise ValueError("Must specify --data_path to PTB data directory")

  if not FLAGS.save_path:
    raise ValueError("Must specify --save_path to model directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    # Add ops to save and restore all the variables.
    saver = tf.train.Saver()

    ckpt = tf.train.get_checkpoint_state(FLAGS.save_path)
    if ckpt:
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        tf.initialize_all_variables().run()

    if not FLAGS.testonly:
        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session, m, train_data, m.train_op,
                                         verbose=True)
            print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))

            save_path = saver.save(session, FLAGS.save_path + '/model.ckpt', i)
            print("Model saved in: %s" % save_path)

            valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
    else:
        print("Running only a perplexity test")

    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op(), verbose=True)
    print("Test Perplexity: %.3f" % test_perplexity)
Author: hlt-mt | Project: tensorflow | Lines: 58 | Source: mf_ptb_word_lm.py


Example 4: testPtbRawData

def testPtbRawData(self):
  tmpdir = tf.test.get_temp_dir()
  for suffix in "train", "valid", "test":
    filename = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
    with tf.gfile.GFile(filename, "w") as fh:
      fh.write(self._string_data)
  # Smoke test
  output = reader.ptb_raw_data(tmpdir)
  self.assertEqual(len(output), 4)
Author: 821760408-sp | Project: tensorflow | Lines: 9 | Source: reader_test.py
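self._string_data is a fixture defined elsewhere in reader_test.py; any small whitespace-tokenized corpus will do. An illustrative stand-in (an assumption, not necessarily the fixture the real test uses), set in the test class's setUp():

# Illustrative fixture; the test above writes it identically into
# ptb.train.txt, ptb.valid.txt and ptb.test.txt.
self._string_data = "\n".join([" hello there i am",
                               " rain as day",
                               " want some cheesy puffs ?"])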


Example 5: main

def main(_):
    if not FLAGS.data_path:
        # raise ValueError("Must set --data_path to PTB data directory")
        FLAGS.data_path = 'data/'

    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, My_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-config.init_scale,
                                                    config.init_scale)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        # with tf.variable_scope("model", reuse=True, initializer=initializer):
        #     mvalid = PTBModel(is_training=False, config=config)
        #     mMy = PTBModel(is_training=False, config=eval_config)

        summary_op = tf.merge_all_summaries()

        saver = tf.train.Saver(tf.all_variables())

        tf.initialize_all_variables().run()

        summary_writer = tf.train.SummaryWriter(FLAGS.data_path,
                                                graph_def=session.graph_def)

        for i in range(config.max_max_epoch):
            lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
            m.assign_lr(session, config.learning_rate * lr_decay)

            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(session,
                                         m,
                                         train_data,
                                         m.train_op,
                                         summary_writer,
                                         summary_op,
                                         verbose=True)

            # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
            # valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
            # print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            #
            # My_perplexity = run_epoch(session, mMy, My_data, tf.no_op())
            # print("My Perplexity: %.3f" % My_perplexity)
            if i % 20 == 0:
                print('Now perplexity %.3f' % (train_perplexity))
                print("SAVEING:")
                checkpoint_path = os.path.join(FLAGS.data_path, 'model.ckpt')
                saver.save(sess=session, save_path=checkpoint_path, global_step=i)
                print("save model to {}".format(checkpoint_path))
Author: IgorWang | Project: MachineLearningPracticer | Lines: 56 | Source: ptb_word_lm.py
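This example also leans on TF 0.x summary and saver APIs that were renamed in TF 1.0; the rough modern equivalents are:

summary_op = tf.summary.merge_all()            # was tf.merge_all_summaries()
saver = tf.train.Saver(tf.global_variables())  # was tf.train.Saver(tf.all_variables())
summary_writer = tf.summary.FileWriter(FLAGS.data_path,
                                       graph=session.graph)  # was tf.train.SummaryWriter(..., graph_def=...)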


Example 6: main

def main(_):
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.scalar_summary("Training Loss", m.cost)
      tf.scalar_summary("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.scalar_summary("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)

    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
      for i in range(config.max_max_epoch):
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)

      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
Author: ComeOnGetMe | Project: tensorflow | Lines: 54 | Source: ptb_word_lm.py
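tf.train.Supervisor, used here for checkpointing and session management, was later deprecated; in later TF 1.x code the same pattern is usually written with a monitored session. A sketch, not a drop-in replacement:

with tf.train.MonitoredTrainingSession(checkpoint_dir=FLAGS.save_path) as session:
    pass  # training loop goes here; initialization and checkpointing are handled automatically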


Example 7: load_ptb_dataset

def load_ptb_dataset(data_path):
    """Load the PTB dataset.

    You can download the PTB dataset from here:
    http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz

    :param data_path: path to the data/ dir of the PTB dataset.
    :return: train, validation, test data
    """
    raw_data = reader.ptb_raw_data(data_path)
    trX, vlX, teX, _ = raw_data
    return trX, vlX, teX
Author: alvarojoao | Project: Deep-Learning-TensorFlow | Lines: 12 | Source: datasets.py


Example 8: main

def main():
    data_directory = "data"
    word_to_id = reader._build_vocab(os.path.join(data_directory, 
                                                  "ptb.train.txt"))
    train, cv, test, _ = reader.ptb_raw_data(data_directory)

    train_batch_size = 128
    train_num_steps = len(train) // train_batch_size - 1
    train_num_steps = 10  # overrides the computed value above; apparently a debugging shortcut left in by the author
    ptb_iterator = reader.ptb_iterator(train, train_batch_size, train_num_steps)

    learner = Learner(word_to_id)
    learner.Train(ptb_iterator, train_batch_size, train_num_steps)
Author: mjchao | Project: Machine-Learning-Experiments | Lines: 13 | Source: Model_Prototype.py
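For context, reader.ptb_iterator (also part of the TF 0.x reader) yields (x, y) pairs of numpy arrays of shape [batch_size, num_steps], where y is x shifted one position ahead in the id stream. A quick shape check:

for x, y in reader.ptb_iterator(train, train_batch_size, train_num_steps):
    print(x.shape, y.shape)  # both (train_batch_size, train_num_steps)
    break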


Example 9: main

def main(_):
  t0 = time.time()
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  # changed from tensorflow - add peak_wps calculation
  peak_wps = 0

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(config.max_max_epoch):
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity, cur_peak_wps = run_epoch(session, m, peak_wps, train_data,
                                                 m.train_op, verbose=True)
      if cur_peak_wps > peak_wps:
          peak_wps = cur_peak_wps

      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      valid_perplexity, cur_peak_wps = run_epoch(session, mvalid, peak_wps, valid_data, tf.no_op())
      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    test_perplexity, cur_peak_wps = run_epoch(session, mtest, peak_wps, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)

    # changed from tensorflow - print out timing info
    t1 = time.time()
    print('total time: ', t1-t0)
    print('peak wps:', peak_wps)
Author: hewlettpackardlabs | Project: opveclib | Lines: 47 | Source: ptb_word_lm.py


Example 10: main

def main(_):
  if FLAGS.rename_variable_prefix:
    if not FLAGS.model_path or not FLAGS.new_model_path:
      logging.error("Must set --model_path and --new_model_path to rename model variables")
      exit(1)
  else:
    if not FLAGS.train_dir:
      logging.error("Must set --train_dir")
      exit(1)
    if not FLAGS.data_dir and (not FLAGS.train_idx or not FLAGS.dev_idx):
      logging.error("Must set --data_dir to PTB data directory or specify data using --train_idx,--dev_idx")
      exit(1)

  logging.getLogger().setLevel(logging.INFO)
  logging.info("Start: {}".format(datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')))

  device = "/gpu:0"
  log_device_placement = False
  allow_soft_placement = True
  if FLAGS.device:
    device = '/'+FLAGS.device
  logging.info("Use device %s" % device)

  with tf.Graph().as_default(), tf.Session(config=tf.ConfigProto(allow_soft_placement=allow_soft_placement, log_device_placement=log_device_placement)) \
    as session, tf.device(device):

    if FLAGS.rename_variable_prefix:
      model_utils.rename_variable_prefix(session, FLAGS.config_file, FLAGS.model_path, FLAGS.new_model_path,
                           FLAGS.variable_prefix, FLAGS.rename_variable_prefix)
    elif FLAGS.score:
      logging.info("Run model in scoring mode")
      use_log_probs = True
      train_dir = "train.rnn.de"
      model, _ = model_utils.load_model(session, "large50k", train_dir, use_log_probs)

      #test_path = os.path.join(FLAGS.data_dir, "test15/test15.ids50003.de")
      #test_data = reader.read_indexed_data(test_path)
      #test_sentences = [ test_data ]
  
      # Add eos symbol to the beginning to score first word as well
      test_sentences = [[2, 5, 3316, 7930, 7, 7312, 9864, 30, 8, 10453, 4, 2],
                        [2, 7, 5, 30, 8, 10453, 7930, 3316, 7312, 9864, 4, 2],
                        [2, 5, 8, 30, 7, 4, 9864, 3316, 7312, 7930, 10453, 2],
                        [2, 8, 10453, 9864, 30, 5, 3316, 7312, 7, 7930, 4]]
      for test_data in test_sentences:
        # using log probs or cross entropies gives the same perplexities
        if use_log_probs:
          # Run model as in training, with an iterator over inputs
          train_utils.run_epoch_eval(session, model, test_data, tf.no_op(), use_log_probs=use_log_probs)
          # Run model step by step (yields the same result)
          #score_sentence(session, model, test_data)      
        else:
          train_utils.run_epoch_eval(session, model, test_data, tf.no_op(), use_log_probs=use_log_probs)
    else:
      logging.info("Run model in training mode")
      if FLAGS.fixed_random_seed:
        tf.set_random_seed(1234)

      if FLAGS.model:
        config = model_utils.get_config(FLAGS.model)
        eval_config = model_utils.get_config(FLAGS.model)
      elif FLAGS.config_file:
        config = model_utils.read_config(FLAGS.config_file)
        eval_config = copy.copy(config)
      else:
        logging.error("Must specify either model name or config file.")
        exit(1)

      eval_config.batch_size = 1
      eval_config.num_steps = 1
      model, mvalid, mtest = model_utils.create_model(session, config, eval_config, FLAGS.train_dir, FLAGS.optimizer)

      # Restore saved train variable
      start_epoch = 1
      start_idx = 0
      start_state = None
      tmpfile = FLAGS.train_dir+"/tmp_idx.pkl"
      if model.global_step.eval() >= FLAGS.steps_per_checkpoint and \
        os.path.isfile(tmpfile):
          with open(tmpfile, "rb") as f:
            start_epoch, start_idx, start_state = pickle.load(f)
            logging.info("Restore saved train variables from %s, resume from epoch=%i and train idx=%i and last state" % (tmpfile, start_epoch, start_idx))

      if FLAGS.data_dir:
        raw_data = reader.ptb_raw_data(FLAGS.data_dir)
        train_data, valid_data, test_data, _ = raw_data
      else:
        train_data = reader.read_indexed_data(FLAGS.train_idx, FLAGS.max_train_data_size, config.vocab_size)
        valid_data = reader.read_indexed_data(FLAGS.dev_idx, vocab_size=config.vocab_size)
        if FLAGS.test_idx:
          test_data = reader.read_indexed_data(FLAGS.test_idx, vocab_size=config.vocab_size)

      for epoch in range(start_epoch, config.max_max_epoch+1):
        if not (FLAGS.optimizer == "adadelta" or FLAGS.optimizer == "adam"):
          if start_idx == 0:
            lr_decay = config.lr_decay ** max(epoch - config.max_epoch+1, 0.0)
            model.assign_lr(session, config.learning_rate * lr_decay)
        logging.info("Epoch: %d Learning rate: %.3f" % (epoch, session.run(model.lr)))

        train_perplexity = train_utils.run_epoch(session, model, train_data, model.train_op, FLAGS.train_dir, FLAGS.steps_per_checkpoint,
#......... (rest of the code omitted) .........
Author: ehasler | Project: tensorflow | Lines: 101 | Source: ptb_word_lm.py


Example 11: max

    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if DEBUG:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    from tensorflow.models.rnn.ptb import reader
    import numpy as np

    DATAURL = "https://github.com/mil-tokyo/neural_network/tree/master/saito/sample/simple-examples/data"
    data_path = "simple-examples/data/"
    #"../mldatasets/simple-examples/data/"
    try:
        raw_data = reader.ptb_raw_data(data_path)
    except:
        # wget http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
        # tar xvf simple-examples.tgz
        logging.warn("download data from\t%s\tto\t%s" % (DATAURL, data_path))
        raise  # raw_data is undefined here, so re-raise instead of falling through

    train_data, valid_data, test_data, _ = raw_data


    "define shapes and generate data"
    vocab_size = max(train_data) 
    emb_size = 120
    num_steps = 17
    batch_size =  20
    logging.debug("batch_size\t%s" % batch_size )
Author: DSLituiev | Project: rnn_sandbox | Lines: 31 | Source: test_cell_ptb.py
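The except branch above only logs a hint; a hedged sketch of the download-and-extract step it alludes to (Python 3 standard library, URL taken from the example's own comments) might look like:

import tarfile
import urllib.request

def fetch_ptb(url="http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz"):
    # Download and unpack the PTB archive into the current directory.
    archive, _ = urllib.request.urlretrieve(url, "simple-examples.tgz")
    with tarfile.open(archive) as tar:
        tar.extractall()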


Example 12: range

    tf.app.flags.DEFINE_integer("seed", 12345, "Random seed.")
    tf.app.flags.DEFINE_integer("runs", 3, "How many runs.")
    tf.app.flags.DEFINE_float("keep_prob", 1.0, "Keep probability for dropout.")
    tf.app.flags.DEFINE_string("result_file", None, "Where to write results.")
    tf.app.flags.DEFINE_string("moru_ops", 'keep,replace', "operations of moru cell.")
    tf.app.flags.DEFINE_string("moru_op_biases", None, "biases of moru operations at beginning of training. "
                                                       "Defaults to 0 for each.")
    tf.app.flags.DEFINE_integer("moru_op_ctr", None, "Size of op ctr. By default ops are controlled by current input"
                                                     "and previous state. Given a positive integer, an additional"
                                                     "recurrent op ctr is introduced in MORUCell.")
    tf.app.flags.DEFINE_string('device', '/gpu:0', 'device to run on')


    FLAGS = tf.app.flags.FLAGS
    FLAGS._parse_flags()
    raw_data = reader.ptb_raw_data(FLAGS.data)
    train_data, valid_data, test_data, _ = raw_data
    perplexities = []

    batch_size = FLAGS.batch_size
    num_steps = FLAGS.num_steps

    rng = random.Random(FLAGS.seed)
    for run_id in range(FLAGS.runs):
        tf.reset_default_graph()
        last_valid_perplexities = [float("inf")] * 3
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            tf.set_random_seed(rng.randint(0, 10000))
            initializer = tf.random_uniform_initializer(-FLAGS.init_scale,
                                                        FLAGS.init_scale)
            with tf.device(FLAGS.device):
Author: dirkweissenborn | Project: temo | Lines: 31 | Source: train_ptb_lm.py



Note: The tensorflow.models.rnn.ptb.reader.ptb_raw_data examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before using or redistributing the code, and do not reproduce this compilation without permission.

