
Python functions.trace Function Code Examples


This article collects typical usage examples of the util.functions.trace function in Python. If you are wondering what trace does, how to call it, or what real-world uses look like, the curated examples below may help.



Twenty code examples of the trace function are shown below, sorted by popularity.
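
Every example uses trace only as a lightweight progress logger. None of the projects' util.functions modules are reproduced on this page, so as orientation, here is a minimal sketch of what such a helper typically looks like. This is an assumption inferred from the call sites in the examples, not any project's actual implementation:

# Hypothetical sketch of a util.functions.trace helper, inferred from the
# call sites in the examples below; each project's real code may differ.
import datetime
import sys

def trace(*args):
    # Emit a timestamped progress message on stderr and flush immediately,
    # so progress stays visible even when stdout is redirected to a file.
    message = ' '.join(str(x) for x in args)
    print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message, file=sys.stderr)
    sys.stderr.flush()

With a *args signature, single-string calls such as trace('loading model ...') and multi-argument calls such as UF.trace("Reducing LR:", optimizer.lr) in Example 20 both work unchanged.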

Example 1: train_model

    def train_model(self):
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        model = self.new(src_vocab, trg_vocab, self.embed, self.hidden, self.parameter_dict)

        random_number = random.randint(0, self.minibatch - 1)  # a valid sample index within a minibatch
        for i_epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (i_epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * self.minibatch), self.minibatch)
            model.init_optimizer()

            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch = model.train(src_batch, trg_batch)

                if trained == 0:
                    self.print_out(random_number, i_epoch, trained, src_batch, trg_batch, hyp_batch)

                trained += K

            trace('saving model ...')
            model.save("ChainerMachineTranslation" + '.%03d' % (i_epoch + 1))  # number checkpoints by epoch

        trace('finished.')
Author: tksugimoto, Project: Chainer_Machine_Translation_ipython_notebook, Lines: 32, Source: EncoderDecoderModel.py


Example 2: train

    def train(self):
        """
        Run the dialogue training.
        """
        trace('initializing ...')
        encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
        encoderDecoderModel.train()
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 7, Source: execute_dialogue_attention.py


Example 3: test

    def test(self):
        """
        Run the attention dialogue test.
        """
        trace('initializing ...')
        encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
        encoderDecoderModel.test()
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 7, Source: execute_dialogue_attention.py


Example 4: test

    def test(self):
        """
        Run the dialogue test.
        """
        trace("initializing ...")

        encoderDecoderModel = EncoderDecoderModel(self.parameter_dict)
        encoderDecoderModel.test()
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 8, Source: execute_dialogue.py


Example 5: train

    def train(self):
        """
        Run the dialogue training.
        """
        trace("initializing ...")

        encoderDecoderModel = EncoderDecoderModel(self.parameter_dict)
        encoderDecoderModel.train()
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 8, Source: execute_dialogue.py


Example 6: main

def main():
    args = parse_args()

    trace('initializing ...')
    wrapper.init()

    if args.mode == 'train': train_model(args)
    elif args.mode == 'test': test_model(args)
Author: benob, Project: chainer_examples, Lines: 8, Source: vec2seq.py


Example 7: main

def main():
    args = parse_args()

    trace("initializing CUDA ...")
    wrapper.init()

    if args.mode == "train":
        train_model(args)
    elif args.mode == "test":
        test_model(args)
Author: jheymann85, Project: chainer_examples, Lines: 10, Source: seg_ffnn.py


Example 8: __init__

  def __init__(self, args):
    trace('loading model ...')
    self.args = args
    self.src_vocab = Vocabulary.load(args.model + '.srcvocab')
    self.trg_vocab = Vocabulary.load(args.model + '.trgvocab')
    self.encdec = EncoderDecoder.load_spec(args.model + '.spec')
    if args.use_gpu:
      self.encdec.to_gpu()
    serializers.load_hdf5(args.model + '.weights', self.encdec)

    trace('generating translation ...')
Author: delihiros, Project: dqname, Lines: 11, Source: mt_s2s_encdec.py


Example 9: train

    def train(self):
        """
        Train method
        If you use the word2vec model, you possible to use the copy weight
        Optimizer method use the Adagrad
        """
        trace("making vocabularies ...")
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace("making model ...")
        self.attention_dialogue = AttentionDialogue(self.vocab, self.embed, self.hidden, self.XP)
        if self.word2vecFlag:
            self.copy_model(self.word2vec, self.attention_dialogue.emb)
            self.copy_model(self.word2vec, self.attention_dialogue.dec, dec_flag=True)

        for epoch in range(self.epoch):
            trace("epoch %d/%d: " % (epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * self.minibatch), self.minibatch)
            opt = optimizers.AdaGrad(lr=0.01)
            opt.setup(self.attention_dialogue)
            opt.add_hook(optimizer.GradientClipping(5))

            random_number = random.randint(0, self.minibatch - 1)
            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch, loss = self.forward_implement(
                    src_batch, trg_batch, src_vocab, trg_vocab, self.attention_dialogue, True, 0
                )
                loss.backward()
                opt.update()

                self.print_out(random_number, epoch, trained, src_batch, trg_batch, hyp_batch)

                trained += K

        trace("saving model ...")
        prefix = self.model
        model_path = APP_ROOT + "/model/" + prefix
        src_vocab.save(model_path + ".srcvocab")
        trg_vocab.save(model_path + ".trgvocab")
        self.attention_dialogue.save_spec(model_path + ".spec")
        serializers.save_hdf5(model_path + ".weights", self.attention_dialogue)

        trace("finished.")
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 50, Source: EncoderDecoderModelAttention.py


Example 10: test

    def test(self):
        trace('loading model ...')
        src_vocab = Vocabulary.load(self.model + '.srcvocab')
        trg_vocab = Vocabulary.load(self.model + '.trgvocab')
        encdec = EncoderDecoder.load_spec(self.model + '.spec')
        serializers.load_hdf5(self.model + '.weights', encdec)

        trace('generating translation ...')
        generated = 0

        with open(self.target, 'w') as fp:
            for src_batch in gens.batch(gens.word_list(self.source), self.minibatch):
                src_batch = fill_batch(src_batch)
                K = len(src_batch)

                trace('sample %8d - %8d ...' % (generated + 1, generated + K))
                hyp_batch = self.forward(src_batch, None, src_vocab, trg_vocab, encdec, False, self.generation_limit)

                source_count = 0
                for hyp in hyp_batch:
                    # Append a sentinel so index() always finds '</s>', then truncate there.
                    hyp.append('</s>')
                    hyp = hyp[:hyp.index('</s>')]
                    print("src : " + "".join(src_batch[source_count]).replace("</s>", ""))
                    print('hyp : ' + ''.join(hyp))
                    print(' '.join(hyp), file=fp)
                    source_count += 1

                generated += K

        trace('finished.')
Author: fedorajzf, Project: Chainer-Slack-Twitter-Dialogue, Lines: 30, Source: EncoderDecoderModel.py


Example 11: test_model

    def test_model(self, model_name):
        trace('loading model ...')
        model = self.load(model_name)
    
        trace('generating translation ...')
        generated = 0

        with open(self.test_target, 'w') as fp:
            for src_batch in gens.batch(gens.word_list(self.test_source), self.minibatch):
                src_batch = fill_batch(src_batch)
                K = len(src_batch)

                trace('sample %8d - %8d ...' % (generated + 1, generated + K))
                hyp_batch = model.predict(src_batch, self.generation_limit)

                source_count = 0
                for hyp in hyp_batch:
                    hyp.append('</s>')
                    hyp = hyp[:hyp.index('</s>')]
                    # Print the raw source line so the BLEU score can be computed afterwards.
                    print("".join(src_batch[source_count]).replace("</s>", ""))
                    print(' '.join(hyp))
                    print(' '.join(hyp), file=fp)
                    source_count += 1

                generated += K

        trace('finished.')
Author: tksugimoto, Project: Chainer_Machine_Translation_ipython_notebook, Lines: 28, Source: EncoderDecoderModel.py


Example 12: test_model

def test_model(args):
    trace('loading model ...')
    model = EncoderDecoderModel.load(args.model)
    
    trace('generating translation ...')
    generated = 0

    src_vectors = read_src_vectors(args.source)
    src_size = len(src_vectors[0])

    with open(args.target, 'w') as fp:
        for src_batch in gens.batch(src_vectors, args.minibatch):
            # src_batch = fill_batch(src_batch)  # padding skipped: inputs are fixed-size vectors
            K = len(src_batch)

            trace('sample %8d - %8d ...' % (generated + 1, generated + K))
            hyp_batch = model.predict(src_batch, args.generation_limit)

            for hyp in hyp_batch:
                hyp.append('</s>')
                hyp = hyp[:hyp.index('</s>')]
                six.print_(' '.join(hyp), file=fp)

            generated += K

    trace('finished.')
Author: benob, Project: chainer_examples, Lines: 26, Source: vec2seq.py


Example 13: test

def test(args):
  trace('loading model ...')
  src_vocab = Vocabulary.load(args.model + '.srcvocab')
  trg_vocab = Vocabulary.load(args.model + '.trgvocab')
  attmt = AttentionMT.load_spec(args.model + '.spec')
  if args.use_gpu:
    attmt.to_gpu()
  serializers.load_hdf5(args.model + '.weights', attmt)
  
  trace('generating translation ...')
  generated = 0

  with open(args.target, 'w') as fp:
    for src_batch in gens.batch(gens.word_list(args.source), args.minibatch):
      src_batch = fill_batch(src_batch)
      K = len(src_batch)

      trace('sample %8d - %8d ...' % (generated + 1, generated + K))
      hyp_batch = forward(src_batch, None, src_vocab, trg_vocab, attmt, False, args.generation_limit)

      for hyp in hyp_batch:
        hyp.append('</s>')
        hyp = hyp[:hyp.index('</s>')]
        print(' '.join(hyp), file=fp)

      generated += K

  trace('finished.')
Author: prajdabre, Project: chainer_examples, Lines: 28, Source: mt_s2s_attention.py


Example 14: train

    def train(self):
        trace('making vocabularies ...')
        src_vocab = Vocabulary.new(gens.word_list(self.source), self.vocab)
        trg_vocab = Vocabulary.new(gens.word_list(self.target), self.vocab)

        trace('making model ...')
        encdec = EncoderDecoder(self.vocab, self.embed, self.hidden)
        if self.word2vecFlag:
            self.copy_model(self.word2vec, encdec.enc)
            self.copy_model(self.word2vec, encdec.dec, dec_flag=True)
        else:
            encdec = self.encdec

        for epoch in range(self.epoch):
            trace('epoch %d/%d: ' % (epoch + 1, self.epoch))
            trained = 0
            gen1 = gens.word_list(self.source)
            gen2 = gens.word_list(self.target)
            gen3 = gens.batch(gens.sorted_parallel(gen1, gen2, 100 * self.minibatch), self.minibatch)
            opt = optimizers.AdaGrad(lr=0.01)
            opt.setup(encdec)
            opt.add_hook(optimizer.GradientClipping(5))

            random_number = random.randint(0, self.minibatch - 1)
            for src_batch, trg_batch in gen3:
                src_batch = fill_batch(src_batch)
                trg_batch = fill_batch(trg_batch)
                K = len(src_batch)
                hyp_batch, loss = self.forward(src_batch, trg_batch, src_vocab, trg_vocab, encdec, True, 0)
                loss.backward()
                opt.update()

                if trained == 0:
                    self.print_out(random_number, epoch, trained, src_batch, trg_batch, hyp_batch)

                trained += K

        trace('saving model ...')
        prefix = self.model
        src_vocab.save(prefix + '.srcvocab')
        trg_vocab.save(prefix + '.trgvocab')
        encdec.save_spec(prefix + '.spec')
        serializers.save_hdf5(prefix + '.weights', encdec)

        trace('finished.')
Author: fedorajzf, Project: Chainer-Slack-Twitter-Dialogue, Lines: 45, Source: EncoderDecoderModel.py


Example 15: test

def test(args):
  trace('loading model ...')
  word_vocab = Vocabulary.load(args.model + '.words')
  phrase_vocab = Vocabulary.load(args.model + '.phrases')
  semiterminal_vocab = Vocabulary.load(args.model + '.semiterminals')
  parser = Parser.load_spec(args.model + '.spec')
  if args.use_gpu:
    parser.to_gpu()
  serializers.load_hdf5(args.model + '.weights', parser)

  embed_cache = {}
  parser.reset()

  trace('generating parse trees ...')
  with open(args.source) as fp:
    for l in fp:
      word_list = to_vram_words(convert_word_list(l.split(), word_vocab))
      tree = combine_xbar(
          restore_labels(
              parser.forward(word_list, None, args.unary_limit, embed_cache),
              phrase_vocab,
              semiterminal_vocab))
      print('( ' + tree_to_string(tree) + ' )')

  trace('finished.')
Author: odashi, Project: nn_parsers, Lines: 25, Source: parse15a.py


Example 16: train_mulit_model

    def train_mulit_model(self):
        """
        Run dialogue training once per word class, producing one model each.
        """
        trace('initializing ...')
        train_path = APP_ROOT + "/../twitter/data/"
        file_list = os.listdir(train_path)
        twitter_source_dict = {}
        twitter_replay_dict = {}
        for file in file_list:
            # Strip either data-file suffix to recover the word class.
            word_class = re.sub(r"_replay_twitter_data\.txt|_source_twitter_data\.txt", "", file.strip())
            if word_class not in twitter_source_dict:
                twitter_source_dict.update({word_class: file.strip()})
            if word_class not in twitter_replay_dict:
                twitter_replay_dict.update({word_class: file.strip()})
        for word_class in twitter_source_dict.keys():
            self.parameter_dict["source"] = train_path + word_class + "_source_twitter_data.txt"
            print(self.parameter_dict["source"])
            self.parameter_dict["target"] = train_path + word_class + "_replay_twitter_data.txt"
            print(self.parameter_dict["target"])
            self.parameter_dict["model"] = "ChainerDialogue_" + word_class
            encoderDecoderModel = EncoderDecoderModelAttention(self.parameter_dict)
            encoderDecoderModel.train()
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 23, Source: execute_dialogue_attention.py


Example 17: main

def main():
    args              = parse_args()
    data, target, ids = load_data(args.train)
    test_data, test_target, ids = load_data(args.test, ids)
    model             = init_model(input_size = len(ids),
            depth        = args.depth,
            hidden_size  = args.hidden_size,
            output_size  = 2)
    optimizer         = optimizers.SGD()
    
    # Begin Training
    optimizer.setup(model)
    for ep in range(epoch):
        UF.trace("Training Epoch %d" % ep)
        indexes = np.random.permutation(len(data))
        for i in range(0, len(data), batchsize):
            x_batch = data[indexes[i: i+batchsize]]
            y_batch = target[indexes[i : i+batchsize]]
            optimizer.zero_grads()
            loss, accuracy = forward(model, x_batch, y_batch)
            loss.backward()
            optimizer.update()
            UF.trace(accuracy.data)

    # Begin Testing
    sum_loss, sum_accuracy = 0, 0
    for i in range(0, len(test_data), batchsize):
        x_batch         = test_data[i : i+batchsize]
        y_batch         = test_target[i : i+batchsize]
        loss, accuracy  = forward(model, x_batch, y_batch)
        sum_loss       += loss.data * batchsize
        sum_accuracy   += accuracy.data * batchsize
    mean_loss     = sum_loss / len(test_data)
    mean_accuracy = sum_accuracy / len(test_data)
    print("Mean Loss", mean_loss)
    print("Mean Accuracy", mean_accuracy)
Author: philip30, Project: chainn, Lines: 36, Source: nn.py


Example 18: print_out

    def print_out(self, K, i_epoch, trained, src_batch, trg_batch, hyp_batch):
        """
        Print out
        :param K:
        :param i_epoch:
        :param trained: train times
        :param src_batch:
        :param trg_batch:
        :param hyp_batch:
        :return:
        """
        if K > len(src_batch) and K > len(trg_batch) and K > len(hyp_batch):
            K = len(src_batch) - 1

        trace("epoch %3d/%3d, sample %8d" % (i_epoch + 1, self.epoch, trained + K + 1))
        trace("  src = " + " ".join([x if x != "</s>" else "*" for x in src_batch[K]]))
        trace("  trg = " + " ".join([x if x != "</s>" else "*" for x in trg_batch[K]]))
        trace("  hyp = " + " ".join([x if x != "</s>" else "*" for x in hyp_batch[K]]))
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 18, Source: EncoderDecoderModelAttention.py


Example 19: print_out

    def print_out(self, K, i_epoch, trained, src_batch, trg_batch, hyp_batch):
        """
        Print out
        :param K(int): setting the random number()
        :param i_epoch(int): epoch times
        :param trained: train times
        :param src_batch: source data
        :param trg_batch: target data
        :param hyp_batch: hypothesis data
        :return:
        """
        if K > len(src_batch) and K > len(trg_batch) and K > len(hyp_batch):
            K = len(src_batch) - 1

        trace('epoch %3d/%3d, sample %8d' % (i_epoch + 1, self.epoch, trained + K + 1))
        trace('  src = ' + ' '.join([x if x != '</s>' else '*' for x in src_batch[K]]))
        trace('  trg = ' + ' '.join([x if x != '</s>' else '*' for x in trg_batch[K]]))
        trace('  hyp = ' + ' '.join([x if x != '</s>' else '*' for x in hyp_batch[K]]))
Author: SnowMasaya, Project: Chainer-Slack-Twitter-Dialogue, Lines: 18, Source: EncoderDecoderModel.py


Example 20: main

def main():
    global xp
    args                   = parse_args()
    x_ids                  = defaultdict(lambda: len(x_ids))
    y_ids                  = defaultdict(lambda: len(y_ids))
    init_wrapper(not args.use_cpu)
    data, target           = load_data(args.train, x_ids, y_ids)
    test_data, test_target = load_data(args.test, x_ids, y_ids)
    model = init_model(input_size = args.input_size,
            embed_size   = args.embed_size,
            hidden_size  = args.hidden_size,
            output_size  = len(y_ids))
    optimizer         = optimizers.SGD(lr=0.5)
  
    # Begin Training
    UF.init_model_parameters(model)
    model = UF.convert_to_GPU(not args.use_cpu, model)
    optimizer.setup(model)
    prev_acc = 0
    for ep in range(epoch):
        UF.trace("Training Epoch %d" % ep)
        epoch_acc = 0
        total     = 0
        for i in range(0, len(data), batchsize):
            x_batch = data[i: i+batchsize]
            y_batch = target[i : i+batchsize]
            optimizer.zero_grads()
            loss, accuracy = forward(model, x_batch, y_batch, args.hidden_size)
            loss.backward()
            optimizer.update()
            # Counting epoch accuracy
            epoch_acc += 100 * accuracy.data
            total     += 1
        epoch_acc /= total
        if prev_acc > epoch_acc:
            optimizer.lr *= 0.9
            UF.trace("Reducing LR:", optimizer.lr)
        prev_acc = epoch_acc
        UF.trace("Epoch Accuracy: %.2f" % (epoch_acc))
    
    # Begin Testing
    sum_loss, sum_accuracy = 0, 0
    for i in range(0, len(test_data), batchsize):
        x_batch = test_data[i : i+batchsize]
        y_batch = test_target[i : i+batchsize]
        loss, accuracy = forward(model, x_batch, y_batch, args.hidden_size)
        sum_loss      += loss.data * batchsize
        sum_accuracy  += accuracy.data * batchsize
    mean_loss     = sum_loss / len(test_data)
    mean_accuracy = sum_accuracy / len(test_data)
    print("Mean Loss", mean_loss)
    print("Mean Accuracy", mean_accuracy)
Author: philip30, Project: chainn, Lines: 52, Source: rnn-pos.py



Note: The util.functions.trace examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not repost without permission.

