Python defer.returnD Function Code Examples


This article collects typical code examples of the twisted.internet.defer.returnD function in Python. If you are struggling with questions such as what exactly returnD does, how to call it, or what real-world usages look like, the hand-picked examples below should help.



Twenty code examples of returnD are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
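A quick note before the examples: returnD is not part of Twisted's public API. In the projects cited below (gazouilleur and hyphe) it appears to be a project-local alias for twisted.internet.defer.returnValue, used inside generator functions decorated with @defer.inlineCallbacks to hand a result back through the Deferred those functions return. The minimal sketch below illustrates this assumed pattern; the alias import, the fetch_one function and the txmongo-style find_one call are assumptions for illustration, not code taken from the projects below.

# Assumed alias, in the style of the projects below (not a Twisted built-in):
from twisted.internet import defer
from twisted.internet.defer import returnValue as returnD

@defer.inlineCallbacks
def fetch_one(db, coll, query):
    # Yield Deferreds as if they were blocking calls, then hand the
    # final value back through returnD (i.e. defer.returnValue).
    doc = yield db[coll].find_one(query)  # hypothetical txmongo-style call
    returnD(doc)

On Python 3 with recent Twisted versions a plain return inside the decorated generator does the same job; returnD/returnValue is the older idiom used throughout these excerpts.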

Example 1: SingleMongo

def SingleMongo(coll, method, *args, **kwargs):
    conn = MongoConnection(MONGODB['HOST'], MONGODB['PORT'])
    db = conn[MONGODB['DATABASE']]
    yield db.authenticate(MONGODB['USER'], MONGODB['PSWD'])
    res = yield getattr(db[coll], method)(*args, **kwargs)
    conn.disconnect()
    returnD(res)
Developer: wincelau, Project: gazouilleur, Lines of code: 7, Source file: mongo.py
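A usage note on this excerpt (an assumption, since the decorator is not shown here): because SingleMongo yields Deferreds and ends with returnD, the original gazouilleur source presumably wraps it with @defer.inlineCallbacks, so a caller receives a Deferred that fires with res. A hypothetical caller could look like the sketch below; the 'tweets' collection and 'count' method are purely illustrative.

@defer.inlineCallbacks
def count_tweets():
    # SingleMongo(...) returns a Deferred; yield it to obtain the result.
    n = yield SingleMongo('tweets', 'count')
    returnD(n)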


Example 2: __run__

 def __run__(self, coll, method, *args, **kwargs):
     attempts_left = self.retries
     result = []
     lasttry = False
     if 'lasttry' in kwargs:
         lasttry = True
         del kwargs['lasttry']
     while True:
         try:
             self.coll = coll
             self.method = method
             if not self.conn and not self.db:
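                 # status holds a verb stem; the error messages below append "ting"
                 # to it ("Connecting" / "Authenticating" / "Communicating")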
                 status = "Connec"
                 self.conn = yield MongoConnection(MONGODB['HOST'], MONGODB['PORT'], reconnect=False)
                 self.db = self.conn[MONGODB['DATABASE']]
                 status = "Authentica"
                 yield self.db.authenticate(MONGODB['USER'], MONGODB['PSWD'])
             status = "Communica"
             result = yield getattr(self.db[coll], method)(*args, **kwargs)
         except Exception as e:
             if not lasttry:
                 if attempts_left > 0:
                     attempts_left -= 1
                     if DEBUG:
                         self.logerr("%sting" % status, "Retry #%d" % (self.retries-attempts_left))
                     yield self.close(silent=True)
                     continue
                 if DEBUG:
                     self.logerr("%sting" % status, "HARD RETRY %s %s" % (type(e), str(e)))
                 result = yield Mongo(coll, method, *args, lasttry=True, **kwargs)
             yield self.close()
         returnD(result)
Developer: Psycojoker, Project: gazouilleur, Lines of code: 32, Source file: mongo.py


Example 3: get_queue

 def get_queue(self, corpus, specs={}, **kwargs):
     if "sort" not in kwargs:
         kwargs["sort"] = sortasc('timestamp')
     res = yield self.queue(corpus).find(specs, **kwargs)
     if res and "limit" in kwargs and kwargs["limit"] == 1:
         res = res[0]
     returnD(res)
Developer: medialab, Project: hyphe, Lines of code: 7, Source file: mongo.py


Example 4: save_WEs_query

 def save_WEs_query(self, corpus, ids, query_options):
     res = yield self.queries(corpus).insert_one({
       "webentities": ids,
       "total": len(ids),
       "query": query_options
     })
     returnD(str(res.inserted_id))
Developer: medialab, Project: hyphe, Lines of code: 7, Source file: mongo.py


Example 5: save_WEs_query

 def save_WEs_query(self, corpus, ids, query_options):
     res = yield self.queries(corpus).insert({
       "webentities": ids,
       "total": len(ids),
       "query": query_options
     }, safe=True)
     returnD(str(res))
Developer: anukat2015, Project: hyphe, Lines of code: 7, Source file: mongo.py


Example 6: list_jobs

 def list_jobs(self, corpus, specs={}, **kwargs):
     if "sort" not in kwargs:
         kwargs["sort"] = sortasc("crawling_status") + sortasc("indexing_status") + sortasc("created_at")
     jobs = yield self.jobs(corpus).find(specs, **kwargs)
     if jobs and "limit" in kwargs and kwargs["limit"] == 1:
         jobs = jobs[0]
     returnD(jobs)
Developer: medialab, Project: hyphe, Lines of code: 7, Source file: mongo.py


Example 7: start_stream

 def start_stream(self, conf):
     if not self.fact.__init_timeout__():
         returnD(False)
     queries = yield self.fact.db['feeds'].find({'database': 'tweets', 'channel': self.fact.channel}, fields=['query'])
     track = []
     skip = []
     k = 0
     for query in queries:
         q = str(query['query'].encode('utf-8')).lower()
         # queries starting with @ should only return tweets from the corresponding user; the stream does not know how to handle this, so skip them
         if self.re_twitter_account.match(q):
             continue
         elif " OR " in q or " -" in q or '"' in q or len(q) > 60 or len(q) < 6:
             skip.append(q)
             continue
         track.append(q)
         k += 1
         if k > 395:
             break
     if self.fact.twuser not in track:
         track.append(self.fact.twuser)
     if len(skip):
         self.log("Skipping unprocessable queries for streaming: « %s »" % " » | « ".join(skip), hint=True)
     self.log("Start search streaming for: « %s »" % " » | « ".join(track), hint=True)
     conn = Microblog("twitter", conf, bearer_token=self.fact.twitter_token)
     # try to find users matching the queries so the stream can follow them
     users, self.fact.ircclient.twitter['users'] = conn.lookup_users(track, self.fact.ircclient.twitter['users'])
     deferToThreadPool(reactor, self.threadpool, self.follow_stream, conf, users.values(), track)
     self.depiler = LoopingCall(self.flush_tweets)
     self.depiler.start(1)
     returnD(True)
Developer: wincelau, Project: gazouilleur, Lines of code: 31, Source file: feeds.py


Example 8: depile

    def depile(self):
        if self.queue is None:
            yield self.init_queue()
        if not len(self.queue):
            returnD(None)

        status = yield self.get_scrapyd_status()
        if status["pending"] > 0:
            returnD(None)
        # Add a small random wait so that possible concurrent Hyphe instances
        # can compete for ScrapyD's empty slots
        yield deferredSleep(1./randint(4,20))

        # Order jobs by corpus with the fewest currently running crawls, then by age
        ordered = sorted(self.queue.items(), key=lambda x: \
          float("%s.%s" % (status.get(x[1]["corpus"], 0), x[1]["timestamp"])))
        job_id, job = ordered[0]
        res = yield self.send_scrapy_query('schedule', job["crawl_arguments"])
        ts = now_ts()
        if is_error(res):
            logger.msg("WARNING: error sending job %s to ScrapyD: %s" % (job, res))
            self.queue[job_id]['timestamp'] = ts    # let it retry a bit later
        else:
            yield self.db.update_job(job["corpus"], job_id, res['jobid'], ts)
            yield self.db.add_log(job["corpus"], job_id, "CRAWL_SCHEDULED", ts)
            del(self.queue[job_id])
Developer: fuzzydata, Project: hyphe, Lines of code: 26, Source file: jobsqueue.py


Example 9: get_WEs

 def get_WEs(self, corpus, query=None):
     if not query:
         res = yield self.WEs(corpus).find()
     else:
         if isinstance(query, list) and isinstance(query[0], int):
             query = {"_id": {"$in": query}}
         res = yield self.WEs(corpus).find(query)
     returnD(res)
Developer: medialab, Project: hyphe, Lines of code: 8, Source file: mongo.py


Example 10: stop_corpus

 def stop_corpus(self, name, quiet=False):
     if self.stopped_corpus(name):
         if config["DEBUG"]:
             self.log(name, "Traph already stopped", quiet=quiet)
         returnD(False)
     if name in self.corpora:
         yield self.corpora[name].stop()
     returnD(True)
Developer: medialab, Project: hyphe, Lines of code: 8, Source file: client.py


Example 11: list_logs

 def list_logs(self, corpus, job, **kwargs):
     if "sort" not in kwargs:
         kwargs["sort"] = sortasc('timestamp')
     if "projection" not in kwargs:
         kwargs["projection"] = ['timestamp', 'log']
     if type(job) == list:
         job = {"$in": job}
     res = yield self.logs(corpus).find({"_job": job}, **kwargs)
     returnD(res)
Developer: medialab, Project: hyphe, Lines of code: 9, Source file: mongo.py


Example 12: run_twitter_search

 def run_twitter_search(self):
     if not self.__init_timeout__():
         returnD(False)
     queries = yield self.db['feeds'].find({'database': 'tweets', 'channel': self.channel})
     randorder = range(len(queries))
     shuffle(randorder)
     urls = yield getFeeds(self.db, self.channel, 'tweets', randorder=randorder)
     yield self.protocol.start_twitter_search(urls, randorder=randorder)
     self.status = "stopped"
Developer: wincelau, Project: gazouilleur, Lines of code: 9, Source file: feeds.py


Example 13: add_job

 def add_job(self, args, corpus, webentity_id):
     ts = now_ts()
     job_id = yield self.db.add_job(corpus, webentity_id, args, ts)
     self.queue[job_id] = {
       "corpus": corpus,
       "timestamp": ts,
       "crawl_arguments": args
     }
     yield self.db.add_log(corpus, job_id, "CRAWL_ADDED", ts)
     returnD(job_id)
Developer: fuzzydata, Project: hyphe, Lines of code: 10, Source file: jobsqueue.py


Example 14: list_logs

 def list_logs(self, corpus, job, **kwargs):
     if "filter" not in kwargs:
         kwargs["filter"] = sortasc('timestamp')
     if "fields" not in kwargs:
         kwargs["fields"] = ['timestamp', 'log']
     kwargs["safe"] = True
     if type(job) == list:
         job = {"$in": job}
     res = yield self.logs(corpus).find({"_job": job}, **kwargs)
     returnD(res)
Developer: anukat2015, Project: hyphe, Lines of code: 10, Source file: mongo.py


Example 15: run_rss_feeds

 def run_rss_feeds(self):
     if not self.__init_timeout__():
         returnD(False)
     urls = self.feeds
     if not urls:
         urls = yield getFeeds(self.db, self.channel, self.name, add_url=self.tweets_search_page)
     ct = 0
     for url in urls:
         yield deferredSleep(3 + int(random()*500)/100)
         self.update_timeout(extra=10)
         yield self.protocol.start(url)
     self.status = "stopped"
Developer: wincelau, Project: gazouilleur, Lines of code: 12, Source file: feeds.py


Example 16: list_jobs

 def list_jobs(self, corpus, *args, **kwargs):
     kwargs["safe"] = True
     if "filter" not in kwargs:
         kwargs["filter"] = sortasc("crawling_status") + sortasc("indexing_status") + sortasc("created_at")
     jobs = yield self.jobs(corpus).find(*args, **kwargs)
     for j in jobs:
         if "created_at" not in j and "timestamp" in j:
             j["created_at"] = j["timestamp"]
             for k in ['start', 'crawl', 'finish']:
                 j["%sed_at" % k] = None
     if jobs and "limit" in kwargs and kwargs["limit"] == 1:
         jobs = jobs[0]
     returnD(jobs)
Developer: anukat2015, Project: hyphe, Lines of code: 13, Source file: mongo.py


Example 17: process_twitter_feed

 def process_twitter_feed(self, listtweets, feedtype, query=None, pagecount=0):
     if not listtweets:
         returnD(False)
     if query:
         if not isinstance(listtweets, dict):
             returnD(False)
         nexturl = ""
         if 'max_id_str' in listtweets['search_metadata']:
             nexturl = listtweets['search_metadata']['max_id_str']
         elif 'next_results' in listtweets['search_metadata']:
             nexturl = self.re_max_id.sub(r'\1', listtweets['search_metadata']['next_results'])
         res = {'nexturl':  nexturl}
         listtweets = listtweets['statuses']
     elif not isinstance(listtweets, list):
         returnD(False)
     feed = []
     for tweet in listtweets:
         if not isinstance(tweet, dict):
             continue
         tw = {'created_at': tweet['created_at'], 'title': unescape_html(tweet['text']), 'link': tweet['url']}
         tw = grab_extra_meta(tweet, tw)
         feed.append(tw)
     if query:
         res['tweets'] = feed
         processed = yield self.process_tweets(res, 'search', query=query, pagecount=pagecount)
     else:
         processed = yield self.process_tweets(feed, 'my%s' % feedtype)
     returnD(processed)
Developer: RouxRC, Project: gazouilleur, Lines of code: 28, Source file: feeds.py


Example 18: search_twitter

 def search_twitter(self, data, query, max_id=None, page=0, randorder=None):
     if page and randorder:
         try:
             query = yield getFeeds(self.fact.db, self.fact.channel, "tweets", randorder=randorder)
             query = query[page]
         except Exception as e:
             returnD(False)
     if config.DEBUG:
         text = unquote(query)
         if max_id:
             text = "%s before id %s" % (text, max_id.encode('utf-8'))
         self.log("Query Twitter search for %s" % text)
     conn = Microblog('twitter', chanconf(self.fact.channel), bearer_token=self.fact.twitter_token)
     res = conn.search(query, max_id=max_id)
     returnD(res)
Developer: wincelau, Project: gazouilleur, Lines of code: 15, Source file: feeds.py


Example 19: collect_tlds

def collect_tlds():
    tree = {}
    try:
        tldlist = yield getPage(MOZ_TLD_LIST)
    except: #Fallback local copy
        from os.path import join, realpath, dirname
        with open(join(dirname(realpath(__file__)), "tld_list.txt")) as f:
            tldlist = f.read()
    for line in tldlist.split("\n"):
        line = line.strip()
        if not line or line.startswith("//"):
            continue
        chunks = line.decode('utf-8').split('.')
        add_tld_chunks_to_tree(chunks, tree)
    returnD(tree)
Developer: medialab, Project: hyphe, Lines of code: 15, Source file: tlds.py


Example 20: collect_tlds

def collect_tlds():
    tree = {}
    double_list = {"rules": [], "exceptions": []}
    tldlist = yield getPage(MOZ_TLD_LIST)
    for line in tldlist.split("\n"):
        line = line.strip()
        if not line or line.startswith("//"):
            continue
        chunks = line.decode('utf-8').split('.')
        add_tld_chunks_to_tree(chunks, tree)
        if line[0] == '!':
            double_list["exceptions"].append(line[1:])
        else:
            double_list["rules"].append(line.strip())
    returnD((double_list, tree))
Developer: Dim25, Project: hyphe, Lines of code: 15, Source file: tlds.py



Note: the twisted.internet.defer.returnD examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or reuse should follow the license of the corresponding project. Do not reproduce without permission.

