Python queues.Queue class code examples


This article collects typical usage examples of the tornado.queues.Queue class in Python. If you are wondering what the Queue class is for, how to use it, or what real-world Queue code looks like, the selected examples below may help.



The following shows 20 code examples of the Queue class, sorted by popularity by default.
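Before the project-specific examples, a minimal producer/consumer sketch of the core Queue API (put, get, task_done, join) may help. It follows the gen.coroutine style used throughout the examples below and is only an illustration of the general pattern (closely mirroring Tornado's own documentation), not code taken from any of the listed projects.

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

q = Queue(maxsize=10)              # bounded queue: put() waits when full

@gen.coroutine
def producer():
    for item in range(5):
        yield q.put(item)          # waits until the queue has room
        print("put {}".format(item))

@gen.coroutine
def consumer():
    while True:
        item = yield q.get()       # waits until an item is available
        try:
            print("got {}".format(item))
        finally:
            q.task_done()          # mark the item as processed

@gen.coroutine
def main():
    IOLoop.current().spawn_callback(consumer)   # run the consumer in the background
    yield producer()
    yield q.join()                 # resolves once every item is task_done()

IOLoop.current().run_sync(main)

run_sync(main) returns once the producer has finished and q.join() has confirmed that every queued item was marked done.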

Example 1: __init__

        def __init__(self, max_concurrent_batches=10, block_on_send=False,
                    block_on_response=False, max_batch_size=100, send_frequency=0.25,
                    user_agent_addition=''):
            if not has_tornado:
                raise ImportError('TornadoTransmission requires tornado, but it was not found.')

            self.block_on_send = block_on_send
            self.block_on_response = block_on_response
            self.max_batch_size = max_batch_size
            self.send_frequency = send_frequency

            user_agent = "libhoney-py/" + VERSION
            if user_agent_addition:
                user_agent += " " + user_agent_addition

            self.http_client = AsyncHTTPClient(
                force_instance=True,
                defaults=dict(user_agent=user_agent))

            # libhoney adds events to the pending queue for us to send
            self.pending = Queue(maxsize=1000)
            # we hand back responses from the API on the responses queue
            self.responses = Queue(maxsize=2000)

            self.batch_data = {}
            self.sd = statsd.StatsClient(prefix="libhoney")
            self.batch_sem = Semaphore(max_concurrent_batches)
Developer: honeycombio, Project: libhoney-py, Lines: 27, Source: transmission.py


Example 2: __init__

    def __init__(self, routes, node, pipe):
        """
        Application instantiates and registers handlers for each message type,
        and routes messages to the pre-instantiated instances of each message handler

        :param routes: list of tuples in the form of (<message type str>, <MessageHandler class>)
        :param node: Node instance of the local node
        :param pipe: Instance of multiprocessing.Pipe for communicating with the parent process
        """
        # We don't really have to worry about synchronization
        # so long as we're careful about explicit context switching
        self.nodes = {node.node_id: node}

        self.local_node = node
        self.handlers = {}

        self.tcpclient = TCPClient()

        self.gossip_inbox = Queue()
        self.gossip_outbox = Queue()

        self.sequence_number = 0

        if routes:
            self.add_handlers(routes)

        self.pipe = pipe
        self.ioloop = IOLoop.current()

        self.add_node_event = Event()
Developer: jefffm, Project: swimpy, Lines: 30, Source: app.py


Example 3: f

    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue(); s.handle_queues(sched, report)
        msg = yield report.get(); assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x': (inc, 1),
                            'y': (inc, 'x'),
                            'z': (inc, 'y')},
                       keys=['z'])
        progress = TextProgressBar(['z'], scheduler=s)
        progress.start()

        assert progress.all_keys == {'x', 'y', 'z'}
        assert progress.keys == {'x', 'y', 'z'}

        while True:
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
                break

        assert progress.keys == set()
        check_bar_completed(capsys)

        assert progress not in s.plugins

        sched.put_nowait({'op': 'close'})
        yield done
Developer: aterrel, Project: distributed, Lines: 29, Source: test_diagnostics.py


Example 4: __init__

 def __init__(self):
     self.ctx = zmq.Context()
     self.WSmessages = Queue()
     self.MQmessages = Queue()
     self.sub = MQAsyncSub.__init__(self, self.ctx, 'admin', [])
     self.pub = MQPub(self.ctx, 'admin-ws')
     self.subscribers = set()
Developer: domogik, Project: domogik, Lines: 7, Source: admin.py


Example 5: f

    def f(c, a, b):
        s = Scheduler((c.ip, c.port), loop=loop)
        yield s._sync_center()
        done = s.start()
        sched, report = Queue(), Queue(); s.handle_queues(sched, report)
        msg = yield report.get(); assert msg['op'] == 'stream-start'

        s.update_graph(dsk={'x-1': (inc, 1),
                            'x-2': (inc, 'x-1'),
                            'x-3': (inc, 'x-2'),
                            'y-1': (dec, 'x-3'),
                            'y-2': (dec, 'y-1'),
                            'e': (throws, 'y-2'),
                            'other': (inc, 123)},
                       keys=['e'])

        while True:
            msg = yield report.get()
            if msg['op'] == 'key-in-memory' and msg['key'] == 'y-2':
                break

        p = MultiProgressWidget(['x-1', 'x-2', 'x-3'], scheduler=s)
        assert set(concat(p.all_keys.values())).issuperset({'x-1', 'x-2', 'x-3'})
        assert 'x' in p.bars

        sched.put_nowait({'op': 'close'})
        yield done
Developer: aterrel, Project: distributed, Lines: 27, Source: test_widgets.py


Example 6: test_diagnostic

def test_diagnostic(s, a, b):
    sched, report = Queue(), Queue(); s.handle_queues(sched, report)
    msg = yield report.get(); assert msg['op'] == 'stream-start'

    class Counter(SchedulerPlugin):
        def start(self, scheduler):
            scheduler.add_plugin(self)
            self.count = 0

        def task_finished(self, scheduler, key, worker, nbytes):
            self.count += 1

    counter = Counter()
    counter.start(s)

    assert counter.count == 0
    sched.put_nowait({'op': 'update-graph',
                      'tasks': {'x': dumps_task((inc, 1)),
                                'y': dumps_task((inc, 'x')),
                                'z': dumps_task((inc, 'y'))},
                      'dependencies': {'y': ['x'], 'z': ['y']},
                      'keys': ['z']})

    while True:
        msg = yield report.get()
        if msg['op'] == 'key-in-memory' and msg['key'] == 'z':
            break

    assert counter.count == 3
Developer: dela3499, Project: distributed, Lines: 29, Source: test_plugin.py


Example 7: __init__

 def __init__(self):
     self.connected = False
     self.connected_event = Event()
     self.disconnected_event = Event()
     self.presence_queue = Queue()
     self.message_queue = Queue()
     self.error_queue = Queue()
Developer: pubnub, Project: python, Lines: 7, Source: pubnub_tornado.py


Example 8: __init__

    def __init__(self, center, delete_batch_time=1):
        self.scheduler_queue = Queue()
        self.report_queue = Queue()
        self.delete_queue = Queue()
        self.status = None

        self.center = coerce_to_rpc(center)

        self.dask = dict()
        self.dependencies = dict()
        self.dependents = dict()
        self.generation = 0
        self.has_what = defaultdict(set)
        self.held_data = set()
        self.in_play = set()
        self.keyorder = dict()
        self.nbytes = dict()
        self.ncores = dict()
        self.processing = dict()
        self.restrictions = dict()
        self.stacks = dict()
        self.waiting = dict()
        self.waiting_data = dict()
        self.who_has = defaultdict(set)

        self.exceptions = dict()
        self.tracebacks = dict()
        self.exceptions_blame = dict()

        self.delete_batch_time = delete_batch_time
Developer: freeman-lab, Project: distributed, Lines: 30, Source: scheduler.py


Example 9: __init__

 def __init__(self, server, name, stream):
     self.server = server
     self.name = name
     self.rooms = {}
     self.stream = stream
     self.inqueue = Queue(maxsize=QUEUE_SIZE)
     self.outqueue = Queue(maxsize=QUEUE_SIZE)
Developer: beef9999, Project: go-chatroom, Lines: 7, Source: py-server.py


Example 10: Publisher

class Publisher(MQAsyncSub):
    """Handles new data to be passed on to subscribers."""
    def __init__(self):
        self.WSmessages = Queue()
        self.MQmessages = Queue()
        self.sub = MQAsyncSub.__init__(self, zmq.Context(), 'admin', [])
        self.subscribers = set()

    def register(self, subscriber):
        """Register a new subscriber."""
        self.subscribers.add(subscriber)

    def deregister(self, subscriber):
        """Stop publishing to a subscriber."""
        self.subscribers.remove(subscriber)

    @gen.coroutine
    def on_message(self, did, msg):
        """Receive message from MQ sub and send to WS."""
        yield self.WSmessages.put({"msgid": did, "content": msg})

    @gen.coroutine
    def submit(self, message):
        """Submit a new message to publish to subscribers."""
        yield self.WSmessages.put(message)

    @gen.coroutine
    def publishToWS(self):
        while True:
            message = yield self.WSmessages.get()
            if len(self.subscribers) > 0:
                print("Pushing MQ message {} to {} WS subscribers...".format(
                    message, len(self.subscribers)))
                yield [subscriber.submit(message) for subscriber in self.subscribers]

    @gen.coroutine
    def publishToMQ(self):
        ctx = zmq.Context()
        cli = MQSyncReq(ctx)
        pub = MQPub(ctx, 'admin')
        while True:
            message = yield self.MQmessages.get()
            jsons = json.loads(message)
            # req/rep
            if 'mq_request' in jsons and 'data' in jsons:
                msg = MQMessage()
                msg.set_action(str(jsons['mq_request']))
                msg.set_data(jsons['data'])
                print("REQ : {0}".format(msg.get()))
                if 'dst' in jsons:
                    print(cli.request(str(jsons['dst']), msg.get(), timeout=10).get())
                else:
                    print(cli.request('manager', msg.get(), timeout=10).get())
            # pub
            elif 'mq_publish' in jsons and 'data' in jsons:
                print("Publish : {0}".format(jsons['data']))
                pub.send_event(jsons['mq_publish'],
                               jsons['data'])
Developer: ewintec, Project: domogik, Lines: 58, Source: admin.py


Example 11: run

def run(args):
    if not args.test:
        ip_iter = _create_ip_iterator()
    else:
        ip_iter = _get_test_ips()
        good_ips = []

    job_queue = Queue(maxsize=200)

    start = time.time()
    counter = Counter()

    @gen.coroutine
    def job_producer():
        for ip in ip_iter:
            yield job_queue.put(ip)
            #print("Put {}".format(ip))

    @gen.coroutine
    def worker(id):
        while True:
            ip = yield job_queue.get()
            try:
                good = yield test_ip(ip)
                counter['all'] += 1
                if args.progress:
                    if counter['all'] % 10000 == 0:
                        print("Tested {} ips.".format(counter['all']))
                if good:
                    print("Found good ip: {}".format(ip))
                    counter['good'] += 1
                    if not args.test:
                        yield record_good_ip(ip)
                    else:
                        good_ips.append(ip)
            finally:
                job_queue.task_done()

    for i in range(CONCURRENCY):
        worker(i)

    _disable_logging()

    try:
        yield job_producer()
        yield job_queue.join()
    finally:
        print("\n\nTested: {} ips\nFound {} good ips\nQps: {}".format(
            counter['all'],
            counter['good'],
            counter['all'] / (time.time() - start)
        ))

    if args.test and args.remove:
        with open(GOOD_IP_FILE + '_removed', 'w') as f:
            f.write('|'.join(good_ips))
Developer: kawing-chiu, Project: search_google_ip, Lines: 56, Source: search_google_ip.py


Example 12: _first_completed

def _first_completed(futures):
    """ Return a single completed future

    See Also:
        _as_completed
    """
    q = Queue()
    yield _as_completed(futures, q)
    result = yield q.get()
    raise gen.Return(result)
Developer: canavandl, Project: distributed, Lines: 10, Source: executor.py


Example 13: __init__

    def __init__(self, stream, interval):
        self.stream = stream
        self.interval = interval / 1000.0
        self.last_transmission = default_timer()
        self.send_q = Queue()
        self.recv_q = Queue()
        self._background_send_coroutine = self._background_send()
        self._background_recv_coroutine = self._background_recv()
        self._broken = None

        self.pc = PeriodicCallback(lambda: None, 100)
        self.pc.start()
Developer: broxtronix, Project: distributed, Lines: 12, Source: batched.py


Example 14: __init__

    def __init__(self, center, start=True, delete_batch_time=1):
        self.center = coerce_to_rpc(center)
        self.futures = dict()
        self.refcount = defaultdict(lambda: 0)
        self.dask = dict()
        self.restrictions = dict()
        self.loop = IOLoop()
        self.report_queue = Queue()
        self.scheduler_queue = Queue()
        self._shutdown_event = Event()
        self._delete_batch_time = delete_batch_time

        if start:
            self.start()
Developer: cowlicks, Project: distributed, Lines: 14, Source: executor.py


Example 15: get_file_list

    def get_file_list(account, **kwargs):
        queue = Queue()
        sem = BoundedSemaphore(FETCH_CONCURRENCY)
        done, working = set(), set()
        data = set()

        @gen.coroutine
        def fetch_url():
            current_url = yield queue.get()
            try:
                if current_url in working:
                    return
                page_no = working.__len__()
                app_log.info("Fetching page {}".format(page_no))
                working.add(current_url)
                req = account.get_request(current_url)
                client = AsyncHTTPClient()
                response = yield client.fetch(req)
                done.add(current_url)
                app_log.info("Page {} downloaded".format(page_no))
                response_data = json.loads(response.body.decode('utf-8'))

                for file in response_data:
                    # be sure we're a valid file type and less than our maximum response size limit
                    extension = file['path'].lower().split('.')[-1]
                    if extension in VALID_FILETYPES and int(file['bytes']) < RESPONSE_SIZE_LIMIT * 1000000:
                        data.add((file['path'].lstrip('/'), file['path'], ))
                app_log.info("Page {} completed".format(page_no))
            finally:
                queue.task_done()
                sem.release()

        @gen.coroutine
        def worker():
            while True:
                yield sem.acquire()
                fetch_url()

        app_log.info("Gathering filelist for account {}".format(account._id))
        for file_type in VALID_FILETYPES:
            file_type = '.'.join([file_type])
            url = "https://api.dropbox.com/1/search/auto/?query={}&include_membership=true".format(file_type)
            queue.put(url)
        # start our concurrency worker
        worker()
        # wait until we're done
        yield queue.join(timeout=timedelta(seconds=MAXIMUM_REQ_TIME))
        app_log.info("Finished list retrieval. Found {} items.".format(data.__len__()))
        return sorted([{"title": title, "value": path} for title, path in data], key=lambda f: f['title'])
Developer: vizydrop, Project: apps, Lines: 49, Source: files.py


Example 16: __init__

 def __init__(self, **conf):
     self.username = conf["username"]
     self.passwd = conf["passwd"]
     self.save_path = conf.get("save_path")
     self._q = Queue()
     self._cookies = self._get_user_cookies()
     self._parse_save_path()
Developer: quietin, Project: seg_backup_script, Lines: 7, Source: backup_simple.py


Example 17: BaseHandler

class BaseHandler(RequestHandler):
    """Base handler for subscribers. To be compatible with data stores
    defined in :mod:`tornadose.stores`, custom handlers should inherit
    this class and implement the :meth:`publish` method.

    """
    def initialize(self, store):
        """Common initialization of handlers happens here. If additional
        initialization is required, this method must either be called with
        ``super`` or the child class must assign the ``store`` attribute and
        register itself with the store.

        """
        assert isinstance(store, stores.BaseStore)
        self.messages = Queue()
        self.store = store
        self.store.register(self)

    @gen.coroutine
    def submit(self, message):
        """Submit a new message to be published."""
        yield self.messages.put(message)

    def publish(self):
        """Push a message to the subscriber. This method must be
        implemented by child classes.

        """
        raise NotImplementedError('publish must be implemented!')
Developer: mivade, Project: tornadose, Lines: 29, Source: handlers.py
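The docstring above states that child classes must implement publish. Below is a purely hypothetical sketch of such a subclass (the class name PlainTextHandler and the streaming GET loop are illustrative assumptions, not part of tornadose), showing one way the per-handler messages Queue could be drained and pushed to the subscriber. It assumes the BaseHandler class shown above is importable in the same module.

from tornado import gen

class PlainTextHandler(BaseHandler):
    @gen.coroutine
    def get(self):
        # Keep the connection open and push queued messages as they arrive.
        while True:
            message = yield self.messages.get()
            self.publish(message)

    def publish(self, message=None):
        # Write a single message to the subscriber and flush it immediately.
        self.write("{}\n".format(message))
        self.flush()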


Example 18: __init__

    def __init__(self, center=None, scheduler=None, start=True, delete_batch_time=1, loop=None):
        self.futures = dict()
        self.refcount = defaultdict(lambda: 0)
        self.loop = loop or IOLoop()
        self.scheduler_queue = Queue()
        self.report_queue = Queue()

        if scheduler:
            if isinstance(scheduler, Scheduler):
                self.scheduler = scheduler
                if not center:
                    self.center = scheduler.center
            else:
                raise NotImplementedError()
                # self.scheduler = coerce_to_rpc(scheduler)
        else:
            self.scheduler = Scheduler(center, loop=self.loop,
                                       delete_batch_time=delete_batch_time)
        if center:
            self.center = coerce_to_rpc(center)

        if not self.center:
            raise ValueError("Provide Center address")

        if start:
            self.start()
Developer: aterrel, Project: distributed, Lines: 26, Source: executor.py


Example 19: __init__

 def __init__(self, family):
     super(TestTCPServer, self).__init__()
     self.streams = []
     self.queue = Queue()
     sockets = bind_sockets(None, 'localhost', family)
     self.add_sockets(sockets)
     self.port = sockets[0].getsockname()[1]
Developer: leeclemens, Project: tornado, Lines: 7, Source: tcpclient_test.py


Example 20: as_completed

def as_completed(fs):
    if len(set(f.executor for f in fs)) == 1:
        loop = first(fs).executor.loop
    else:
        # TODO: Groupby executor, spawn many _as_completed coroutines
        raise NotImplementedError(
        "as_completed on many event loops not yet supported")

    from .compatibility import Queue
    queue = Queue()

    coroutine = lambda: _as_completed(fs, queue)
    loop.add_callback(coroutine)

    for i in range(len(fs)):
        yield queue.get()
Developer: aterrel, Project: distributed, Lines: 16, Source: executor.py



Note: The tornado.queues.Queue class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects, and the copyright of the source code remains with the original authors; consult the corresponding project's License before distributing or using it. Do not repost without permission.

