
Python threadpool.Pool Class Code Examples


This article collects typical usage examples of the Python class w3af.core.controllers.threads.threadpool.Pool. If you have been wondering what the Pool class does and how to use it in practice, the curated examples below should help.

The following 14 code examples of the Pool class are shown, ordered by popularity by default.
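
Before the individual examples, here is a minimal sketch of the basic Pool life cycle, pieced together from the usage patterns in the examples below. It is not an authoritative reference: the constructor arguments (worker_names, max_queued_tasks, maxtasksperchild) and the multiprocessing.pool-style methods are assumed from the examples themselves, and the double() helper is made up for illustration.

    from w3af.core.controllers.threads.threadpool import Pool

    # Create a pool of 3 worker threads; worker_names appears to set the
    # thread name prefix (see Example 1 and Example 5 below).
    pool = Pool(3, worker_names='WorkerThread')

    def double(x):
        return x * 2

    # apply_async() schedules one call and returns an async result object;
    # imap_unordered() maps a function over an iterable (see Example 1).
    result = pool.apply_async(func=double, args=(21,))
    print result.get()                                     # 42

    # Results arrive in completion order, hence the sorted() call here.
    print sorted(pool.imap_unordered(double, xrange(3)))   # [0, 2, 4]

    # Shutdown: close() stops accepting work and join() waits for the
    # workers; terminate() is the abrupt variant used by several tests.
    pool.close()
    pool.join()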

Example 1: test_exceptions

    def test_exceptions(self):
        worker_pool = Pool(3, worker_names='WorkerThread')

        def raise_on_1(foo):
            if foo == 1:
                raise TypeError('%s Boom!' % foo)

            return foo

        answers = worker_pool.imap_unordered(raise_on_1, xrange(3))

        try:
            [i for i in answers]
        except TypeError as te:
            self.assertEqual(str(te), '1 Boom!')
Developer: foobarmonk, Project: w3af, Lines: 15, Source: test_threadpool.py
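
Note that the TypeError raised inside the worker for foo == 1 is not lost: it is re-raised in the calling thread when the imap_unordered() results generator is iterated, which is what the try/except around the list comprehension catches.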


Example 2: test_pickleable_shells

    def test_pickleable_shells(self):
        pool = Pool(1)
        xurllib = ExtendedUrllib()

        original_shell = Shell(MockVuln(), xurllib, pool)

        kb.append('a', 'b', original_shell)
        unpickled_shell = kb.get('a', 'b')[0]

        self.assertEqual(original_shell, unpickled_shell)
        self.assertEqual(unpickled_shell.worker_pool, None)
        self.assertEqual(unpickled_shell._uri_opener, None)

        pool.terminate()
        pool.join()
        xurllib.end()
Developer: ElAleyo, Project: w3af, Lines: 16, Source: test_knowledge_base.py
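
The assertions suggest that Shell intentionally drops its worker pool and its URL opener when pickled, presumably because neither a live thread pool nor an open HTTP connection can be serialized: the unpickled copy compares equal to the original but comes back with worker_pool and _uri_opener set to None.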


Example 3: __init__

    def __init__(self, consumer_plugins, w3af_core, thread_name,
                 create_pool=True, max_pool_queued_tasks=0,
                 max_in_queue_size=0):
        """
        :param consumer_plugins: Instances of base_consumer plugins in a list
        :param w3af_core: The w3af core that we'll use for status reporting
        :param thread_name: How to name the current thread
        :param create_pool: True to create a worker pool for this consumer
        """
        super(BaseConsumer, self).__init__(name='%sController' % thread_name)

        self.in_queue = QueueSpeed(maxsize=max_in_queue_size)
        self._out_queue = Queue.Queue()
        
        self._consumer_plugins = consumer_plugins
        self._w3af_core = w3af_core
        self._observers = []

        self._tasks_in_progress = {}
        self._poison_pill_sent = False

        self._threadpool = None

        if create_pool:
            self._threadpool = Pool(self.THREAD_POOL_SIZE,
                                    worker_names='%sWorker' % thread_name,
                                    max_queued_tasks=max_pool_queued_tasks)
Developer: batmanWjw, Project: w3af, Lines: 27, Source: base_consumer.py


Example 4: test_multiple_append_uniq_group

    def test_multiple_append_uniq_group(self):
        def multi_append():
            for i in xrange(InfoSet.MAX_INFO_INSTANCES * 2):
                vuln = MockVuln()
                kb.append_uniq_group('a', 'b', vuln, group_klass=MockInfoSetTrue)

            info_set_list = kb.get('a', 'b')

            self.assertEqual(len(info_set_list), 1)

            info_set = info_set_list[0]
            self.assertEqual(len(info_set.infos), InfoSet.MAX_INFO_INSTANCES)
            return True

        pool = Pool(2)

        r1 = pool.apply_async(multi_append)
        r2 = pool.apply_async(multi_append)
        r3 = pool.apply_async(multi_append)

        self.assertTrue(r1.get())
        self.assertTrue(r2.get())
        self.assertTrue(r3.get())

        pool.terminate()
        pool.join()
Developer: andresriancho, Project: w3af, Lines: 26, Source: test_knowledge_base.py
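
All three apply_async() workers append MAX_INFO_INSTANCES * 2 vulnerabilities concurrently, yet a single InfoSet holding exactly MAX_INFO_INSTANCES infos survives: append_uniq_group() evidently both groups duplicates and caps the group size, even when called concurrently through the pool.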


Example 5: test_worker_stats_not_idle

    def test_worker_stats_not_idle(self):
        worker_pool = Pool(processes=1, worker_names='WorkerThread')

        def sleep(sleep_time, **kwargs):
            time.sleep(sleep_time)

        args = (2,)
        kwds = {'x': 2}
        worker_pool.apply_async(func=sleep, args=args, kwds=kwds)

        # Let the worker get the task
        time.sleep(0.3)

        # Got it?
        self.assertFalse(worker_pool._pool[0].worker.is_idle())
        self.assertEqual(worker_pool._pool[0].worker.func_name, 'sleep')
        self.assertEqual(worker_pool._pool[0].worker.args, args)
        self.assertEqual(worker_pool._pool[0].worker.kwargs, kwds)
        self.assertGreater(worker_pool._pool[0].worker.job, 1)
Developer: knucker, Project: w3af, Lines: 19, Source: test_threadpool.py


Example 6: test_inspect_threads

    def test_inspect_threads(self):
        worker_pool = Pool(processes=1, worker_names='WorkerThread')

        def sleep(sleep_time, **kwargs):
            time.sleep(sleep_time)

        args = (2,)
        kwds = {'x': 2}
        worker_pool.apply_async(func=sleep, args=args, kwds=kwds)

        # Let the worker get the task
        time.sleep(0.3)

        worker_states = worker_pool.inspect_threads()
        self.assertEqual(len(worker_states), 1)

        worker_state = worker_states[0]

        self.assertEqual(worker_state['func_name'], 'sleep')
        self.assertEqual(worker_state['args'], args)
        self.assertEqual(worker_state['kwargs'], kwds)
        self.assertEqual(worker_state['idle'], False)
Developer: knucker, Project: w3af, Lines: 22, Source: test_threadpool.py
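
This is the same scenario as the previous example, but queried through the public inspect_threads() API instead of reaching into the private _pool attribute: each worker is reported as a dict with func_name, args, kwargs and idle keys.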


Example 7: __init__

    def __init__(self, consumer_plugins, w3af_core, thread_name,
                 create_pool=True):
        """
        :param consumer_plugins: Instances of base_consumer plugins in a list
        :param w3af_core: The w3af core that we'll use for status reporting
        :param thread_name: How to name the current thread
        :param create_pool: True to create a worker pool for this consumer
        """
        super(BaseConsumer, self).__init__(name='%sController' % thread_name)

        self.in_queue = QueueSpeed()
        self._out_queue = Queue.Queue()
        
        self._consumer_plugins = consumer_plugins
        self._w3af_core = w3af_core
        
        self._tasks_in_progress = {}
        
        self._threadpool = None
         
        if create_pool:
            self._threadpool = Pool(10, worker_names='%sWorker' % thread_name)
Developer: BioSoundSystems, Project: w3af, Lines: 22, Source: base_consumer.py


Example 8: BaseConsumer

class BaseConsumer(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins, and identifies vulnerabilities by performing various
    requests.
    """

    def __init__(self, consumer_plugins, w3af_core, thread_name,
                 create_pool=True):
        """
        :param consumer_plugins: Instances of base_consumer plugins in a list
        :param w3af_core: The w3af core that we'll use for status reporting
        :param thread_name: How to name the current thread
        :param create_pool: True to create a worker pool for this consumer
        """
        super(BaseConsumer, self).__init__(name='%sController' % thread_name)

        self.in_queue = QueueSpeed()
        self._out_queue = Queue.Queue()
        
        self._consumer_plugins = consumer_plugins
        self._w3af_core = w3af_core
        
        self._tasks_in_progress = {}
        
        self._threadpool = None
         
        if create_pool:
            self._threadpool = Pool(10, worker_names='%sWorker' % thread_name)

    def run(self):
        """
        Consume the queue items, sending them to the plugins which are then
        going to find vulnerabilities, new URLs, etc.
        """

        while True:

            work_unit = self.in_queue.get()

            if work_unit == POISON_PILL:

                # Close the pool and wait for everyone to finish
                self._threadpool.close()
                self._threadpool.join()
                del self._threadpool
                
                self._teardown()

                # Finish this consumer and everyone consuming the output
                self._out_queue.put(POISON_PILL)
                self.in_queue.task_done()
                break

            else:
                # pylint: disable=E1120
                self._consume_wrapper(work_unit)
                self.in_queue.task_done()

    def _teardown(self):
        raise NotImplementedError

    def _consume(self, work_unit):
        raise NotImplementedError

    @task_decorator
    def _consume_wrapper(self, function_id, work_unit):
        """
        Just makes sure that all _consume methods are decorated as tasks.
        """
        return self._consume(work_unit)

    def _task_done(self, function_id):
        """
        The task_in_progress_counter is needed because we want to know if the
        consumer is processing something and let it finish. It is mainly used
        in the has_pending_work().

        For example:

            * You can have pending work if there are items in the input_queue

            * You can have pending work if there are still items to be read from
            the output_queue by one of the consumers that reads our output.

            * You can have pending work when there are no items in input_queue
            and no items in output_queue but the threadpool inside the consumer
            is processing something. This situation is handled by the
            self._tasks_in_progress attribute and the _add_task and
            _task_done methods.

        So, for each _add_task() there has to be a _task_done() even if the
        task ends in an error or exception.
        
        Recommendation: Do NOT set the callback for apply_async to call
        _task_done, the Python2.7 pool implementation won't call it if the
        function raised an exception and you'll end up with tasks in progress
        that finished with an exception.
        """
        try:
#......... the rest of the code is omitted here .........
Developer: BioSoundSystems, Project: w3af, Lines: 101, Source: base_consumer.py
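
The docstring's warning suggests pairing every _add_task() with a _task_done() in a try/finally block rather than relying on an apply_async callback. A minimal sketch of that idea follows; the _add_task name comes from the docstring above, but its body and the function_id plumbing provided by @task_decorator are not shown in this excerpt, so the signatures here are assumptions:

    def _consume_with_accounting(self, function_id, work_unit):
        # Assumed helper signatures, based on the docstring above.
        self._add_task(function_id)
        try:
            self._consume(work_unit)
        finally:
            # Runs even when _consume() raises, so _tasks_in_progress never
            # leaks an entry -- an apply_async callback would be skipped by
            # Python 2.7's pool if the function raised.
            self._task_done(function_id)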


Example 9: test_increase_number_of_workers

    def test_increase_number_of_workers(self):
        worker_pool = Pool(processes=2,
                           worker_names='WorkerThread',
                           maxtasksperchild=3)

        self.assertEqual(worker_pool.get_worker_count(), 2)

        def noop():
            return 1 + 2

        for _ in xrange(12):
            result = worker_pool.apply_async(func=noop)
            self.assertEqual(result.get(), 3)

        self.assertEqual(worker_pool.get_worker_count(), 2)

        worker_pool.set_worker_count(4)

        # It takes some time...
        self.assertEqual(worker_pool.get_worker_count(), 2)

        for _ in xrange(12):
            result = worker_pool.apply_async(func=noop)
            self.assertEqual(result.get(), 3)

        self.assertEqual(worker_pool.get_worker_count(), 4)

        worker_pool.terminate()
        worker_pool.join()
Developer: foobarmonk, Project: w3af, Lines: 29, Source: test_threadpool.py
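
Note the "It takes some time..." comment: set_worker_count(4) does not resize the pool immediately. The count still reads 2 right after the call and only reaches 4 once another batch of tasks has run; since the pool was created with maxtasksperchild=3, workers are recycled every few tasks, which appears to be when the new size takes effect.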


Example 10: test_terminate_join

    def test_terminate_join(self):
        worker_pool = Pool(1, worker_names='WorkerThread')
        worker_pool.terminate()
        worker_pool.join()
Developer: foobarmonk, Project: w3af, Lines: 4, Source: test_threadpool.py


Example 11: test_close_terminate

    def test_close_terminate(self):
        worker_pool = Pool(1, worker_names='WorkerThread')
        worker_pool.close()
        worker_pool.terminate()
Developer: foobarmonk, Project: w3af, Lines: 4, Source: test_threadpool.py


Example 12: test_max_queued_tasks

    def test_max_queued_tasks(self):
        worker_pool = Pool(processes=1, max_queued_tasks=2)

        # These tasks should be queued very fast
        worker_pool.apply_async(func=time.sleep, args=(2,))
        worker_pool.apply_async(func=time.sleep, args=(2,))
        worker_pool.apply_async(func=time.sleep, args=(2,))
        worker_pool.apply_async(func=time.sleep, args=(2,))

        # Now the pool is full and we need to wait in the main
        # thread to get the task queued
        start = time.time()

        worker_pool.apply_async(func=time.sleep, args=(2,))

        spent = time.time() - start

        worker_pool.close()
        worker_pool.join()

        self.assertLess(spent, 2.1)
        self.assertGreater(spent, 1.9)
Developer: andresriancho, Project: w3af, Lines: 22, Source: test_threadpool.py
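
The timing assertions are the point of this test: with one worker and max_queued_tasks=2, the first four apply_async() calls return immediately, while the fifth blocks the caller for roughly one 2-second task until a queue slot frees up. In other words, max_queued_tasks turns apply_async() into a backpressure mechanism that throttles fast producers instead of letting the task queue grow without bound.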


Example 13: __init__

    def __init__(self, consumer_plugins, w3af_core, thread_name,
                 create_pool=True, max_pool_queued_tasks=0,
                 max_in_queue_size=0, thread_pool_size=None):
        """
        :param consumer_plugins: Instances of base_consumer plugins in a list
        :param w3af_core: The w3af core that we'll use for status reporting
        :param thread_name: How to name the current thread, eg. Auditor
        :param create_pool: True to create a worker pool for this consumer
        """
        super(BaseConsumer, self).__init__(name='%sController' % thread_name)

        self.in_queue = CachedQueue(maxsize=max_in_queue_size,
                                    name=thread_name + 'In')

        #
        # Crawl and infrastructure plugins write to this queue using:
        #
        #   self.output_queue.put(fuzz_req)
        #
        # The strategy will read items from this queue in a tight loop using:
        #
        #   result_item = url_producer.get_result(timeout=0.1)
        #
        # And write them to self.in_queue (defined above) for all the url consumers
        #
        # Since this queue is read in a tight loop, items that are written here
        # will, in theory, not stay in memory for long.
        #
        # Also, items written here are fuzzable requests, which shouldn't use a lot
        # of memory.
        #
        # The only scenario I can think of where this queue is full of items
        # is one where the strategy loop is slow / delayed and the crawl plugins
        # are all finding many new URLs and forms.
        #
        # Tests showed something like this for a common site:
        #
        #   [Thu Feb 15 16:45:36 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 19.
        #   [Thu Feb 15 16:45:36 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 28.
        #   [Thu Feb 15 16:45:37 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 27.
        #   ...
        #   [Thu Feb 15 16:45:52 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 1.
        #
        # This was with a max_in_queue_size of 100 set for the CachedQueue defined below.
        #
        # Meaning that:
        #       * There were 119 items in the queue (100 in memory) in the first log line
        #       * Also at 16:45:36, there were 128 items in the queue (100 in memory)
        #       * It took 16 seconds to consume 28 items from the queue (from second 36 to second 52)
        #
        # This surprises me a little bit. I expected this queue to have fewer items in memory.
        # Since I want to reduce the memory usage of the framework, I'm going to reduce the
        # maxsize sent to this CachedQueue to 50
        #
        # But just in case I'm using a CachedQueue!
        self._out_queue = CachedQueue(maxsize=75, name=thread_name + 'Out')

        self._thread_name = thread_name
        self._consumer_plugins = consumer_plugins
        self._w3af_core = w3af_core
        self._observers = []

        self._tasks_in_progress = {}
        self._poison_pill_sent = False

        self._threadpool = None

        if create_pool:
            self._threadpool = Pool(thread_pool_size or self.THREAD_POOL_SIZE,
                                    worker_names='%sWorker' % thread_name,
                                    max_queued_tasks=max_pool_queued_tasks)
Developer: knucker, Project: w3af, Lines: 71, Source: base_consumer.py
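
The long comment block above describes CachedQueue's design: keep at most maxsize items in memory and spill the overflow to a disk-backed DiskDict so producers never block on memory pressure. A rough, single-threaded sketch of that spill-to-disk idea follows; it is not w3af's actual CachedQueue, just an illustration of the scheme the log lines hint at:

    import collections
    import os
    import pickle
    import tempfile

    class DiskSpillQueue(object):
        """Toy FIFO queue: at most `maxsize` items stay in memory, the
        overflow is pickled to temp files (not w3af's real CachedQueue)."""

        def __init__(self, maxsize):
            self.maxsize = maxsize
            self._memory = collections.deque()   # oldest items, served first
            self._disk = collections.deque()     # paths of spilled items

        def put(self, item):
            # Once anything sits on disk, new items must also go to disk,
            # otherwise FIFO ordering would break.
            if not self._disk and len(self._memory) < self.maxsize:
                self._memory.append(item)
                return
            fd, path = tempfile.mkstemp()
            with os.fdopen(fd, 'wb') as handle:
                pickle.dump(item, handle)
            self._disk.append(path)

        def get(self):
            # Assumes the queue is not empty; a real queue would block.
            item = self._memory.popleft()
            # Promote the oldest spilled item into the freed memory slot --
            # the shrinking "DiskDict size is N" log lines above show the
            # equivalent happening as the strategy loop catches up.
            if self._disk:
                path = self._disk.popleft()
                with open(path, 'rb') as handle:
                    self._memory.append(pickle.load(handle))
                os.remove(path)
            return item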


Example 14: BaseConsumer

class BaseConsumer(Process):
    """
    Consumer thread that takes fuzzable requests from a Queue that's populated
    by the crawl plugins, and identifies vulnerabilities by performing various
    requests.
    """

    THREAD_POOL_SIZE = 10

    def __init__(self, consumer_plugins, w3af_core, thread_name,
                 create_pool=True, max_pool_queued_tasks=0,
                 max_in_queue_size=0, thread_pool_size=None):
        """
        :param consumer_plugins: Instances of base_consumer plugins in a list
        :param w3af_core: The w3af core that we'll use for status reporting
        :param thread_name: How to name the current thread, eg. Auditor
        :param create_pool: True to create a worker pool for this consumer
        """
        super(BaseConsumer, self).__init__(name='%sController' % thread_name)

        self.in_queue = CachedQueue(maxsize=max_in_queue_size,
                                    name=thread_name + 'In')

        #
        # Crawl and infrastructure plugins write to this queue using:
        #
        #   self.output_queue.put(fuzz_req)
        #
        # The strategy will read items from this queue in a tight loop using:
        #
        #   result_item = url_producer.get_result(timeout=0.1)
        #
        # And write them to self.in_queue (defined above) for all the url consumers
        #
        # Since this queue is read in a tight loop, items that are written here
        # will, in theory, not stay in memory for long.
        #
        # Also, items written here are fuzzable requests, which shouldn't use a lot
        # of memory.
        #
        # The only scenario I can think of where this queue is full of items
        # is one where the strategy loop is slow / delayed and the crawl plugins
        # are all finding many new URLs and forms.
        #
        # Tests showed something like this for a common site:
        #
        #   [Thu Feb 15 16:45:36 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 19.
        #   [Thu Feb 15 16:45:36 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 28.
        #   [Thu Feb 15 16:45:37 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 27.
        #   ...
        #   [Thu Feb 15 16:45:52 2018 - debug] CachedQueue.get() ... CrawlInfraOut DiskDict size is 1.
        #
        # This was with a max_in_queue_size of 100 set for the CachedQueue defined below.
        #
        # Meaning that:
        #       * There were 119 items in the queue (100 in memory) in the first log line
        #       * Also at 16:45:36, there were 128 items in the queue (100 in memory)
        #       * It took 16 seconds to consume 28 items from the queue (from second 36 to second 52)
        #
        # This surprises me a little bit. I expected this queue to have fewer items in memory.
        # Since I want to reduce the memory usage of the framework, I'm going to reduce the
        # maxsize sent to this CachedQueue to 50
        #
        # But just in case I'm using a CachedQueue!
        self._out_queue = CachedQueue(maxsize=75, name=thread_name + 'Out')

        self._thread_name = thread_name
        self._consumer_plugins = consumer_plugins
        self._w3af_core = w3af_core
        self._observers = []

        self._tasks_in_progress = {}
        self._poison_pill_sent = False

        self._threadpool = None

        if create_pool:
            self._threadpool = Pool(thread_pool_size or self.THREAD_POOL_SIZE,
                                    worker_names='%sWorker' % thread_name,
                                    max_queued_tasks=max_pool_queued_tasks)

    def get_pool(self):
        return self._threadpool

    def run(self):
        """
        Consume the queue items, sending them to the plugins which are then
        going to find vulnerabilities, new URLs, etc.
        """
        while True:

            try:
                work_unit = self.in_queue.get()
            except KeyboardInterrupt:
                # https://github.com/andresriancho/w3af/issues/9587
                #
                # If we don't do this, the thread will die and will never
                # process the POISON_PILL, which will end up in an endless
                # wait for .join()
                continue
#......... the rest of the code is omitted here .........
Developer: knucker, Project: w3af, Lines: 101, Source: base_consumer.py



Note: The w3af.core.controllers.threads.threadpool.Pool class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.

