
Python multiprocessing.Queue Class Code Examples


This article collects typical code examples of the multiprocessing.Queue class in Python. If you have been wondering what the Queue class is for, how to use it, or what real-world code that uses it looks like, the curated examples below may help.



The sections below present 20 code examples of the Queue class, sorted by popularity by default. You can upvote any example you like or find useful; your ratings help the system recommend better Python code examples.
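
Before the examples, a minimal sketch of the pattern most of them build on may help: a parent process puts work items on a multiprocessing.Queue, worker processes consume them, and a sentinel value (here None) tells each worker to exit. This sketch is illustrative only and is not taken from any of the projects below.

from multiprocessing import Process, Queue

def worker(tasks, results):
    # Consume items until the None sentinel arrives.
    while True:
        item = tasks.get()
        if item is None:
            break
        results.put(item * item)

if __name__ == '__main__':
    tasks, results = Queue(), Queue()
    procs = [Process(target=worker, args=(tasks, results)) for _ in range(2)]
    for p in procs:
        p.start()
    for n in range(10):
        tasks.put(n)
    for _ in procs:        # one sentinel per worker
        tasks.put(None)
    for _ in range(10):    # drain results before joining
        print(results.get())
    for p in procs:
        p.join()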

Example 1: recoverPRNGState

def recoverPRNGState(cookie,timeMillisEstimate,PRNGMillisEstimate,IPAddr,serverPort,numWorkers,chunkSize):
    global PRNGMillisDelta
    global initalSeek
    
    q = Queue(0)
    i = 0
    
    if PRNGMillisDelta%chunkSize > 0:
        q.put((PRNGMillisEstimate+PRNGMillisDelta-PRNGMillisDelta%chunkSize,PRNGMillisEstimate+PRNGMillisDelta,initalSeek))
    
    for i in range(PRNGMillisEstimate,PRNGMillisEstimate+PRNGMillisDelta-PRNGMillisDelta%chunkSize,chunkSize):
        q.put((i,i+chunkSize,initalSeek))
        
    desc = []

    seedValue = Value('d', 0)

    # Start worker processes and assign work.                     
    for i in range(numWorkers):
        p = Process(target=recoverPRNGStateWorker, args=(cookie,timeMillisEstimate,q,IPAddr,serverPort,seedValue))
        p.start()
        desc.append(p)
        
    # Wait for worker processes to finish.
    for p in desc:
        p.join()

    return long(seedValue.value)
Developer: votadlos, Project: JavaCG, Lines: 28, Source: WinstoneSessionCatcher.py


Example 2: main

def main(world_folder, replacement_file_name):
    global replacements
    world = nbt.world.WorldFolder(world_folder)
    logger = configure_logging()
    logger.info("Starting processing of %s", world_folder)
    if not isinstance(world, nbt.world.AnvilWorldFolder):
        logger.error("%s is not an Anvil world" % (world_folder))
        return 65 # EX_DATAERR
    if replacement_file_name != None:
        logger.info("Using Replacements file: %s", replacement_file_name)
        with open(replacement_file_name, 'r') as replacement_file:
            replacements = json.load(replacement_file)
    # get list of region files, going to pass this into function to process region
    region_files = world.get_regionfiles();
    
    # Parallel
    q = Queue()
    lp = threading.Thread(target=logger_thread, args=[q])
    lp.start()
    p = Pool(initializer=process_init, initargs=[q,replacements], maxtasksperchild=1)
    region_data = p.map(process_region, region_files)
    # Map has finished up, lets close the logging QUEUE
    q.put(None)
    lp.join()
    
    # Not Parallel
#     region_data = map(process_region, region_files)
    
    # Write output data
    write_block_data(region_data,"output.txt")
    return 0
Developer: bot190, Project: MinecraftAnalysis, Lines: 31, Source: WorldAnalysis.py


Example 3: main

def main():
    arg = parse_args()
    folder = arg.fold
    core = arg.core
    output = arg.out
    start = arg.start
    if start:
        start = start.replace('-', '') + '000000'

    task_queue = Queue()
    result_queue = Queue()

    task_count = create_task(folder, task_queue, start)
    print task_count
    for i in range(core):
        Process(target=worker, args=(task_queue, result_queue)).start()

    #send stop signal
    for i in range(core):
        task_queue.put('STOP')

    #print result
    out_files = {}
    for i in range(task_count):
        actions = result_queue.get()
        user = actions["user"]
        for day in actions["actions"]:
            if day not in out_files:
                out_files[day] = open(os.path.join(output, day), "w")
            out_files[day].write(json.dumps({"user": user, "actions": actions["actions"][day]}) + "\n")
    for day in out_files:
        out_files[day].flush()
        out_files[day].close()
Developer: Tskatom, Project: company_market, Lines: 33, Source: extractDailyInteraction.py


Example 4: send_probe_requests

def send_probe_requests(interface=None, ssid=None):

    # initialize shared memory
    results = Queue()

    # start sniffer before sending out probe requests
    p = Process(target=sniffer, args=(interface, results,))
    p.start()

    # give sniffer a chance to initialize so that we don't miss
    # probe responses
    time.sleep(3)

    # send out probe requests... sniffer will catch any responses
    ProbeReq(ssid=ssid, interface='wlp3s0')
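    # (As quoted, the interface parameter of this function is ignored here
    # in favor of the hard-coded 'wlp3s0'.)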

    # make sure to get results from shared memory before allowing 
    # sniffer to join with parent process 
    probe_responses = results.get()

    # join sniffer with its parent process
    p.join()

    # return results
    return probe_responses
Developer: BwRy, Project: sentrygun, Lines: 25, Source: sniffer.py


Example 5: test_report_hash_added_after_send

    def test_report_hash_added_after_send(self, fromConfig, fromOptions, getLogger):
        # Side effect for fromConfig
        def fake_virts(logger, config):
            new_fake_virt = Mock()
            new_fake_virt.config.name = config.name
            return new_fake_virt

        fromConfig.side_effect = fake_virts
        options = Mock()
        options.interval = 0
        options.oneshot = True
        options.print_ = False
        options.log_file = ''
        options.log_dir = ''
        virtwho = VirtWho(self.logger, options, config_dir="/nonexistant")

        def send(report):
            report.state = AbstractVirtReport.STATE_FINISHED
            return True
        virtwho.send = Mock(side_effect=send)
        queue = Queue()
        virtwho.queue = queue
        virtwho.retry_after = 1
        virtwho.configManager.addConfig(self.config)
        virtwho.configManager.addConfig(self.second_config)
        queue.put(self.fake_report)
        queue.put(self.fake_domain_list)
        virtwho.run()

        self.assertEquals(virtwho.send.call_count, 2)
        self.assertEqual(virtwho.last_reports_hash[self.config.name], self.fake_report.hash)
        self.assertEqual(virtwho.last_reports_hash[self.second_config.name], self.fake_domain_list.hash)
Developer: mtulio, Project: virt-who, Lines: 32, Source: test_virtwho.py


Example 6: __init__

class UpDown:

    def __init__(self, down_workers=2, up_workers=2, db=None):
        self.down_workers_num = down_workers
        self.up_workers_num = up_workers
        self.db = db
        self.base_url = "http://eol.jsc.nasa.gov/SearchPhotos/"
        self.down_workers = []
        self.up_workers = []
        self.to_upload = []
        self.q = Queue()

    def down_worker(self, download_url, image_id):
        """
        Download images and set the database after the download was complete.
        """
        down = ImageDownload(self.base_url + download_url)
        down.find_urls()
        if(down.dl()):
            self.db.update_image_downloaded(image_id, down.file_name)

    def up_worker(self, mission_id):
        """
        Check for images that are downloaded but not uploaded every minute.
        """
        while True:
            self.to_upload = self.db.get_to_upload(mission_id)
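            # Note: as quoted, the next line prints unconditionally,
            # even when files to upload are found below.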
            print "No files to upload found!\n"
            if(len(list(self.to_upload)) > 0):
                print "Found a file to upload!\n"
                self.to_upload = list(self.db.get_to_upload(mission_id))
                self.q.put(self.to_upload)
            time.sleep(60)
Developer: PlanetHunt, Project: nasaeol, Lines: 33, Source: UpDown.py


Example 7: test_req_all_open_orders

    def test_req_all_open_orders(self):
        result_queue = Queue()
        class MockClientSocket(ClientSocket):
            def __init__(self):
                ClientSocket.__init__(self)
            def open_order(self, req_id, contract, order):
                result_queue.put(req_id)
                result_queue.put(contract)
                result_queue.put(order)
            def open_order_end(self):
                result_queue.put('open_order_end')
            def order_status(self, req_id, status, filled, remaining,
                             avg_fill_price, perm_id, parent_id,
                             last_fill_price, client_id, why_held):
                result_queue.put(req_id)
                result_queue.put(status)
                result_queue.put(filled)
                result_queue.put(remaining)
                result_queue.put(avg_fill_price)
                result_queue.put(perm_id)
                result_queue.put(parent_id)
                result_queue.put(last_fill_price)
                result_queue.put(client_id)
                result_queue.put(why_held)
        client = MockClientSocket()
        client.connect()
        client.req_all_open_orders()
        while True:
            result = result_queue.get()
            self.assertIsNotNone(result)
            if result == 'open_order_end':
                break
        client.disconnect()
Developer: schalekamp, Project: ibapipy, Lines: 33, Source: client_socket_tests.py


Example 8: test

    def test():

        queue = Queue()

        proc = Process(target=doNothing, args=(queue, ))
        proc.start()

        _logger.info("Started dummy process with PID %d", proc.pid)
        startCodeCheckerServerAttachedToPid(proc.pid)
        time.sleep(3)
        _logger.info("Allowing the dummy process to finish")
        queue.put(1)
        proc.join()

        if utils.isProcessRunning(proc.pid):
            _logger.warning("Dummy process %d was still running", proc.pid)
            proc.terminate()
            time.sleep(1)
            it.assertFalse(utils.isProcessRunning(proc.pid),
                           "Process %d is still running after terminating "
                           "it!" % proc.pid)

        time.sleep(1)
        _logger.info("Server should have died by now")

        with it.assertRaises(requests.ConnectionError):
            requests.post(it._url + '/get_diagnose_info')
Developer: suoto, Project: hdlcc, Lines: 27, Source: test_hdlcc_server.py


Example 9: MultiSegmentWriter

class MultiSegmentWriter(IndexWriter):
    def __init__(self, index, procs=2, **writerargs):
        self.index = index
        self.lock = index.storage.lock(index.indexname + "_LOCK")
        self.tasks = []
        self.postingqueue = Queue()
        #self.resultqueue = Queue()
        
        names = [index._next_segment_name() for _ in xrange(procs)]
        
        self.tasks = [SegmentWritingTask(index.storage, index.indexname,
                                         segname, writerargs, self.postingqueue)
                      for segname in names]
        for task in self.tasks:
            task.start()
        
    def add_document(self, **args):
        self.postingqueue.put(args)
        
    def cancel(self):
        for task in self.tasks:
            task.cancel()
        self.lock.release()
        
    def commit(self):
        procs = len(self.tasks)
        for _ in xrange(procs):
            self.postingqueue.put(None)
        for task in self.tasks:
            print "Joining", task
            task.join()
            self.index.segments.append(task.get_segment())
        self.index.commit()
        self.lock.release()
Developer: KeNJiKunG, Project: E-Tipitaka-for-PC, Lines: 34, Source: multiproc.py


Example 10: f

def f(idx, q,r):
    path = "data%s"%(idx)
    os.makedirs(path)
    while True:
        item = q.get()
        if( item.item_type == ITEM_QUIT ):
            break;

        count = 0
        localQueue = Queue()
        current = item.data
        while True:
            print current
            fo = urlopen(current)
            data = fo.read()
            name = "%s/%s"%(path,count)
            fw = open( name, "w" )
            count = count + 1
            fw.write(data)
            fw.close()
            fo.close()
            p = MyHTMLParser()
            try:
                p.feed(data)
            except:
                pass

            for href in p.hrefs:
                print item.data, ": ", href

            try:
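                # As quoted, nothing is ever put on localQueue, so get_nowait()
                # raises Empty and the inner loop ends after the first page.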
                current = localQueue.get_nowait()
            except:
                break;
Developer: charsyam, Project: pythoncrawl, Lines: 34, Source: main.py


Example 11: Updater

class Updater(Process):

    def __init__(self, maxsize=15):
        Process.__init__(self)
        #self.queue = Queue(maxsize)
        self.queue = Queue()
        self.queue_lock = Lock()
        self._exit = Event()

    def run(self):
        while not self._exit.is_set():
            #with self.queue_lock:
            self.queue.put(self.receive())
            #self.queue.put_nowait(self.receive())
            #if self.queue.full():
            #    try:
            #        self.queue.get_nowait()
            #    except:
            #        pass

    def stop(self):
        self._exit.set()
        # This leaves the process hanging on Windows
        #self.join(STOP_TIMEOUT)
        if self.is_alive():
            #TODO make a nicer warning
            print 'Terminating updater:', self
            self.terminate()

    def receive(self):
        raise NotImplementedError
Developer: KN2C, Project: pyroboime, Lines: 31, Source: updater.py


Example 12: likelihood_mp_simple

def likelihood_mp_simple(seqlens,fss,uon,bon,theta,seqnum,K,ufnum,bfnum,regtype,sigma):
    global _gradient
    grad = numpy.array(fss,copy=True)  # data distribution
    likelihood = numpy.dot(fss,theta)
    que1 = Queue() # for the likelihood output
    que2 = Queue() # for the gradient output
    np = 0
    subprocesses = []
    corenum=multiprocessing.cpu_count()
    #corenum=1
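    # Note: seqnum/corenum here relies on Python 2 integer division;
    # under Python 3 this would need // to keep chunk an int.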
    if corenum>1:
        chunk=seqnum/corenum+1
    else:
        chunk=seqnum
    starti=0
    while starti < (seqnum):
        endi=starti+chunk
        if endi>seqnum:
            endi=seqnum    
        p = Process(target=likelihoodthread_simple, 
            args=(seqlens[starti:endi],uon[starti:endi],bon[starti:endi],theta,K,ufnum,bfnum,que1,que2))
        p.start()
        np+=1
        #print 'delegated %s:%s to subprocess %s' % (starti, endi, np)
        subprocesses.append(p)
        starti += chunk
    for i in range(np):
        likelihood += que1.get()
    for i in range(np):
        grad += que2.get()
    while subprocesses:
        subprocesses.pop().join()
    grad -= regularity_deriv(theta,regtype,sigma)
    _gradient = grad
    return likelihood - regularity(theta,regtype,sigma)
Developer: liuminglu19870419, Project: python_crf, Lines: 35, Source: LinearCRF2.py


Example 13: likelihood_multithread_O

def likelihood_multithread_O(seqlens,fss,uon,bon,theta,seqnum,K,ufnum,bfnum):   # multithread version of likelihood
    '''conditional log likelihood log p(Y|X)'''
    likelihood = numpy.dot(fss,theta)
    thetab=theta[0:bfnum]
    thetau=theta[bfnum:]
    que = Queue()
    np = 0
    subprocesses = []
    corenum=multiprocessing.cpu_count()
    #corenum=1
    if corenum>1:
        chunk=seqnum/corenum+1
    else:
        chunk=seqnum
    starti=0
    while starti < (seqnum):
        endi=starti+chunk
        if endi>seqnum:
            endi=seqnum    
        p = Process(target=likelihoodthread, 
           args=(seqlens,uon,bon,thetau,thetab,seqnum,K,ufnum,bfnum,starti,endi,que))
        p.start()
        np+=1
        #print 'delegated %s:%s to subprocess %s' % (starti, endi, np)
        subprocesses.append(p)
        starti += chunk
    for i in range(np):
        likelihood += que.get()
    while subprocesses:
        subprocesses.pop().join()
    return likelihood - regularity(theta)
Developer: liuminglu19870419, Project: python_crf, Lines: 31, Source: LinearCRF2.py


Example 14: BackgroundProcess

class BackgroundProcess(object):
    """A background process that reads batches and stores them in a queue.

    The :meth:`main` method needs to be called in order to start reading
    batches into the queue. Note that this process will run infinitely;
    start it as a :attr:`~multiprocessing.Process.daemon` to make sure it
    will get killed when the main process exits.

    Parameters
    ----------
    data_stream : :class:`.DataStream` or :class:`Transformer`
        The data stream from which to read batches.
    max_batches : int
        The maximum number of batches to store in the queue. If reached,
        the process will block until a batch is popped from the queue.

    """
    def __init__(self, data_stream, max_batches):
        self.data_stream = data_stream
        self.batches = Queue(max_batches)
        self.run_background = True

    def main(self):
        while True:
            iterator = self.data_stream.get_epoch_iterator()
            for batch in iterator:
                self.batches.put(batch)
            self.batches.put(StopIteration)

    def get_next_data(self):
        return self.batches.get()
Developer: yk, Project: fuel, Lines: 31, Source: __init__.py


Example 15: JobPool

class JobPool(object):

    """
    Pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=4):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_task, (self.message_queue,))
        atexit.register(self.clear)

    def add_analysis(self, analysis):
        """
        Add analysis to the pool.
        """
        analysis.set_started()
        self.message_queue.put(analysis)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Developer: ANSSI-FR, Project: polichombr, Lines: 26, Source: jobpool.py


Example 16: run

    def run(self):
        logger.info('starting horizon agent')
        listen_queue = Queue(maxsize=settings.MAX_QUEUE_SIZE)
        pid = getpid()

        #If we're not using oculus, don't bother writing to mini
        try:
            skip_mini = True if settings.OCULUS_HOST == '' else False
        except Exception:
            skip_mini = True

        # Start the workers
        for i in range(settings.WORKER_PROCESSES):
            if i == 0:
                Worker(listen_queue, pid, skip_mini, canary=True).start()
            else:
                Worker(listen_queue, pid, skip_mini).start()

        # Start the listeners
        Listen(settings.PICKLE_PORT, listen_queue, pid, type="pickle").start()
        Listen(settings.UDP_PORT, listen_queue, pid, type="udp").start()

        # Start the roomba
        Roomba(pid, skip_mini).start()

        # Warn the Mac users
        try:
            listen_queue.qsize()
        except NotImplementedError:
            logger.info('WARNING: Queue().qsize() not implemented on Unix platforms like Mac OS X. Queue size logging will be unavailable.')

        # Keep yourself occupied, sucka
        while 1:
            time.sleep(100)
Developer: B-Rich, Project: skyline, Lines: 34, Source: horizon-agent.py


Example 17: YaraJobPool

class YaraJobPool(object):

    """
    Yara pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=3):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_yara_task,
                         (self.message_queue,))
        atexit.register(self.clear)

    def add_yara_task(self, yara_task):
        """
        Adds the yara task.
        """
        self.message_queue.put(yara_task)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Developer: ANSSI-FR, Project: polichombr, Lines: 26, Source: jobpool.py


Example 18: test_same_report_filtering

    def test_same_report_filtering(self, fromConfig, fromOptions, getLogger):
        def fake_virts(logger, config):
            new_fake_virt = Mock()
            new_fake_virt.config.name = config.name
            return new_fake_virt

        fromConfig.side_effect = fake_virts
        options = Mock()
        options.interval = 0
        options.oneshot = True
        options.print_ = False
        options.log_dir = ''
        options.log_file = ''
        virtwho = VirtWho(self.logger, options, config_dir="/nonexistant")

        queue = Queue()
        # Create another report with same hash
        report2 = HostGuestAssociationReport(self.config, self.fake_report.association)
        self.assertEqual(self.fake_report.hash, report2.hash)

        def send(report):
            report.state = AbstractVirtReport.STATE_FINISHED
            # Put second report when the first is done
            queue.put(report2)
            return True
        virtwho.send = Mock(side_effect=send)
        virtwho.queue = queue
        virtwho.retry_after = 1
        virtwho.configManager.addConfig(self.config)
        queue.put(self.fake_report)
        virtwho.run()

        self.assertEquals(virtwho.send.call_count, 1)
Developer: mtulio, Project: virt-who, Lines: 33, Source: test_virtwho.py


Example 19: ParCalculate

def ParCalculate(systems,calc,cleanup=True,block=True,prefix="Calc_"):
    '''
    Run calculators in parallel for all systems. 
    Calculators are executed in isolated processes and directories.
    The resulting objects are returned in the list (one per input system).
    '''

    if type(systems) != type([]) :
        sysl=[systems]
    else :
        sysl=systems

    if block :
        iq=Queue(len(sysl)+1)
        oq=Queue(len(sysl)+1)
            
        # Create workers    
        for s in sysl:
            __PCalcProc(iq, oq, calc, prefix=prefix, cleanup=cleanup).start()

        # Put jobs into the queue
        for n,s in enumerate(sysl):
            iq.put([n,s])
            # Protection against too quick insertion
            time.sleep(0.2)
        
        if verbose : 
            print("Workers started:", len(sysl))
        
        # Collect the results
        res=[]
        while len(res)<len(sysl) :
            n,s=oq.get()
            res.append([n,s])
            #print("Got from oq:", n, s.get_volume(), s.get_pressure())
    else :
        # We do not need the multiprocessing complications for non-blocking 
        # workers. We just run all in sequence.
        basedir=os.getcwd()
        res=[]
        for n,s in enumerate(sysl):
            s.set_calculator(copy.deepcopy(calc))
            s.get_calculator().block=block
            place=tempfile.mkdtemp(prefix=prefix, dir=basedir)
            os.chdir(place)
            s.get_calculator().working_dir=place
            #print("Start at :", place)
            if hasattr(calc, 'name') and calc.name=='Siesta':
                s.get_potential_energy()
            else:
                s.get_calculator().calculate(s)
            os.chdir(basedir)
            #print("Submited", s.get_calculator().calc_finished(), os.getcwd())
            # Protection against too quick insertion
            time.sleep(0.2)
            res.append([n,s])
        if verbose : 
            print("Workers started:", len(sysl))
            
    return [r for ns,s in enumerate(sysl) for nr,r in res if nr==ns]
Developer: digideskio, Project: Elastic, Lines: 60, Source: parcalc.py


Example 20: main

def main():
	# Threads we will use, don't change this because each thread calculates keys for 100 games exactly
	# (You can change this if you know how, I'm too euphoric now to do more flexibility)
	start = time();
	threads = 10;
	for line in sys.stdin:
		# Parsing the stdin
		encryptedMessage,encryptedGames = line.strip().split(':');
		encryptedGames = encryptedGames.split('~');
		# Queue with keys
		q = Queue();
		# Threads
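		# Note: the literal 10 below must match the threads count defined above.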
		for i in range(10):
			p = Process(target=keysFinder, args=(encryptedGames[i*100:(i+1)*100],q));
			p.start();
		# Number of threads already finished
		finished = 0;
		keys = [];
		while finished < threads:
			keys += q.get();
			finished+=1;

		# From all keys, try which one decrypts a valid message
		em = binascii.unhexlify(encryptedMessage);
		found = False;
		for key in keys:
			x = AES.new(key);
			dec = x.decrypt(em);
			if (isCorrect(dec)):
				found = True;
				# Make unpadding and print. Voila!
				print removePadding(dec.strip());
	if (sys.argv[1] == 'benchmark'):
		print "Time elapsed: ",time()-start;
Developer: colega, Project: tuentichallenge2012, Lines: 34, Source: challenge20.py



Note: The multiprocessing.Queue class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many programmers; copyright in the source code remains with its original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

