
Python dummy.Pool Class Code Examples


This article collects typical usage examples of the multiprocessing.dummy.Pool class in Python. If you have been wondering how exactly to use the Pool class, how it is called, or what real Pool code looks like, the curated class examples below may help.



A total of 20 code examples of the Pool class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
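
Before diving in, a minimal sketch of the class itself may help (this is background, not one of the 20 curated examples): multiprocessing.dummy.Pool exposes the same API as multiprocessing.Pool (map, imap, apply_async, close and join) but runs its workers as threads rather than processes. That is why most snippets below import it under the alias ThreadPool; note that many of the excerpts omit that import line.

# Minimal sketch of a thread-backed Pool; same API as multiprocessing.Pool.
from multiprocessing.dummy import Pool as ThreadPool

def work(n):
    return n * n  # stand-in for an I/O-bound task such as an HTTP request

pool = ThreadPool(4)                 # 4 worker threads
results = pool.map(work, range(10))  # blocks until every task finishes
pool.close()
pool.join()
print(results)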

Example 1: assync_users_proceed

def assync_users_proceed(self, users_pool, threads):
    pool = ThreadPool(threads)
    try:
        full_users = pool.map(self.get_user_info, users_pool)
    except Exception as e:
        print(e)
        full_users = []
Developer: shanginn, Project: github_public_keys_grabber, Lines: 7, Source: getkeys.py


Example 2: test_multi_threading

def test_multi_threading():
    import time
    import random
    from multiprocessing.dummy import Pool
    from graphkit import compose, operation  # pipeline helpers from the example's own project

    def op_a(a, b):
        time.sleep(random.random()*.02)
        return a+b

    def op_b(c, b):
        time.sleep(random.random()*.02)
        return c+b

    def op_c(a, b):
        time.sleep(random.random()*.02)
        return a*b

    pipeline = compose(name="pipeline", merge=True)(
        operation(name="op_a", needs=['a', 'b'], provides='c')(op_a),
        operation(name="op_b", needs=['c', 'b'], provides='d')(op_b),
        operation(name="op_c", needs=['a', 'b'], provides='e')(op_c),
    )

    def infer(i):
        # data = open("616039-bradpitt.jpg").read()
        outputs = ["c", "d", "e"]
        results = pipeline({"a": 1, "b":2}, outputs)
        assert tuple(sorted(results.keys())) == tuple(sorted(outputs)), (outputs, results)
        return results

    N = 100
    for i in range(20, 200):
        pool = Pool(i)
        pool.map(infer, range(N))
        pool.close()
Developer: yahoo, Project: graphkit, Lines: 35, Source: test_graphkit.py


Example 3: download_urls_to_zip

def download_urls_to_zip(zf, urls):

    urls = set(urls)

    pool = ThreadPool(10)
    download_to_zip_func = lambda url: download_url_to_zip(zf, url)
    pool.map(download_to_zip_func, urls)
Developer: SG345, Project: ka-lite, Lines: 7, Source: generate_assessment_zips.py


Example 4: BurstDz

def BurstDz(host, path, user, passfile):
    hostuser = host.split('.')[-2]  # second-to-last domain label, e.g. 'example' from 'www.example.com'
    hostdir = [hostuser,hostuser+hostuser,'admin'+hostuser,hostuser+'123','manage'+hostuser,hostuser+'123456',hostuser+'admin','123'+hostuser]

    opts_list = []

    f = open(passfile, 'r')
    password = f.read().split()
    dic = password+hostdir
    pool = ThreadPool(10)
    host1 = host+path

    for x in range(len(dic)):
        mima = dic[x]
        opts = {
            'host': host1,
            'user': user,
            'password': mima
        }
        opts_list.append(opts)

    #print hostr
    #print result
    pool.map(LoginDisCuz, opts_list)
    #pool.join()
    print('All PassWord Run Over')
Developer: c0deeast, Project: DzScan, Lines: 27, Source: DzScan.py


Example 5: main

def main():
    pool = ThreadPool(4)
    terms_to_articles = {}

    t0 = time()

    for term in search_terms:
        print("Getting articles for {}...".format(term))
        article_urls = get_articles_urls_for(term)
        articles = pool.map(get_article, article_urls)
        terms_to_articles[term] = articles

    print("Fetching articles took {:.1f} seconds".format(time() - t0))

    for term in search_terms:
        articles = terms_to_articles[term]
        print("Articles for {} ({}):".format(term, len(articles)))
        for article in articles:
            print(u"  == {} ==".format(article.title))
            print(u"  {}...".format(article.text[:70]))
            print(u"  - {}".format(article.url))
            print()

    with open('articles.pickle', 'wb') as f:
        pickle.dump(terms_to_articles, f)
Developer: AkiraKane, Project: dsr-2015, Lines: 25, Source: 3_proxies.py


Example 6: update_proxy_pool

def update_proxy_pool(test_url, timeout, proxy_pool, ready_flag, interval):
    """
    Task run by the daemon process: refresh the proxy pool on a schedule.
    Note that each refresh itself takes ten-odd seconds, so "on a schedule"
    means a fixed interval between refreshes.
    """
    while 1:
        proxy_list = get_proxies(test_url, timeout)  # fetch a fresh proxy list
        # Collect the old proxies that are absent from the fresh list
        pre_test_list = list(proxy_pool.keys())
        pre_test_list.remove(None)
        for proxy in proxy_list:
            if proxy in proxy_pool:  # the old proxy is still in the fresh list, skip re-testing it
                pre_test_list.remove(proxy)
        # Re-test the remaining old proxies and drop the ones that respond too slowly
        if len(pre_test_list) > 0:
            pool = Pool(16)  # create a thread pool
            kwargs = [{'test_url': test_url, 'proxy': proxy, 'timeout': timeout} for proxy in pre_test_list]  # pack arguments
            response_time_list = pool.map(multi_test_wrapper, kwargs)  # test in parallel
            for i in range(len(pre_test_list)):  # drop old proxies that respond too slowly
                if response_time_list[i] > timeout:
                    del proxy_pool[pre_test_list[i]]
        # Merge the fresh list into the pool
        for proxy in proxy_list:
            if proxy not in proxy_pool:  # initialize any proxy not yet in the pool
                proxy_pool[proxy] = 0
        ready_flag.value = True
        print('Proxy pool updated; it now holds', len(proxy_pool), 'proxies')
        sleep(interval)  # wait before the next refresh
Developer: wujun, Project: weibo-crawler2, Lines: 29, Source: proxies.py


Example 7: _get_item_data

    def _get_item_data(self, itemids, threads=-1):
        """
        Get metadata for many items.

        :param itemids: item numbers
        :param threads: number of cpu threads to use

        :type itemids: list
        :type threads: int
        """
        self.info('getting data')
        self.info('threads = %d', threads)

        # threads make it faster but I've seen it freeze so disabling this for now
        if threads > 1:
            threads = 1
            self.error('multiprocessing seems fishy')
            self.error('setting threads=1')

        # get data from itemids
        if threads > 1:
            from multiprocessing.dummy import Pool as ThreadPool
            import itertools
            params = zip(itemids, range(len(itemids)), itertools.repeat(len(itemids)))
            pool = ThreadPool(threads)
            data = pool.map(self._get_item_data_for_itemid_map, params)
            data = {d['itemid'] : d for d in data}
        else:
            data = {}
            for i, itemid in enumerate(itemids):
                data[itemid] = self._get_item_data_for_itemid(itemid, index=i, total=len(itemids))

        return data
Developer: dacrybabysuck, Project: pydarkstar, Lines: 33, Source: ffxiah.py


Example 8: test_thread

def test_thread(data_array, word_list):

    def test_update_line(line):
        if len(line) == 1:
            return line
        else:
            for i in range(len(word_list)):
                for j in range(len(line)-1):
                    if line[j] == word_list[i][0] and line[j+1] == word_list[i][1]:
                        line[j] = line[j] + line[j+1]
                        line[j+1] = ''
            return line

    print(data_array)
    IS_MUTI_THREAD = True
    MUTI_THREAD_NUM = 3
    if IS_MUTI_THREAD:
        from multiprocessing.dummy import Pool as ThreadPool
    if IS_MUTI_THREAD:
        pool = ThreadPool(MUTI_THREAD_NUM)
        pool.map(test_update_line, data_array)
        data_array = [list(filter(lambda x: x != '', line)) for line in data_array]
    else:
        # for i in range(len(data_array)):
        #     data_array[i] = list(filter(lambda x: x != '', test_update_line(data_array[i])))
        data_array = [list(filter(lambda x: x != '', test_update_line(line))) for line in data_array]

    print(data_array)
Developer: yinruyi, Project: compound, Lines: 28, Source: test.py


Example 9: start

def start():

    CSVFile(header=['Artist', 'Album', 'Genre', 'Style', 'Year', 'Rating'])
    page = 1
    page_not_found = None
    while page_not_found is None:

        try:
            print('Page', page)

            pitchfork_page = Grab()
            pitchfork_page.go(PITC_URL + str(page))
            soup = Soup(pitchfork_page.doc.select('//div[@id="main"]/ul[@class="object-grid "]').html(), 'lxml')
            albums_on_page = []

            for link in soup.find_all('a', href=True):
                albums_on_page.append('http://pitchfork.com' + link['href'])

            pool = ThreadPool(THREADS)

            pool.map(pitchfork, albums_on_page)

            page += 1

            # if page > 1:
            #   page_not_found = True

        except IndexError as error:
            print(error)
            page_not_found = True
Developer: thzvm, Project: Python, Lines: 30, Source: pitchfork.py


Example 10: main

def main():
    mht_list = get_mht_list()
    if not mht_list:
        print('Please make sure there are .mht files in this directory\n')
        return
    print('Images from %s .mht files need to be backed up\n' % len(mht_list))

    print('Please enter your QQ number (6-10 digits):')
    qq = input()
    print('Searching the .mht files for images to back up, please wait....')
    get_mht_pic(mht_list)
    if not mht_pic_md5:
        print('The .mht files contain no images that can be backed up\n')
        return
    print('Found %s images to back up' % len(mht_pic_md5))
    # QQ image folder (read the user's Documents path from the registry)
    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Explorer\User Shell Folders')
    documents_path = winreg.QueryValueEx(key, 'Personal')[0]
    img_path = documents_path + os.sep + 'Tencent Files/' + qq + '/Image'
    print('Scanning QQ chat-log images, please wait....')
    pic_list = get_pic_list(img_path)
    if not pic_list:
        print('QQ chat-log image folder not found; please make sure the QQ number is correct\n')
        main()
        return

    pool = ThreadPool(thread_num)
    print('Backing up....')
    pool.map(backup, pic_list)
    print('Backup finished\nImages are saved in the bak folder under the current path\n')
Developer: guxiaodong1987, Project: QQ_pic_bak, Lines: 29, Source: QQ_pic_bak.py


Example 11: audio_convert

def audio_convert(filename):
    # This combines the cutting and the conversions

    cut_files = {}
    text = {}
    
    error_file = open('error.txt', 'w')
    error_file.write(filename)
    for speed in ['slow', 'fast']:
        if speed == 'slow':
            cut_files[speed] = cut_wave(filename, 0.70)
        else:
            cut_files[speed] = cut_wave(filename, 0.85) 
        # assert(False)
        pool = ThreadPool(processes = len(cut_files[speed]))
        text[speed] = pool.map(chunk_convert, cut_files[speed])
        pool.close()
        # text[speed] = [chunk_convert(x) for x in cut_files[speed]]
        print "Closed a pool"
        # Clear out the temporary files created
        for x in cut_files[speed]:
            os.remove(x)

    text = text['slow'] + text['fast']
    text = [x for x in text if len(x) > 0]
    return text
Developer: kenzshelley, Project: vinder, Lines: 26, Source: audio_parse.py


Example 12: abortable_func

def abortable_func(func, *args, **kwargs):
	"""
	The abortable_func is the wrapper function, which wraps around function type "func", call 
	  it in a background thread (multiprocessing.dummy.Thread), and terminates it after
	  "timeout" seconds.
	This function is inspired by 
	  http://stackoverflow.com/questions/29494001/how-can-i-abort-a-task-in-a-multiprocessing-pool-after-a-timeout
	  but is an improvement over the original solution, since the original solution is only 
	  applicable to a function that takes positional arguments.

	Parameters of the function:
	  func - the function that will be called and terminated if not return with "timeout" seconds
	  *args - positional arguments of "func"
	  **kwargs - named arguments of "func" + "timeout" value
	"""
	
	#- Get "timeout" value and create a ThreadPool (multiprocessing.dummy.Pool) 
	#  with only 1 worker. 
	#- Use functools.partial (https://docs.python.org/3/library/functools.html)
	#  to fit all the arguments of the func into the interface of
	#  Pool.apply_async function
	timeout = kwargs.pop('timeout', None)
	p = ThreadPool(1)
	partial_func = partial(func, **kwargs)
	res = p.apply_async(partial_func, args)

	#- Terminate the thread if it does not return after "timeout" seconds,
	#  otherwise return the returned value of func
	try:
		out = res.get(timeout)
		return out
	except TimeoutError:
		p.terminate()
		return "{}:Timeout exceeded. Process terminated.\r\n".format(args[0])
Developer: snwhite777, Project: multiexec, Lines: 34, Source: multiexec.py
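
For a quick illustration of how abortable_func is called, here is a hedged usage sketch: slow_echo, the host string, and the two-second limit are all made up for the example, and it assumes the module-level imports the function relies on (ThreadPool from multiprocessing.dummy, TimeoutError from multiprocessing, and functools.partial).

# Hypothetical usage sketch for abortable_func above; assumes the module has:
#   from multiprocessing.dummy import Pool as ThreadPool
#   from multiprocessing import TimeoutError
#   from functools import partial
import time

def slow_echo(host, delay=10):
    time.sleep(delay)  # stand-in for a command that may hang
    return "{}: done".format(host)

# Prints the timeout message after about 2 seconds instead of blocking for 10.
print(abortable_func(slow_echo, "example.com", delay=10, timeout=2))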


Example 13: find_process_files

def find_process_files(root_dir):
    lock = Lock()
    pool = Pool()

    hash_db = load_hashes(HASH_FILE)
    # Keep changed .pxi hashes in a separate dict until the end
    # because if we update hash_db and multiple files include the same
    # .pxi file the changes won't be detected.
    pxi_hashes = {}

    jobs = []

    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            in_file = os.path.join(cur_dir, filename + ".in")
            if filename.endswith('.pyx') and os.path.isfile(in_file):
                continue
            for fromext, function in rules.items():
                if filename.endswith(fromext):
                    toext = ".c"
                    with open(os.path.join(cur_dir, filename), 'rb') as f:
                        data = f.read()
                        m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
                        if m:
                            toext = ".cxx"
                    fromfile = filename
                    tofile = filename[:-len(fromext)] + toext
                    jobs.append((cur_dir, fromfile, tofile, function, hash_db, pxi_hashes, lock))

    for result in pool.imap(lambda args: process(*args), jobs):
        pass

    hash_db.update(pxi_hashes)
    save_hashes(hash_db, HASH_FILE)
Developer: Brucechen13, Project: scipy, Lines: 34, Source: cythonize.py


Example 14: main

def main():
    n = 100000
    m = 10
    m2 = 1000
    
    create_db()

    pool = Pool(processes=5)
    start = time.time()
    fill(n)
    fill_time = time.time() - start
    print('{} inserts in {}s'.format(n,fill_time))
    db = get_db()
    print(db.directories.find().count(),'directories')

    start = time.time()
    results = []
    for _ in range(m):
        results.append(pool.apply_async(read, ()))
#        results.append(pool.apply_async(read_dataset, ()))
        for i in range(m2):
            results.append(pool.apply_async(read_one, ()))
#            if i%10 == 0:
#                results.append(pool.apply_async(fill, (1,)))
    for r in results:
        r.get(timeout=1000000)
    read_time = time.time() - start
    pool.terminate()

    print('{}.{} reads in {}s'.format(m,m2,read_time))
Developer: dsschult, Project: file_catalog, Lines: 30, Source: mongo_test2.py


Example 15: runLocalCommands

def runLocalCommands(args, outputDir, commands):
    # NOTE: this is going to BREAK meff optimisation if we re-cycle histograms.
    # Needs to be updated to run in successive order if we implement that.
    N = len(commands)

    if N > 50:
        print("")
        print("Are you sure you want to run %d commands locally?" % N)
        if args.dry_run:
            print("[NB: this is a dry run]")
        var = input("Press enter to continue")
        print("")

    cmds = []
    for i, x in enumerate(commands):
        (cuts, name, cmd) = x
        cmd = "cd %s && echo '%d/%d\t%s' && %s 2>&1 >/dev/null" % (outputDir, i+1, N, cmd, cmd)
        cmds.append(cmd)

    if args.dry_run:
        print("Would run following commands:")
        for cmd in cmds:
            print("   %s" % cmd)
        return

    pool = Pool(10) # concurrent commands at a time
    for i, returncode in enumerate(pool.imap(partial(subprocess.call, shell=True), cmds)):
        if returncode != 0:
           print(("%d command failed: %d" % (i, returncode)))
Developer: lawrenceleejr, Project: ZeroLeptonAnalysis, Lines: 29, Source: utils.py


Example 16: multithread

def multithread(function, items, extra_variable, threads=2):
    """ Takes the main function to run in parallel, inputs the variable(s) and returns the results.
    :param function: The main function to process in parallel.
    :param items: A list of strings that are passed into the function for each thread.
    :param extra_variable: One additional variable that can be passed into the function.
    :param threads: The number of threads to use. The default is 2, but the threads are not CPU core bound.
    :return: The results of the function passed into this function.
    """

    if __name__ == '__main__':

        # """ A CPU core dependent multiprocessing technique.
        # The synchronized variant, which locks the main program until a process is finished. Order is retained. """
        # pool = Pool(threads)
        # results = [pool.apply(function, args=(item, extra_variable)) for item in items]
        # pool.close()
        # pool.join()

        # """ A thread dependent multiprocessing technique. Theoretically, an unlimited number of threads can be used.
        # The synchronized variant, which locks the main program until a process is finished. Order is retained. """
        # pool = ThreadPool(threads)
        # results = [pool.apply(function, args=(item, extra_variable)) for item in items]
        # pool.close()
        # pool.join()

        """ A thread dependent multiprocessing technique. Theoretically, an unlimited number of threads can be used.
        The async variant, which submits all processes at once and retrieve the results as soon as finished. """
        pool = ThreadPool(threads)
        output = [pool.apply_async(function, args=(item, extra_variable)) for item in items]
        results = [p.get() for p in output]

        return results
Developer: ricrosales, Project: StudentAttrition, Lines: 32, Source: one_class.py
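
For context, a hypothetical call sketch for the multithread helper above; score_item, the file names, and the model object are invented for the illustration, and ThreadPool is assumed to be imported from multiprocessing.dummy as elsewhere in this article.

# Hypothetical usage of multithread(): apply one function to many items,
# passing a single shared extra argument, on 4 threads.
def score_item(item, model):
    return model.score(item)  # any function with an (item, extra_variable) signature works

# results = multithread(score_item, ['a.txt', 'b.txt', 'c.txt'], model, threads=4)
# results[i] corresponds to items[i]: the apply_async handles are collected
# in order and .get() is then called on each in turn.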


Example 17: getAllProducts

def getAllProducts(self):
    """
    multithreaded
    returns a dictionary of information
        {skus}
        skus is a dictionary with many keys and values
        refer to output.txt to see what information it holds
    """
    skus = {}
    page = 1
    num_pages = 8
    r = None
    found_empty = False
    pool = ThreadPool(num_pages)
    while not found_empty:
        pages = range(page, page + num_pages)
        results = pool.map(lambda x: self._listProducts(page=x), pages)
        # print(results)
        for r in results:
            if str(r.status_code) == "204":
                found_empty = True
                break
            if str(r.status_code).startswith("4"):
                raise Exception("Error {}: {}.".format(r.status_code, BigCommerce.error_codes[int(r.status_code)]))
            temp_data = r.json()
            for item in temp_data:
                sku = item["sku"]
                skus[sku] = item
            page += 1
    return {"skus": skus}
Developer: brianjp93, Project: bigcommerce, Lines: 30, Source: bigcommerce.py


Example 18: spider

def spider():
	# initialize the count
	global COUNT
	global TOTAL
	COUNT = 0


	# connect the local mongodb
	conn = pymongo.MongoClient()  # pymongo.Connection was replaced by MongoClient in pymongo 3
	db = conn.adsdata

	logfile = open("./runtime.log", "a")

	enzyme_content = KEGG.getList('enzyme')
	enzyme_lines = enzyme_content.split('\n')

	TOTAL = len(enzyme_lines)
	print('TOTAL:  ' + str(TOTAL))

	enzyme_ids = map(lambda line: line.split('\t')[0], enzyme_lines)

	## multithreaded inserting
	pool = ThreadPool(10)
	try:
		pool.map(lambda id: insertEnzymeTreeWith_safe(id, db), enzyme_ids)
	except Exception as e:
		print("Error: " + str(e))
Developer: fycisc, Project: ADS, Lines: 27, Source: grab.py


Example 19: format_data

def format_data(dealer_data):
    start_time = time.time()
    pool = Pool(1)
    dealers = []
    today = datetime.now()
    for data in dealer_data:
        temp = {}
        temp['id'] = data[0]
        temp['service_advisor_id'] = data[1]
        temp['name'] = data[2]
        temp['phone_number'] = data[3]
        temp['order'] = data[4]
        temp['password'] = data[1] + '@123'
        temp['last_login'] = today
        temp['is_superuser'] = 0
        temp['username'] = data[1]
        temp['first_name'] = ' '
        temp['last_name'] = ' '
        temp['email'] = ''
        temp['is_staff'] = 0
        temp['is_active'] = 1
        temp['date_joined'] = today
        dealers.append(temp)
    pool.map(process_query, dealers)
    end_time = time.time()
    print("..........Total TIME TAKEN.........", end_time - start_time)
Developer: ashish-srivastava92, Project: GladmindsAshish, Lines: 26, Source: create_sa_user.py


Example 20: run

def run(node):
    """
    Primary entry-point for running this module.

    :param node: dict
    {
        "url": "https://some-site.com"
    }

    :return:
    {
        document_url: metadata,
        ...
    }
    :rtype:  dict
    """
    mapper    = lambda x: redis_load(x, r)
    url       = node.get('url', 'http://www.cic.gc.ca')
    pool      = ThreadPool(32)
    docs      = redis_docs(url, r)
    metadata  = pool.map(mapper, docs)
    return {
        url2pathname(k): v
            for k,v in metadata if v
    }
Developer: anukat2015, Project: linkalytics, Lines: 25, Source: tika.py



Note: The multiprocessing.dummy.Pool class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.

