
Python pool.map Function Code Examples


This article collects typical usage examples of the map function from Python's multiprocessing.pool module. If you are unsure what map does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the map function, drawn from open-source projects and ordered by popularity.
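
As a primer, here is a minimal, self-contained sketch of the basic pattern the examples share (the worker function and pool size are placeholders):

import multiprocessing.pool

def square(x):
    # Worker applied to each input item. For a process-based
    # multiprocessing.Pool this must be picklable (defined at module
    # level); a ThreadPool has no such restriction.
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.pool.ThreadPool(processes=4)
    results = pool.map(square, range(10))  # blocks until all items are done
    pool.close()
    pool.join()
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]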

Example 1: _push

 def _push(self, src, dst):
     """
     Push src to dst on the remote.
     """
     force = False
     if src.startswith('+'):
         src = src[1:]
         force = True
     present = [self._refs[name][1] for name in self._refs]
     present.extend(self._pushed.values())
     # before updating the ref, write all objects that are referenced
     objects = git_list_objects(src, present)
     try:
         # upload objects in parallel
         pool = multiprocessing.pool.ThreadPool(processes=self._processes)
         pool.map(Binder(self, '_put_object'), objects)
     except Exception:
         self._fatal('exception while writing objects')
     sha = git_ref_value(src)
     error = self._write_ref(sha, dst, force)
     if error is None:
         self._write('ok %s' % dst)
         self._pushed[dst] = sha
     else:
         self._write('error %s %s' % (dst, error))
Developer: Blueprint-Marketing, Project: git-remote-dropbox, Lines: 25, Source: __init__.py
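
The Binder helper passed to pool.map above is not shown in this snippet. A plausible minimal stand-in (hypothetical; not necessarily the project's actual implementation) would simply forward each mapped item to a named method of the bound object:

class Binder(object):
    """Hypothetical stand-in: calls getattr(obj, name)(arg) for each item."""
    def __init__(self, obj, name):
        self._obj = obj
        self._name = name

    def __call__(self, arg):
        return getattr(self._obj, self._name)(arg)

Note also that the snippet relies on map() blocking until all uploads finish; the pool is never explicitly closed or joined.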


Example 2: parallel_compile

def parallel_compile(self, sources, output_dir=None, macros=None,
                     include_dirs=None, debug=0, extra_preargs=None,
                     extra_postargs=None, depends=None):
    """New compile function that we monkey patch into the existing compiler instance.
    """
    import multiprocessing.pool

    # Copied from the regular compile function
    macros, objects, extra_postargs, pp_opts, build = \
            self._setup_compile(output_dir, macros, include_dirs, sources,
                                depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    def _single_compile(obj):
        try:
            src, ext = build[obj]
        except KeyError:
            return
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)

    # Set by fix_compiler
    global glob_use_njobs
    if glob_use_njobs == 1:
        # This is equivalent to regular compile function
        for obj in objects:
            _single_compile(obj)
    else:
        # Use ThreadPool, rather than Pool, since the objects are not picklable.
        pool = multiprocessing.pool.ThreadPool(glob_use_njobs)
        pool.map(_single_compile, objects)
        pool.close()
        pool.join()

    # Return *all* object filenames, not just the ones we just built.
    return objects
Developer: GalSim-developers, Project: GalSim, Lines: 35, Source: setup.py
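
As the docstring notes, this function is meant to be monkey patched onto the compiler. A hedged sketch of how such a patch is typically applied (the exact target is an assumption; projects in the distutils era often patched CCompiler directly):

import distutils.ccompiler

# Make every compiler instance use the parallel version defined above.
distutils.ccompiler.CCompiler.compile = parallel_compile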


Example 3: run_all_intersections

def run_all_intersections(write_anomaly, incomplete, intersections, multi_model, smooth):
    print "Running all on", os.getpid()
    start_time = time.time()

    if incomplete:
        key = '_id'
        query = [
            {'$match': {'anomaly': {'$exists': False}}},
            {'$group': {'_id': '$site_no'}}
        ]
        if intersections != '':
            query[0]['$match']['site_no'] = {'$in': intersections.split(',')}
        locations = list(readings_collection.aggregate(query))

    else:
        key = 'intersection_number'
        if intersections != '':
            query = {key: {'$in': intersections.split(',')}}
        else:
            query = {key:  {'$regex': '3\d\d\d'}}
        locations = list(locations_collection.find(query))
    gen = [(str(l[key]), write_anomaly, incomplete, False, multi_model, smooth) for l in locations]
    pool = Pool(8, maxtasksperchild=1)
    pool.map(run_single_intersection, gen)
    print("TOTAL TIME: --- %s seconds ---" % (time.time() - start_time))
Developer: JonnoFTW, Project: htm-models-adelaide, Lines: 25, Source: index.py
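
Worth noting: Pool(8, maxtasksperchild=1) replaces each worker process after a single task, which keeps memory from accumulating in long-lived workers at the cost of extra process startup. A small illustration of the effect (worker PIDs change between tasks):

import os
import multiprocessing

def show_pid(i):
    # With maxtasksperchild=1, each task should land in a fresh worker.
    return (i, os.getpid())

if __name__ == '__main__':
    pool = multiprocessing.Pool(2, maxtasksperchild=1)
    print(pool.map(show_pid, range(4)))  # expect more than two distinct PIDs
    pool.close()
    pool.join()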


Example 4: build_packages

  def build_packages(self):
      """Build all the Spinnaker packages."""
      all_subsystems = []
      all_subsystems.extend(SUBSYSTEM_LIST)
      all_subsystems.extend(ADDITIONAL_SUBSYSTEMS)

      if self.__options.build:
        # Build in parallel using half available cores
        # to keep load in check.
        weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count()
        pool = multiprocessing.pool.ThreadPool(
            processes=int(max(1, weighted_processes)))
        pool.map(self.__do_build, all_subsystems)

      if self.__build_failures:
        if set(self.__build_failures).intersection(set(SUBSYSTEM_LIST)):
          raise RuntimeError('Builds failed for {0!r}'.format(
            self.__build_failures))
        else:
          print 'Ignoring errors on optional subsystems {0!r}'.format(
              self.__build_failures)

      if self.__options.nebula:
        return

      wait_on = set(all_subsystems).difference(set(self.__build_failures))
      pool = multiprocessing.pool.ThreadPool(processes=len(wait_on))
      print 'Copying packages...'
      pool.map(self.__do_copy, wait_on)
      return
Developer: edwinavalos, Project: spinnaker, Lines: 30, Source: build_release.py


Example 5: main

def main():
    parser = argparse.ArgumentParser(
        description="A simple tool to backup your Bitbucket repositories",
    )
    
    parser.add_argument('username', type=str, help='Username')
    parser.add_argument('password', type=str, help='Password')
    parser.add_argument('backupdir', type=str, 
                        help='The target backup directory')
    
    args = parser.parse_args()
    
    bitbucket = Bitbucket(args.username, args.password)
    repos = list(bitbucket.get_repositories())
    random.shuffle(repos)

    pool = multiprocessing.pool.ThreadPool(20)
    pool.map(lambda x: x.backup(args.backupdir), repos)

    failed = 0
    for repo in repos:
        if repo.failed is None:
            continue
        failed += 1
        print 'WARNING: the following repositories failed to update:'
        print repo.name
        print repo.output
        print repo.failed
    if failed:
        sys.exit(2)
Developer: ctheune, Project: codebackup, Lines: 30, Source: __init__.py
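
The lambda passed to map works here only because this is a ThreadPool: worker threads share the interpreter, so the callable is never pickled. The same call through a process-based pool would fail. A quick illustration, assuming nothing beyond the standard library:

import multiprocessing
import multiprocessing.pool

if __name__ == '__main__':
    thread_pool = multiprocessing.pool.ThreadPool(2)
    print(thread_pool.map(lambda x: x + 1, range(3)))  # [1, 2, 3]: no pickling
    thread_pool.close()
    thread_pool.join()

    process_pool = multiprocessing.Pool(2)
    try:
        process_pool.map(lambda x: x + 1, range(3))
    except Exception as exc:  # lambdas cannot be pickled for child processes
        print('process pool failed:', exc)
    process_pool.close()
    process_pool.join()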


Example 6: get_item_by_url

def get_item_by_url(urls):

    pool = mul.Pool()

    t1 = clock()
    pool.map(get_item_info, urls)
    t2 = clock()
    print 'time\t' + str(t2 - t1)
    print 'total count\t' + str(count)
Developer: nihaozjf, Project: crawler, Lines: 9, Source: ItemInfo.py


Example 7: run_master

	def run_master(self):

		logging.info('Creating a pool of ' + str(self.num_processes) + ' subprocess workers.')
		# create a pool of processes.
		pool = Pool(processes=self.num_processes,)

		# apply map on the chunks in parallel.
		regions = pool.map(self.apply_map, range(0, self.num_processes))

		# do the intermediate grouping step on each chunks in parallel.
		inters = pool.map(self.apply_intermediate, range(0, self.num_processes))
Developer: tiggreen, Project: map_reduce.py, Lines: 11, Source: map_reduce.py


Example 8: scan_all

def scan_all():
    items = [(name, addr)
             for name, endpoints in config['endpoints'].items()
             for addr in endpoints]
    pool.map(scan_one, items)

    info()

    if 'verbose' in sys.argv:
        import pprint;
        pprint.pprint(dict(active))
        pprint.pprint(dict(inactive))

    header = "".join([
        "name".center(29),
        "active".rjust(8),
        "inactive".rjust(9),
        "percent".rjust(9),
        "reason".center(32),
    ])
    info()
    info(header + "\n")
    info("-" * len(header) + "\n")

    active_n_total, inactive_n_total = 0, 0
    for name in sorted(config['endpoints']):
        active_n = len(active[name])
        inactive_n = len(inactive[name])

        active_n_total += active_n
        inactive_n_total += inactive_n

        total = active_n + inactive_n

        percent = ""
        if total:
            percent = "%%%0.1f" % (100 * float(active_n) / total)

        reasons = set([reason for _, reason in inactive[name]])

        info(name.rjust(29))
        info(str(active_n).rjust(8))
        info(str(inactive_n).rjust(9))
        info(percent.rjust(9))
        info(", ".join(reasons).rjust(32) + "\n")

    info("-" * len(header) + "\n")

    info("  total active:   %i\n" % active_n_total)
    info("total inactive:   %i\n" % inactive_n_total)
    value = 100 * float(active_n_total) / (active_n_total + inactive_n_total)
    info("percent active: %%%0.1f\n" % value)
    return value
Developer: AdamWill, Project: fedmsg, Lines: 53, Source: fedmsg-map.py
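
A small formatting detail: in "%%%0.1f" % value the doubled %% escapes a literal percent sign, so the output reads like %97.3.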


Example 9: _ConvertToWebP

def _ConvertToWebP(webp_binary, png_files):
  pool = multiprocessing.pool.ThreadPool(10)
  def convert_image(png_path):
    root = os.path.splitext(png_path)[0]
    webp_path = root + '.webp'
    args = [webp_binary, png_path] + _PNG_TO_WEBP_ARGS + [webp_path]
    subprocess.check_call(args)
    os.remove(png_path)
  # Android requires pngs for 9-patch images.
  pool.map(convert_image, [f for f in png_files if not f.endswith('.9.png')])
  pool.close()
  pool.join()
Developer: google, Project: proto-quic, Lines: 12, Source: package_resources.py


Example 10: build_jars

  def build_jars(self):
    """Build the Spinnaker packages as jars
    """
    subsystems = ['halyard']

    if self.__options.do_jar_build:
      weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count()
      pool = multiprocessing.pool.ThreadPool(
        processes=int(max(1, weighted_processes)))
      pool.map(self.__do_jar_build, subsystems)

    self.__check_build_failures(subsystems)
Developer: PioTi, Project: spinnaker, Lines: 12, Source: build_release.py


Example 11: build_container_images

  def build_container_images(self):
    """Build the Spinnaker packages as container images.
    """
    subsystems = [comp for comp in SUBSYSTEM_LIST if comp != 'spinnaker']
    subsystems.append('spinnaker-monitoring')

    if self.__options.container_builder:
      weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count()
      pool = multiprocessing.pool.ThreadPool(
        processes=int(max(1, weighted_processes)))
      pool.map(self.__do_container_build, subsystems)

    self.__check_build_failures(subsystems)
Developer: PioTi, Project: spinnaker, Lines: 13, Source: build_release.py


Example 12: run_program

	def run_program(self):

		logging.info('Running the framework...')
		# Fixing the start time.
		start_time = time.time()

		"""
		Create a pool of processes. The number of processes
		is equal to the number of files. One process takes care of one file.
		"""
		pool = MyMRPool(len(self.files))

		logging.info('The initial number of running processes is ' + str(len(self.files)) + '.')


		"""
		Apply call_map_reduce on all files in parallel. All files 
		will be partitioned/mapped/shuffled individually.
		"""
		apply_map_reduces = pool.map(self.call_map_reduce, self.files)


		self.shuffle()

		"""
		At this point we have bunch of inter-shuffled files.
		We can reduce them in parallel.	
		"""
		reduces = pool.map(self.apply_reduce, range(0, self.num_processes))

		"""
		At this point we have bunch of reduced files so we can 
		merge all reduce files into one final file.	
		"""
		self.merge_reduce_results()

		"""
		Finalizing the framework execution.
		"""
		self.__finalize_program()

		logging.info('The program is successfully finished.')

		"""
		Fixing the end time. We use this for calculating
		the total execution time of the framework.
		"""
		end_time = time.time()

		logging.info('The total execution time is: ' + str(end_time - start_time))
Developer: tiggreen, Project: map_reduce.py, Lines: 50, Source: map_reduce.py


Example 13: executeOperatorSequence

def executeOperatorSequence(operator, kwargsUpdated, parallel):   
    outputPathPattern = ''
    inputPathPattern = ''
    for key, value in kwargsUpdated.iteritems():
        arg = str(value)
        if '*' in arg:

            if operator.get_targets().count(key) == 1:
                outputPathPattern = arg
                outputKey = key
            else: 
                # get file of path in arg with Unix style pathname pattern expansion
                fileList = glob.glob(arg)
                if not fileList:
                    log.error("%s: Could not find any files for input pattern %s in Slot %s" % operator.name, outputPathPattern, outputKey)
                
                inputFileList = fileList
                inputPathPattern = arg
                inputKey = key

    if outputPathPattern == '' or inputPathPattern == '':
        log.error("If two file patterns (paths with '*' are used, one must be an argument for a non-target parameter and one in target parameter")
            
    pre, post = inputPathPattern.split('*')
    outputFileList = []
    
    for fil in inputFileList:
        tmp = str(fil).replace(pre, '')
        tmp = str(tmp).replace(post, '')  
        outputFileList.append(outputPathPattern.replace('*', tmp))
    
    args_list = []
    for j in range(len(inputFileList)):
        kwargs_new = OrderedDict(kwargsUpdated)
        kwargs_new[inputKey] = inputFileList[j]
        kwargs_new[outputKey] = outputFileList[j]
        args_new = list(kwargs_new.values())
        args_new.append(operator)            
        args_list.append(args_new)
        
    if parallel:
        # multiprocessing
        num_of_workers = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(num_of_workers - 1)
        # blocks until finished
        pool.map(callWrapper, args_list)
    else:
        for args in args_list:
            callWrapper(args)
    return outputPathPattern
Developer: CognitionGuidedSurgery, Project: msml, Lines: 50, Source: sequence.py


Example 14: _ConvertToWebP

def _ConvertToWebP(webp_binary, png_files):
  pool = multiprocessing.pool.ThreadPool(10)
  def convert_image(png_path):
    root = os.path.splitext(png_path)[0]
    webp_path = root + '.webp'
    args = [webp_binary, png_path, '-mt', '-quiet', '-m', '6', '-q', '100',
        '-lossless', '-o', webp_path]
    subprocess.check_call(args)
    os.remove(png_path)

  pool.map(convert_image, [f for f in png_files
                           if not _PNG_WEBP_BLACKLIST_PATTERN.match(f)])
  pool.close()
  pool.join()
Developer: bashow0316, Project: chromium, Lines: 14, Source: process_resources.py


Example 15: process_images

def process_images():
    """Process all images in parallel.

    Like app.process_images, use a process pool for convenience. The last
    three steps of the problem (cropping and saving) are also parallelized.
    This cannot be done using multiprocessing.Pool because it daemonizes its
    children processes, and they in turn cannot have children of their own.

    Use custom Pool and Process subclasses that ensure the children are not
    daemonized.
    """
    pool = NoDaemonPool()  # use cpu_count() processes
    pool.map(process_image, image_paths())
    pool.close()
    pool.join()
Developer: ag-temp, Project: coding-challenge-d, Lines: 15, Source: app_nodaemon.py
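
The NoDaemonPool subclass referenced in the docstring is not included in the snippet. A widely used recipe (a sketch assuming Python 3; not necessarily this project's code) overrides the daemon flag of the pool's worker processes so they may spawn children of their own:

import multiprocessing
import multiprocessing.pool

class NoDaemonProcess(multiprocessing.Process):
    # Ignore the pool's attempt to mark workers as daemons, since
    # daemonic processes are not allowed to have children.
    @property
    def daemon(self):
        return False

    @daemon.setter
    def daemon(self, value):
        pass

class NoDaemonContext(type(multiprocessing.get_context())):
    Process = NoDaemonProcess

class NoDaemonPool(multiprocessing.pool.Pool):
    # Subclass multiprocessing.pool.Pool rather than multiprocessing.Pool,
    # which is only a factory function, not a class.
    def __init__(self, *args, **kwargs):
        kwargs['context'] = NoDaemonContext()
        super(NoDaemonPool, self).__init__(*args, **kwargs)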


Example 16: build_packages

  def build_packages(self):
      """Build all the Spinnaker packages."""
      all_subsystems = []
      all_subsystems.extend(SUBSYSTEM_LIST)
      all_subsystems.extend(ADDITIONAL_SUBSYSTEMS)

      if self.__options.build:
        # Build in parallel using half available cores
        # to keep load in check.
        weighted_processes = self.__options.cpu_ratio * multiprocessing.cpu_count()
        pool = multiprocessing.pool.ThreadPool(
            processes=int(max(1, weighted_processes)))
        pool.map(self.__do_build, all_subsystems)

      self.__check_build_failures(SUBSYSTEM_LIST)
Developer: PioTi, Project: spinnaker, Lines: 15, Source: build_release.py


Example 17: _ConvertToWebP

def _ConvertToWebP(webp_binary, png_files):
  pool = multiprocessing.pool.ThreadPool(10)
  def convert_image(png_path):
    root = os.path.splitext(png_path)[0]
    webp_path = root + '.webp'
    args = [webp_binary, png_path, '-mt', '-quiet', '-m', '6', '-q', '100',
        '-lossless', '-o', webp_path]
    subprocess.check_call(args)
    os.remove(png_path)
  # Android requires pngs for 9-patch images.
  # Daydream (*.dd) requires pngs for icon files.
  pool.map(convert_image, [f for f in png_files if not (f.endswith('.9.png') or
                           f.endswith('.dd.png'))])
  pool.close()
  pool.join()
Developer: grf123, Project: chromium, Lines: 15, Source: process_resources.py


Example 18: _map_parallel

def _map_parallel(function, args, n_jobs):
    """multiprocessing.Pool(processors=n_jobs).map with some error checking"""
    # Following the error checking found in joblib
    multiprocessing = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
    if multiprocessing:
        try:
            import multiprocessing
            import multiprocessing.pool
        except ImportError:
            multiprocessing = None
    # 2nd stage: validate that locking is available on the system and
    #            issue a warning if not
    if multiprocessing:
        try:
            _sem = multiprocessing.Semaphore()
            del _sem  # cleanup
        except (ImportError, OSError) as e:
            multiprocessing = None
            warnings.warn('%s. _map_parallel will operate in serial mode' % (e,))
    if multiprocessing and int(n_jobs) not in (0, 1):
        if n_jobs == -1:
            n_jobs = None
        pool = multiprocessing.Pool(processes=n_jobs)
        map_result = pool.map(function, args)
        pool.close()
        pool.join()
    else:
        map_result = list(map(function, args))
    return map_result
Developer: xyh-cosmo, Project: pystan, Lines: 29, Source: model.py
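
For reference, a usage sketch (the worker must be defined at module level so it pickles for the multiprocessing branch; os and warnings are assumed to be imported elsewhere in the surrounding module):

def _square(x):
    return x * x  # module-level, hence picklable

if __name__ == '__main__':
    print(_map_parallel(_square, range(8), n_jobs=2))   # process pool
    print(_map_parallel(_square, range(8), n_jobs=1))   # plain serial map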


Example 19: test_no_thread_pool

def test_no_thread_pool():
    pool = xmon_stepper.ThreadlessPool()
    result = pool.map(lambda x: x + 1, range(10))
    assert result == [x + 1 for x in range(10)]
    # No ops.
    pool.terminate()
    pool.join()
Developer: google2013, Project: Cirq, Lines: 7, Source: xmon_stepper_test.py
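
ThreadlessPool itself is not shown; judging from this test, it is a drop-in serial replacement for a pool. A minimal stand-in that would satisfy the test (a sketch, not Cirq's actual implementation):

class ThreadlessPool(object):
    """Pool-like object that maps serially; terminate/join are no-ops."""
    def map(self, func, iterable, chunksize=None):
        return [func(x) for x in iterable]

    def terminate(self):
        pass

    def join(self):
        pass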


Example 20: _CompileDeps

def _CompileDeps(aapt_path, dep_subdirs, temp_dir):
  partials_dir = os.path.join(temp_dir, 'partials')
  build_utils.MakeDirectory(partials_dir)
  partial_compile_command = [
      aapt_path + '2',
      'compile',
      # TODO(wnwen): Turn this on once aapt2 forces 9-patch to be crunched.
      # '--no-crunch',
  ]
  pool = multiprocessing.pool.ThreadPool(10)
  def compile_partial(directory):
    dirname = os.path.basename(directory)
    partial_path = os.path.join(partials_dir, dirname + '.zip')
    compile_command = (partial_compile_command +
                       ['--dir', directory, '-o', partial_path])
    build_utils.CheckOutput(compile_command)

    # Sorting the files in the partial ensures deterministic output from the
    # aapt2 link step which uses order of files in the partial.
    sorted_partial_path = os.path.join(partials_dir, dirname + '.sorted.zip')
    _SortZip(partial_path, sorted_partial_path)

    return sorted_partial_path

  partials = pool.map(compile_partial, dep_subdirs)
  pool.close()
  pool.join()
  return partials
Developer: gregocyro, Project: android, Lines: 28, Source: compile_resources.py
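
Note that pool.map returns results in input order regardless of which worker finishes first, so the returned partials line up one-to-one with dep_subdirs for the subsequent aapt2 link step.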



Note: The multiprocessing.pool.map examples in this article were collected from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; use and redistribution are subject to the corresponding project's License. Please do not reproduce without permission.

