
Python pool.terminate Function Code Examples


This article collects typical usage examples of the multiprocessing.pool.terminate function in Python. If you are wondering what exactly terminate does, how to call it, or where to find working examples, the curated samples below should help.



Below are 20 code examples of the terminate function, ordered by popularity by default.
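
Before the examples, a quick note on semantics: close() stops a pool from accepting new work and lets queued tasks finish, terminate() stops the worker processes immediately and discards pending work, and join() waits for the workers to exit. A minimal sketch of the usual pattern (illustrative, not taken from any of the projects below):

import multiprocessing.pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.pool.Pool(processes=4)
    try:
        print(pool.map(square, range(8)))
        pool.close()      # normal path: no more tasks, let workers drain
    except Exception:
        pool.terminate()  # error path: kill workers, drop pending tasks
        raise
    finally:
        pool.join()       # in both cases, wait for workers to exit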

Example 1: ScopedPool

@contextlib.contextmanager  # inferred: the docstring below describes a context manager and the body yields
def ScopedPool(*args, **kwargs):
  """Context Manager which returns a multiprocessing.pool instance which
  correctly deals with thrown exceptions.

  *args - Arguments to multiprocessing.pool

  Kwargs:
    kind ('threads', 'procs') - The type of underlying coprocess to use.
    **etc - Arguments to multiprocessing.pool
  """
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = _ScopedPool_initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    pool.close()
  except:
    pool.terminate()
    raise
  finally:
    pool.join()
Author: duanwujie, Project: depot_tools, Lines: 26, Source: git_common.py
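
A usage sketch for the context manager above (assuming the @contextlib.contextmanager decorator shown and that _ScopedPool_initer is defined in the same module):

with ScopedPool(kind='threads') as pool:
    lengths = pool.map(len, ['a', 'bb', 'ccc'])
# Normal exit runs close(); an exception in the body runs terminate() and
# re-raises; join() runs in both cases.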


Example 2: finalize

    def finalize(self):
        pool = self._pool

        self._next = None
        self._pool = None
        if pool is not None:
            pool.terminate()
Author: ktnyt, Project: chainer, Lines: 7, Source: multithread_iterator.py


Example 3: test_no_thread_pool

def test_no_thread_pool():
    pool = xmon_stepper.ThreadlessPool()
    result = pool.map(lambda x: x + 1, range(10))
    assert result == [x + 1 for x in range(10)]
    # No ops.
    pool.terminate()
    pool.join()
Author: google2013, Project: Cirq, Lines: 7, Source: xmon_stepper_test.py
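
For context, a ThreadlessPool like the one exercised above can be sketched as a synchronous stand-in for a real pool. This is an illustrative guess at the interface the test uses, not Cirq's actual implementation:

class ThreadlessPool(object):
    # Runs map() synchronously in the calling thread, so terminate()
    # and join() are deliberate no-ops.
    def map(self, func, iterable, chunksize=None):
        return [func(x) for x in iterable]

    def terminate(self):
        pass

    def join(self):
        pass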


Example 4: system_status

def system_status():
    from status_checks import run_checks
    class WebOutput:
        def __init__(self):
            self.items = []

        def add_heading(self, heading):
            self.items.append({"type": "heading", "text": heading, "extra": []})

        def print_ok(self, message):
            self.items.append({"type": "ok", "text": message, "extra": []})

        def print_error(self, message):
            self.items.append({"type": "error", "text": message, "extra": []})

        def print_warning(self, message):
            self.items.append({"type": "warning", "text": message, "extra": []})

        def print_line(self, message, monospace=False):
            self.items[-1]["extra"].append({"text": message, "monospace": monospace})

    output = WebOutput()
    # Create a temporary pool of processes for the status checks
    pool = multiprocessing.pool.Pool(processes=5)
    run_checks(False, env, output, pool)
    pool.terminate()
    return json_response(output.items)
Author: jkaberg, Project: mailinabox, Lines: 27, Source: daemon.py


Example 5: _run_tests

  def _run_tests(self):
    pool = multiprocessing.pool.ThreadPool(processes=self.suite_concurrency)
    outstanding_suites = []
    for suite in self.suite_runners:
      suite.task = pool.apply_async(suite.run)
      outstanding_suites.append(suite)

    ret = True
    try:
      while len(outstanding_suites) > 0:
        for suite in list(outstanding_suites):
          if suite.timed_out():
            msg = "Task %s not finished within timeout %s" % (suite.name,
                suite.suite.timeout_minutes,)
            logging.error(msg)
            raise Exception(msg)
          task = suite.task
          if task.ready():
            this_task_ret = task.get()
            outstanding_suites.remove(suite)
            if this_task_ret:
              logging.info("Suite %s succeeded.", suite.name)
            else:
              logging.info("Suite %s failed.", suite.name)
              ret = False
        time.sleep(5)
    except KeyboardInterrupt:
      logging.info("\n\nDetected KeyboardInterrupt; shutting down!\n\n")
      raise
    finally:
      pool.terminate()
    return ret
Author: twmarshall, Project: Impala, Lines: 32, Source: test-with-docker.py


Example 6: _create_process_pool

@contextlib.contextmanager  # inferred: the function yields a pool, so it is meant for a with-block
def _create_process_pool(processes=1):
    if _MULTIPROCESSING and processes:
        logger.info("creating pool with %i workers", processes)
        pool = multiprocessing.pool.Pool(processes=processes)
    else:
        logger.info("creating dummy pool")
        pool = DummyPool()  # DummyPool: a no-op fallback defined elsewhere in s3.py
    yield pool
    pool.terminate()
Author: mpenkov, Project: smart_open, Lines: 9, Source: s3.py
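
A possible call site, assuming the decorator restored above (DummyPool and _MULTIPROCESSING are module-level names in s3.py):

with _create_process_pool(processes=4) as pool:
    results = pool.map(abs, [-3, -2, -1])
# Note: as written, terminate() runs only on a normal exit of the with-block;
# an exception inside the block skips it, since the generator has no
# try/finally around the yield.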


Example 7: s3_iter_bucket

def s3_iter_bucket(bucket, prefix="", accept_key=lambda key: True, key_limit=None, workers=16):
    """
    Iterate and download all S3 files under `bucket/prefix`, yielding out
    `(key, key content)` 2-tuples (generator).

    `accept_key` is a function that accepts a key name (unicode string) and
    returns True/False, signalling whether the given key should be downloaded
    or not (default: accept all keys).

    If `key_limit` is given, stop after yielding out that many results.

    The keys are processed in parallel, using `workers` processes (default: 16),
    to speed up downloads greatly. If multiprocessing is not available (i.e.
    NO_MULTIPROCESSING is True), this parameter is ignored.

    Example::

      >>> mybucket = boto.connect_s3().get_bucket('mybucket')

      >>> # get all JSON files under "mybucket/foo/"
      >>> for key, content in s3_iter_bucket(mybucket, prefix='foo/', accept_key=lambda key: key.endswith('.json')):
      ...     print key, len(content)

      >>> # limit to 10k files, using 32 parallel workers (default is 16)
      >>> for key, content in s3_iter_bucket(mybucket, key_limit=10000, workers=32):
      ...     print key, len(content)

    """
    total_size, key_no = 0, -1
    keys = (key for key in bucket.list(prefix=prefix) if accept_key(key.name))

    if NO_MULTIPROCESSING:
        logger.info("iterating over keys from %s without multiprocessing" % bucket)
        iterator = imap(s3_iter_bucket_process_key, keys)
    else:
        logger.info("iterating over keys from %s with %i workers" % (bucket, workers))
        pool = multiprocessing.pool.Pool(processes=workers)
        iterator = pool.imap_unordered(s3_iter_bucket_process_key, keys)

    for key_no, (key, content) in enumerate(iterator):
        if key_no % 1000 == 0:
            logger.info(
                "yielding key #%i: %s, size %i (total %.1fMB)" % (key_no, key, len(content), total_size / 1024.0 ** 2)
            )

        yield key, content
        key.close()
        total_size += len(content)

        if key_limit is not None and key_no + 1 >= key_limit:
            # we were asked to output only a limited number of keys => we're done
            break

    if not NO_MULTIPROCESSING:
        pool.terminate()

    logger.info("processed %i keys, total size %i" % (key_no + 1, total_size))
Author: val314159, Project: smart_open, Lines: 57, Source: smart_open_lib.py


Example 8: buildList

    def buildList(self):
        """
        Build the artifact "list" from sources defined in the given configuration.

        :returns: Dictionary described above.
        """
        priority = 0
        pool_dict = {}

        for source in self.configuration.artifactSources:
            priority += 1
            pool = pool_dict.setdefault(source['type'], ThreadPool(self.MAX_THREADS_DICT[source['type']]))
            pool.apply_async(self._read_artifact_source, args=[source, priority],
                             callback=self._add_result)

        for pool in pool_dict.values():
            pool.close()

        at_least_1_runs = True
        all_keys = range(1, len(self.configuration.artifactSources) + 1)
        finished = False
        while at_least_1_runs:
            for i in range(30):
                time.sleep(1)

                if not self.errors.empty():
                    for pool in pool_dict.values():
                        logging.debug("Terminating pool %s", str(pool))
                        pool.terminate()
                    finished = True
                    break

            at_least_1_runs = False
            if not finished:            
                self.results_lock.acquire()
                finished = sorted(list(self.results.keys()))
                self.results_lock.release()
                if all_keys != finished:
                    logging.debug("Still waiting for priorities %s to finish", str(list(set(all_keys) - set(finished))))
                    at_least_1_runs = True

        for pool in pool_dict.values():
            if pool._state != multiprocessing.pool.TERMINATE:
                pool.join()

        if not self.errors.empty():
            raise RuntimeError("%i error(s) occurred during reading of artifact list." % self.errors.qsize())

        return self._get_artifact_list()
Author: jboss-eap, Project: maven-repository-builder, Lines: 49, Source: artifact_list_builder.py
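
Note: pool._state and the module-level TERMINATE constant are private implementation details of multiprocessing.pool, so this guard may break across Python versions; it is shown here as the project wrote it.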


Example 9: thread_pool

@contextlib.contextmanager  # inferred from the docstring below: the function yields the pool
def thread_pool( size ):
    """
    A context manager that yields a thread pool of the given size. On normal closing,
    this context manager closes the pool and joins all threads in it. On exceptions, the pool
    will be terminated but threads won't be joined.
    """
    pool = multiprocessing.pool.ThreadPool( processes=size )
    try:
        yield pool
    except:
        pool.terminate( )
        raise
    else:
        pool.close( )
        pool.join( )
Author: arkal, Project: cgcloud, Lines: 15, Source: util.py
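
A usage sketch matching the docstring (thread pools accept lambdas, since nothing is pickled):

with thread_pool( 4 ) as pool:
    squares = pool.map( lambda x: x * x, range( 10 ) )
# Normal exit: close() then join(). An exception inside the block:
# terminate(), without joining the threads.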


Example 10: get_builds

def get_builds(db, jobs_dir, metadata, threads, client_class):
    """
    Adds information about tests to a dictionary.

    Args:
        db: the build database; existing builds are read from it and new
            started/finished records are inserted into it.
        jobs_dir: the GCS path containing jobs.
        metadata: a dict of metadata about the jobs_dir.
        threads: how many threads to use to download build information.
        client_class: a constructor for a GCSClient (or a subclass).
    """
    gcs = client_class(jobs_dir, metadata)

    print('Loading builds from %s' % jobs_dir)
    sys.stdout.flush()

    builds_have = db.get_existing_builds(jobs_dir)
    print('already have %d builds' % len(builds_have))
    sys.stdout.flush()

    jobs_and_builds = gcs.get_builds(builds_have)
    pool = None
    if threads > 1:
        pool = multiprocessing.Pool(threads, mp_init_worker,
                                    (jobs_dir, metadata, client_class))
        builds_iterator = pool.imap_unordered(
            get_started_finished, jobs_and_builds)
    else:
        global WORKER_CLIENT  # pylint: disable=global-statement
        WORKER_CLIENT = gcs
        builds_iterator = (
            get_started_finished(job_build) for job_build in jobs_and_builds)

    try:
        for n, (build_dir, started, finished) in enumerate(builds_iterator):
            print(build_dir)
            if started or finished:
                db.insert_build(build_dir, started, finished)
            if n % 200 == 0:
                db.commit()
    except KeyboardInterrupt:
        if pool:
            pool.terminate()
        raise
    else:
        if pool:
            pool.close()
            pool.join()
    db.commit()
Author: Kashomon, Project: test-infra, Lines: 48, Source: make_db.py
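
Design note: the try/except/else split here follows the shutdown discipline used throughout these examples: terminate() on KeyboardInterrupt before re-raising, close() plus join() on the success path, so pending inserts are not silently lost before the final db.commit().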


Example 11: ScopedPool

@contextlib.contextmanager  # as in Example 1: the body yields, so the decorator is implied
def ScopedPool(*args, **kwargs):
  if kwargs.pop('kind', None) == 'threads':
    pool = multiprocessing.pool.ThreadPool(*args, **kwargs)
  else:
    orig, orig_args = kwargs.get('initializer'), kwargs.get('initargs', ())
    kwargs['initializer'] = initer
    kwargs['initargs'] = orig, orig_args
    pool = multiprocessing.pool.Pool(*args, **kwargs)

  try:
    yield pool
    pool.close()
  except:
    pool.terminate()
    raise
  finally:
    pool.join()
Author: riannucci, Project: vimified, Lines: 17, Source: common.py


Example 12: list_all_machines

def list_all_machines(cloud_ids, headers):
    "Given the cloud ids, runs in parallel queries to get all machines"
    def list_one_cloud(cloud_id):
        cloud_machines = requests.get('https://mist.io/clouds/%s/machines' % cloud_id, headers=headers)
        if cloud_machines.status_code == 200:
            machines = cloud_machines.json()
            for machine in machines:
                machine['cloud'] = cloud_id
            return machines
        return []

    pool = multiprocessing.pool.ThreadPool(8)
    results = pool.map(list_one_cloud, cloud_ids)
    pool.terminate()

    machines = []
    for result in results:
        machines.extend(result)
    return machines
Author: mistio, Project: mist.io-api-examples, Lines: 19, Source: list_machines.py


Example 13: master_progress

    def master_progress(mpu, num_processes, bucket, upload_list):
        x = 0
        print "proc = ?? " + str(num_processes)
        while True:
            try:
                if x != num_parts:
                    # logger.error(str(src.name) + " start ")
                    pool = NoDaemonProcessPool(processes=num_processes)
                    value = pool.map_async(do_part_upload,
                                           gen_args(x, fold_last, upload_list)).get(99999999)
                    print "when to finish??????"
                # print "dadadada " + str(value)
                que.put(value)
                src.close()
                mpu.complete_upload()
                logger.error(str(src.name) + " stop ")
                # proc = subprocess.Popen('date', stdout=subprocess.PIPE)
                # print stdout
                print "mpu.complete src name " + src.name
                # os.remove(src.name)
                # print "index in proc = " + str(FileList.index(uploadFileNames))
                lock.acquire()
                status_list[FileList.index(uploadFileNames)] = 'finish'
                print src.name + " finish  " + str(status_list)
                critical_threadnum(Total_Threadnum, Threadnum, num_processes)
                print uploadFileNames + " add back now is   " + str(Threadnum.value)
                lock.release()
                src.close()
                return value
                # pool.terminate()
                break
            except KeyboardInterrupt:
                logger.warn("Received KeyboardInterrupt, canceling upload")
                pool.terminate()
                mpu.cancel_upload()
                print "keyboarddddddddddddddddddddddddddddddd"
                break
            except IOError:
                break
            except Exception, err:
                logger.error("Encountered an error, canceling upload aaaaaaaaaaaa")
                print src.name
                logger.error(str(src.name) + str(err))
Author: zgbsfs, Project: storage, Lines: 42, Source: s3-mp-upload.py


Example 14: _list_machines

    def _list_machines(self):
        self._machines = []
        backends = self.backends()
        # show only enabled backends
        # enabled_backends = [backend for backend in backends if backend.enabled]

        def _list_one(backend):
            machines = []
            try:
                machines = backend.machines()
            except:
                # could be a cloud with expired creds, so don't fail
                pass
            return machines

        pool = multiprocessing.pool.ThreadPool(10)
        results = pool.map(_list_one, backends)
        pool.terminate()
        for result in results:
            self._machines.extend(result)
开发者ID:vkefallinos,项目名称:mist.client,代码行数:20,代码来源:__init__.py


Example 15: eval


#......... some of the code is omitted here .........

            # Reset '_'

            globals_['_'] = locals_['_'] = None

        if globals_.get('__IN_EVAL__', None) is None:

            globals_['__IN_EVAL__'] = True

        result = _run(graph, root_nodes[0], globals_, locals_, {}, None, False)

        # 1

        for expr_return_value in result:

            globals_values.append(globals_)

            locals_values.append(locals_)

            return_values.append([expr_return_value])

        # N-1

        for (task_result, task_locals_, task_globals_) in tasks:

            return_values.append(task_result.get())

            locals_values.append(task_locals_)

            globals_values.append(task_globals_)

        # Reduce + _PythonectResult Grouping

        for item in return_values:

            # Is there _PythonectResult in item list?

            for sub_item in item:

                if isinstance(sub_item, _PythonectResult):

                    # 1st Time?

                    if sub_item.values['node'] not in reduces:

                        reduces[sub_item.values['node']] = []

                        # Add Place holder to mark the position in the return value list

                        return_value.append(_PythonectLazyRunner(sub_item.values['node']))

                    reduces[sub_item.values['node']] = reduces[sub_item.values['node']] + [sub_item.values]

                else:

                    return_value.append(sub_item)

        # Any _PythonectLazyRunner's?

        if reduces:

            for return_item_idx in xrange(0, len(return_value)):

                if isinstance(return_value[return_item_idx], _PythonectLazyRunner):

                    # Swap list[X] with list[X.go(reduces)]

                    return_value[return_item_idx] = pool.apply_async(return_value[return_item_idx].go, args=(graph, reduces))

            return_value = __resolve_and_merge_results(return_value)

        # [...] ?

        if return_value:

            # Single return value? (e.g. [1])

            if len(return_value) == 1:

                return_value = return_value[0]

            # Update globals_ and locals_

#            globals_, locals_ = __merge_all_globals_and_locals(globals_, locals_, globals_values, {}, locals_values, {})

        # Set `return value` as `_`

        globals_['_'] = locals_['_'] = return_value

        if globals_.get('__IN_EVAL__', None) is not None:

            del globals_['__IN_EVAL__']

        pool.close()

        pool.join()

        pool.terminate()

    return return_value
Author: ESSL-CQQ, Project: pythonect, Lines: 101, Source: eval.py


Example 16: _spider


#......... some of the code is omitted here .........
        else:
            context = ssl._create_unverified_context()

        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req = Request(url)
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        if "Content-type" not in resp.headers:
            tty.debug("ignoring page " + url)
            return pages, links

        if not resp.headers["Content-type"].startswith('text/html'):
            tty.debug("ignoring page " + url + " with content type " +
                      resp.headers["Content-type"])
            return pages, links

        # Do the real GET request when we know it's just HTML.
        req.get_method = lambda: "GET"
        response = _urlopen(req, timeout=_timeout, context=context)
        response_url = response.geturl()

        # Read the page and stick it in the map we'll return
        page = response.read().decode('utf-8')
        pages[response_url] = page

        # Parse out the links in the page
        link_parser = LinkParser()
        subcalls = []
        link_parser.feed(page)

        while link_parser.links:
            raw_link = link_parser.links.pop()
            abs_link = urljoin(response_url, raw_link.strip())

            links.add(abs_link)

            # Skip stuff that looks like an archive
            if any(raw_link.endswith(suf) for suf in ALLOWED_ARCHIVE_TYPES):
                continue

            # Skip things outside the root directory
            if not abs_link.startswith(root):
                continue

            # Skip already-visited links
            if abs_link in visited:
                continue

            # If we're not at max depth, follow links.
            if depth < max_depth:
                subcalls.append((abs_link, visited, root,
                                 depth + 1, max_depth, raise_on_error))
                visited.add(abs_link)

        if subcalls:
            pool = NonDaemonPool(processes=len(subcalls))
            try:
                results = pool.map(_spider_wrapper, subcalls)

                for sub_pages, sub_links in results:
                    pages.update(sub_pages)
                    links.update(sub_links)

            finally:
                pool.terminate()
                pool.join()

    except URLError as e:
        tty.debug(e)

        if hasattr(e, 'reason') and isinstance(e.reason, ssl.SSLError):
            tty.warn("Spack was unable to fetch url list due to a certificate "
                     "verification problem. You can try running spack -k, "
                     "which will not check SSL certificates. Use this at your "
                     "own risk.")

        if raise_on_error:
            raise NoNetworkConnectionError(str(e), url)

    except HTMLParseError as e:
        # This error indicates that Python's HTML parser sucks.
        msg = "Got an error parsing HTML."

        # Pre-2.7.3 Pythons in particular have rather prickly HTML parsing.
        if sys.version_info[:3] < (2, 7, 3):
            msg += " Use Python 2.7.3 or newer for better HTML parsing."

        tty.warn(msg, url, "HTMLParseError: " + str(e))

    except Exception as e:
        # Other types of errors are completely ignored, except in debug mode.
        tty.debug("Error in _spider: %s:%s" % (type(e), e),
                  traceback.format_exc())

    return pages, links
Author: matzke1, Project: spack, Lines: 101, Source: web.py


Example 17: close_multi

def close_multi():
  '''Close multi stuff'''

  if pool is not None:
    pool.terminate()
Author: jdevemy, Project: lammps-tools, Lines: 5, Source: utilsscript.py
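
The excerpt relies on a module-level pool. A hypothetical counterpart that would set it up (the name init_multi is illustrative, not from lammps-tools):

import multiprocessing.pool

pool = None

def init_multi(nprocs=2):
  '''Create the module-level pool that close_multi() tears down'''
  global pool
  pool = multiprocessing.pool.Pool(processes=nprocs)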


Example 18: _run_next_virtual_nodes

def _run_next_virtual_nodes(graph, node, globals_, locals_, flags, pool, result):

    operator = graph.node[node].get('OPERATOR', None)

    return_value = []

    not_safe_to_iter = False

    is_head_result = True

    head_result = None

    # "Hello, world" or {...}

    if isinstance(result, (basestring, dict)) or not __isiter(result):

        not_safe_to_iter = True

    # [[1]]

    if isinstance(result, list) and len(result) == 1 and isinstance(result[0], list):

        result = result[0]

        not_safe_to_iter = True

    # More nodes ahead?

    if operator:

        if not_safe_to_iter:

            logging.debug('not_safe_to_iter is True for %s' % result)

            head_result = result

            tmp_globals = copy.copy(globals_)

            tmp_locals = copy.copy(locals_)

            tmp_globals['_'] = tmp_locals['_'] = head_result

            return_value = __resolve_and_merge_results(_run(graph, node, tmp_globals, tmp_locals, {}, None, True))

        else:

            # Originally this was implemented using result[0] and result[1:] but xrange() is not slice-able, thus, I have changed it to `for` with buffer for 1st result

            for res_value in result:

                logging.debug('Now at %s from %s' % (res_value, result))

                if is_head_result:

                    logging.debug('is_head_result is True for %s' % res_value)

                    is_head_result = False

                    head_result = res_value

                    tmp_globals = copy.copy(globals_)

                    tmp_locals = copy.copy(locals_)

                    tmp_globals['_'] = tmp_locals['_'] = head_result

                    return_value.insert(0, _run(graph, node, tmp_globals, tmp_locals, {}, None, True))

                    continue

                tmp_globals = copy.copy(globals_)

                tmp_locals = copy.copy(locals_)

                tmp_globals['_'] = tmp_locals['_'] = res_value

                # Synchronous

                if operator == '|':

                    return_value.append(pool.apply(_run, args=(graph, node, tmp_globals, tmp_locals, {}, None, True)))

                # Asynchronous

                if operator == '->':

                    return_value.append(pool.apply_async(_run, args=(graph, node, tmp_globals, tmp_locals, {}, None, True)))

            pool.close()

            pool.join()

            pool.terminate()

            logging.debug('return_value = %s' % return_value)

            return_value = __resolve_and_merge_results(return_value)

    # Loopback

#......... some of the code is omitted here .........
Author: ESSL-CQQ, Project: pythonect, Lines: 101, Source: eval.py


Example 19: _run_next_graph_nodes

def _run_next_graph_nodes(graph, node, globals_, locals_, pool):

    operator = graph.node[node].get('OPERATOR', None)

    nodes_return_value = []

    return_value = None

    # False? Terminate Flow.

    if isinstance(locals_['_'], bool) and locals_['_'] is False:

        return False

    if operator:

        #   -->  (a)
        #   --> / | \
        #    (b) (c) (d)
        #       \ | /
        #        (e)

        next_nodes = sorted(graph.successors(node))

        # N-1

        for next_node in next_nodes[1:]:

            # Synchronous

            if operator == '|':

                nodes_return_value.append(pool.apply(_run, args=(graph, next_node, globals_, locals_, {}, None, False)))

            # Asynchronous

            if operator == '->':

                nodes_return_value.append(pool.apply_async(_run, args=(graph, next_node, globals_, locals_, {}, None, False)))

        # 1

        nodes_return_value.insert(0, _run(graph, next_nodes[0], globals_, locals_, {}, None, False))

        pool.close()

        pool.join()

        pool.terminate()

        return_value = __resolve_and_merge_results(nodes_return_value)

    else:

        #        (a)
        #       / | \
        #    (b) (c) (d)
        #       \ | /
        #    --> (e)

        return_value = locals_['_']

    return return_value
Author: ESSL-CQQ, Project: pythonect, Lines: 63, Source: eval.py


Example 20: build

    def build(self):

        ncpus = num_cpus()

        log.log("Building useing " + str(ncpus) + " threads\n", log.BLUE)

        t = time.time()

        if ncpus > 1:

            import multiprocessing
            import multiprocessing.pool

            pool = multiprocessing.Pool(
                ncpus, initializer=_init_multiprocessing_helper)

            try:
                result = pool.map_async(builder, self.packages, chunksize=1)
                pool.close()
                while not result.ready():
                    try:
                        result.get(1)  # seconds
                    except multiprocessing.TimeoutError:
                        pass
            except KeyboardInterrupt:
                pool.terminate()
                raise

            pool.terminate()
            pool.join()

            results = result.get(1)

            # fix keyboard interrupt
            # from multiprocessing.pool import IMapIterator

            # def wrapper(func):
            #     def wrap(self, timeout=None):
            # Note: the timeout of 1 googol seconds introduces a rather subtle
            # bug for Python scripts intended to run many times the age of the universe.
            #         return func(self, timeout=timeout if timeout is not None else 1e100)
            #     return wrap
            # IMapIterator.next = wrapper(IMapIterator.next)

            # with multiprocessing.pool.Pool(ncpus) as pool:
            #     results = pool.map(builder, self.packages)
        else:
            results = []
            for path in self.packages:
                results.append(builder(path))

        log.log(
            "TOTAL Time spent Compiling: %s Seconds\n" %
            (time.time() - t),
            log.BLUE)
        errors = [r for r in results if r]
        if errors:
            return ''.join(errors)
        else:
            log.log("There were no errors", log.GREEN)
        return False
Author: borisblizzard, Project: arcreator, Lines: 61, Source: cythonize.py



Note: The multiprocessing.pool.terminate examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For redistribution and use, refer to the corresponding project's license; do not reproduce without permission.

