• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python more_itertools.chunked函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中more_itertools.chunked函数的典型用法代码示例。如果您正苦于以下问题:Python chunked函数的具体用法?Python chunked怎么用?Python chunked使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了chunked函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: get_context_data

 def get_context_data(self, **kwargs):
     """Extend the template context with homepage carousel and portfolio data."""
     ctx = super(HomePageView, self).get_context_data(**kwargs)
     homepage = ctx['homepage']
     # Split the promoted items into groups of three, one group per slide.
     ctx['courses_slides'] = chunked(homepage.promoted_courses.all(), 3)
     ctx['menthors_slides'] = chunked(homepage.promoted_menthors.all(), 3)
     # Latest eight published portfolios flagged for the home page.
     published = Portfolio.objects.filter(home_published=True, status='published')
     ctx['promoted_portfolios'] = published.order_by('-timestamp')[:8]
     return ctx
开发者ID:Enois,项目名称:timtec,代码行数:7,代码来源:views.py


示例2: group_by_magnitude

 def group_by_magnitude(collection):
     """Split *collection* into chunks sized by its order of magnitude.

     Collections over 1000 items are chunked by 100, over 100 by 10, and
     anything smaller is returned whole as a single-element list.
     """
     size = len(collection)
     for threshold, chunk_size in ((1000, 100), (100, 10)):
         if size > threshold:
             return chunked(collection, chunk_size)
     return [collection]
开发者ID:whitmo,项目名称:CheesePrism,代码行数:7,代码来源:index.py


示例3: chunked_join

def chunked_join(iterable, int1, int2, str1, str2, func):
    """Chunk *iterable* two levels deep, apply *func*, and join the results.

    The iterable is first split into chunks of *int1* items; each chunk is
    split again into sub-chunks of *int2* items.  Every sub-chunk is
    concatenated, passed through *func*, joined with *str2* within its
    chunk, and the chunk strings are finally joined with *str1*.
    """
    outer = list(chunked(iterable, int1))
    logging.debug(outer)
    nested = [list(chunked(part, int2)) for part in outer]
    logging.debug(nested)
    chunk_strings = []
    for group in nested:
        chunk_strings.append(str2.join(func(''.join(sub)) for sub in group))
    return str1.join(chunk_strings)
开发者ID:NoviceLive,项目名称:intellicoder,代码行数:10,代码来源:converters.py


示例4: _iter_cores

def _iter_cores(cores, ncontainer):
    """Yield one ``(full_cores, part_cores)`` pair per container.

    :param cores: mapping with optional ``'full'`` and ``'part'`` core lists.
    :param ncontainer: number of containers to spread the cores over.
    """
    full_cores, part_cores = cores.get('full', []), cores.get('part', [])
    if not (full_cores or part_cores):
        # Nothing to distribute: give every container an empty pair.
        return (([], []) for _ in range(ncontainer))

    # Use floor division so the chunk size stays an int: plain `/` yields a
    # float on Python 3, which chunked()/islice() reject.  On Python 2 ints,
    # `//` is identical to `/`, so behavior is unchanged there.
    # NOTE(review): when a list is shorter than ncontainer the size is 0 —
    # presumably callers guarantee enough cores per container; verify.
    return izip_longest(
        chunked(full_cores, len(full_cores) // ncontainer),
        chunked(part_cores, len(part_cores) // ncontainer),
        fillvalue=[]
    )
开发者ID:binblee,项目名称:eru-core,代码行数:10,代码来源:task.py


示例5: decl

 def decl(self):
     """Render this entry as a C ``long`` function declaration string."""
     logging.debug(_('args: %s'), self.args)
     tokens = self.args.strip().replace('__user ', '').split(',')
     logging.debug(_('args: %s'), tokens)
     # Tokens arrive as alternating (type, name) halves; glue each pair back.
     pairs = [''.join(two) for two in chunked(tokens, 2)]
     return 'long {}({});'.format(
         self.name.strip(), ', '.join(pairs))
开发者ID:NoviceLive,项目名称:intellicoder,代码行数:7,代码来源:database.py


示例6: __init__

    def __init__(self, recs):
        """Collect (name, value, type) triples from the flat *recs* sequence."""
        self.argslist = []

        # TODO make these separate nodes
        values = (record.value for record in recs)
        # Record values arrive flattened; regroup them three at a time.
        for name, value, type_ in chunked(values, 3):
            self.argslist.append((name, value, type_))
开发者ID:abuchanan,项目名称:cue,代码行数:7,代码来源:translate.py


示例7: parse_obj

    def parse_obj(self,obj,dtype):
        """Convert an XML element into a GeoJSON-style Feature dict.

        ``obj`` is an element whose children carry geometry (``pos``,
        ``area``, ``loc`` tags) and arbitrary property values; ``dtype`` is
        stored as the feature's ``class`` property.
        """
        dic = OD((('type','Feature'),('geometry',OD()),('properties',OD())))
        dic['properties']['class'] = dtype
        for child in obj:
            ctag = self.clip_tag(child.tag)
            if ctag in ['pos','area','loc']:
                if ctag == 'area':
                    # Polygons delegate ring extraction to get_polygon_coord.
                    dic['geometry']['type'] = 'Polygon'
                    dic['geometry']['coordinates'] = self.get_polygon_coord(child)
                else:
                    if ctag == 'pos':
                        dic['geometry']['type'] = 'Point'
                    elif ctag == 'loc':
                        dic['geometry']['type'] = 'LineString'
                    # Concatenate all text fragments, pair the tokens up, and
                    # swap each pair to [x, y] order (source text appears to be
                    # "lat lon" — TODO confirm against the input schema).
                    i = ""
                    for l in child.itertext():
                        i += l
                    l = list(chunked(i.strip().split(),2))
                    i = [[float(xy[1]),float(xy[0])] for xy in l]

                    if len(i) == 1:
                        # A single pair is a Point: unwrap the outer list.
                        dic['geometry']['coordinates'] = i[0]
                    else:
                        dic['geometry']['coordinates'] = i
            elif not child.text.strip() == '':
                dic['properties'][ctag]=child.text
            else:
                # No direct text: fall back to the concatenated subtree text.
                i = ''
                for l in child.itertext():
                    i += l
                dic['properties'][ctag]=i.strip()
        dic = self.chk_types(dic)
        return dic
开发者ID:gsi-cyberjapan,项目名称:vector_tiles_convert,代码行数:33,代码来源:load.py


示例8: cooccurrence

def cooccurrence(
    corpus,
    execnet_hub,
    targets,
    context,
    paths_progress_iter,
    output=('o', 'space.h5', 'The output space file.'),
):
    """Build the co-occurrence matrix.

    Fans the counting work out to execnet workers, merges the partial
    results into a single frame, and writes the space to *output*.
    """

    # Sort multi-level indices up front; later groupby/concat operations
    # work on these indices.
    if targets.index.nlevels > 1:
        targets.sortlevel(inplace=True)
    if context.index.nlevels > 1:
        context.sortlevel(inplace=True)

    def init(channel):
        # Sent once per worker: the pickled job description (targets/context
        # frames, the corpus instance, and the working folder name).
        channel.send(
            (
                'data',
                pickle.dumps(
                    {
                        'kwargs': {
                            'targets': targets,
                            'context': context,
                        },
                        'instance': corpus,
                        'folder_name': 'cooccurrence',
                    },
                )
            )
        )

    results = execnet_hub.run(
        remote_func=sum_folder,
        iterable=paths_progress_iter,
        init_func=init,
    )

    # Drop None results and wrap each in a list so chunks concatenate below.
    results = ([r] for r in results if r is not None)
    result = next(results)[0]

    # Merge incoming results 100 at a time to bound peak memory while
    # accumulating into `result`.
    for i, chunk in enumerate(chunked(results, 100)):
        logger.info('Received result chunk #%s.', i)
        chunked_result = [c[0] for c in chunk]

        with Timer() as timed:
            result = pd.concat(
                chunked_result + [result],
                copy=False,
            ).groupby(level=result.index.names).sum()

        logger.info(
            'Computed the result by merging a chunk of received results and the result in %.2f seconds.',
            timed.elapsed,
        )

    result = result.to_frame('count')
    result.reset_index(inplace=True)

    write_space(output, context, targets, result)


示例9: add_to_spotify

def add_to_spotify(db, spotify, album, original_artist, original_album):
    """Save every track of *album* to the user's Spotify library and mark
    the (artist, album) row complete in the local collection database."""
    album = spotify.album(album["uri"])
    tracks = album["tracks"]
    track_ids = [t["uri"] for t in tracks["items"]]
    # Page through any remaining track listings.
    while tracks["next"]:
        tracks = spotify.next(tracks)
        track_ids.extend(t["uri"] for t in tracks["items"])

    click.echo("Adding {0} tracks to Spotify...".format(len(track_ids)))
    # The saved-tracks endpoint takes at most 50 ids per request.
    for batch in chunked(track_ids, 50):
        response = spotify.current_user_saved_tracks_add(batch)
        if response is not None:
            # A non-None response indicates a failure; show it and bail out.
            click.secho("Fuck, something broke:")
            pprint(response)
            click.confirm("Continue?", abort=True)
            return

    cursor = db.cursor()
    cursor.execute(
        """UPDATE collection SET complete = 1
                      WHERE artist = ? AND album = ?""",
        [original_artist, original_album],
    )
    db.commit()
    click.secho("Done ", fg="green", nl=False)
    time.sleep(0.25)
开发者ID:jacobian,项目名称:rdio-takeout-importer,代码行数:26,代码来源:r2s.py


示例10: get_random_logs

	def get_random_logs(self, limit):
		"""Yield the message text of up to *limit* randomly sampled log docs."""
		count = min(limit, self.db.count())
		all_ids = self.db.find({}, {'_id': 1})
		chosen = [doc['_id'] for doc in random.sample(list(all_ids), count)]
		# Fetch in batches of 100 ids to keep each $in clause small.
		for batch in chunked(chosen, 100):
			for doc in self.db.find({'_id': {'$in': batch}}, {'message': 1}):
				yield doc['message']
开发者ID:yougov,项目名称:pmxbot,代码行数:8,代码来源:logging.py


示例11: parallelize_func

def parallelize_func(iterable, func, chunksz=1, n_jobs=16, *args, **kwargs):
    """ Parallelize a function over each element of an iterable. """
    # Split the work into chunks, run func on each chunk in parallel, then
    # flatten the per-chunk results back into a single list.
    pieces = more_itertools.chunked(iterable, chunksz)
    per_chunk = Parallel(n_jobs=n_jobs, verbose=50)(
        delayed(func)(piece, *args, **kwargs) for piece in pieces)
    return list(more_itertools.flatten(per_chunk))
开发者ID:123mitnik,项目名称:changepoint,代码行数:8,代码来源:ts_stats.py


示例12: start

def start(experiment_description, agent, environment, results_descriptor):
    """Kick off the execution of an experiment."""
    initialize_results(results_descriptor)
    # Lazily produce one result per step, capped at num_steps.
    step_results = islice(
        interval_results_generator(agent, environment, experiment_description),
        experiment_description.num_steps)
    # Flush results to disk in groups of `interval` steps.
    for interval_chunk in chunked(step_results, results_descriptor.interval):
        merged = merge_results([data.results for data in interval_chunk])
        write_results(merged, results_descriptor)
开发者ID:xanderdunn,项目名称:options,代码行数:8,代码来源:experiment.py


示例13: create_partials

    def create_partials(self, product, branch, platform, locales, revision,
                        chunk_name=1):
        """Calculates "from" and "to" MAR URLs and calls  create_task_graph().
        Currently "from" MAR is 2 releases behind to avoid duplication of
        existing CI partials.

        :param product: capitalized product name, AKA appName, e.g. Firefox
        :param branch: branch name (mozilla-central)
        :param platform: buildbot platform (linux, macosx64)
        :param locales: list of locales
        :param revision: revision of the "to" build
        :param chunk_name: chunk name
        """
        # TODO: move limit to config
        # Get last 5 releases (including current),
        # generate partial for 4 latest
        last_releases = self.balrog_client.get_releases(product, branch)[:5]
        # The newest release is the "to" target; the rest are "from" sources.
        release_to = last_releases.pop(0)
        per_chunk = 5
        for update_number, release_from in enumerate(last_releases, start=1):
            log.debug("From: %s", release_from)
            log.debug("To: %s", release_to)
            # Process locales in groups of per_chunk, submitting one task
            # graph per non-empty group.
            for n, chunk in enumerate(chunked(locales, per_chunk), start=1):
                extra = []
                for locale in chunk:
                    try:
                        build_from = self.balrog_client.get_build(
                            release_from, platform, locale)
                        log.debug("Build from: %s", build_from)
                        build_to = self.balrog_client.get_build(
                            release_to, platform, locale)
                        log.debug("Build to: %s", build_to)
                        from_mar = build_from["completes"][0]["fileUrl"]
                        to_mar = build_to["completes"][0]["fileUrl"]
                        extra.append({
                            "locale": locale,
                            "from_mar": from_mar,
                            "to_mar": to_mar,
                        })
                    except (requests.HTTPError, ValueError):
                        # A missing build or malformed response is tolerated:
                        # log it and continue with the remaining locales.
                        log.exception(
                            "Error getting build, skipping this scenario")

                if extra:
                    if len(locales) > per_chunk:
                        # More than 1 chunk
                        subchunk = n
                    else:
                        subchunk = None

                    all_locales = [e["locale"] for e in extra]
                    log.info("New Funsize task for %s", all_locales)
                    self.submit_task_graph(
                        branch=branch, revision=revision, platform=platform,
                        update_number=update_number, chunk_name=chunk_name,
                        extra=extra, subchunk=subchunk)
                else:
                    log.warn("Nothing to submit")
开发者ID:kmoir,项目名称:funsize,代码行数:58,代码来源:worker.py


示例14: score

    def score(self, rev_ids, caches=None, cache=None):
        """Yield one score per revision id in *rev_ids*.

        A bare int is accepted and treated as a one-element list.
        """
        if isinstance(rev_ids, int):
            rev_ids = [rev_ids]

        # Group the ids into batches and pair each batch with its cache data.
        batched = batch_rev_caches(
            chunked(rev_ids, self.batch_size), caches, cache)

        for batch_result in self.scores_ex.map(self._score_batch, batched):
            for single_score in batch_result:
                yield single_score
开发者ID:wiki-ai,项目名称:revscoring,代码行数:10,代码来源:score_processor.py


示例15: c_layout

def c_layout(i, definition, template):
    """Render layer *i* of *definition* into *template* as C source text."""
    c_name = layer_names[i]
    pretty_name = c_name.strip('_').capitalize()
    layout = d['layout']

    # ASCII-art preview: wrap every key cell in box-drawing borders.
    def boxed(row):
        return ''.join(interleave_longest(['│'] * (len(row) + 1), row))

    keys = list(map(uni, definition))
    keys[41] = keys[41].center(11)
    picture_rows = intersperse(mid, map(boxed, chunked(keys, 12)))
    pretty = '\n'.join(itertools.chain([top], picture_rows, [bottom]))

    # C initializer body: comma-separated key names, twelve per row.
    names = [layer_name.get(k, k) for k in definition]
    c_rows = (', '.join(row) for row in chunked(names, 12))
    c_layer = ',\n    '.join(itertools.chain([], c_rows, []))

    return template.format(pretty_name, pretty, c_name, layout, c_layer)
开发者ID:0xdec,项目名称:qmk_firmware,代码行数:19,代码来源:generate_c.py


示例16: main

def main(args):
    """Build per-word time series from pickled dataframes and dump them to CSV.

    Loads ``args.filename``, constructs an "old" and a "new" series for every
    word in parallel, and writes them to ``args.sourcetimef`` and
    ``args.endtimef`` respectively.
    """
    # get the arguments
    method = args.method
    win_size = args.win_size
    step = args.step
    metric_name = args.metric_name
    n_jobs = args.workers

    # Load the data. Open in binary mode (pickle is a binary format) and use
    # a context manager so the handle is not leaked.
    with open(args.filename, 'rb') as pickled:
        L, H, olddf, newdf = pickle.load(pickled)
    words = pd.Series(olddf.word.values.ravel()).unique()
    oldrows = []
    newrows = []
    sourcexrange = np.arange(args.mint, args.maxt, step)
    destxrange = np.arange(args.mint, args.maxt, step)
    if method == 'win':
        # Windowed mode: the source series lags the destination by win_size.
        sourcexrange = sourcexrange[win_size:]
        destxrange = destxrange[:-win_size]

    if args.interpolate:
        sourcexinter = np.arange(sourcexrange[0], sourcexrange[-1] + 1, 1)
        destxinter = np.arange(destxrange[0], destxrange[-1] + 1, 1)
    else:
        sourcexinter = sourcexrange
        destxinter = destxrange

    # Construct the series
    assert(len(sourcexinter) == len(destxinter))
    # np.ceil returns a float; chunked()/islice() require an int chunk size,
    # so cast explicitly.
    chunk_sz = int(np.ceil(len(words) / float(n_jobs)))
    words_chunks = more_itertools.chunked(words, chunk_sz)
    timeseries_chunks = Parallel(n_jobs=n_jobs, verbose=20)(
        delayed(process_chunk)(chunk, create_word_time_series, olddf, newdf,
                               sourcexinter, destxinter,
                               metric_name=metric_name,
                               interpolate=args.interpolate)
        for chunk in words_chunks)

    timeseries = list(more_itertools.flatten(timeseries_chunks))

    # Dump the data frame, keeping only words with both an old and a new row.
    for orow, newrow in timeseries:
        if orow and newrow:
            oldrows.append(orow)
            newrows.append(newrow)

    oldtimeseries = pd.DataFrame()
    newtimeseries = pd.DataFrame()
    header = ['word']
    header.extend(sourcexinter)
    newheader = ['word']
    newheader.extend(destxinter)
    oldtimeseries = oldtimeseries.from_records(oldrows, columns=header)
    # Backfill gaps along each row so missing early values take later ones.
    oldtimeseries = oldtimeseries.fillna(method='backfill', axis=1)
    newtimeseries = newtimeseries.from_records(newrows, columns=newheader)
    newtimeseries = newtimeseries.fillna(method='backfill', axis=1)
    oldtimeseries.to_csv(args.sourcetimef, encoding='utf-8')
    newtimeseries.to_csv(args.endtimef, encoding='utf-8')
开发者ID:Kevinwenya,项目名称:langchangetrack,代码行数:55,代码来源:dump_timeseries.py


示例17: update_graphs

 def update_graphs(self):
     """Get data from shared mp array and appends to graph if we are ready to do so"""
     if self.sync_event.is_set():
         if self.plots_are_reset:
             # Map each plot to its channel's samples with NaN entries removed,
             # grouped into chunks of 50 points.  Channels whose first sample
             # is NaN are skipped — presumably NaN marks an unused channel;
             # TODO confirm against the producer side.
             self.arrays_plots = {self.plots[ch]:
                                  chunked([n for n in self.np_array[i] if not np.isnan(n)], 50)
                                  for i, ch in enumerate(self.ch_num)
                                  if not np.isnan(self.np_array[i][0])}
             self.add_point_to_graph()
     else:
         # Data not ready yet; poll again in 5 ms.
         qc.QTimer.singleShot(5, self.update_graphs)
开发者ID:TiangeLi,项目名称:ArduinoCntrl,代码行数:11,代码来源:Custom_Qt_Widgets.py


示例18: write_results

def write_results(results, results_descriptor):
    """Output the given results to terminal and to file."""
    output_path = results_descriptor.output_path
    keys = results_descriptor.keys
    # Interleave one value from each key's vector, then regroup so every row
    # holds exactly one value per key, in key order.
    vectors = (results[key] for key in keys)
    rows = chunked(interleave(vectors), len(keys))
    row_strings = (' '.join(str(value) for value in row) for row in rows)
    body = '\n'.join(row_strings)
    header = ' '.join(key for key in keys)
    # stdout gets a header line; the file gets only the data rows.
    output_stdout(header + '\n' + body, output_path)
    output_file(body, output_path)
开发者ID:xanderdunn,项目名称:options,代码行数:11,代码来源:results_writer.py


示例19: main

def main(argv=None):
    """Submit reprocessing requests for the given crash ids in groups.

    Returns 1 when the required API token is missing; otherwise runs to
    completion (implicitly returning None).
    """
    parser = argparse.ArgumentParser(
        formatter_class=WrappedTextHelpFormatter,
        description=DESCRIPTION.strip(),
    )
    parser.add_argument(
        '--sleep',
        help='how long in seconds to sleep before submitting the next group',
        type=int,
        default=SLEEP_DEFAULT
    )
    parser.add_argument('--host', help='host for system to reprocess in', default=DEFAULT_HOST)
    # Crash ids may come from the command line or, via FallbackToPipeAction,
    # from stdin.
    parser.add_argument('crashid', help='one or more crash ids to fetch data for',
                        nargs='*', action=FallbackToPipeAction)

    if argv is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(argv)

    api_token = os.environ.get('SOCORRO_REPROCESS_API_TOKEN')
    if not api_token:
        print('You need to set SOCORRO_REPROCESS_API_TOKEN in the environment')
        return 1

    url = args.host.rstrip('/') + '/api/Reprocessing/'
    print('Sending reprocessing requests to: %s' % url)
    session = session_with_retries()

    crash_ids = args.crashid
    print('Reprocessing %s crashes sleeping %s seconds between groups...' % (
        len(crash_ids), args.sleep
    ))

    # Submit the ids in fixed-size groups, one POST per group.
    groups = list(chunked(crash_ids, CHUNK_SIZE))
    for i, group in enumerate(groups):
        print('Processing group ending with %s ... (%s/%s)' % (group[-1], i + 1, len(groups)))
        resp = session.post(
            url,
            data={'crash_ids': group},
            headers={
                'Auth-Token': api_token
            }
        )
        if resp.status_code != 200:
            # Report the failure and move on to the next group.
            print('Got back non-200 status code: %s %s' % (resp.status_code, resp.content))
            continue

        # NOTE(willkg): We sleep here because the webapp has a bunch of rate limiting and we don't
        # want to trigger that. It'd be nice if we didn't have to do this.
        time.sleep(args.sleep)

    print('Done!')
开发者ID:mozilla,项目名称:socorro,代码行数:53,代码来源:reprocess.py


示例20: get_polygon_coord

 def get_polygon_coord(self,obj):
     """Return polygon rings as lists of [x, y] pairs: the exterior ring
     first, followed by any interior rings."""

     def ring_coords(element):
         # Join all text fragments, split into coordinate pairs, and swap
         # each pair's order.
         text = ''.join(element.itertext())
         pairs = list(chunked(text.strip().split(), 2))
         return [[float(pair[1]), float(pair[0])] for pair in pairs]

     # Exterior ring is mandatory; interior rings are optional.
     coord = [ring_coords(obj.find('.//gml:exterior', self.ns))]
     for interior in obj.findall('.//gml:interior', self.ns):
         coord.append(ring_coords(interior))
     return coord
开发者ID:gsi-cyberjapan,项目名称:vector_tiles_convert,代码行数:21,代码来源:load.py



注:本文中的more_itertools.chunked函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python more_itertools.unique_everseen函数代码示例发布时间:2022-05-27
下一篇:
Python process.stop_actors_by_class函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap