
Python toolz.pluck Function Code Examples


This article collects typical usage examples of the Python toolz.pluck function. If you have been wondering what pluck does, how to call it, or what real-world uses look like, the curated examples here should help.



Twenty code examples of the pluck function are shown below, ordered by popularity by default.
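Before the examples, here is a minimal, self-contained sketch of what pluck does (the sample data is invented for illustration): pluck(ind, seqs) lazily extracts ind from each element of seqs, where ind may be a single key or index, or a list of them, and an optional default stands in for missing values.

from toolz import pluck

people = [{'name': 'Alice', 'age': 30},
          {'name': 'Bob', 'age': 25},
          {'name': 'Carol'}]                      # 'age' missing here

list(pluck('name', people))               # ['Alice', 'Bob', 'Carol']
list(pluck(['name', 'age'], people[:2]))  # [('Alice', 30), ('Bob', 25)]
list(pluck('age', people, default=None))  # [30, 25, None]

# Integer indices work on sequences too, a pattern many examples below use:
pairs = [(10, 'a'), (20, 'b'), (30, 'c')]
list(pluck(0, pairs))                     # [10, 20, 30]
list(pluck(1, pairs))                     # ['a', 'b', 'c']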

Example 1: post_compute

def post_compute(e, q, d):
    """
    Execute a query using MongoDB's aggregation pipeline

    The compute_up functions operate on Mongo Collection / list-of-dict
    queries.  Once they're done we need to actually execute the query on
    MongoDB.  We do this using the aggregation pipeline framework.

    http://docs.mongodb.org/manual/core/aggregation-pipeline/
    """
    d = {'$project': toolz.merge({'_id': 0},  # remove mongo identifier
                                 dict((col, 1) for col in e.fields))}
    q = q.append(d)

    if not e.dshape.shape:  # not a collection
        result = q.coll.aggregate(list(q.query))['result'][0]
        if isscalar(e.dshape.measure):
            return result[e._name]
        else:
            return get(e.fields, result)

    dicts = q.coll.aggregate(list(q.query))['result']

    if isscalar(e.dshape.measure):
        return list(pluck(e.fields[0], dicts, default=None))  # dicts -> values
    else:
        return list(pluck(e.fields, dicts, default=None))  # dicts -> tuples
Author: Casolt, Project: blaze, Lines: 27, Source: mongo.py


Example 2: resource_append

def resource_append(lists, msg):
    L = list(msg.values())
    if not L:
        return
    for k in ['cpu', 'memory-percent']:
        lists[k].append(mean(pluck(k, L)) / 100)

    lists['time'].append(mean(pluck('time', L)) * 1000)
Author: coobas, Project: distributed, Lines: 8, Source: worker_monitor.py


Example 3: compile_components

def compile_components(summary, schema):
    """Given a ``Summary`` object and a table schema, returning 5 sub-functions.

    Parameters
    ----------
    summary : Summary
        The expression describing the aggregations to be computed.

    Returns
    -------
    A tuple of the following functions:

    ``create(shape)``
        Takes the aggregate shape, and returns a tuple of initialized numpy
        arrays.

    ``info(df)``
        Takes a dataframe, and returns preprocessed 1D numpy arrays of the
        needed columns.

    ``append(i, x, y, *aggs_and_cols)``
        Appends the ``i``th row of the table to the ``(x, y)`` bin, given the
        base arrays and columns in ``aggs_and_cols``. This does the bulk of the
        work.

    ``combine(base_tuples)``
        Combine a list of base tuples into a single base tuple. This forms the
        reducing step in a reduction tree.

    ``finalize(aggs)``
        Given a tuple of base numpy arrays, returns the finalized
        ``dynd`` array.
    """
    paths, reds = zip(*preorder_traversal(summary))

    # List of base reductions (actually computed)
    bases = list(unique(concat(r._bases for r in reds)))
    dshapes = [b.out_dshape(schema) for b in bases]
    # List of tuples of (append, base, input columns, temps)
    calls = [_get_call_tuples(b, d) for (b, d) in zip(bases, dshapes)]
    # List of unique column names needed
    cols = list(unique(concat(pluck(2, calls))))
    # List of temps needed
    temps = list(pluck(3, calls))

    create = make_create(bases, dshapes)
    info = make_info(cols)
    append = make_append(bases, cols, calls)
    combine = make_combine(bases, dshapes, temps)
    finalize = make_finalize(bases, summary, schema)

    return create, info, append, combine, finalize
Author: jcrist, Project: datashader, Lines: 52, Source: compiler.py
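A side note on the pluck(2, calls) and pluck(3, calls) lines above: each call tuple has the shape (append, base, input columns, temps), so plucking an integer position slices one field out of every tuple. A hypothetical miniature of that pattern (the tuple contents are made up):

from toolz import concat, pluck, unique

# Shaped like the (append, base, input columns, temps) tuples above.
calls = [('append_a', 'base_a', ('x', 'y'), ()),
         ('append_b', 'base_b', ('y', 'z'), ('tmp',))]

cols = list(unique(concat(pluck(2, calls))))   # ['x', 'y', 'z']
temps = list(pluck(3, calls))                  # [(), ('tmp',)]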


Example 4: records_to_tuples

def records_to_tuples(ds, data):
    """ Transform records into tuples

    Examples
    --------
    >>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
    >>> list(records_to_tuples('var * {a: int, b: int}', seq))
    [(1, 10), (2, 20)]

    >>> records_to_tuples('{a: int, b: int}', seq[0])  # single elements
    (1, 10)

    >>> records_to_tuples('var * int', [1, 2, 3])  # pass through on non-records
    [1, 2, 3]

    See Also
    --------

    tuples_to_records
    """
    if isinstance(ds, (str, unicode)):
        ds = dshape(ds)
    if isinstance(ds.measure, Record) and len(ds.shape) == 1:
        return pluck(ds.measure.names, data, default=None)
    if isinstance(ds.measure, Record) and len(ds.shape) == 0:
        return get(ds.measure.names, data)
    if not isinstance(ds.measure, Record):
        return data
    raise NotImplementedError()
Author: kwin-wang, Project: odo, Lines: 29, Source: utils.py


Example 5: dot_graph

def dot_graph(filename='conversions'):
    # Edges from Convert
    dg = nx.DiGraph()
    for a, b in convert.graph.edges():
        cost = convert.graph.edge[a][b]['cost']
        dg.add_edge(cls_name(a), cls_name(b),
                    cost=cost,
                    penwidth=max(log(1./(cost + 0.06)), 1))


    # Edges from Append
    for a, b in append.funcs:
        if b is not object and a != b:
            dg.add_edge(cls_name(b), cls_name(a), color='blue')


    # Color edges
    for n in convert.graph.nodes() + list(pluck(0, append.funcs)):
        if issubclass(n, tuple(ooc_types)):
            dg.node[cls_name(n)]['color'] = 'red'

    # Convert to pydot
    p = nx.to_pydot(dg)

    p.set_overlap(False)
    p.set_splines(True)

    with open(filename + '.dot', 'w') as f:
        f.write(p.to_string())

    os.system('neato -Tpdf %s.dot -o %s.pdf' % (filename, filename))
    print("Writing graph to %s.pdf" % filename)
    os.system('neato -Tpng %s.dot -o %s.png' % (filename, filename))
    print("Writing graph to %s.png" % filename)
Author: CaptainAL, Project: Spyder, Lines: 34, Source: dot.py


Example 6: get_profile

def get_profile(history, recent=None, start=None, stop=None, key=None):
    now = time()
    if start is None:
        istart = 0
    else:
        istart = bisect.bisect_left(history, (start,))

    if stop is None:
        istop = None
    else:
        istop = bisect.bisect_right(history, (stop,)) + 1
        if istop >= len(history):
            istop = None  # include end

    if istart == 0 and istop is None:
        history = list(history)
    else:
        iistop = len(history) if istop is None else istop
        history = [history[i] for i in range(istart, iistop)]

    prof = merge(*toolz.pluck(1, history))

    if not history:
        return create()

    if recent:
        prof = merge(prof, recent)

    return prof
Author: tomMoral, Project: distributed, Lines: 29, Source: profile.py


Example 7: f

    def f(c, a, b):
        aa = rpc(ip=a.ip, port=a.port)
        bb = rpc(ip=b.ip, port=b.port)

        result = yield aa.identity()
        assert not a.active
        response = yield aa.compute(key='x',
                                    function=dumps(add),
                                    args=dumps([1, 2]),
                                    who_has={},
                                    close=True)
        assert not a.active
        assert response['status'] == 'OK'
        assert a.data['x'] == 3
        assert c.who_has['x'] == {a.address}
        assert isinstance(response['compute-start'], float)
        assert isinstance(response['compute-stop'], float)
        assert isinstance(response['thread'], int)

        response = yield bb.compute(key='y',
                                    function=dumps(add),
                                    args=dumps(['x', 10]),
                                    who_has={'x': [a.address]})
        assert response['status'] == 'OK'
        assert b.data['y'] == 13
        assert c.who_has['y'] == {b.address}
        assert response['nbytes'] == sizeof(b.data['y'])
        assert isinstance(response['transfer-start'], float)
        assert isinstance(response['transfer-stop'], float)

        def bad_func():
            1 / 0

        response = yield bb.compute(key='z',
                                    function=dumps(bad_func),
                                    args=dumps(()),
                                    close=True)
        assert not b.active
        assert response['status'] == 'error'
        assert isinstance(loads(response['exception']), ZeroDivisionError)
        if sys.version_info[0] >= 3:
            assert any('1 / 0' in line
                      for line in pluck(3, traceback.extract_tb(
                          loads(response['traceback'])))
                      if line)

        aa.close_streams()
        yield a._close()

        assert a.address not in c.ncores and b.address in c.ncores

        assert list(c.ncores.keys()) == [b.address]

        assert isinstance(b.address, str)
        assert b.ip in b.address
        assert str(b.port) in b.address

        bb.close_streams()
        yield b._close()
Author: mindis, Project: distributed, Lines: 59, Source: test_worker.py


Example 8: test_append_convert

def test_append_convert(empty_bank, raw_bank):
    ds = discover(raw_bank)
    assert set(ds.measure.names) == {'name', 'amount'}

    append(empty_bank, raw_bank, dshape=ds)
    assert odo(empty_bank, list, dshape=ds) == list(
        pluck(ds.measure.names, raw_bank)
    )
Author: Curezhang, Project: odo, Lines: 8, Source: test_mongo.py


Example 9: _set_last_times

    def _set_last_times(self, platform_id, fetched):
        with self.times_lock:
            try:
                new_max = max(pluck(1, concat(fetched.itervalues())))
                if new_max > self._last_times.get(platform_id, 0):
                    self._last_times[platform_id] = new_max
            except ValueError:
                pass
Author: kehunt06, Project: mi-instrument, Lines: 8, Source: oms_extractor.py


Example 10: resource_append

def resource_append(lists, msg):
    L = list(msg.values())
    if not L:
        return
    for k in ['cpu', 'memory-percent']:
        lists[k].append(mean(pluck(k, L)) / 100)

    lists['time'].append(mean(pluck('time', L)) * 1000)
    if len(lists['time']) >= 2:
        t1, t2 = lists['time'][-2], lists['time'][-1]
        interval = (t2 - t1) / 1000
    else:
        interval = 0.5
    send = mean(pluck('network-send', L, 0))
    lists['network-send'].append(send / 2**20 / (interval or 0.5))
    recv = mean(pluck('network-recv', L, 0))
    lists['network-recv'].append(recv / 2**20 / (interval or 0.5))
Author: LiuXiaozeeee, Project: distributed, Lines: 17, Source: worker_monitor.py
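One detail worth noting in this variant: pluck accepts its default positionally, so mean(pluck('network-send', L, 0)) counts a worker that has not reported network counters yet as 0 instead of raising a KeyError. A quick sketch with invented messages:

from toolz import pluck

L = [{'network-send': 1048576}, {}]   # second worker has no counter yet
list(pluck('network-send', L, 0))     # [1048576, 0]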


Example 11: post_compute

def post_compute(e, q, d):
    """
    Execute a query using MongoDB's aggregation pipeline

    The compute_one functions operate on Mongo Collection / list-of-dict
    queries.  Once they're done we need to actually execute the query on
    MongoDB.  We do this using the aggregation pipeline framework.

    http://docs.mongodb.org/manual/core/aggregation-pipeline/
    """
    q = q.append({'$project': toolz.merge({'_id': 0}, # remove mongo identifier
                                      dict((col, 1) for col in e.columns))})
    dicts = q.coll.aggregate(list(q.query))['result']

    if e.iscolumn:
        return list(pluck(e.columns[0], dicts)) # dicts -> values
    else:
        return list(pluck(e.columns, dicts))    # dicts -> tuples
Author: holdenk, Project: blaze, Lines: 18, Source: mongo.py


Example 12: arg_reduction

def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):
    """ Generic function for argreduction.

    Parameters
    ----------
    x : Array
    chunk : callable
        Partialed ``arg_chunk``.
    combine : callable
        Partialed ``arg_combine``.
    agg : callable
        Partialed ``arg_agg``.
    axis : int, optional
    split_every : int or dict, optional
    """
    if axis is None:
        axis = tuple(range(x.ndim))
        ravel = True
    elif isinstance(axis, Integral):
        axis = validate_axis(axis, x.ndim)
        axis = (axis,)
        ravel = x.ndim == 1
    else:
        raise TypeError("axis must be either `None` or int, "
                        "got '{0}'".format(axis))

    for ax in axis:
        chunks = x.chunks[ax]
        if len(chunks) > 1 and np.isnan(chunks).any():
            raise ValueError(
                "Arg-reductions do not work with arrays that have "
                "unknown chunksizes.  At some point in your computation "
                "this array lost chunking information"
            )

    # Map chunk across all blocks
    name = 'arg-reduce-{0}'.format(tokenize(axis, x, chunk,
                                            combine, split_every))
    old = x.name
    keys = list(product(*map(range, x.numblocks)))
    offsets = list(product(*(accumulate(operator.add, bd[:-1], 0)
                             for bd in x.chunks)))
    if ravel:
        offset_info = zip(offsets, repeat(x.shape))
    else:
        offset_info = pluck(axis[0], offsets)

    chunks = tuple((1, ) * len(c) if i in axis else c for (i, c)
                   in enumerate(x.chunks))
    dsk = dict(((name,) + k, (chunk, (old,) + k, axis, off)) for (k, off)
               in zip(keys, offset_info))
    # The dtype of `tmp` doesn't actually matter, just need to provide something
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
    tmp = Array(graph, name, chunks, dtype=x.dtype)
    dtype = np.argmin([1]).dtype
    result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
    return handle_out(out, result)
Author: yliapis, Project: dask, Lines: 57, Source: reductions.py


Example 13: test_min_max

def test_min_max():
    loop = IOLoop.current()
    cluster = yield LocalCluster(0, scheduler_port=0, silence_logs=False,
                                 processes=False, diagnostics_port=None,
                                 loop=loop, asynchronous=True)
    yield cluster._start()
    try:
        adapt = Adaptive(cluster.scheduler, cluster, minimum=1, maximum=2,
                         interval='20 ms', wait_count=10)
        c = yield Client(cluster, asynchronous=True, loop=loop)

        start = time()
        while not cluster.scheduler.workers:
            yield gen.sleep(0.01)
            assert time() < start + 1

        yield gen.sleep(0.2)
        assert len(cluster.scheduler.workers) == 1
        assert frequencies(pluck(1, adapt.log)) == {'up': 1}

        futures = c.map(slowinc, range(100), delay=0.1)

        start = time()
        while len(cluster.scheduler.workers) < 2:
            yield gen.sleep(0.01)
            assert time() < start + 1

        assert len(cluster.scheduler.workers) == 2
        yield gen.sleep(0.5)
        assert len(cluster.scheduler.workers) == 2
        assert len(cluster.workers) == 2
        assert frequencies(pluck(1, adapt.log)) == {'up': 2}

        del futures

        start = time()
        while len(cluster.scheduler.workers) != 1:
            yield gen.sleep(0.01)
            assert time() < start + 2
        assert frequencies(pluck(1, adapt.log)) == {'up': 2, 'down': 1}
    finally:
        yield c.close()
        yield cluster.close()
Author: tomMoral, Project: distributed, Lines: 43, Source: test_adaptive.py


Example 14: post_compute

def post_compute(e, q, scope=None):
    """Compute the result of a Broadcast expression.
    """
    columns = dict((col, 1) for qry in q.query for col in qry.get("$project", []))
    scope = {"$project": toolz.merge({"_id": 0}, dict((col, 1) for col in columns))}  # remove mongo identifier
    q = q.append(scope)
    dicts = get_result(q.coll.aggregate(list(q.query)))

    assert len(columns) == 1
    return list(pluck(first(columns.keys()), dicts))
Author: testmana2, Project: blaze, Lines: 10, Source: mongo.py


Example 15: slice_slices_and_integers

def slice_slices_and_integers(out_name, in_name, blockdims, index):
    """
    Dask array indexing with slices and integers

    See Also
    --------

    _slice_1d
    """
    shape = tuple(map(sum, blockdims))

    for dim, ind in zip(shape, index):
        if np.isnan(dim) and ind != slice(None, None, None):
            raise ValueError("Arrays chunk sizes are unknown: %s", shape)

    assert all(isinstance(ind, (slice, Integral)) for ind in index)
    assert len(index) == len(blockdims)

    # Get a list (for each dimension) of dicts{blocknum: slice()}
    block_slices = list(map(_slice_1d, shape, blockdims, index))
    sorted_block_slices = [sorted(i.items()) for i in block_slices]

    # (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
    in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))

    # (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
    out_names = list(product([out_name],
                             *[range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
                               for d, i in zip(block_slices, index)
                               if not isinstance(i, Integral)]))

    all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))

    dsk_out = {out_name: (getitem, in_name, slices)
               for out_name, in_name, slices
               in zip(out_names, in_names, all_slices)}

    new_blockdims = [new_blockdim(d, db, i)
                     for d, i, db in zip(shape, index, blockdims)
                     if not isinstance(i, Integral)]

    return dsk_out, new_blockdims
Author: mrocklin, Project: dask, Lines: 42, Source: slicing.py


Example 16: resource_append

def resource_append(lists, msg):
    L = list(msg.values())
    if not L:
        return
    try:
        for k in ['cpu', 'memory-percent']:
            lists[k].append(mean(pluck(k, L)) / 100)
    except KeyError:  # initial messages sometimes lack resource data
        return        # this is safe to skip

    lists['time'].append(mean(pluck('time', L)) * 1000)
    if len(lists['time']) >= 2:
        t1, t2 = lists['time'][-2], lists['time'][-1]
        interval = (t2 - t1) / 1000
    else:
        interval = 0.5
    send = mean(pluck('network-send', L, 0))
    lists['network-send'].append(send / 2**20 / (interval or 0.5))
    recv = mean(pluck('network-recv', L, 0))
    lists['network-recv'].append(recv / 2**20 / (interval or 0.5))
Author: danring, Project: distributed, Lines: 20, Source: worker_monitor.py


Example 17: get_output

def get_output(fields: List[str], response: Any) -> List[Tuple[str, ...]]:
    '''
    Extract raw output from returned query dictionary
    Parameters
        fields: list of output fields
        response: dict from query response
    Returns
        list of tuples for output table
    '''
    patents = response['patents']
    return list(pluck(fields, patents))
Author: philiplessner, Project: USPTOAPI, Lines: 11, Source: backend.py


Example 18: select_to_iterator

def select_to_iterator(sel, dshape=None, **kwargs):
    engine = sel.bind  # TODO: get engine from select

    with engine.connect() as conn:
        result = conn.execute(sel)
        if dshape and isscalar(dshape.measure):
            result = pluck(0, result)
        else:
            result = map(tuple, result)  # Turn RowProxy into tuple

        for item in result:
            yield item
Author: mrocklin, Project: into, Lines: 12, Source: sql.py


Example 19: test_groupby_tasks

def test_groupby_tasks():
    b = db.from_sequence(range(160), npartitions=4)
    out = b.groupby(lambda x: x % 10, max_branch=4, method='tasks')
    partitions = dask.get(out.dask, out._keys())

    for a in partitions:
        for b in partitions:
            if a is not b:
                assert not set(pluck(0, a)) & set(pluck(0, b))


    b = db.from_sequence(range(1000), npartitions=100)
    out = b.groupby(lambda x: x % 123, method='tasks')
    assert len(out.dask) < 100**2
    partitions = dask.get(out.dask, out._keys())

    for a in partitions:
        for b in partitions:
            if a is not b:
                assert not set(pluck(0, a)) & set(pluck(0, b))


    b = db.from_sequence(range(10000), npartitions=345)
    out = b.groupby(lambda x: x % 2834, max_branch=24, method='tasks')
    partitions = dask.get(out.dask, out._keys())

    for a in partitions:
        for b in partitions:
            if a is not b:
                assert not set(pluck(0, a)) & set(pluck(0, b))
Author: dukebody, Project: dask, Lines: 30, Source: test_bag.py


Example 20: slice_slices_and_integers

def slice_slices_and_integers(out_name, in_name, blockdims, index):
    """
    Dask array indexing with slices and integers

    See Also
    --------

    _slice_1d
    """
    shape = tuple(map(sum, blockdims))

    assert all(isinstance(ind, (slice, int, long)) for ind in index)
    assert len(index) == len(blockdims)

    # Get a list (for each dimension) of dicts{blocknum: slice()}
    block_slices = list(map(_slice_1d, shape, blockdims, index))
    sorted_block_slices = [sorted(i.items()) for i in block_slices]

    # (in_name, 1, 1, 2), (in_name, 1, 1, 4), (in_name, 2, 1, 2), ...
    in_names = list(product([in_name], *[pluck(0, s) for s in sorted_block_slices]))

    # (out_name, 0, 0, 0), (out_name, 0, 0, 1), (out_name, 0, 1, 0), ...
    out_names = list(product([out_name],
                             *[range(len(d))[::-1] if i.step and i.step < 0 else range(len(d))
                                 for d, i in zip(block_slices, index)
                                 if not isinstance(i, (int, long))]))

    all_slices = list(product(*[pluck(1, s) for s in sorted_block_slices]))

    dsk_out = dict((out_name, (getitem, in_name, slices))
                   for out_name, in_name, slices
                   in zip(out_names, in_names, all_slices))

    new_blockdims = [new_blockdim(d, db, i)
                     for d, i, db in zip(shape, index, blockdims)
                     if not isinstance(i, (int, long))]

    return dsk_out, new_blockdims
Author: ankravch, Project: dask, Lines: 38, Source: slicing.py



Note: The toolz.pluck examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosts. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce this compilation without permission.

