
Python toolz.assoc Function Code Examples


This article collects and summarizes typical usage examples of the toolz.assoc function in Python. If you are wondering how exactly to use assoc, what it does, or what real-world usage looks like, the curated code examples below should help.



The following shows 20 code examples of the assoc function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
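
Before diving into the examples, a quick refresher on the function itself: toolz.assoc(d, key, value) returns a new dict with key set to value, leaving the input dict unchanged. This non-mutating behavior is why assoc appears in the immutable-state update patterns below. A minimal sketch:

from toolz import assoc

d = {'x': 1, 'y': 2}
d2 = assoc(d, 'z', 3)  # build a new dict with 'z' added

print(d2)  # {'x': 1, 'y': 2, 'z': 3}
print(d)   # {'x': 1, 'y': 2} -- the original dict is not mutated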

Example 1: todo_app

def todo_app(state, action):
    if action['type'] == ActionTypes.ADD_TODO:
        todos = state['todos'] + (action['text'],)
        return toolz.assoc(state, 'todos', todos)
    elif action['type'] == ActionTypes.COMPLETE_TODO:
        todos = state['todos'][:action['index']] + state['todos'][action['index'] + 1:]
        return toolz.assoc(state, 'todos', todos)
    else:
        return state
Developer: ariddell, Project: aioredux, Lines: 9, Source: todo.py


Example 2: tls_cluster_context

def tls_cluster_context(worker_kwargs=None, scheduler_kwargs=None,
                        security=None, **kwargs):
    security = security or tls_only_security()
    worker_kwargs = assoc(worker_kwargs or {}, 'security', security)
    scheduler_kwargs = assoc(scheduler_kwargs or {}, 'security', security)

    with cluster(worker_kwargs=worker_kwargs,
                 scheduler_kwargs=scheduler_kwargs,
                 **kwargs) as (s, workers):
        yield s, workers
Developer: tomMoral, Project: distributed, Lines: 10, Source: utils_test.py


Example 3: rebatch_metadata_by_experiment

def rebatch_metadata_by_experiment(metadata):
    normal, normal_rest = prioritize_normals(metadata)
    batch = metadata[0]["participant"]
    tumor_batch = [tz.assoc(x, "batch", batch) for x in metadata if x["sample_type"] in PRIORITIZED_TUMOR_CODES.keys()]
    normal = [tz.assoc(normal, "batch", batch)] if normal else []
    # run each non priority normal as its own tumor sample with no control
    normal_rest = [tz.assoc(x, "batch", batch + "-" + x["sample_type"]) for x in normal_rest]
    normal_rest = [tz.assoc(x, "phenotype", "tumor") for x in normal_rest]
    all_batches = normal + normal_rest + tumor_batch
    return all_batches
Developer: samesun, Project: bcbio-nextgen, Lines: 10, Source: tcga_to_bcbio.py


Example 4: __init__

    def __init__(self, handlers, max_buffer_size=MAX_BUFFER_SIZE,
                 connection_limit=512, **kwargs):
        self.handlers = assoc(handlers, 'identity', self.identity)
        self.id = str(uuid.uuid1())
        self._port = None
        self.rpc = ConnectionPool(limit=connection_limit)
        super(Server, self).__init__(max_buffer_size=max_buffer_size, **kwargs)
Developer: broxtronix, Project: distributed, Lines: 7, Source: core.py


Example 5: __init__

    def __init__(self, handlers, max_buffer_size=MAX_BUFFER_SIZE,
            connection_limit=512, deserialize=True, **kwargs):
        self.handlers = assoc(handlers, 'identity', self.identity)
        self.id = str(uuid.uuid1())
        self._port = None
        self._listen_streams = dict()
        self.rpc = ConnectionPool(limit=connection_limit,
                                  deserialize=deserialize)
        self.deserialize = deserialize
        self.monitor = SystemMonitor()
        self.counters = None
        self.digests = None
        if hasattr(self, 'loop'):
            with ignoring(ImportError):
                from .counter import Digest
                self.digests = defaultdict(partial(Digest, loop=self.loop))

            from .counter import Counter
            self.counters = defaultdict(partial(Counter, loop=self.loop))

            pc = PeriodicCallback(self.monitor.update, 500, io_loop=self.loop)
            self.loop.add_callback(pc.start)
            if self.digests is not None:
                self._last_tick = time()
                self._tick_pc = PeriodicCallback(self._measure_tick, 20, io_loop=self.loop)
                self.loop.add_callback(self._tick_pc.start)


        self.__stopped = False

        super(Server, self).__init__(max_buffer_size=max_buffer_size, **kwargs)
Developer: dask, Project: distributed, Lines: 31, Source: core.py


Example 6: _normalize_arg

def _normalize_arg(other, constants):
    """Get the name to use to build the string and turn the value into
    something that can go into the ast.

    If needed this will functionally update the constants.

    Parameters
    ----------
    other : any
        The object to normalize.
    constants : dict[str -> any]
        The constant namespace.

    Returns
    -------
    othername : str
        The name to use in the ``_name`` of the lambda.
    other : any
        The normalized value.
    constants : dict[str -> any]
        The potentially updated constants.
    """
    if not isinstance(other, placeholder):
        othername = repr(other)
        name = '_' + uuid4().hex
        constants = assoc(constants, name, other)
        other = ast.Name(id=name, ctx=ast.Load())
    elif other._tree is not other:
        othername = '(%s)' % other._name
    else:
        othername = other._name

    return othername, other, constants
Developer: llllllllll, Project: fz, Lines: 33, Source: __init__.py


Example 7: _get_subnet_config_w_cidr

    def _get_subnet_config_w_cidr(self, network_config):
        network_cidr_base = str(network_config.get('network_cidr_base', '172.16.0.0'))
        network_cidr_size = str(network_config.get('network_cidr_size', '20'))
        first_network_address_block = str(network_config.get('first_network_address_block', network_cidr_base))

        ret_val = {}
        base_cidr = network_cidr_base + '/' + network_cidr_size
        net = netaddr.IPNetwork(base_cidr)

        grouped_subnet = groupby('size', self._get_subnet_config_w_az(network_config))
        subnet_groups = sorted(grouped_subnet.items())
        available_cidrs = []

        for subnet_size, subnet_configs in subnet_groups:
            newcidrs = net.subnet(int(subnet_size))

            for subnet_config in subnet_configs:
                try:
                    cidr = next(newcidrs)
                except StopIteration:
                    # Current block is exhausted; fall back to a previously
                    # reserved CIDR range and carve it up.
                    net = next(chain(*reversed(available_cidrs)))
                    newcidrs = net.subnet(int(subnet_size))
                    cidr = next(newcidrs)

                new_config = assoc(subnet_config, 'cidr', str(cidr))
                yield new_config
            else:
                net = next(newcidrs)
                available_cidrs.append(newcidrs)
Developer: stocktwits, Project: cloudformation-environmentbase, Lines: 29, Source: base_network.py


Example 8: unify

def unify(u, v, s):  # no check at the moment
    """ Find substitution so that u == v while satisfying s

    >>> x = var('x')
    >>> unify((1, x), (1, 2), {})
    {~x: 2}
    """
    u = walk(u, s)
    v = walk(v, s)
    if u == v:
        return s
    if isvar(u):
        return assoc(s, u, v)
    if isvar(v):
        return assoc(s, v, u)
    return _unify(u, v, s)
Developer: vitormazzi, Project: unification, Lines: 16, Source: core.py


Example 9: fill_kwargs

def fill_kwargs(fn, args, kwargs):
    """ Read a csv file and fill up kwargs

    This normalizes kwargs against a sample file.  It does the following:

    1.  If given a globstring, just use one file
    2.  Get names from csv file if not given
    3.  Identify the presence of a header
    4.  Identify dtypes
    5.  Establish column names
    6.  Switch around dtypes and column names if parse_dates is active

    Normally ``pd.read_csv`` does this for us.  However for ``dd.read_csv`` we
    need to be consistent across multiple files and don't want to do these
    heuristics each time so we use the pandas solution once, record the
    results, and then send back a fully explicit kwargs dict to send to future
    calls to ``pd.read_csv``.

    Returns
    -------

    kwargs: dict
        keyword arguments to give to pd.read_csv
    """
    kwargs = merge(csv_defaults, kwargs)
    sample_nrows = kwargs.pop('sample_nrows', 1000)
    essentials = ['columns', 'names', 'header', 'parse_dates', 'dtype']
    if set(essentials).issubset(kwargs):
        return kwargs

    # Let pandas infer on the first sample_nrows rows (default 1000)
    if '*' in fn:
        filenames = sorted(glob(fn))
        if not filenames:
            raise ValueError("No files found matching name %s" % fn)
        fn = filenames[0]

    if 'names' not in kwargs:
        kwargs['names'] = csv_names(fn, **kwargs)
    if 'header' not in kwargs:
        kwargs['header'] = infer_header(fn, **kwargs)
        if kwargs['header'] is True:
            kwargs['header'] = 0

    try:
        head = pd.read_csv(fn, *args, **assoc(kwargs, 'nrows', sample_nrows))
    except StopIteration:
        head = pd.read_csv(fn, *args, **kwargs)

    if 'parse_dates' not in kwargs:
        kwargs['parse_dates'] = [col for col in head.dtypes.index
                                 if np.issubdtype(head.dtypes[col], np.datetime64)]
    if 'dtype' not in kwargs:
        kwargs['dtype'] = dict(head.dtypes)
        for col in kwargs['parse_dates']:
            del kwargs['dtype'][col]

    kwargs['columns'] = list(head.columns)

    return kwargs
Developer: jayhetee, Project: dask, Lines: 60, Source: io.py


Example 10: run_traffic_jam

def run_traffic_jam(nsends, nbytes):
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval=0.01)
        b.start(comm)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), comm.read())
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        comm.close()  # external closing
        yield b.close()
Developer: tomMoral, Project: distributed, Lines: 34, Source: test_batched.py


Example 11: process

    def process(msg):
        try:
            result = yield self.compute(report=False, **msg)
            bstream.send(result)
        except Exception as e:
            logger.exception(e)
            bstream.send(assoc(error_message(e), 'key', msg.get('key')))
Developer: coobas, Project: distributed, Lines: 7, Source: worker.py


Example 12: read_csv

def read_csv(fn, *args, **kwargs):
    chunkbytes = kwargs.pop('chunkbytes', 2**25)  # 32 MB
    categorize = kwargs.pop('categorize', None)
    index = kwargs.pop('index', None)
    if index and categorize is None:
        categorize = True

    kwargs = fill_kwargs(fn, args, kwargs)

    # Handle glob strings
    if '*' in fn:
        return concat([read_csv(f, *args, **kwargs) for f in sorted(glob(fn))])

    token = tokenize(os.path.getmtime(fn), args, kwargs)
    name = 'read-csv-%s-%s' % (fn, token)

    columns = kwargs.pop('columns')
    header = kwargs.pop('header')

    if 'nrows' in kwargs:  # Just create single partition
        dsk = {(name, 0): (apply, pd.read_csv, (fn,),
                           assoc(kwargs, 'header', header))}
        result = DataFrame(dsk, name, columns, [None, None])

    else:
        # Chunk sizes and numbers
        total_bytes = file_size(fn, kwargs['compression'])
        nchunks = int(ceil(total_bytes / chunkbytes))
        divisions = [None] * (nchunks + 1)

        first_read_csv = partial(pd.read_csv, *args, header=header,
                                 **dissoc(kwargs, 'compression'))
        rest_read_csv = partial(pd.read_csv, *args, header=None,
                                **dissoc(kwargs, 'compression'))

        # Create dask graph
        dsk = dict(((name, i), (rest_read_csv, (BytesIO,
                                (textblock, fn,
                                 i * chunkbytes, (i + 1) * chunkbytes,
                                 kwargs['compression']))))
                   for i in range(1, nchunks))
        dsk[(name, 0)] = (first_read_csv, (BytesIO,
                          (textblock, fn, 0, chunkbytes, kwargs['compression'])))

        result = DataFrame(dsk, name, columns, divisions)

    if categorize or index:
        categories, quantiles = categories_and_quantiles(fn, args, kwargs,
                                                         index, categorize,
                                                         chunkbytes=chunkbytes)

    if categorize:
        func = partial(categorize_block, categories=categories)
        result = result.map_partitions(func, columns=columns)

    if index:
        result = set_partition(result, index, quantiles)

    return result
Developer: benlewis-tes, Project: dask, Lines: 59, Source: io.py


Example 13: test_parameterized_term_default_value

    def test_parameterized_term_default_value(self):
        defaults = {'a': 'default for a', 'b': 'default for b'}

        class F(Factor):
            params = defaults

            inputs = (SomeDataSet.foo,)
            dtype = 'f8'
            window_length = 5

        assert_equal(F().params, defaults)
        assert_equal(F(a='new a').params, assoc(defaults, 'a', 'new a'))
        assert_equal(F(b='new b').params, assoc(defaults, 'b', 'new b'))
        assert_equal(
            F(a='new a', b='new b').params,
            {'a': 'new a', 'b': 'new b'},
        )
Developer: FranSal, Project: zipline, Lines: 17, Source: test_term.py


Example 14: _get_subnet_config_w_az

    def _get_subnet_config_w_az(self, network_config):
        az_count = int(network_config.get('az_count', 2))
        subnet_config = network_config.get('subnet_config', {})

        for subnet in subnet_config:
            for az in range(az_count):
                newsubnet = assoc(subnet, 'AZ', az)
                yield newsubnet
Developer: stocktwits, Project: cloudformation-environmentbase, Lines: 8, Source: base_network.py


Example 15: run

    def run(self, *args, **kwargs):
        """Run the server"""
        port = kwargs.pop('port', DEFAULT_PORT)
        self.port = port
        try:
            self.app.run(*args, port=port, **kwargs)
        except socket.error:
            print("\tOops, couldn't connect on port %d.  Is it busy?" % port)
            self.run(*args, **assoc(kwargs, 'port', port + 1))
Developer: ChampagneDev, Project: blaze, Lines: 9, Source: server.py


Example 16: compute_down

def compute_down(expr,
                 ec,
                 profiler_output=None,
                 compute_kwargs=None,
                 odo_kwargs=None,
                 **kwargs):
    """Compute down for blaze clients.

    Parameters
    ----------
    expr : Expr
        The expression to send to the server.
    ec : Client
        The blaze client to compute against.
    namespace : dict[Symbol -> any], optional
        The namespace to compute the expression in. This will be amended to
        include that data for the server. By default this will just be the
        client mapping to the server's data.
    compute_kwargs : dict, optional
        Extra kwargs to pass to compute on the server.
    odo_kwargs : dict, optional
        Extra kwargs to pass to odo on the server.
    profile : bool, optional
        Should blaze server run cProfile over the computation of the expression
        and the serialization of the response.
    profiler_output : file-like object, optional
        A file like object to hold the profiling output from the server.
        If this is not passed then the server will write the data to the
        server's filesystem
    """
    from .server import to_tree

    kwargs = keymap(u8, kwargs)

    tree = to_tree(expr)
    serial = ec.serial
    if profiler_output is not None:
        kwargs[u'profile'] = True
        kwargs[u'profiler_output'] = ':response'

    kwargs[u'compute_kwargs'] = keymap(u8, compute_kwargs or {})
    kwargs[u'odo_kwargs'] = keymap(u8, odo_kwargs or {})

    r = post(
        ec,
        '/compute',
        data=serial.dumps(assoc(kwargs, u'expr', tree)),
        auth=ec.auth,
        headers=mimetype(serial),
    )

    if not ok(r):
        raise ValueError("Bad response: %s" % reason(r))
    response = serial.loads(content(r))
    if profiler_output is not None:
        profiler_output.write(response[u'profiler_output'])
    return serial.data_loads(response[u'data'])
Developer: blaze, Project: blaze, Lines: 57, Source: client.py


Example 17: json_expand

def json_expand(json_op, key_name='json'):
    """ Convert a string json object to Python dict in an op. """
    if type(json_op) == dict and key_name in json_op and json_op[key_name]:
        try:
            return update_in(json_op, [key_name], json.loads)
        except JSONDecodeError:
            return assoc(json_op, key_name, {})

    return json_op
Developer: G3niusMind, Project: steem-python, Lines: 9, Source: utils.py


Example 18: _ready_task

    def _ready_task(self, function=None, key=None, args=(), kwargs={},
                    task=None, who_has=None):
        who_has = who_has or {}
        diagnostics = {}
        data = {k: self.data[k] for k in who_has if k in self.data}
        who_has = {k: set(map(coerce_to_address, v))
                   for k, v in who_has.items()
                   if k not in self.data}
        if who_has:
            try:
                logger.info("gather %d keys from peers: %s",
                            len(who_has), str(who_has))
                diagnostics['transfer-start'] = time()
                other = yield gather_from_workers(who_has)
                diagnostics['transfer-stop'] = time()
                self.data.update(other)
                yield self.center.add_keys(address=self.address,
                                           keys=list(other))
                data.update(other)
            except KeyError as e:
                logger.warn("Could not find data for %s", key)
                raise Return({'status': 'missing-data',
                              'keys': e.args,
                              'key': key})
        else:
            transfer_time = 0
        try:
            start = default_timer()
            if task is not None:
                task = loads(task)
            if function is not None:
                function = loads(function)
            if args:
                args = loads(args)
            if kwargs:
                kwargs = loads(kwargs)
            diagnostics['deserialization'] = default_timer() - start
        except Exception as e:
            logger.warn("Could not deserialize task", exc_info=True)
            raise Return(assoc(error_message(e), 'key', key))

        if task is not None:
            assert not function and not args and not kwargs
            function = execute_task
            args = (task,)

        # Fill args with data
        args2 = pack_data(args, data)
        kwargs2 = pack_data(kwargs, data)

        raise Return({'status': 'OK',
                      'function': function,
                      'args': args2,
                      'kwargs': kwargs2,
                      'diagnostics': diagnostics,
                      'key': key})
Developer: mindis, Project: distributed, Lines: 56, Source: worker.py


Example 19: run

    def run(self, *args, **kwargs):
        """Run the server"""
        port = kwargs.pop('port', DEFAULT_PORT)
        self.port = port
        try:
            self.app.run(*args, port=port, **kwargs)
        except socket.error:
            print("\tOops, couldn't connect on port %d.  Is it busy?" % port)
            if kwargs.get('retry', True):
                # Attempt to start the server on a new port.
                self.run(*args, **assoc(kwargs, 'port', port + 1))
Developer: CaptainAL, Project: Spyder, Lines: 11, Source: server.py


Example 20: _write_tool

def _write_tool(step_dir, name, inputs, outputs):
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    out = {"class": "CommandLineTool",
           "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
           "inputs": [],
           "outputs": []}
    for i, inp in enumerate(inputs):
        out["inputs"].append(tz.assoc(inp, "inputBinding",
                                      {"prefix": "%s=" % inp["id"].replace("#", ""), "separate": False,
                                       "itemSeparator": ";;", "position": i}))
    with open(out_file, "w") as out_handle:
        yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
Developer: vhuarui, Project: bcbio-nextgen, Lines: 13, Source: create.py



Note: The toolz.assoc function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before redistributing or using the code. Do not reproduce without permission.

