• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python moves.filter函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中six.moves.filter函数的典型用法代码示例。如果您正苦于以下问题:Python filter函数的具体用法?Python filter怎么用?Python filter使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了filter函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: get_content_snippet2

def get_content_snippet2(content, keyword, max_lines=10):
    """Return an HTML snippet of *content* centered on the first *keyword* hit.

    Up to ``max_lines`` non-empty lines around the match are joined with
    ``<br/>`` and every occurrence of the keyword is wrapped in
    ``<strong>`` tags.  If the keyword is not found, the first
    ``max_lines`` words of the tag-stripped content are returned instead.
    """
    max_lines = int(max_lines)
    # DOTALL + MULTILINE: capture everything before and after the first
    # case-insensitive occurrence of the (regex-escaped) keyword.
    p = re.compile(
        r'(?P<before>.*)%s(?P<after>.*)' %
        re.escape(keyword),
        re.MULTILINE | re.IGNORECASE | re.DOTALL)
    m = p.search(content)
    html = ""
    if m:
        words = list(filter(
            lambda x: x != "",
            striptags(
                m.group("before")).split("\n")))
        # Half of the line budget (rounded down) goes to the context
        # before the hit; the remainder goes to the context after it.
        before_lines = words[-max_lines // 2:]
        words = list(filter(
            lambda x: x != "",
            striptags(
                m.group("after")).split("\n")))
        after = "<br/>".join(words[:max_lines - len(before_lines)])
        before = "<br/>".join(before_lines)
        html = "%s %s %s" % (before, striptags(keyword), after)
        # BUG FIX: escape the keyword here as well.  Previously the raw
        # keyword was compiled, so input containing regex metacharacters
        # (e.g. "c++") raised re.error or highlighted the wrong text.
        kw_p = re.compile(r'(%s)' % re.escape(keyword), re.IGNORECASE)
        html = kw_p.sub(r"<strong>\1</strong>", html)
        html = mark_safe(html)
    else:
        # No match: fall back to the first max_lines words of the content.
        html = " ".join(
            list(filter(
                lambda x: x != "",
                striptags(content).replace(
                    "\n",
                    " ").split(" ")))[
                :max_lines])
    return html
开发者ID:arjun024,项目名称:wikicoding,代码行数:33,代码来源:wiki_tags.py


示例2: parse_celery_workers

def parse_celery_workers(celery_workers):
    """Split the flower "get workers" response into expected states.

    Hostnames without a ``_timestamp`` suffix are always expected to be
    running.  Timestamped hostnames are grouped by their base name; only
    the newest worker of each group (the lexicographically greatest) is
    expected to be running, the older ones are expected to be stopped.

    Returns a ``(expect_running, expect_stopped)`` tuple of lists.
    """
    def base_name(hostname):
        # Drop the trailing ".<timestamp>" component of the hostname.
        return '.'.join(hostname.split('.')[:-1])

    expect_running = [w for w in celery_workers
                      if not w.endswith('_timestamp')]
    expect_stopped = []

    stamped = sorted(
        (w for w in celery_workers if w.endswith('_timestamp')),
        key=base_name,
    )

    for _, members in groupby(stamped, base_name):
        newest_first = sorted(members, reverse=True)
        expect_running.append(newest_first[0])
        expect_stopped.extend(newest_first[1:])

    return expect_running, expect_stopped
开发者ID:kkrampa,项目名称:commcare-hq,代码行数:27,代码来源:utils.py


示例3: startup

    def startup(self, group):
        """ Prepare for a new run.

        Args
        ----
        group : `Group`
            Group that owns this recorder.
        """

        # One shared empty set is the default for every category that is
        # not recorded (the sets are only rebound below, never mutated).
        myparams = myunknowns = myresids = set()

        if MPI:
            # NOTE(review): rank/owned are unused in this excerpt --
            # presumably consumed by subclasses or later code; confirm.
            rank = group.comm.rank
            owned = group._owning_ranks

        # Build the inclusion sets for whichever categories are enabled.
        if self.options['record_params']:
            myparams = {name for name in group.params
                        if self._check_path(name)}
        if self.options['record_unknowns']:
            myunknowns = {name for name in group.unknowns
                          if self._check_path(name)}
        if self.options['record_resids']:
            myresids = {name for name in group.resids
                        if self._check_path(name)}

        self._filtered[group.pathname] = {
            'p': myparams,
            'u': myunknowns,
            'r': myresids
        }
开发者ID:NoriVicJr,项目名称:OpenMDAO,代码行数:28,代码来源:base_recorder.py


示例4: _get_eligible_broker_pair

    def _get_eligible_broker_pair(self, under_loaded_rg, eligible_partition):
        """Evaluate and return source and destination broker-pair from over-loaded
        and under-loaded replication-group if possible, return None otherwise.

        Return source broker with maximum partitions and destination broker with
        minimum partitions based on following conditions:-
        1) At-least one broker in under-loaded group which does not have
        victim-partition. This is because a broker cannot have duplicate replica.
        2) At-least one broker in over-loaded group which has victim-partition
        """
        under_brokers = list(filter(
            lambda b: eligible_partition not in b.partitions,
            under_loaded_rg.brokers,
        ))
        over_brokers = list(filter(
            lambda b: eligible_partition in b.partitions,
            self.brokers,
        ))

        # Get source and destination broker
        source_broker, dest_broker = None, None
        if over_brokers:
            source_broker = max(
                over_brokers,
                key=lambda broker: len(broker.partitions),
            )
        if under_brokers:
            dest_broker = min(
                under_brokers,
                key=lambda broker: len(broker.partitions),
            )
        return (source_broker, dest_broker)
开发者ID:Yelp,项目名称:kafka-utils,代码行数:32,代码来源:rg.py


示例5: process

    def process(self):
        """
        Process the file upload and add products to the range.

        Looks up the uploaded IDs (partner SKUs and UPCs), adds the
        products that are not already in the range, and records counts of
        added / missing / duplicate IDs via ``mark_as_processed``.
        """
        # IDs supplied in the uploaded file.
        all_ids = set(self.extract_ids())
        products = self.range.included_products.all()
        existing_skus = products.values_list('stockrecord__partner_sku',
                                             flat=True)
        # filter(bool, ...) drops None/empty values from the DB results.
        existing_skus = set(filter(bool, existing_skus))
        existing_upcs = products.values_list('upc', flat=True)
        existing_upcs = set(filter(bool, existing_upcs))
        existing_ids = existing_skus.union(existing_upcs)
        # Only IDs not already attached to the range need processing.
        new_ids = all_ids - existing_ids

        # Match the new IDs against either SKU or UPC.
        products = Product._default_manager.filter(
            models.Q(stockrecord__partner_sku__in=new_ids) |
            models.Q(upc__in=new_ids))
        for product in products:
            self.range.add_product(product)

        # Processing stats
        found_skus = products.values_list('stockrecord__partner_sku',
                                          flat=True)
        found_skus = set(filter(bool, found_skus))
        found_upcs = set(filter(bool, products.values_list('upc', flat=True)))
        found_ids = found_skus.union(found_upcs)
        # IDs uploaded but matching no product at all.
        missing_ids = new_ids - found_ids
        # IDs uploaded that were already in the range.
        dupes = set(all_ids).intersection(existing_ids)

        self.mark_as_processed(products.count(), len(missing_ids), len(dupes))
开发者ID:AjitHimself,项目名称:Houseofhaus,代码行数:30,代码来源:models.py


示例6: check

def check(process_output, judge_output, split_on='lines', **kwargs):
    """Compare two outputs for equality, ignoring the order of their
    lines (or whitespace-separated tokens).

    With ``split_on='lines'``, each line is additionally tokenized so
    intra-line whitespace differences are ignored too.  Raises
    ``InternalError`` for an unknown ``split_on`` mode.
    """
    patterns = {
        'lines': b'[\r\n]',
        'whitespace': b'[\s]',
    }
    split_pattern = patterns.get(split_on)

    if not split_pattern:
        raise InternalError('invalid `split_on` mode')

    def tokenize(output):
        # Split on the chosen delimiter and drop empty chunks.
        return [chunk for chunk
                in resplit(split_pattern, utf8bytes(output)) if chunk]

    process_lines = tokenize(process_output)
    judge_lines = tokenize(judge_output)

    if len(process_lines) != len(judge_lines):
        return False

    if split_on == 'lines':
        # Normalize whitespace inside each line by re-splitting it.
        process_lines = [six.binary_type.split(line) for line in process_lines]
        judge_lines = [six.binary_type.split(line) for line in judge_lines]

    return sorted(process_lines) == sorted(judge_lines)
开发者ID:DMOJ,项目名称:judge,代码行数:27,代码来源:sorted.py


示例7: process_trade

    def process_trade(self, trade_event):
        """Match *trade_event* against this sid's open orders.

        Generator: yields ``(txn, order)`` for every transaction produced
        by ``process_transactions``, then removes any orders that were
        closed and cleans up the per-sid entry when no orders remain.
        """
        # No open orders for this security: nothing to do.
        if trade_event.sid not in self.open_orders:
            return

        if trade_event.volume < 1:
            # there are zero volume trade_events bc some stocks trade
            # less frequently than once per minute.
            return

        orders = self.open_orders[trade_event.sid]
        orders.sort(key=lambda o: o.dt)
        # Only use orders for the current day or before
        # NOTE: `filter` is lazy; process_transactions consumes it below.
        current_orders = filter(
            lambda o: o.dt <= trade_event.dt,
            orders)

        processed_orders = []
        for txn, order in self.process_transactions(trade_event,
                                                    current_orders):
            processed_orders.append(order)
            yield txn, order

        # remove closed orders. we should only have to check
        # processed orders
        def not_open(order):
            return not order.open
        closed_orders = filter(not_open, processed_orders)
        for order in closed_orders:
            orders.remove(order)

        # Drop the sid entry entirely once its last order is gone.
        if len(orders) == 0:
            del self.open_orders[trade_event.sid]
开发者ID:AlexanderAA,项目名称:zipline,代码行数:33,代码来源:blotter.py


示例8: test_simple_plan_add_on_creation

    def test_simple_plan_add_on_creation(self):
        """Creating add-ons on a plan stores them in the mocked backend
        with correctly serialized fields and linked foreign keys."""
        # add a sample plan to the plans backend
        mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)

        self.assertEqual(len(mocurly.backend.plan_add_ons_backend.datastore), 0)

        # now create some addons
        plan = recurly.Plan.get(self.base_backed_plan_data['plan_code'])
        for add_on in self.base_add_on_data:
            add_on['name'] = add_on['add_on_code'].upper()
            add_on['unit_amount_in_cents'] = recurly.Money(**add_on['unit_amount_in_cents'])
            plan.create_add_on(recurly.AddOn(**add_on))

        self.assertEqual(len(mocurly.backend.plan_add_ons_backend.datastore), 2)
        # Add-ons are keyed as "<plan_code>__<add_on_code>" in the backend.
        foo_add_on_backed = mocurly.backend.plan_add_ons_backend.get_object(self.base_backed_plan_data['plan_code'] + '__foo')
        # `filter` returns an iterator; `next` pulls the first match.
        add_ons = filter(lambda add_on: add_on['add_on_code'] == 'foo', self.base_add_on_data)
        foo_add_on = next(add_ons)
        for k, v in foo_add_on.items():
            if k == 'unit_amount_in_cents':
                # Money fields are stored as {currency: str(amount)} maps.
                self.assertEqual(foo_add_on_backed[k], dict((curr, str(amt)) for curr, amt in v.currencies.items()))
            else:
                self.assertEqual(foo_add_on_backed[k], v)

        bar_add_on_backed = mocurly.backend.plan_add_ons_backend.get_object(self.base_backed_plan_data['plan_code'] + '__bar')
        add_ons = filter(lambda add_on: add_on['add_on_code'] == 'bar', self.base_add_on_data)
        bar_add_on = next(add_ons)
        for k, v in bar_add_on.items():
            if k == 'unit_amount_in_cents':
                self.assertEqual(bar_add_on_backed[k], dict((curr, str(amt)) for curr, amt in v.currencies.items()))
            else:
                self.assertEqual(bar_add_on_backed[k], v)

        # make sure foreign keys are linked properly
        self.assertEqual(len(plan.add_ons()), 2)
开发者ID:Captricity,项目名称:mocurly,代码行数:34,代码来源:test_subscriptions.py


示例9: test_get_questions_with_repeats

    def test_get_questions_with_repeats(self):
        """
        This test ensures that questions that start with the repeat group id
        do not get marked as repeats. For example:

            /data/repeat_name <-- repeat group path
            /data/repeat_name_count <-- question path

        Before /data/repeat_name_count would be tagged as a repeat incorrectly.
        See http://manage.dimagi.com/default.asp?234108 for context
        """
        form = self.app.get_form(self.form_with_repeats_unique_id)
        questions = form.wrapped_xform().get_questions(
            ['en'],
            include_groups=True,
        )

        def first_question_with_value(path):
            # First parsed question whose value matches the given path.
            return [q for q in questions if q['value'] == path][0]

        repeat_name_count = first_question_with_value('/data/repeat_name_count')
        self.assertIsNone(repeat_name_count['repeat'])

        repeat_question = first_question_with_value(
            '/data/repeat_name/question5')
        self.assertEqual(repeat_question['repeat'], '/data/repeat_name')
开发者ID:kkrampa,项目名称:commcare-hq,代码行数:28,代码来源:test_get_questions.py


示例10: _leaf_versions

def _leaf_versions(tree, rc):
    '''
    Recursively traverse the versions tree depth-first and collect the
    last node of each branch, i.e. the leaf versions.

    When *rc* is true, the newest release candidate of a branch is also
    included -- unless it is older than the newest production version,
    in which case only the production version is kept.
    '''
    leaves = []
    if not _is_iterable(tree):
        return leaves

    for subtree in tree:
        leaves.extend(_leaf_versions(subtree, rc))
    if leaves:
        return leaves

    if not rc:
        leaves.append(tree[-1])
        return leaves

    # Newest RC and newest production version in this branch, if any.
    last_rc = next((v for v in reversed(tree) if v.is_rc), None)
    last_prod = next((v for v in reversed(tree) if not v.is_rc), None)
    if last_rc and last_prod and (last_prod < last_rc):
        leaves.extend([last_prod, last_rc])
    elif not last_prod:
        leaves.append(last_rc)
    else:
        # Either there is no RC, or we ignore the RC as older than
        # the latest production version:
        leaves.append(last_prod)
    return leaves
开发者ID:alban,项目名称:scope,代码行数:25,代码来源:list_versions.py


示例11: retrieve_keys

    def retrieve_keys(bucket, key, prefix='', postfix='', delim='/',
                      directories=False, recursive=False):
        """
        Retrieve keys from a bucket, optionally filtered by postfix or
        restricted to non-directory entries.  Returns a lazy iterable.
        """
        if key and prefix:
            assert key.endswith(delim)

        key += prefix
        # If `key` names a directory (i.e. children exist under
        # `key + delim`), append the delimiter so listing happens inside it.
        if key and not key.endswith(delim):
            if BotoClient.check_prefix(bucket, key + delim, delim=delim):
                key += delim

        # A recursive listing passes no delimiter to bucket.list().
        listdelim = None if recursive else delim
        results = bucket.list(prefix=key, delimiter=listdelim)

        if postfix:
            def keep(k_):
                return BotoClient.filter_predicate(k_, postfix, inclusive=True)
            return filter(keep, results)
        if not directories:
            def keep(k_):
                return BotoClient.filter_predicate(k_, delim, inclusive=False)
            return filter(keep, results)
        return results
开发者ID:alexandonian,项目名称:lightning,代码行数:26,代码来源:readers.py


示例12: removePyc

def removePyc(folder, only_excess=True, show_logs=True):
    """Delete stale ``.pyc`` files under *folder* and prune empty dirs.

    Args:
        folder: root directory to clean (normalized via ``sp()``).
        only_excess: when True, only remove ``.pyc`` files whose matching
            ``.py`` source no longer exists; when False, remove all.
        show_logs: when True, log each removed file at debug level.
    """
    folder = sp(folder)

    for root, dirs, files in os.walk(folder):

        pyc_files = filter(lambda filename: filename.endswith(".pyc"), files)
        py_files = set(filter(lambda filename: filename.endswith(".py"), files))
        # A .pyc is "excess" when stripping the trailing 'c' yields a .py
        # name that no longer exists in the same directory.
        excess_pyc_files = (
            filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
        )

        for excess_pyc_file in excess_pyc_files:
            full_path = os.path.join(root, excess_pyc_file)
            if show_logs:
                log.debug("Removing old PYC file: %s", full_path)
            try:
                os.remove(full_path)
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; os.remove raises OSError.
            except OSError:
                # NOTE(review): `log` appears to be a project logger whose
                # error() takes a replace-tuple; stdlib logging would need
                # the args passed separately -- confirm before changing.
                log.error("Couldn't remove %s: %s", (full_path, traceback.format_exc()))

        for dir_name in dirs:
            full_path = os.path.join(root, dir_name)
            if len(os.listdir(full_path)) == 0:
                try:
                    os.rmdir(full_path)
                except OSError:
                    log.error("Couldn't remove empty directory %s: %s", (full_path, traceback.format_exc()))
开发者ID:basrieter,项目名称:CouchPotatoServer,代码行数:28,代码来源:variable.py


示例13: test_in

 def test_in(self):
     """The 'in' filter accepts listed values and rejects all others."""
     values = ['a', 'b', 'c']
     # Renamed from `filter` to avoid shadowing the builtin.
     in_filter = self.get_filter('in', values)
     for value in values:
         self.assertTrue(in_filter({'foo': value}))
     for value in ['d', 'e', 'f']:
         self.assertFalse(in_filter({'foo': value}))
开发者ID:dimagi,项目名称:commcare-hq,代码行数:7,代码来源:test_filters.py


示例14: get_revert_migrations

    def get_revert_migrations(self, current_migrations, backup_migrations):
        """Compute the migrations needed to roll the current DB state back
        to the backed-up state.

        Both arguments are newline-separated migration listings.  Returns
        a collection of revert targets, one per affected app, derived by
        unwinding current migrations (newest first) until a migration
        known to the backup is reached.
        """
        # Two views of the reversed current migrations: one iterator to
        # walk (current_migrations) and one materialized into a set.
        current_migrations, all_migrations = itertools.tee(reversed(list(map(
            Migration,
            filter(None, current_migrations.splitlines()),
        ))))
        all_migrations = utils.OrderedSet(all_migrations)

        # Backup migrations are also consumed newest-first.
        backup_migrations = reversed(list(map(
            Migration,
            filter(None, backup_migrations.splitlines()),
        )))

        revert_migrations = collections.OrderedDict()

        while True:
            # Skip backup migrations the current state does not know about.
            while True:
                backup_migration = next(backup_migrations, None)
                if not backup_migration or backup_migration in all_migrations:
                    break
            # Unwind current migrations down to the matching backup one;
            # for each app touched, record the parent migration to revert
            # to.  Note: this shared iterator resumes where it left off on
            # the next outer-loop pass -- do not restructure.
            for current_migration in current_migrations:
                if current_migration == backup_migration:
                    break
                revert_migration = self._get_parent_migration(
                    current_migration,
                    migrations=all_migrations,
                )
                revert_migrations[current_migration.app] = revert_migration

            # Backup exhausted: everything remaining has been handled.
            if backup_migration is None:
                return revert_migrations.values()
开发者ID:fenildf,项目名称:fabricio,代码行数:30,代码来源:django.py


示例15: label_and_sentence

 def label_and_sentence(line, clean_fn):
     """Split a TSV line into its label and a cleaned token list.

     Args:
         line: raw input line; the first field (split on
             ``TSVSeqLabelReader.SPLIT_ON``) is the label, the rest text.
         clean_fn: callable applied to each raw field before filtering.

     Returns:
         (label, tokens) where tokens is a list of non-empty words.
     """
     label_text = re.split(TSVSeqLabelReader.SPLIT_ON, line)
     label = label_text[0]
     text = label_text[1:]
     # Clean each field, drop empties, then re-tokenize on whitespace.
     text = ' '.join(list(filter(lambda s: len(s) != 0, [clean_fn(w) for w in text])))
     # BUG FIX: use a raw string; '\s' in a plain literal is an invalid
     # escape (DeprecationWarning, SyntaxWarning on newer Pythons).
     text = list(filter(lambda s: len(s) != 0, re.split(r'\s+', text)))
     return label, text
开发者ID:dpressel,项目名称:baseline,代码行数:7,代码来源:reader.py


示例16: DelayedFcnCall

 def DelayedFcnCall(*args, **kwargs):
   """Call `fcn` immediately unless any argument is a Later, in which
   case build a deferred call instead."""
   def is_later(obj):
     return type(obj) == Later
   has_later = (any(is_later(arg) for arg in args) or
                any(is_later(val) for val in kwargs.values()))
   if has_later:
     return CreateLaterFunction(fcn, *args, **kwargs)
   return fcn(*args, **kwargs)
开发者ID:wannaphongcom,项目名称:dplython,代码行数:8,代码来源:dplython.py


示例17: __init__

    def __init__(self, namespaces=None, pollster_list=None):
        """Set up the polling agent.

        Args:
            namespaces: pollster namespace(s) to load extensions from;
                a single string is accepted and wrapped in a list.
                Defaults to ['compute', 'central'].
            pollster_list: glob patterns restricting which pollsters are
                loaded.  Mutually exclusive with coordination (see below).

        Raises:
            PollsterListForbidden: if a pollster list is given while a
                coordination backend is configured.
            EmptyPollstersList: if no pollster extensions were loaded.
        """
        namespaces = namespaces or ['compute', 'central']
        pollster_list = pollster_list or []
        group_prefix = cfg.CONF.polling.partitioning_group_prefix

        # features of using coordination and pollster-list are exclusive, and
        # cannot be used at one moment to avoid both samples duplication and
        # samples being lost
        if pollster_list and cfg.CONF.coordination.backend_url:
            raise PollsterListForbidden()

        super(AgentManager, self).__init__()

        def _match(pollster):
            """Find out if pollster name matches to one of the list."""
            return any(utils.match(pollster.name, pattern) for
                       pattern in pollster_list)

        if type(namespaces) is not list:
            namespaces = [namespaces]

        # we'll have default ['compute', 'central'] here if no namespaces will
        # be passed
        extensions = (self._extensions('poll', namespace).extensions
                      for namespace in namespaces)
        # get the extensions from pollster builder
        extensions_fb = (self._extensions_from_builder('poll', namespace)
                         for namespace in namespaces)
        # Restrict both extension sources to the requested pollsters.
        if pollster_list:
            extensions = (moves.filter(_match, exts)
                          for exts in extensions)
            extensions_fb = (moves.filter(_match, exts)
                             for exts in extensions_fb)

        # Flatten both (lazy) sources into one concrete list.
        self.extensions = list(itertools.chain(*list(extensions))) + list(
            itertools.chain(*list(extensions_fb)))

        if self.extensions == []:
            raise EmptyPollstersList()

        self.discovery_manager = self._extensions('discover')
        self.context = context.RequestContext('admin', 'admin', is_admin=True)
        self.partition_coordinator = coordination.PartitionCoordinator()

        # Compose coordination group prefix.
        # We'll use namespaces as the basement for this partitioning.
        namespace_prefix = '-'.join(sorted(namespaces))
        self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
                             if group_prefix else namespace_prefix)

        self.notifier = oslo_messaging.Notifier(
            messaging.get_transport(),
            driver=cfg.CONF.publisher_notifier.telemetry_driver,
            publisher_id="ceilometer.polling")

        # Keystone client is created lazily; the last failure is cached.
        self._keystone = None
        self._keystone_last_exception = None
开发者ID:thunderleo,项目名称:ceilometer,代码行数:57,代码来源:manager.py


示例18: startup

    def startup(self, group):
        """ Prepare for new run. """

        # Keep only the paths that pass this recorder's include/exclude
        # check, one list per variable category.
        params = [name for name in group.params if self._check_path(name)]
        unknowns = [name for name in group.unknowns if self._check_path(name)]
        resids = [name for name in group.resids if self._check_path(name)]

        self._filtered[group.pathname] = (params, unknowns, resids)
开发者ID:seanmwu,项目名称:OpenMDAO,代码行数:9,代码来源:baserecorder.py


示例19: purge_old

 def purge_old(self):
     """Drop log entries older than `purge_elapsed` seconds, then refresh
     the cached counters and the start point."""
     cutoff = time.time() - self.purge_elapsed
     self.failed_log = [ts for ts in self.failed_log if ts > cutoff]
     self.succeeded_log = [ts for ts in self.succeeded_log if ts > cutoff]
     self.failed_cnt = len(self.failed_log)
     self.succeed_cnt = len(self.succeeded_log)
     self.start_point = self._begin_log_ts()
开发者ID:douban,项目名称:dpark,代码行数:9,代码来源:hostatus.py


示例20: load_numpy

def load_numpy(max_pos=-1, max_neg=-1, *, unpack = False, scale_to_largest_image=False, scale_to_size=None):
    """Load training images from the zip at ``train_zip_path`` as arrays.

    Args:
        max_pos: max positive (cat) examples; -1 means unlimited.
        max_neg: max negative examples; -1 means unlimited unless
            max_pos is set, in which case it mirrors max_pos (see below).
        unpack: unpacking to the local FS is not implemented; must be False.
        scale_to_largest_image: scale every image to the largest one found
            in the zip (mutually exclusive with scale_to_size).
        scale_to_size: explicit (h, w) to scale images to; when neither
            scaling option is given, a hardcoded [768, 1024] is used.

    Returns:
        (X, y): stacked image array and a 0/1 label array.
        NOTE(review): assumes util.process_img_from_file returns rows
        stackable by np.vstack -- confirm against util's implementation.
    """
    # If you specify the maximum number of positive examples but not the maximum
    # number of negative examples, it tries to return a 50/50 split. That may not
    # actually make sense
    if max_neg==-1 and max_pos>-1:
        max_neg = max_pos

    if unpack:
        raise Exception('Unpacking to local FS is not supported yet')

    if scale_to_size is not None and scale_to_largest_image:
        raise Exception('Specify scaling to the largest image OR a size to scale to.')

    # Debatable whether this is actually needed.
    if scale_to_size is None and scale_to_largest_image:
        with zipfile.ZipFile(train_zip_path) as z:
            # Lazy filter over the archive's member names (.jpg only).
            filelist = filter(jpg_re.match, z.namelist())
            scale_to_size = util.find_largest_image_in_zip(z, filelist)

    if scale_to_size is None and not scale_to_largest_image:
        # silently changing to a hardcoded value, beware!
        scale_to_size = [768,1024]

    n_pos = 0
    n_neg = 0
    X = list()
    y = list()
    # Actually do the load
    with zipfile.ZipFile(train_zip_path) as z:
        filelist = filter(jpg_re.match, z.namelist())
        for f in filelist:
            # Positive (cat) example, while under the positive quota.
            # Note: with max_pos == -1 the quota is effectively unlimited.
            if cat_re.match(f) and n_pos != max_pos:
                y.append(1)
                n_pos += 1
                X.append(
                    util.process_img_from_file(
                        BytesIO(z.read(f)),
                        resize_dims = scale_to_size
                    )
                )
            # Negative example, while under the negative quota.
            elif (not cat_re.match(f)) and n_neg != max_neg:
                y.append(0)
                n_neg += 1
                X.append(
                    util.process_img_from_file(
                        BytesIO(z.read(f)),
                        resize_dims = scale_to_size
                    )
                )
            # Both quotas met: stop reading the archive early.
            if n_pos == max_pos and n_neg == max_neg:
                break

    return (np.vstack(X), np.array(y
    ))
开发者ID:nlzimmerman,项目名称:catfinder,代码行数:54,代码来源:load_train.py



注:本文中的six.moves.filter函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python moves.filterfalse函数代码示例发布时间:2022-05-27
下一篇:
Python moves.cStringIO函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap