• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python all函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中all函数的典型用法代码示例。如果您正苦于以下问题:Python all函数的具体用法?Python all怎么用?Python all使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了all函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_with_shift_that_spans_previous_midnight

def test_with_shift_that_spans_previous_midnight():
    """
    Pack shifts into tabular form when the first shift began before midnight.

    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
    *-*-*-**]
            [*-*-*-*-*]
                      [*-*-*-*-*]
                                [*-*-*-*-*]
    """
    shifts = []
    # 5 hour shifts, back to back; the first starts at 23:00 yesterday, so
    # only 4 of its hours fall inside today's grid (see diagram above).
    shifts.append(ShiftDict(start_time=yesterday_at_hour(23), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(4), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(9), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(14), shift_minutes=5 * HOUR))

    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())
    assert get_num_columns(data) == ONE_DAY

    assert_columns_all_at_correct_location(data)

    # 4 cells carry the shifts; the remaining 5*HOUR cells are 1-column fillers.
    assert len(data) == 5 * HOUR + 4
    # The midnight-spanning shift only contributes its 4 in-day hours.
    assert data[0]['columns'] == 4 * HOUR
    # `all` consumes the generator directly; wrapping it in list() was redundant.
    assert all(c['columns'] == 5 * HOUR for c in data[1:4])
    assert all(c['columns'] == 1 for c in data[4:])
开发者ID:pipermerriam,项目名称:voldb,代码行数:25,代码来源:test_grid_packing.py


示例2: invest

def invest(positions, num_trials):
    '''Return a DataFrame of percentage returns of the form [trial(i), position(j)],
    where entry [i, j] represents the i'th independent trial of buying 'j' investments
    of value 1000/'j' in an instrument that returns double 51% and zero 49% of the time.

    Raises (all project-defined):
        NotListError      -- `positions` is not exactly a list.
        NotNumError       -- an element of `positions` is not an int/float.
        NotIntError       -- an element of `positions` is not a whole number.
        InvalidPosError   -- an element of `positions` is outside (0, 1000].
        TrialNotNumError  -- `num_trials` is not an int/float.
        TrialNegError     -- `num_trials` is not positive.
    '''
    # Exception handling (positions argument).
    # NOTE: exact type() checks are deliberate -- unlike isinstance, they also
    # reject bool (an int subclass) and list subclasses, preserving the
    # original strict validation behavior.
    if type(positions) != list:
        raise NotListError
    if not all(type(x) == int or type(x) == float for x in positions):
        raise NotNumError
    if not all(x % 1 == 0.0 for x in positions):
        raise NotIntError
    if not all(0 < x <= 1000 for x in positions):
        raise InvalidPosError

    # Exception handling (num_trials argument).
    if type(num_trials) != int and type(num_trials) != float:
        raise TrialNotNumError
    if num_trials <= 0:
        raise TrialNegError

    # Program: one column per position size, one row per trial.
    position_value = 1000 / np.array(positions)
    cumu_ret = DataFrame(columns=positions, index=np.arange(1, num_trials + 1))

    for i in position_value:
        # Recover the original position count (column label) from the value.
        col = 1000 / i
        cumu_ret[col] = col
        # calcCumRet (defined elsewhere) simulates the trial outcomes.
        cumu_ret[col] = cumu_ret[col].map(calcCumRet)
    # Convert cumulative value (base 1000) to fractional return.
    daily_ret = (cumu_ret / 1000) - 1
    return daily_ret
开发者ID:im965,项目名称:assignment8,代码行数:31,代码来源:invest.py


示例3: test_history

    def test_history(self, manager, my_vcr):
        """history() must return PipelineInstance objects, all belonging to
        the requested pipeline (recorded via the VCR cassette)."""
        with my_vcr.use_cassette("pipeline/history_Consumer_Website"):
            name = "Consumer_Website"
            result = manager.history(name)

            # Every entry is a PipelineInstance ...
            assert all(isinstance(item, pipeline.PipelineInstance) for item in result)
            # ... and each one names the pipeline we asked about.
            assert all(item.data.name == name for item in result)
开发者ID:barrowkwan,项目名称:yagocd,代码行数:7,代码来源:test_pipeline_manager.py


示例4: add_full_barcode_adapter_sets

def add_full_barcode_adapter_sets(matching_sets):
    """
    This function adds some new 'full' adapter sequences based on what was already found. For
    example, if the ligation adapters and the reverse barcode adapters are found, it assumes we are
    looking at a native barcoding run and so it adds the complete native barcoding adapter
    sequences (with the barcode's upstream and downstream context included).
    """
    # Snapshot the names present *before* we start appending full sequences,
    # so membership tests are not affected by our own additions.
    present = {x.name for x in matching_sets}

    for i in range(1, 97):
        reverse_name = 'Barcode ' + str(i) + ' (reverse)'
        forward_name = 'Barcode ' + str(i) + ' (forward)'

        # Native barcode full sequences
        if present >= {'SQK-NSK007', reverse_name}:
            matching_sets.append(make_full_native_barcode_adapter(i))

        # Rapid barcode full sequences
        if present >= {'SQK-NSK007', 'Rapid', forward_name}:
            matching_sets.append(make_full_rapid_barcode_adapter(i))

        # Added for test
        # PCR barcode full sequences
        if present >= {'PCR', forward_name}:
            matching_sets.append(make_full_PCR_barcode_adapter(i))

    return matching_sets
开发者ID:GenomicParisCentre,项目名称:dockerfiles,代码行数:27,代码来源:porechop_patched.py


示例5: test_multiple_problems

    def test_multiple_problems(self):
        """Run one independent single-process Problem per MPI rank, then
        allgather the outputs and cross-check every rank's result."""
        if MPI:
            # split the comm and run an instance of the Problem in each subcomm
            # (Split keyed on rank gives each rank its own 1-process comm).
            subcomm = self.comm.Split(self.comm.rank)
            # NOTE(review): `impl` comes from module scope -- presumably the
            # vector implementation chosen for this test file; confirm there.
            prob = Problem(Group(), impl=impl, comm=subcomm)

            size = 5
            # Each rank feeds a different input (rank+1) so results must differ.
            value = self.comm.rank + 1
            values = np.ones(size)*value

            A1 = prob.root.add('A1', IndepVarComp('x', values))
            C1 = prob.root.add('C1', ABCDArrayComp(size))

            # Same source drives both inputs a and b.
            prob.root.connect('A1.x', 'C1.a')
            prob.root.connect('A1.x', 'C1.b')

            prob.setup(check=False)
            prob.run()

            # check the first output array and store in result
            # (expected c == 2*x; presumably ABCDArrayComp computes c = a + b
            #  -- confirm against its definition.)
            self.assertTrue(all(prob['C1.c'] == np.ones(size)*(value*2)))
            result = prob['C1.c']

            # gather the results from the separate processes/problems and check
            # for expected values
            results = self.comm.allgather(result)
            self.assertEqual(len(results), self.comm.size)

            for n in range(self.comm.size):
                expected = np.ones(size)*2*(n+1)
                self.assertTrue(all(results[n] == expected))
开发者ID:theomission,项目名称:OpenMDAO,代码行数:31,代码来源:test_mpi.py


示例6: test_add_strategy_with_setitem

  def test_add_strategy_with_setitem(self):
    """Strategies added via __setitem__: keys bound to the same callable
    collapse into one multi-named strategy, len() counts strategies (not
    names), and deletion by key or attribute updates the default."""
    sdict = StrategyDict("sdict")
    sdict["add"] = operator.add
    sdict["mul"] = operator.mul
    sdict["+"] = operator.add

    # "add" and "+" share the same callable, so they form ONE strategy
    # with two names -- hence len == 2, not 3.
    assert len(sdict) == 2
    assert set(sdict.keys()) == {("add", "+"), ("mul",)}
    assert all(name in dir(sdict) for name in {"add", "+", "mul"})
    assert all(name in vars(sdict) for name in {"add", "+", "mul"})

    assert sdict.add(2, 3) == 5 == sdict["add"](2, 3)
    assert sdict.mul(2, 3) == 6 == sdict["mul"](2, 3)
    # Calling the dict itself dispatches to the default strategy (here: add).
    assert sdict(7, 8) == 15 == sdict.default(7, 8)

    # Removing one alias keeps the strategy alive; removing its last name
    # (the attribute) drops the strategy entirely.
    del sdict["+"]
    assert len(sdict) == 2
    del sdict.add
    assert len(sdict) == 1
    # With the default's strategy gone, calls fall back to NotImplemented.
    assert sdict(7, 8) == NotImplemented == sdict.default(7, 8)

    sdict["pow"] = operator.pow
    assert len(sdict) == 2
    # NOTE(review): the newly added strategy appears to become the default
    # here -- confirm against StrategyDict's default-selection rule.
    assert sdict(2, 3) == 8 == sdict.default(2, 3)
    assert sdict.pow(5, 2) == 25 == sdict["pow"](5, 2)
开发者ID:danilobellini,项目名称:audiolazy,代码行数:25,代码来源:test_core.py


示例7: _parse_table_name

    def _parse_table_name(self, table_id):
        """Parse a table name in the form of appid_YYYY_MM or
        YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.

        Args:
            table_id: The table id as listed by BigQuery.

        Returns:
            Tuple containing year/month and app id. Returns None, None if the
            table id cannot be parsed.
        """

        # Prefix date
        attributes = table_id.split('_')
        year_month = "-".join(attributes[:2])
        app_id = "-".join(attributes[2:])

        # Check if date parsed correctly
        if year_month.count("-") == 1 and all(
                [num.isdigit() for num in year_month.split('-')]):
            return year_month, app_id

        # Postfix date
        attributes = table_id.split('_')
        year_month = "-".join(attributes[-2:])
        app_id = "-".join(attributes[:-2])

        # Check if date parsed correctly
        if year_month.count("-") == 1 and all(
                [num.isdigit() for num in year_month.split('-')]):
            return year_month, app_id

        return None, None
开发者ID:liadliv,项目名称:BigQuery-Python,代码行数:33,代码来源:client.py


示例8: from_list

  def from_list(index, queues):
    """Create a queue using the queue reference from `queues[index]`.

    Args:
      index: An integer scalar tensor that determines the input that gets
        selected.
      queues: A list of `QueueBase` objects.

    Returns:
      A `QueueBase` object.

    Raises:
      TypeError: When `queues` is not a list of `QueueBase` objects,
        or when the data types of `queues` are not all the same.
    """
    # Validate up front: a non-empty Python list, all elements QueueBase.
    if ((not queues) or
        (not isinstance(queues, list)) or
        (not all(isinstance(x, QueueBase) for x in queues))):
      raise TypeError("A list of queues expected")

    # Every queue must agree on component dtypes with the first one.
    # (`all` takes the generator directly; no intermediate list needed.)
    dtypes = queues[0].dtypes
    if not all(dtypes == q.dtypes for q in queues[1:]):
      raise TypeError("Queues do not have matching component dtypes.")

    queue_refs = [x.queue_ref for x in queues]
    selected_queue = control_flow_ops.ref_select(index, queue_refs)
    # TODO(josh11b): Unify the shapes of the queues too?
    return QueueBase(dtypes=dtypes, shapes=None, queue_ref=selected_queue)
开发者ID:DapengLan,项目名称:tensorflow,代码行数:28,代码来源:data_flow_ops.py


示例9: check_grade

 def check_grade(self, test_output, truth_output, test_input):
     """Return True iff test_output announces exactly the grade (or failure)
     found in truth_output -- the right positive claim and no other grade claim.

     Raises:
         Exception: no recognizable answer in truth_output.
         ValueError: the answer extracted from truth_output is unknown.
     """
     def get_ans(output):
         # Extract either a letter grade A-D or the literal 'failed'.
         pat = re.compile('Student has an ([A-D]) grade', re.IGNORECASE)
         m = pat.search(output)
         if m is None:
             p2 = re.compile('Student has (failed) the course', re.IGNORECASE)
             m2 = p2.search(output)
             if m2 is None:
                 raise Exception("GRADE: no answer found")
             return m2.group(1)
         return m.group(1)

     truth_answer = get_ans(truth_output)
     grades = {'A', 'B', 'C', 'D'}
     grade_patterns = {g: re.compile("Student has an {} grade".format(g),
                                     re.IGNORECASE)
                       for g in grades}
     if truth_answer in grades:
         # No *other* letter grade may be claimed in the test output.
         no_neg_match = all(rgx.search(test_output) is None
                            for g, rgx in grade_patterns.items()
                            if g != truth_answer)
         pos_match = grade_patterns[truth_answer].search(test_output)
     elif truth_answer == 'failed':
         failure_rgx = re.compile("Student has failed the course",
                                  re.IGNORECASE)
         # No letter grade at all may be claimed alongside a failure.
         no_neg_match = all(rgx.search(test_output) is None
                            for g, rgx in grade_patterns.items())
         pos_match = failure_rgx.search(test_output)
     else:
         raise ValueError("Unknown grades truth {}".format(truth_answer))
     return pos_match is not None and no_neg_match
开发者ID:88rabbit,项目名称:SearchRepair,代码行数:30,代码来源:genprog_tests.py


示例10: compilable

    def compilable(cls, clf):
        """
        Report whether the fitted model `clf` can be compiled.

        Returns True only for supported regressor types (single-output
        decision trees, and gradient-boosting / forest ensembles whose
        every member is itself compilable); False for anything else.

        Parameters
        ----------

        clf:
          A fitted regression tree/ensemble.


        """
        # TODO - is there an established way to check `is_fitted``?
        if isinstance(clf, DecisionTreeRegressor):
            # Single output, single "class", and an actual fitted tree.
            return (clf.n_outputs_ == 1
                    and clf.n_classes_ == 1
                    and clf.tree_ is not None)

        if isinstance(clf, GradientBoostingRegressor):
            stages = clf.estimators_
            return stages.size and all(cls.compilable(member)
                                       for member in stages.flat)

        if isinstance(clf, ForestRegressor):
            members = np.asarray(clf.estimators_)
            return members.size and all(cls.compilable(member)
                                        for member in members.flat)

        return False
开发者ID:arnabkd,项目名称:sklearn-compiledtrees,代码行数:28,代码来源:compiled.py


示例11: sanitize_indices

def sanitize_indices(indices):
    """Check and possibly sanitize indices.

    Parameters
    ----------
    indices : int, slice, or sequence of ints and slices
        If an int or slice is passed in, it is converted to a
        1-tuple.

    Returns
    -------
    2-tuple
        ('point', indices) if all `indices` are ints, or
        ('view', indices) if some `indices` are slices.

    Raises
    ------
    TypeError
        If `indices` is not all ints or slices.
    """

    if isinstance(indices, (int, slice)):
        # Promote a bare int/slice to a 1-tuple and classify it below.
        return sanitize_indices((indices,))
    elif all(isinstance(i, int) for i in indices):
        # Pure-int index: selects a single point.
        return 'point', indices
    elif all(isinstance(i, (int, slice)) for i in indices):
        # At least one slice: selects a view.
        return 'view', indices
    else:
        raise TypeError("Index must be a sequence of ints and slices")
开发者ID:imclab,项目名称:distarray,代码行数:29,代码来源:utils.py


示例12: _non_dominated_front_old

def _non_dominated_front_old(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable."""
    items = list(iterable)
    keys = dict((i, key(i)) for i in items)
    dim = len(keys.values()[0])
    if any(dim != len(k) for k in keys.values()):
        raise ValueError("Wrong tuple size.")

    # Make a dictionary that holds the items another item dominates.
    dominations = collections.defaultdict(lambda: [])
    for i in items:
        for j in items:
            if allowequality:
                if all(keys[i][k] < keys[j][k] for k in xrange(dim)):
                    dominations[i].append(j)
            else:
                if all(keys[i][k] <= keys[j][k] for k in xrange(dim)):
                    dominations[i].append(j)

    dominates = lambda i, j: j in dominations[i]

    res = set()
    items = set(items)
    for i in items:
        res.add(i)
        for j in list(res):
            if i is j:
                continue
            if dominates(j, i):
                res.remove(i)
                break
            elif dominates(i, j):
                res.remove(j)
    return res
开发者ID:PHPDOTSQL,项目名称:pybrain,代码行数:35,代码来源:nondominated.py


示例13: test_default_instance_initialize

def test_default_instance_initialize():
    """
    Testing the default _instance_initialize provided by module.
    """

    class Leaf(Module):
        # A module holding three plain Theano variables.
        def __init__(self):
            super(Leaf, self).__init__()
            self.a = T.dscalar()
            self.b = T.lscalar()
            self.c = T.lvector()

    class Root(Module):
        def __init__(self):
            super(Root, self).__init__()
            self.a = T.lscalar()
            self.x = Leaf()
            self.y = self.x  # alias: y refers to the same submodule as x
            self.z = Leaf()

    m = Root().make(a = 13,
                    x = dict(a = 1, b = 2, c = [3, 4]),
                    z = dict(a = 5, b = 6, c = [7, 8]))

    # Top-level scalar initialized directly.
    assert m.a == 13

    # x was initialized from its dict ...
    assert m.x.a == 1
    assert m.x.b == 2
    assert all(m.x.c == [3, 4])

    # ... and y aliases x, so it must see the very same values.
    assert m.y.a == 1
    assert m.y.b == 2
    assert all(m.y.c == [3, 4])

    # z is an independent submodule with its own values.
    assert m.z.a == 5
    assert m.z.b == 6
    assert all(m.z.c == [7, 8])
开发者ID:SinaHonari,项目名称:Theano,代码行数:34,代码来源:test_module.py


示例14: test_with_shift_that_spans_upcoming_midnight

def test_with_shift_that_spans_upcoming_midnight():
    """
    Pack shifts into tabular form when the last shift runs past midnight.

    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
              [*-*-*-*-*]
                        [*-*-*-*-*]
                                  [*-*-*-*-*]
                                            [*-*-*-*-
    """
    shifts = []
    # 5 hour shifts, back to back; the last one starts at 20:00 and crosses
    # midnight, so only 4 of its hours land inside today's grid.
    shifts.append(ShiftDict(start_time=today_at_hour(5), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(10), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(15), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(20), shift_minutes=5 * HOUR))

    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())

    assert get_num_columns(data) == ONE_DAY

    assert_columns_all_at_correct_location(data)

    # 4 cells carry the shifts; the rest are 1-column fillers before 05:00.
    assert len(data) == 5 * HOUR + 4
    assert all(c['columns'] == 1 for c in data[:5 * HOUR])
    assert all(c['columns'] == 5 * HOUR for c in data[5 * HOUR:5 * HOUR + 3])
    # The midnight-spanning shift only contributes its 4 in-day hours.
    assert data[-1]['columns'] == 4 * HOUR
开发者ID:pipermerriam,项目名称:voldb,代码行数:26,代码来源:test_grid_packing.py


示例15: test_http_pool_key_fields

    def test_http_pool_key_fields(self):
        """Assert the HTTPPoolKey fields are honored when selecting a pool."""
        connection_pool_kw = {
            'timeout': timeout.Timeout(3.14),
            'retries': retry.Retry(total=6, connect=2),
            'block': True,
            'strict': True,
            'source_address': '127.0.0.1',
        }
        manager = PoolManager()
        pools = [
            manager.connection_from_url('http://example.com/'),
            manager.connection_from_url('http://example.com:8000/'),
            manager.connection_from_url('http://other.example.com/'),
        ]

        # Changing any single keyword must produce a brand-new pool for the
        # same URL, since each field participates in the pool key.
        for field, value in connection_pool_kw.items():
            manager.connection_pool_kw[field] = value
            pools.append(manager.connection_from_url('http://example.com/'))

        # Every pool collected above must be a distinct object ...
        assert all(
            first is not second
            for a, first in enumerate(pools)
            for b, second in enumerate(pools)
            if a != b
        )
        # ... and every cache key must be a PoolKey instance.
        assert all(isinstance(key, PoolKey) for key in manager.pools.keys())
开发者ID:NickMinnellaCS96,项目名称:urllib3,代码行数:27,代码来源:test_poolmanager.py


示例16: test_directory_children

  def test_directory_children(self):
    """Exercise the /desktop/api2/docs listing of the home directory:
    plain children listing, the `type` filter, `text` search, and
    limit/page pagination."""
    # Creates 2 directories and 2 queries and saves to home directory
    dir1 = Directory.objects.create(name='test_dir1', owner=self.user)
    dir2 = Directory.objects.create(name='test_dir2', owner=self.user)
    query1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={})
    query2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={})
    children = [dir1, dir2, query1, query2]

    self.home_dir.children.add(*children)

    # Test that all children directories and documents are returned
    response = self.client.get('/desktop/api2/docs', {'path': '/'})
    data = json.loads(response.content)
    assert_true('children' in data)
    assert_equal(5, data['count'])  # This includes the 4 docs and .Trash

    # Test filter type
    # (count 3 = the 2 created dirs plus .Trash, which is also a directory)
    response = self.client.get('/desktop/api2/docs', {'path': '/', 'type': ['directory']})
    data = json.loads(response.content)
    assert_equal(['directory'], data['types'])
    assert_equal(3, data['count'])
    assert_true(all(doc['type'] == 'directory' for doc in data['children']))

    # Test search text
    response = self.client.get('/desktop/api2/docs', {'path': '/', 'text': 'query'})
    data = json.loads(response.content)
    assert_equal('query', data['text'])
    assert_equal(2, data['count'])
    assert_true(all('query' in doc['name'] for doc in data['children']))

    # Test pagination with limit
    # (count stays 5 overall, but page 2 with limit 2 returns 2 children)
    response = self.client.get('/desktop/api2/docs', {'path': '/', 'page': 2, 'limit': 2})
    data = json.loads(response.content)
    assert_equal(5, data['count'])
    assert_equal(2, len(data['children']))
开发者ID:san21886,项目名称:hue,代码行数:35,代码来源:tests_doc2.py


示例17: test_strategies_names_introspection

  def test_strategies_names_introspection(self):
    """Every name given to a strategy becomes a dict key and an attribute,
    but only the FIRST name becomes the strategy's __name__, and the first
    registered strategy acts as the default."""
    sd = StrategyDict()
    sd.strategy("first", "abc")(lambda val: "abc" + val)
    sd.strategy("second", "def")(lambda val: "def" + val) # Neglect 2nd name
    sd.strategy("third", "123")(lambda val: "123" + val) # Neglect 2nd name

    # Nothing new here: strategies do what they should...
    assert sd("x") == "abcx"
    assert sd.default("p") == "abcp"

    # Primary names work both as attributes and as keys.
    assert sd.first("w") == "abcw" == sd["first"]("w")
    assert sd.second("zsc") == "defzsc" == sd["second"]("zsc")
    assert sd.third("blah") == "123blah" == sd["third"]("blah")

    # Secondary names work too ("def" and "123" only as keys -- "def" is a
    # keyword and "123" is not a valid identifier for attribute access).
    assert sd.abc("y") == "abcy" == sd["abc"]("y")
    assert sd["def"]("few") == "deffew"
    assert sd["123"]("lots") == "123lots"

    # Valid names for attributes
    all_names = {"first", "second", "third", "abc", "def", "123"}
    assert all(name in dir(sd) for name in all_names)
    assert all(name in vars(sd) for name in all_names)
    assert "default" in dir(sd)
    assert "default" in vars(sd)
    all_keys_tuples = sd.keys()
    all_keys = reduce(operator.concat, all_keys_tuples)
    assert set(all_keys) == all_names # Default not in keys
    assert set(all_keys_tuples) == {("first", "abc"),
                                    ("second", "def"),
                                    ("third", "123")}

    # First name is the __name__
    assert sd["abc"].__name__ == "first"
    assert sd["def"].__name__ == "second"
    assert sd["123"].__name__ == "third"
开发者ID:danilobellini,项目名称:audiolazy,代码行数:35,代码来源:test_core.py


示例18: _module_quotient

 def _module_quotient(self, other, relations=False):
     """Compute the module quotient (self : other), returned as an ideal.

     With ``relations=True`` (only implemented when ``other`` has exactly
     one generator) also return the coefficient relations for each
     quotient generator.

     NOTE(review): the Groebner computations below are sensitive to the
     monomial order ('ilex' elimination order is required); the code is
     left untouched and only documented here.
     """
     # See: [SCA, section 2.8.4]
     if relations and len(other.gens) != 1:
         raise NotImplementedError
     if len(other.gens) == 0:
         # Quotient by the zero module is the whole ring (unit ideal).
         return self.ring.ideal(1)
     elif len(other.gens) == 1:
         # We do some trickery. Let f be the (vector!) generating ``other``
         # and f1, .., fn be the (vectors) generating self.
         # Consider the submodule of R^{r+1} generated by (f, 1) and
         # {(fi, 0) | i}. Then the intersection with the last module
         # component yields the quotient.
         g1 = list(other.gens[0]) + [1]
         gi = [list(x) + [0] for x in self.gens]
         # NOTE: We *need* to use an elimination order
         M = self.ring.free_module(self.rank + 1).submodule(*([g1] + gi),
                                         order='ilex', TOP=False)
         if not relations:
             # Keep only Groebner elements supported entirely in the last
             # component; their last coordinates generate the quotient.
             return self.ring.ideal(*[x[-1] for x in M._groebner_vec() if
                                      all(y == self.ring.zero for y in x[:-1])])
         else:
             G, R = M._groebner_vec(extended=True)
             indices = [i for i, x in enumerate(G) if
                        all(y == self.ring.zero for y in x[:-1])]
             return (self.ring.ideal(*[G[i][-1] for i in indices]),
                     [[-x for x in R[i][1:]] for i in indices])
     # For more generators, we use I : <h1, .., hn> = intersection of
     #                                    {I : <hi> | i}
     # TODO this can be done more efficiently
     return reduce(lambda x, y: x.intersect(y),
         (self._module_quotient(self.container.submodule(x)) for x in other.gens))
开发者ID:abhi98khandelwal,项目名称:sympy,代码行数:31,代码来源:modules.py


示例19: _test_column_grouping

def _test_column_grouping(m=10, n=5000, num_repeat=5, verbose=False):
    """Smoke-test and benchmark the two column-grouping implementations.

    Compares _column_group_loop against _column_group_recursive on a fixed
    example (with a known expected grouping) and on `num_repeat` random
    m-by-n boolean matrices, printing 'OK'/'Fail' for each comparison and
    the elapsed times when `verbose` is set.
    """

    def _same_groups(groups_a, groups_b):
        # Groupings match when they have the same number of index arrays and
        # each pair of arrays is element-wise equal.  (The original zip-only
        # comparison silently passed when one grouping had extra groups.)
        return (len(groups_a) == len(groups_b) and
                all(np.array_equal(a, b) for (a, b) in zip(groups_a, groups_b)))

    print('\nTesting column_grouping ...')
    # Hand-built example: columns {0}, {1,3,4}, {2} form the expected groups.
    A = np.array([[True, False, False, False, False],
                  [True, True, False, True, True]])
    grps1 = _column_group_loop(A)
    grps2 = _column_group_recursive(A)
    grps3 = [np.array([0]),
             np.array([1, 3, 4]),
             np.array([2])]
    print('OK' if _same_groups(grps1, grps2) else 'Fail')
    print('OK' if _same_groups(grps1, grps3) else 'Fail')

    # Randomized cross-checks (also serve as a rough benchmark).
    for _ in range(num_repeat):
        A = np.random.rand(m, n)
        B = A > 0.5
        start = time.time()
        grps1 = _column_group_loop(B)
        elapsed_loop = time.time() - start
        start = time.time()
        grps2 = _column_group_recursive(B)
        elapsed_recursive = time.time() - start
        if verbose:
            print('Loop     :', elapsed_loop)
            print('Recursive:', elapsed_recursive)
        print('OK' if _same_groups(grps1, grps2) else 'Fail')
    return
开发者ID:aremirata,项目名称:nonnegfac-python,代码行数:30,代码来源:nnls.py


示例20: insert_atomic_inputs

  def insert_atomic_inputs(self, atomic_inputs, events_list=None):
    '''Insert inputs into events_list in the same relative order as the
    original events list. This method is needed because set union as used in
    delta debugging does not make sense for event sequences (events are ordered)'''
    # Note: events_list should never be None (I think), since it does not make
    # sense to insert inputs into the original sequence that are already present
    if events_list is None:
      raise ValueError("Shouldn't be adding inputs to the original trace")

    inputs = self._expand_atomics(atomic_inputs)

    if not all(e in self._event2idx for e in inputs):
      # BUG FIX: this message originally iterated over `input` (the builtin)
      # instead of `inputs`, so it crashed or listed the wrong events.
      raise ValueError("Not all inputs present in original events list %s" %
                       [e for e in inputs if e not in self._event2idx])
    if not all(e in self._event2idx for e in events_list):
      raise ValueError("Not all events in original events list %s" %
                       [e for e in events_list if e not in self._event2idx])

    # Merge `inputs` into `events_list`, ordering by original trace position.
    result = []
    for successor in events_list:
      orig_successor_idx = self._event2idx[successor]
      while len(inputs) > 0 and orig_successor_idx > self._event2idx[inputs[0]]:
        # If the current successor did in fact come after the next input in the
        # original trace, insert next input here
        # (renamed from `input` to avoid shadowing the builtin).
        next_input = inputs.pop(0)
        result.append(next_input)
      result.append(successor)

    # Any remaining inputs should be appended at the end -- they had no
    # successors
    result += inputs
    # Deal with newly added host migrations
    result = self._straighten_inserted_migrations(result)
    return EventDagView(self, result)
开发者ID:StonyBrookUniversity,项目名称:sts,代码行数:34,代码来源:event_dag.py



注:本文中的all函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python any函数代码示例发布时间:2022-05-24
下一篇:
Python accions函数代码示例发布时间:2022-05-24
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap