• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python utils.flatten函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中utils.flatten函数的典型用法代码示例。如果您正苦于以下问题:Python flatten函数的具体用法?Python flatten怎么用?Python flatten使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了flatten函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: p_program

 def p_program(self, p):
     """program : declarations fundefs instructions"""
     # PLY parser action for the start symbol: assemble the AST root.
     # NOTE: the docstring above is the grammar rule itself — PLY reads it,
     # so it must not be edited as ordinary documentation.
     p[0] = AST.Program(AST.Declarations.mapTypedDeclarations(p[1]), utils.flatten(p[2]),
                        AST.Instructions(utils.flatten(p[3])))
     # Wire up parent links and scopes before recording the source position.
     p[0].set_parents()
     p[0].set_scope()
     # Position is taken from the nested lexer object (p.lexer.lexer).
     p[0].set_position(p.lexer.lexer.lineno, p.lexer.lexer.lexpos)
开发者ID:sebcioz,项目名称:kompilator,代码行数:7,代码来源:Parser.py


示例2: mergedict

def mergedict(dicts):
    """Merge a list of dicts describing the same entity into a single dict.

    Scalar values that agree collapse to one value (disagreements become a
    list); nested dicts merge recursively; the domain-specific list keys
    (interfaces, arp, neighbours, v4routes, bridges, addresses — presumably
    network-device records) are merged by their natural keys via
    mergedict_bykeys.  Raises MergeError for a key it does not know how to
    merge.  (Python 2 code: uses `unicode` and `long`.)
    """
    result = {}
    # Every key that occurs in at least one input dict.
    keys = set(flatten([d.keys() for d in dicts]))
    for k in keys:
        # All truthy values recorded for this key across the inputs.
        vals = [v for v in [d.get(k) for d in dicts] if v]
        if len(vals) == 0:
            continue
        if k in ("_id", "timestamp"):
            # Per-document bookkeeping fields; not meaningful after a merge.
            continue
        if isinstance(vals[0], dict):
            # Nested documents merge recursively.
            result[k] = mergedict(vals)
        elif isinstance(vals[0], (str, unicode, int, bool, long)):
            # Scalars: deduplicate; keep a list only when values disagree.
            v = set(flatten(vals))
            if len(v) == 1:
                result[k] = v.pop()
            else:
                result[k] = list(v)
        elif isinstance(vals[0], (list, tuple)) and isinstance(vals[0][0], (str, unicode, int, bool)):
            # Lists/tuples of scalars: deduplicated union.
            result[k] = list(set(flatten(vals)))
        elif k == "interfaces":
            result[k] = mergedict_bykeys(vals, "ifindex")
        elif k == "arp":
            result[k] = mergedict_bykeys(vals, "mac", "v4addr")
        elif k == "neighbours":
            result[k] = mergedict_bykeys(vals, "v4addr")
        elif k == "v4routes":
            result[k] = mergedict_bykeys(vals, "network")
        elif k == "bridges":
            result[k] = mergedict_bykeys(vals, "name")
        elif k == "addresses":
            result[k] = mergedict_bykeys(vals, "mac")
        else:
            raise MergeError("unhandled key: %s" % k)
    return result
开发者ID:tegola-hubs,项目名称:dendria,代码行数:34,代码来源:storage.py


示例3: get_ref_pixels

def get_ref_pixels(ref_wvl, wvlsol0, x=None):
    """
    Given the list of wavelengths tuples, return expected pixel
    positions from the initial wavelength solution of wvlsol0.

    Returns a pandas DataFrame with columns wavelength / valid /
    group_flag / group_id, joined with a 'pixel' column for the lines
    that fall inside the wavelength solution's range.
    """

    if x is None:
        x = np.arange(len(wvlsol0))
    # Invert the wavelength solution: wavelength -> pixel coordinate.
    # Out-of-range wavelengths yield NaN (bounds_error=False).
    um2pixel = interp1d(wvlsol0, x, bounds_error=False)

    ref_pixel = [um2pixel(w) for w in ref_wvl]

    # there could be cases when the ref lines fall out of bounds,
    # resulting nans.
    nan_filter = [np.all(np.isfinite(p)) for p in ref_pixel]
    # Per-group validity, repeated to the group's length so it can be
    # flattened alongside the wavelengths.
    valid_list = [[np.all(np.isfinite(p))]*len(p) for p in ref_pixel]

    group_flags = get_group_flags(ref_wvl)
    df = pd.DataFrame(dict(wavelength=flatten(ref_wvl),
                           valid=flatten(valid_list),
                           group_flag=group_flags,
                           group_id=np.add.accumulate(group_flags)))

    # Attach pixel positions only for fully-finite groups; rows belonging
    # to invalid groups keep NaN after the join.
    ref_pixel_filtered = [r for r, m in zip(ref_pixel, nan_filter) if m]
    df2 = df.join(pd.DataFrame(dict(pixel=flatten(ref_pixel_filtered)),
                               index=df.index[flatten(valid_list)]))

    return df2
开发者ID:igrins,项目名称:plp,代码行数:29,代码来源:ref_lines_db.py


示例4: semantics

def semantics(doc):
  """Return a (flattened POS tags, noun phrases, flattened entities)
  triple extracted from `doc` via the preprocessing pipeline.
  """
  prep = preprocess(doc)
  pos_tags = flatten(prep.pos_tags())
  phrases = prep.noun_phrases()
  entities = flatten(prep.get_entities())
  return (pos_tags, phrases, entities)
开发者ID:jakobjoachim,项目名称:text-mining-haw-bachelor,代码行数:7,代码来源:data_import.py


示例5: fit

 def fit(self, X, y, copy=False):
     """Fit the model: delegate to IRSystem.fit, then cache label
     statistics and compute the prior/conditional distributions.
     """
     IRSystem.fit(self, X, y, copy)
     # Normalize targets to a list before inspecting them.
     self._target = list(self._target)
     # Unique label set and per-label frequency over all flattened targets.
     self._labels = tuple(set(flatten(self._target)))
     self._label_dist = Counter(flatten(self._target))
     self._target = np.array(self._target)
     self.compute_prior()
     self.compute_conditional()
开发者ID:dot-Sean,项目名称:dreams,代码行数:8,代码来源:IRSystem.py


示例6: count_intervals_in_all_songs

def count_intervals_in_all_songs(songs):
    """Print interval frequency tables — with and without direction —
    aggregated over every song.  (Python 2 code: print statements.)
    """
    # Pool each song's intervals into one flat list per variant.
    intervals_direction = utils.flatten([song.intervals_with_direction for song in songs])
    intervals = utils.flatten([song.intervals for song in songs])
    print "All Songs\n"
    print "  With direction"
    utils.print_dic_perc(Counter(intervals_direction))
    print "  Without direction"
    utils.print_dic_perc(Counter(intervals))
开发者ID:kroger,项目名称:uirapuru,代码行数:8,代码来源:intervals.py


示例7: global_region_weights

	def global_region_weights(self):
		"""Return default weights for all similarities, normalized.

		Each feature set contributes a leading weight of 1 followed by its
		flattened per-region weights; the concatenation of all feature
		sets' weights is normalized via utils.norm_list.
		"""
		# Improvements: removed two lines of dead commented-out code that
		# duplicated this logic, and used a list literal instead of list().
		self._load_check()  # ensure feature sets are loaded before use
		weights = []
		for fs in self.feature_sets:
			# [1] is the whole-set weight that precedes the region weights.
			weights.append([1] + utils.flatten(fs.region_weights()))
		return utils.norm_list(utils.flatten(weights))
开发者ID:waldol1,项目名称:formCluster,代码行数:10,代码来源:doc.py


示例8: PLS_Models

def PLS_Models(model_dict, validation_dict, target, **args):
    '''Create and test the prospective PLS models.

    Builds models with and/or without a midseason (julian-date) split,
    controlled by args['break_flag'] (1 suppresses split models, 0
    suppresses the no-split model, default 2 builds both), sweeping the
    requested specificity limits and thresholding methods.  Returns a
    list of Summarize(...) rows.
    '''

    '''Pick the model building parameters out of args'''
    try: break_flag = args['break_flag']   #Decide whether or not we are going to test models that include a midseason split
    except KeyError: break_flag = 2

    try: limits=utils.flatten([args['specificity']])
    except KeyError: limits = np.arange(11.)/100 + 0.85     #Default: test specificity limits from 0.85 to 0.95

    try: threshold=utils.flatten([args['threshold']])
    except KeyError: threshold=[0,1]                          #Default: threshold by counts

    # break_flag != 1 -> build the no-split model; != 0 -> build the wrapper
    # used for split models.
    if break_flag != 1: model=pls.Model( model_dict, model_target=target.lower() )
    if break_flag != 0: mw=pls.Model_Wrapper( data=model_dict, model_target=target.lower() )

    results = list()

    #Test models w/ midseason split
    for spec_lim in limits:
        for threshold_method in threshold:
            # Count-based thresholding is paired with the opposite balance method.
            if threshold_method==0: balance_method=1 #
            else: balance_method=0

            if break_flag != 0:
                '''mw.Generate_Models(breaks=1, specificity=spec_lim, wedge='julian', threshold_method=threshold_method, balance_method=balance_method)
                imbalance = mw.imbalance
                split_index = mlab.find(mw.imbalance[:,1] == np.min(mw.imbalance[:,1]))'''

                # Find the split date(s) minimizing imbalance (column 1),
                # then fit and summarize a model at each minimizing split.
                imbalance = pls_parallel.Tune_Split(mw, specificity=spec_lim, wedge='julian', threshold_method=threshold_method, balance_method=balance_method)
                split_index = mlab.find(imbalance[:,1] == np.min(imbalance[:,1]))

                for split in imbalance[split_index,0]:
                    mw.Split(wedge='julian', breakpoint=split)
                    mw.Assign_Thresholds(threshold_method=threshold_method, specificity=spec_lim)

                    summary = Summarize(mw, validation_dict, **args)
                    summary.insert( 1, balance_method)
                    summary.insert( 1, threshold_method)

                    results.append( summary )

    #Test models w/o midseason split
    if break_flag != 1:
        for spec_lim in limits:
            model.Threshold(specificity=spec_lim)

            summary = Summarize(model, validation_dict, **args)
            # No split => no threshold/balance method; pad columns with NaN
            # so rows align with the split-model summaries.
            summary.insert(1, np.nan)
            summary.insert(1, np.nan)

            results.append( summary )


    return results
开发者ID:mnfienen,项目名称:beach_gui,代码行数:55,代码来源:model_script.py


示例9: extract_links

def extract_links(br):
    """Extract FP related links from the current page."""
    # Candidate anchors matched by visible link text, then by href substring.
    by_text = [br.find_elements_by_partial_link_text(linktext) for linktext in LINK_LABELS]
    by_url = [br.find_elements_by_xpath('//a[contains(@href,"%s")]' % linkurl) for linkurl in LINK_URLS]
    candidates = list(ut.flatten(by_text)) + list(ut.flatten(by_url))
    links_to_visit = [link for link in candidates if link]

    # Too few links found by href/text? Fall back to elements that carry an
    # onclick event handler.
    if len(links_to_visit) < NO_OF_LINKS_TO_CLICK:
        links_to_visit += extract_onclick_elements(br)
    wl_log.info('%s links were found on %s' % (len(links_to_visit), br.current_url))

    return links_to_visit
开发者ID:abhiraw,项目名称:fpdetective,代码行数:11,代码来源:crawler.py


示例10: add_pattern

	def add_pattern(self, production, options):
		"""Record a triple pattern for `production`.

		Unwraps single-element list values, optionally validates that every
		component is a Term, and emits a 'pattern' production datum built
		from the subject/predicate/object entries of `options`.
		"""
		self.progress(production, 'add_pattern: %s' % options)
		self.progress(production, '[\'pattern\',  %s, %s, %s' % (options.get('subject'), options.get('predicate'), options.get('object')))
		triple = {}
		for r,v in options.items():
			# A (possibly nested) one-element list stands for its single value.
			if isinstance(v,list) and len(flatten(v)) == 1:
				v = flatten(v)[0]
			if self.validate and not isinstance(v, Term):
				self.error("add_pattern", "Expected %s to be a resource, but it was %s" % (r, v), {'production' : production})
			triple[r] = v
		self.add_prod_datum('pattern', Pattern(triple))
开发者ID:huyphan,项目名称:pysparql,代码行数:11,代码来源:parser.py


示例11: train

 def train(self, authors):
     """Build the sorted stop-word vocabulary for the given authors.

     Collects every author's corpus, tokenizes it, lowercases and
     deduplicates the tokens, keeps only those that are stop words for
     the authors' language, and stores the sorted result in self.words.
     (Python 2 code: relies on map/filter returning lists.)
     """
     # Stop-word list per language known to the backing database.
     self.stopwords = {ln: self.get_stop_words(ln) \
                             for ln in self.db.get_languages()}
     # assumes all `authors` share one language — taken from the first.
     lang = self.db.get_author_language(authors[0])
     self.words = [self.db.get_author(a)["corpus"] for a in authors]
     self.words = utils.flatten(self.words)
     tokenizer = self.get_tokenizer()
     self.words = map(lambda x: tokenizer.tokenize(x), self.words)
     self.words = utils.flatten(self.words)
     # Lowercase and deduplicate.
     self.words = list(set([x.lower() for x in self.words]))
     # Keep only stop words of the detected language.
     self.words = filter(lambda x: x in self.stopwords[lang], self.words)
     self.words.sort()
开发者ID:pan-webis-de,项目名称:AuthorIdentification-PFP,代码行数:12,代码来源:feature_extractor.py


示例12: tokenize_insight

def tokenize_insight(insight, twitter=False):
    """
    return subject, property and context tokens

    Tokenizes the content, subject and property fields of `insight` with
    the same pipeline and attaches the flattened token lists as
    context_toks / subj_toks / prop_toks.  Returns a new dict; the
    caller's mapping is not mutated.
    """
    # Fix: removed the unused local `url` (twagUrl/stagUrl were selected
    # but never used anywhere in this function).
    # Shallow copy so the input dict is left untouched.
    insight = dict(insight.items())
    context = tokenize_doc({'content': insight['content']}, twitter=twitter)
    subj = tokenize_doc({'content': insight['subject']}, twitter=twitter)
    prop = tokenize_doc({'content': insight['property']}, twitter=twitter)
    insight['context_toks'] = flatten(context['toks'])
    insight['subj_toks'] = flatten(subj['toks'])
    insight['prop_toks'] = flatten(prop['toks'])
    return insight
开发者ID:robbymeals,项目名称:word_vectors,代码行数:13,代码来源:tok.py


示例13: cseg_similarity

def cseg_similarity(cseg1, cseg2):
    """Returns Marvin and Laprade (1987) CSIM(A, B) for a single
    cseg. It's a contour similarity function that measures similarity
    between two csegs of the same cardinality. The maximum similarity
    is 1, and minimum is 0.

    >>> cseg_similarity(Contour([0, 2, 3, 1]), Contour([3, 1, 0, 2]))
    0
    """

    def upper_triangle(cseg):
        # Flattened superior triangle of the cseg's comparison matrix.
        return utils.flatten(cseg.comparison_matrix().superior_triangle())

    return auxiliary.position_comparison(upper_triangle(cseg1),
                                         upper_triangle(cseg2))
开发者ID:msampaio,项目名称:music21,代码行数:14,代码来源:comparison.py


示例14: listspecies

def listspecies(reactions):
    """Return the distinct species appearing in any of `reactions`.

    Scans the left-hand, right-hand and modifier sides of every reaction
    and returns their deduplicated union as a list, with the "Nil"
    placeholder (the empty species) removed.  List order is unspecified,
    as in the original set-based implementation.
    """
    # Improvements: removed dead commented-out debug prints and the
    # intermediate list-of-lists (accumulate straight into a set).
    species = set()
    for r in reactions:
        sides = [list(r.LHS()), list(r.RHS()), list(r.MHS())]
        species.update(utils.flatten(sides))
    species = list(species)
    # "Nil" marks an empty side, not a real species.
    if "Nil" in species:
        species.remove("Nil")
    return species
开发者ID:biomathman,项目名称:pycellerator,代码行数:14,代码来源:converter.py


示例15: func

    def func(*args, **kwargs):
        """Decorator wrapper: after the wrapped `fn` returns its tweets,
        harvest mentioned users and hash tags from them and persist new
        Criterion rows; the tweets are passed through unchanged.
        """
        from scrapper.models import Criterion
        tweets = fn(*args, **kwargs)
        if tweets:
            # Transpose [(mentions, tags), ...] into one tuple of mention
            # lists and one tuple of hash-tag lists.
            users, hash_tags = zip(*map(
                lambda tweet:(tweet['mentions'], tweet['hash_tags']), tweets
            ))

            users = flatten(users)
            hash_tags = flatten(hash_tags)
            criteria_obj = [Criterion(type='hash_tag', value=hashtag) for hashtag in hash_tags if is_valid_hashtag(hashtag)]
            criteria_obj += [Criterion(type='user_name', value=username) for username in users if is_valid_username(username)]
            # NOTE(review): criteria_obj is built (with validity filtering)
            # but never used — bulk_create_or_skip receives the raw,
            # unfiltered lists instead.  Looks like a latent bug; confirm
            # intended behavior before changing it.
            Criterion.objects.bulk_create_or_skip(hash_tags=hash_tags, user_name=users)
        return tweets
开发者ID:abo-elleef,项目名称:DAPOS_corpus_browser,代码行数:14,代码来源:decorators.py


示例16: viewfinder_corners

 def viewfinder_corners(corners, position, extensions):
     """For each of the four screen edges, return how far the viewfinder
     extends: the extended coordinate when it stays inside the corner
     bound, otherwise clamped to the corner itself (absolute values).
     """
     # [(1, 2), (10, 20)] -> [-1, 10, -2, 20]
     bounds = [-corners[0][0], corners[1][0], -corners[0][1], corners[1][1]]
     # [5, 4] -> [5, 5, 4, 4]
     widened = [e for ext in extensions for e in (ext, ext)]
     # [2, 3] -> [-2, 2, -3, 3]
     signed = [p for coord in position for p in (-coord, coord)]
     visible = []
     for bound, extension, coordinate in zip(bounds, widened, signed):
         reach = coordinate + extension
         # Clamp at the corner when the extended coordinate overshoots it.
         visible.append(abs(reach) if reach < bound else abs(bound))
     return visible
开发者ID:superseal,项目名称:raisin,代码行数:15,代码来源:durr.py


示例17: possible_cseg

def possible_cseg(base_3):
    """Returns a cseg from a base 3 sequence, if the cseg is possible
    (Polansky and Bassein 1992).

    >>> possible_cseg([2, 2, 2])
    < 0 1 2 >
    """

    # Flatten the (possibly nested) base-3 comparison sequence.
    seq = utils.flatten(base_3)
    size = len(seq)
    # NOTE(review): repeat=3 only enumerates candidate contours of length 3,
    # and range(size) as the value alphabet matches that only when size == 3
    # (as in the doctest).  For any other input length no candidate's
    # representation can equal `seq`, so the function always returns
    # "Impossible cseg" — confirm whether inputs are guaranteed length 3.
    for x in itertools.product(range(size), repeat=3):
        cseg = Contour(x)
        if utils.flatten(cseg.base_three_representation()) == seq:
            return Contour(x)
    return "Impossible cseg"
开发者ID:msampaio,项目名称:music21,代码行数:15,代码来源:contour.py


示例18: p_expr_list_or_empty

 def p_expr_list_or_empty(self, p):
     """expr_list_or_empty : expr_list
                           | """
     # PLY action: flatten the parsed expression list, or produce an empty
     # list for the epsilon alternative.  (The docstring above IS the
     # grammar rule — it must stay exactly as written.)
     p[0] = utils.flatten(p[1]) if len(p) > 1 else []
开发者ID:sebcioz,项目名称:kompilator,代码行数:7,代码来源:Parser.py


示例19: crawl

	def crawl(self):
		"""Crawl election results for every metropolitan municipality.

		Spawns one gevent job per city code, joins them all, and wraps the
		flattened per-city results in a single summary record.  If a
		``next_crawler`` is attached, its results are appended too.
		"""

		jobs = []
		target = self.target
		target_eng = self.target_eng
		target_kor = self.target_kor
		nth = self.nth

		city_code_list = self.city_codes()
		req_url = dict(town=self.urlPath_town_list, sgg=self.urlPath_sgg_list)
		param_dict = dict(town=self.urlParam_town_list, sgg=self.urlParam_sgg_list)

		# Basic crawling pass over the metropolitan-level result pages.
		print("\x1b[1;36mWaiting to connect http://info.nec.go.kr server (%s, %d-th)...\x1b[1;m" % (target_eng, nth))
		for city_code, city_name in city_code_list: # run the steps below once per metropolitan municipality
			req_param = self.JSON_url_param(city_code, copy.deepcopy(param_dict))
			job = gevent.spawn(self.parse_city, req_url, req_param, target, target_kor, nth, city_code, city_name)
			jobs.append(job)
		gevent.joinall(jobs)
		every_result = [{'election_type':target,'nth':nth,'results':flatten(job.get() for job in jobs)}]

		# Delegate to next_crawler (when present) so any additional data
		# sources are crawled as well.
		if hasattr(self, 'next_crawler'):
			next_result = self.next_crawler.crawl()
			every_result.extend(next_result)

		return every_result
开发者ID:comjoy91,项目名称:SKorean-Election_result-Crawler,代码行数:27,代码来源:base_provincePage.py


示例20: make_similarity_matrix

def make_similarity_matrix(matrix, size=MIN_ALIGN):
    """Pairwise Euclidean self-distance matrix over windows of `matrix`.

    Rows of `matrix` are grouped into windows of `size` rows (via tuples),
    each window flattened into a single point; returns the square matrix
    of Euclidean distances between all point pairs (float32 accumulation).
    """
    rows = matrix.tolist()
    points = [flatten(window) for window in tuples(rows, size)]
    count = len(points)
    # Tile the point list two ways so subtracting aligns every (i, j) pair.
    diffs = repmat(points, count, 1) - repeat(points, count, axis=0)
    distances = np.sqrt(np.sum(diffs**2, axis=1, dtype=np.float32))
    return distances.reshape((count, count))
开发者ID:firebaugh,项目名称:partybot,代码行数:7,代码来源:earworm_support.py



注:本文中的utils.flatten函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python utils.float函数代码示例发布时间:2022-05-26
下一篇:
Python utils.flags函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap