This article collects typical usage examples of Python's util.flatten function. If you are wondering what the flatten function does, how to call it, or want to see it used in real code, the curated examples below may help.
The following shows 20 code examples of the flatten function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
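Note that each project ships its own util.flatten, and the semantics differ: some flatten a single level of nesting, others recurse arbitrarily, and Example 18 below is a logic-rewriting helper that merely shares the name. As a rough orientation only, here is a minimal recursive sketch consistent with the behaviour tested in Example 5; flatten_sketch is a hypothetical stand-in, not any project's actual implementation:

def flatten_sketch(items):
    """Recursively flatten nested lists/tuples into a single flat list."""
    flat = []
    for item in items:
        if isinstance(item, (list, tuple)):
            flat.extend(flatten_sketch(item))
        else:
            flat.append(item)
    return flat

# flatten_sketch([[1, 2], 3, [[4]], [(5, [6, 7], 8)]]) == [1, 2, 3, 4, 5, 6, 7, 8]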
Example 1: reduce_anno_s
def reduce_anno_s(tmpl, cls, mtd, s):
    curried_e = partial(reduce_anno_e, tmpl, cls, mtd)
    curried_s = partial(reduce_anno_s, tmpl, cls, mtd)
    if s.kind in [C.S.EXP, C.S.ASSERT, C.S.RETURN]:
        red_e = curried_e(s.e)
        if type(red_e) is list: return red_e
        else: s.e = red_e
    elif s.kind == C.S.ASSIGN:
        s.le = curried_e(s.le)
        s.re = curried_e(s.re)
    elif s.kind == C.S.IF:
        s.e = curried_e(s.e)
        s.t = util.flatten(map(curried_s, s.t))
        s.f = util.flatten(map(curried_s, s.f))
    elif s.kind in [C.S.WHILE, C.S.REPEAT]:
        s.e = curried_e(s.e)
        s.b = util.flatten(map(curried_s, s.b))
    elif s.kind == C.S.FOR:
        s.i = curried_e(s.i)
        s.init = curried_e(s.init)
        s.b = util.flatten(map(curried_s, s.b))
    return [s]
Developer: plum-umd, Project: pasket, Lines: 28, Source: reducer.py
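Because reduce_anno_s itself returns a list of statements (either the original statement wrapped in a list, or the replacement list produced by reduce_anno_e), mapping it over a statement body such as s.t, s.f, or s.b yields a list of lists; util.flatten merges those back into a single flat body.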
Example 2: some_stats
def some_stats(corpus, docid, termv):
    doc = corpus.get_doc(docid)
    print(docid, "vocabulary {}:{} == {:.2}".format(doc.unique_len, doc.total_len, doc.unique_len / doc.total_len))
    # exclude terms which appear only in one document (names, twitter handles)
    termv = list(filter(lambda t: corpus.get_term(t).document_frequency > 1, termv))
    # function: return the top 10 terms sorted by a key function
    bykeyfun = lambda kf: sorted(zip(map(kf, termv), termv), reverse=True)[:10]
    # key functions
    tf = lambda stem: corpus.get_term(stem).term_frequency(docid)
    idf = lambda stem: corpus.get_term(stem).inverse_document_frequency
    tfidf = lambda stem: tf(stem) * idf(stem)
    # table spec
    cols = sorted(
        {
            "tf": tf,
            "idf": idf,
            "df": lambda stem: corpus.get_term(stem).document_frequency,
            "cf": lambda stem: corpus.get_term(stem).corpus_frequency,
            "tf*idf": tfidf,
        }.items()
    )
    # line format, heading
    hfmt = " | ".join(len(cols) * ["{:>6} {:<16}"])
    fmt = " | ".join(len(cols) * ["{:>6.4g} {!s:<16.16}"])
    print(hfmt.format(*util.flatten(zip([name for name, _ in cols], itertools.repeat("term")))))
    # data
    coldata = [bykeyfun(kf) for _, kf in cols]
    for row in zip(*coldata):
        print(fmt.format(*util.flatten(row)))
Developer: plredmond-homework, Project: irpy, Lines: 29, Source: main.py
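The header line works by pairing each column name with the literal word "term" and flattening the pairs into one flat argument list for str.format. A self-contained illustration of that step, using itertools.chain.from_iterable as a stand-in for a one-level util.flatten:

import itertools

names = ["cf", "df", "idf", "tf", "tf*idf"]
# zip pairs each column name with "term"; flattening interleaves the pairs
interleaved = list(itertools.chain.from_iterable(zip(names, itertools.repeat("term"))))
print(interleaved)  # ['cf', 'term', 'df', 'term', 'idf', 'term', 'tf', 'term', 'tf*idf', 'term']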
Example 3: group_by_sender
def group_by_sender(messages):
    """[Email] -> {str: [str]} : Associate lowercased email sender with a list of words."""
    wordssd = collections.defaultdict(list)
    for m in messages:
        words = util.flatten(map(str.split, m.lines))
        wordssd[m.sender.lower()].append(words)
    return {sender: util.flatten(wordss) for sender, wordss in wordssd.items()}
Developer: plredmond-homework, Project: irpy, Lines: 7, Source: main.py
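To make the behaviour concrete, a hypothetical input (Email below is a stand-in namedtuple providing the sender and lines attributes the function expects, not the project's actual class):

import collections

Email = collections.namedtuple("Email", ["sender", "lines"])
messages = [Email("Alice@Example.com", ["hello world", "bye"]),
            Email("alice@example.com", ["hello again"])]
# With the project's group_by_sender, both messages would fold into the single key
# "alice@example.com" mapped to ["hello", "world", "bye", "hello", "again"].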
Example 4: getPlayerId
def getPlayerId(self, *args):
    """Get the id of the current player"""
    a = tuple(flatten(args))
    if self.playerId is not None and len(a) == 0:
        return self.playerId
    else:
        return int(self.conn.sendReceive_flat("world.getPlayerId", flatten(args)))
Developer: arpruss, Project: raspberryjammod-minetest, Lines: 7, Source: minecraft.py
Example 5: test_flatten
def test_flatten(self):
    def assertEqualListOrTuple(actual, expected):
        assert isinstance(expected, (list, tuple,)), "Test logic error"
        self.assertIsInstance(actual, (list, tuple,))
        self.assertSequenceEqual(actual, expected)
    assertEqualListOrTuple(util.flatten([]), [])
    assertEqualListOrTuple(util.flatten([[([])]]), [])
    assertEqualListOrTuple(util.flatten([[1, 2], 3, [[4]], [(5, [6, 7], 8)]]), [1, 2, 3, 4, 5, 6, 7, 8])
Developer: nicLucian, Project: pytis, Lines: 8, Source: _test.py
Example 6: getFormulaIdsFromPars
def getFormulaIdsFromPars(pars, onlyTheorems):
    thmPars = None
    if onlyTheorems:
        thmPars = map(lambda x: x[1], filter(lambda par: re.search(r"thm", par[0]), pars.items()))
    else:
        thmPars = map(lambda x: x[1], pars.items())
    formulaTokens = filter(lambda token: token[:5] == "<fid ", flatten(flatten(thmPars)))
    return map(lambda token: token[5:-1], formulaTokens)
Developer: Zwackelmann, Project: zb_math_cluster_experiments, Lines: 10, Source: tokenize_paragraphs_with_formulas.py
Example 7: Intersections
def Intersections(pts, console):
    '''Returns a dictionary of Intersections with Connections, with strings as keys.'''
    intind = OriginalIntersections(pts)
    net = IntersectionsJoin(intind)
    trimmed = IntersectionsTrim(net)
    rejoined = IntersectionsJoin(trimmed)
    intersections = IntersectionsBuild(rejoined, pts)
    t0 = time.time()
    # make a list of intersections within reasonable distance of the start/end
    intsInRange = list(set(util.flatten(map(lambda a: a[0], OptimalDistance(intersections)))))
    ultimate_trim = sorted(util.flatten(map(lambda a: intersections[a].references[0].references, intsInRange)))
    console.add('Intersections', error=': ' + str(time.time() - t0))
    return IntersectionsBuild(IntersectionsJoin(ultimate_trim), pts)
Developer: dwinkelman, Project: EcoCartographer, Lines: 12, Source: main.py
Example 8: smooth_hscroll
def smooth_hscroll(string, row, iterations, delay=0.2, font=default_FONT):
    """ scrolls string at given row """
    bytes = list(flatten(map(lambda c: font[c] + [0x00], string)))
    for i in xrange(iterations):
        position(0, row)
        data(bytes[i:i+84])
        time.sleep(delay)
Developer: N8body, Project: pcd8544, Lines: 7, Source: lcd.py
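Here flatten builds a single continuous stream of column bytes: each character's glyph columns followed by a 0x00 spacer column, which the loop then windows 84 bytes at a time. In miniature (the glyphs below are made up; a real PCD8544 font maps each character to five column bytes):

font = {"A": [0x7E, 0x11, 0x7E], "B": [0x7F, 0x49, 0x36]}
stream = [b for ch in "AB" for b in font[ch] + [0x00]]
print([hex(b) for b in stream])  # ['0x7e', '0x11', '0x7e', '0x0', '0x7f', '0x49', '0x36', '0x0']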
Example 9: choose_multi_label
def choose_multi_label(labels, lang_model):
    longest = util.argmax(labels, scorer=lambda ngram: len(ngram))
    if len(longest) > 3:
        best = util.argmax(bigrams.trigrams(longest), lambda ng: lang_model.lidstone(ng))
        best = (best,)
    elif len(longest) == 3:
        best = longest
        best = (best,)
    elif len(longest) <= 2:
        # this is a fairly weak candidate set .. would rather have all possible skip n-grams (O(N^2) of them?)
        z = [(tuple(x),) for x in labels] + bigrams.bigrams(labels) + bigrams.trigrams(labels)
        assert z
        z = [x for x in z if len(util.flatten(x)) <= 3]
        # sum is too weird
        # lexicographic ordering of the top-ranked sublabels in the multilabel
        def scorer(ngrams):
            scores = [lang_model.lidstone(ng) for ng in ngrams]
            if len(scores) < 3:
                scores += [0] * (3 - len(scores))
            scores.sort(reverse=True)
            # print "SCORE %-30s %s" % (scores, ngrams)
            return scores
        z.sort(key=scorer, reverse=True)
        # print "RANKING", z
        best = z[0]
    else:
        assert False
    return best
Developer: AnnuSachan, Project: tweetmotif, Lines: 29, Source: deduper.py
Example 10: make
def make(filenames, nprocs, cut):
    '''Create time residual PDF for a set of data files.

    Note: you may wish to use a smaller number of nprocs than you have CPUs;
    this function will almost certainly be I/O-bound.

    :param filenames: list of RAT ROOT files containing data
    :param cut: A Cut instance with cuts to apply to data
    :param nprocs: number of parallel jobs to run
    '''
    p = multiprocessing.Pool(nprocs)
    erf = ERF(cut=cut)
    res = np.array(list(util.flatten(p.map(erf, filenames))))
    print
    print len(res), 'entries'
    h, e = np.histogram(res, bins=750, range=(cut.t[0], cut.t[1]), normed=True)
    pdf = np.array(zip(e, h))
    print 'total events:', total_events.value
    print 'events reconstructed:', events_reconstructed.value
    print 'events passing cuts:', events_passing_cuts.value
    with open('event_counts.txt', 'a') as f:
        f.write('%s %s %s %i %i %i %i\n' % (str(cut.e), str(cut.r), str(cut.r),
                                            len(res),
                                            total_events.value,
                                            events_reconstructed.value,
                                            events_passing_cuts.value))
    return pdf
Developer: mastbaum, Project: tl208-residuals, Lines: 33, Source: pdf.py
Example 11: __init__
def __init__(self, past, future, features=None):
    """Create a training pattern.

    Parameters:
    past -- past feature vectors as a tensor of shape [P, V]
        where P is past days and V is the vectors/day
    future -- future feature vectors as a tensor of [F, V]
        where F is future days and V is the vectors/day
    features -- a sequence of feature names to use
        where None means use all features
    """
    # calculate training input from past features
    past_subfeatures = [[self._subfeatures(vector, features)
                         for vector in vectors]
                        for vectors in past]
    self._input = numpy.array(
        [list(util.flatten(vectors)) for vectors in past_subfeatures])
    # calculate training output from future volatility
    future_returns = numpy.log1p(
        [[vector.ret for vector in vectors] for vectors in future])
    self._output = numpy.std(future_returns, axis=0, ddof=1) \
        * numpy.sqrt(252)
    # calculate past returns for forecasts
    self._past_returns = numpy.log1p(
        [[vector.ret for vector in vectors] for vectors in past])
Developer: MichaelPaddon, Project: volatility, Lines: 28, Source: model.py
Example 12: finalize
def finalize(self):
    merged_clusters = []
    for c1 in self.clusters.values():
        existing = None
        for m in c1:
            for c2 in merged_clusters:
                if m in c2:
                    existing = c2
                    break
            if existing is not None:
                break
        if existing is not None:
            print("Merging clusters (shouldn't happen very often.)")
            existing.update(c1)
        else:
            merged_clusters.append(set(c1))
    merged_clusters = [list(c) for c in merged_clusters]
    all_mentions = util.flatten(merged_clusters)
    assert len(all_mentions) == len(set(all_mentions))
    return {
        "doc_key": self.doc_key,
        "sentences": self.sentences,
        "speakers": self.speakers,
        "clusters": merged_clusters
    }
Developer: qq547276542, Project: e2e-coref, Lines: 26, Source: minimize.py
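The flatten-then-assert pattern at the end is a compact uniqueness check: assuming util.flatten here flattens a single level (the list of cluster lists into one list of mentions), the two lengths can only match when no mention appears in more than one merged cluster. In miniature:

clusters = [[(0, 1), (4, 5)], [(7, 9)]]
all_mentions = [m for cluster in clusters for m in cluster]  # one-level flatten
assert len(all_mentions) == len(set(all_mentions))  # fails if any mention is duplicated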
Example 13: min_value
def min_value(self):
    if self.min_scale_value:
        return self.min_scale_value
    data = map(itemgetter("data"), self.data)
    if self.stacked:
        data = self.get_cumulative_data()
    return min(flatten(data))
Developer: renemilk, Project: slbot, Lines: 7, Source: line.py
Example 14: __init__
def __init__(self, fsinput, fsgrammar, table=None):
    """
    Initialize and return the object.

    @param fsinput: The input feature structure
    @type fsinput: C{nltk.featstruct.FeatStruct}
    @param fsgrammar: The generation grammar
    @type fsgrammar: C{nltk.featstruct.FeatStruct}
    @param table: The feature value type table
    @type table: C{fstypes.FeatureTypeTable}
    """
    import copy
    self.fsinput = fsinput
    self.fsgrammar = fsgrammar
    self.table = table
    self.lr = LinkResolver()
    self.gpr = GrammarPathResolver(copy.deepcopy(fsgrammar), table)
    self.grammar_paths = flatten(self.gpr.resolve(copy.deepcopy(fsgrammar)))
    # if the type table has been passed in,
    # assign types to the feature values
    if table:
        for i, path in enumerate(self.grammar_paths):
            path = assign_types(table, path)
            self.grammar_paths[i] = path
Developer: Sandy4321, Project: nltk_contrib, Lines: 26, Source: fuf.py
Example 15: featured_sources_by_category
def featured_sources_by_category(category=None):
    q = Source.query(Source.featured_priority < 1)
    if category: q = q.filter(Source.categories == category)
    q = q.order(Source.featured_priority)
    sources = q.fetch(400)
    categories = util.unique_ordered_list(util.flatten(s.categories for s in sources))
    if category and category not in categories: categories.append(category)
    category_order = {category: i for i, category in enumerate(["Newspapers", "Culture", "Politics", "Tech", "Humor", "Local", "Longform"])}
    categories.sort(key=lambda x: category_order.get(x, 99999))
    sources_by_category = defaultdict(list)
    for source in sources:
        for category in source.categories:
            sources_by_category[category].append(source)
    max_items_per_category = 60 if category else 15
    for category, items in sources_by_category.items():
        sources_by_category[category] = items[:min(len(items), max_items_per_category)]
    category_jsons = []
    for category in categories:
        category_jsons.append({"id": category, "name": category, "sources": [s.json() for s in sources_by_category[category]]})
    return category_jsons
Developer: nate-parrott, Project: fast-news, Lines: 26, Source: api.py
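Here util.flatten collapses each source's category list into a single stream, and util.unique_ordered_list (assumed to keep first occurrences in their original order) dedupes it before the custom sort. A stand-in sketch of that dedupe step:

def unique_ordered(items):
    # keep the first occurrence of each item, preserving order (assumed behaviour)
    seen = set()
    return [x for x in items if not (x in seen or seen.add(x))]

category_lists = [["Tech", "Politics"], ["Tech"], ["Humor", "Politics"]]
flat = [c for cats in category_lists for c in cats]
print(unique_ordered(flat))  # ['Tech', 'Politics', 'Humor']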
Example 16: parsToFeatureCounts
def parsToFeatureCounts(pars, onlyTheorems):
    thmPars = None
    if onlyTheorems:
        thmPars = map(lambda x: x[1], filter(lambda par: re.search(r"thm", par[0]), pars.items()))
    else:
        thmPars = map(lambda x: x[1], pars.items())
    textTokenList = filter(lambda token: not (token[:5] == "<fid "), flatten(flatten(thmPars)))
    tokenCounts = {}
    for token in textTokenList:
        if token not in tokenCounts:
            tokenCounts[token] = 0
        tokenCounts[token] = tokenCounts[token] + 1
    return tokenCounts
Developer: Zwackelmann, Project: zb_math_cluster_experiments, Lines: 16, Source: tokenize_paragraphs.py
Example 17: tensorize_example
def tensorize_example(self, example, is_training, oov_counts=None):
    clusters = example["clusters"]
    gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
    gold_mention_map = {m: i for i, m in enumerate(gold_mentions)}
    cluster_ids = np.zeros(len(gold_mentions))
    for cluster_id, cluster in enumerate(clusters):
        for mention in cluster:
            cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id
    sentences = example["sentences"]
    num_words = sum(len(s) for s in sentences)
    speakers = util.flatten(example["speakers"])
    assert num_words == len(speakers)
    max_sentence_length = max(len(s) for s in sentences)
    max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"]))
    word_emb = np.zeros([len(sentences), max_sentence_length, self.embedding_size])
    char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
    text_len = np.array([len(s) for s in sentences])
    for i, sentence in enumerate(sentences):
        for j, word in enumerate(sentence):
            current_dim = 0
            for k, (d, (s, l)) in enumerate(zip(self.embedding_dicts, self.embedding_info)):
                if l:
                    current_word = word.lower()
                else:
                    current_word = word
                if oov_counts is not None and current_word not in d:
                    oov_counts[k] += 1
                word_emb[i, j, current_dim:current_dim + s] = util.normalize(d[current_word])
                current_dim += s
            char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
    speaker_dict = {s: i for i, s in enumerate(set(speakers))}
    speaker_ids = np.array([speaker_dict[s] for s in speakers])
    doc_key = example["doc_key"]
    genre = self.genres[doc_key[:2]]
    gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
    if is_training and len(sentences) > self.config["max_training_sentences"]:
        return self.truncate_example(word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids)
    else:
        return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids
Developer: qq547276542, Project: e2e-coref, Lines: 47, Source: coref_model.py
Example 18: test_helper2
def test_helper2():
    statement = GeneralizedOr(Symbol("a"), Symbol("a"))
    # need to manually set it to this, as otherwise the constructor would flatten it automatically
    statement.args[0] = Or(And(Symbol("b"), Not(Symbol("c"))), And(Symbol("c"), Not(Symbol("b"))))
    new_statement, change = util.flatten(copy.deepcopy(statement))
    assert_equal(new_statement, GeneralizedOr(Symbol("a"), And(Symbol("b"), Not(Symbol("c"))),
                                              And(Symbol("c"), Not(Symbol("b")))))
    assert_true(change)
Developer: MasterOdin, Project: LogicalEquivalency, Lines: 8, Source: tests.py
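Note that this util.flatten comes from a propositional-logic project and has different semantics from the list-flattening helpers in the other examples: judging from the assertions, it pulls a nested Or up into the enclosing GeneralizedOr and returns a (new_statement, changed) pair rather than a flat list.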
Example 19: create_tfidf_vector
def create_tfidf_vector(self):
    count_vect = CountVectorizer()
    doc = map(lambda x: " ".join(flatten(x)) + " " + x[0],
              self.goal_actions_map.items())
    X_train_counts = count_vect.fit_transform(doc)
    tfidf_transformer = TfidfTransformer()
    X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
    return X_train_tfidf
Developer: a33kuo, Project: procedural_knowledge, Lines: 8, Source: goal_cluster.py
Example 20: fallbackGetCuboid
def fallbackGetCuboid(self, getBlock, *args):
    (x0, y0, z0, x1, y1, z1) = map(lambda x: int(math.floor(float(x))), flatten(args))
    out = []
    for y in range(min(y0, y1), max(y0, y1) + 1):
        for x in range(min(x0, x1), max(x0, x1) + 1):
            for z in range(min(z0, z1), max(z0, z1) + 1):
                out.append(getBlock(x, y, z))
    return out
Developer: arpruss, Project: raspberryjammod-minetest, Lines: 8, Source: minecraft.py
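The flatten(args) call is what lets callers pass the two cuboid corners either as six loose numbers or grouped in tuples/lists; presumably both forms flatten to the same six values, which are floored to ints before the triple loop walks every block in the cuboid.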
Note: The util.flatten examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's License before redistributing or using the code. Do not reproduce without permission.