This article collects typical usage examples of the numpy.fromiter function in Python. If you are unsure what fromiter does, how to call it, or what it looks like in real code, the curated examples below should help.
Twenty code examples of the fromiter function are presented, ordered by popularity.
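Before the examples, here is a minimal sketch of the pattern they all share: np.fromiter consumes any iterable (most often a generator expression) and builds a one-dimensional array of the requested dtype, without creating an intermediate list. The optional count argument tells NumPy how many elements to expect so the output can be allocated in a single step. The names below are illustrative only.

import numpy as np

# Build an array directly from a generator expression, with no intermediate list.
squares = np.fromiter((i * i for i in range(10)), dtype=np.int64, count=10)

# count is optional; count=-1 (the default) reads until the iterator is exhausted.
cubes = np.fromiter((i ** 3 for i in range(10)), dtype=np.int64)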
Example 1: decode4js
def decode4js(obj):
    """
    return decoded Python object from encoded object.
    """
    out = obj
    if isinstance(obj, dict):
        classname = obj.pop('__class__', None)
        if classname is None:
            return obj
        elif classname == 'Complex':
            out = obj['value'][0] + 1j*obj['value'][1]
        elif classname in ('List', 'Tuple'):
            out = []
            for item in obj['value']:
                out.append(decode4js(item))
            if classname == 'Tuple':
                out = tuple(out)
        elif classname == 'Array':
            if obj['__dtype__'].startswith('complex'):
                re = np.fromiter(obj['value'][0], dtype='double')
                im = np.fromiter(obj['value'][1], dtype='double')
                out = re + 1j*im
            else:
                out = np.fromiter(obj['value'], dtype=obj['__dtype__'])
            out.shape = obj['__shape__']
        elif classname in ('Dict', 'Parameter', 'Group'):
            out = {}
            for key, val in obj.items():
                out[key] = decode4js(val)
            if classname == 'Parameter':
                out = Parameter(**out)
            elif classname == 'Group':
                out = Group(**out)
    return out
Developer: bruceravel, Project: xraylarch, Lines: 34, Source: jsonutils.py
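Two details of this example generalize well. The encoded JSON stores the real and imaginary parts of a complex array as two separate lists, so two fromiter calls rebuild them before recombination. And because fromiter returns a flat one-dimensional array (for scalar dtypes), the original shape is restored afterwards by assigning to out.shape. A minimal sketch of that shape-restoration step, with made-up data:

import numpy as np

flat = np.fromiter(range(6), dtype='int64')  # fromiter output is one-dimensional
flat.shape = (2, 3)                          # restore the serialized shape in place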
Example 2: calculate_switch_stats
def calculate_switch_stats(mappable, linkage_map_file, linkage_map_format, MST_grouping_threshold):
    genotypes_of_locus = mappable
    if linkage_map_format.lower() == 'mst':
        ini_map, loci_on_lg = parse_map_file_MST(linkage_map_file)
    elif linkage_map_format.lower() == 'rqtl':
        ini_map, loci_on_lg = parse_map_file_rqtl(linkage_map_file)
    else:
        raise ValueError("unknown linkage_map_format")
    int_arr = convert_genotypes_to_int_array(genotypes_of_locus, ini_map)
    num_loci = int_arr.shape[0]
    num_pairs = int((num_loci * (num_loci-1))/2)
    pairs = itertools.combinations(int_arr, 2)
    R = numpy.fromiter(getR(pairs), dtype=numpy.float64, count=num_pairs)
    pairs = itertools.combinations(int_arr, 2)
    NR = numpy.fromiter(getNR(pairs), dtype=numpy.float64, count=num_pairs)
    ml_R_frac = get_ml_R_frac(R=R, NR=NR)
    Z = get_LOD(R=R, NR=NR, R_frac=ml_R_frac)
    NR_matrix = get_NR_matrix(NR)
    #rf = get_rf_matrix(ml_R_frac)
    lod = get_lod_matrix(Z)
    index_of_lg = get_index_of_LG(loci_on_lg)
    lgs_longer_than_1 = find_LGs_with_multiple_loci(index_of_lg, loci_on_lg)
    #mean_rf = get_LG_pairwise_mean_rf(lgs_longer_than_1, rf, index_of_lg)
    #mean_lod = get_LG_pairwise_mean_lod(lgs_longer_than_1, lod, index_of_lg)
    sum_lod = get_LG_pairwise_sum_lod(lgs_longer_than_1, lod, index_of_lg)
    sq_sum_lod = get_square_form(sum_lod, lgs_longer_than_1)
    n = len(mappable.items()[0][1])  # number of individuals
    NR_threshold = get_threshold_recombinants_for_same_LGs(n, MST_grouping_threshold)
    NR_under_threshold = get_LG_pairwise_count_NR_threshold(lgs_longer_than_1, NR_matrix, index_of_lg, threshold=NR_threshold)
    sq_NR_matrix = get_square_form(NR_under_threshold, lgs_longer_than_1)
    return (ini_map, sq_sum_lod, sq_NR_matrix, R, NR, lgs_longer_than_1)
Developer: rwaples, Project: chum_populations, Lines: 32, Source: switch_allele_functions.py
Example 3: _stats_to_movie_results
def _stats_to_movie_results(bam_stats, movie_names):
    """
    Separate out per-movie results from process stats.
    """
    results = []
    movies = sorted(list(movie_names))
    for movie_name in movies:
        def _base_calls():
            for r in bam_stats:
                if r.movieName == movie_name:
                    yield r.qLen

        def _num_passes():
            for r in bam_stats:
                if r.movieName == movie_name:
                    yield r.numPasses

        def _accuracy():
            for r in bam_stats:
                if r.movieName == movie_name:
                    yield r.readScore

        read_lengths = np.fromiter(_base_calls(), dtype=np.int64, count=-1)
        num_passes = np.fromiter(_num_passes(), dtype=np.int64, count=-1)
        accuracy = np.fromiter(_accuracy(), dtype=np.float, count=-1)
        results.append(MovieResult(
            movie_name, read_lengths, accuracy, num_passes))
    return results
Developer: natechols, Project: pbreports, Lines: 29, Source: ccs.py
Example 4: _bam_file_to_movie_results
def _bam_file_to_movie_results(file_name):
    """
    Read what is assumed to be a single BAM file (as a ConsensusReadSet).
    """
    from pbcore.io import IndexedBamReader
    results = []
    with IndexedBamReader(file_name) as bam:
        for rg in bam.readGroupTable:
            assert rg["ReadType"] == "CCS"
        movies = list(set([rg["MovieName"] for rg in bam.readGroupTable]))
        for movie_name in movies:
            def _base_calls():
                for r in bam:
                    if r.movieName == movie_name:
                        yield r.peer.query_length

            def _num_passes():
                for r in bam:
                    if r.movieName == movie_name:
                        yield r.numPasses

            def _accuracy():
                for r in bam:
                    if r.movieName == movie_name:
                        yield r.readScore

            read_lengths = np.fromiter(_base_calls(), dtype=np.int64, count=-1)
            num_passes = np.fromiter(_num_passes(), dtype=np.int64, count=-1)
            accuracy = np.fromiter(_accuracy(), dtype=np.float, count=-1)
            results.append(MovieResult(
                file_name, movie_name, read_lengths, accuracy, num_passes))
    return results
Developer: tkerelska, Project: pbreports, Lines: 34, Source: ccs.py
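Both this example and the previous one pass count=-1, which matches the default: it signals that the iterator's length is unknown, so fromiter grows its buffer while consuming the generator. When the length is known, passing it explicitly lets NumPy allocate the array once up front. Note also that np.float, used above, was merely an alias for the builtin float and has been removed in NumPy 1.24; modern code would write float or np.float64. A small illustration of the count trade-off, independent of the BAM-specific code:

import numpy as np

readings = (x * 0.5 for x in range(10000))
a = np.fromiter(readings, dtype=np.float64, count=-1)     # length unknown; buffer grows

readings = (x * 0.5 for x in range(10000))
b = np.fromiter(readings, dtype=np.float64, count=10000)  # known length; single allocation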
Example 5: _listparser
def _listparser(dlist, freq=None):
    "Constructs a DateArray from a list."
    dlist = np.array(dlist, copy=False, ndmin=1)
    # Case #1: dates as strings .................
    if dlist.dtype.kind in 'SU':
        #...construct a list of dates
        dlist = np.fromiter((Date(freq, string=s).value for s in dlist),
                            dtype=int)
    # Case #2: dates as numbers .................
    elif dlist.dtype.kind in 'if':
        #...hopefully, they are values
        pass
    # Case #3: dates as objects .................
    elif dlist.dtype.kind == 'O':
        template = dlist[0]
        #...as Date objects
        if isinstance(template, Date):
            dlist = np.fromiter((d.value for d in dlist), dtype=int)
            if freq in (_c.FR_UND, None):
                freq = template.freq
        #...as mx.DateTime objects
        elif hasattr(template, 'absdays'):
            dlist = np.fromiter((Date(freq, datetime=m) for m in dlist),
                                dtype=int)
        #...as datetime objects
        elif hasattr(template, 'toordinal'):
            dlist = np.fromiter((Date(freq, datetime=d) for d in dlist),
                                dtype=int)
    #
    result = dlist.view(DateArray)
    result.freq = freq
    return result
Developer: ndawe, Project: scikit-timeseries, Lines: 32, Source: tdates.py
Example 6: _fit_once
def _fit_once(self, X, initial_order):
    adj_matrix = self.adj_matrix_strategy(X)
    N = adj_matrix.shape[0]
    degrees = adj_matrix.sum(axis=1)
    boundary = np.zeros(N)
    ordering = prc.createOrder(initial_order)
    policy = prc.tiloPolicyStruct()
    if self.refine_order:
        prc.RefineTILO(adj_matrix, ordering, policy)
    else:
        prc.TILO(adj_matrix, ordering, policy)
    boundary = np.fromiter(ordering.b.b, dtype=float)[:-1]
    ordering = np.fromiter(ordering.vdata, dtype=int)
    #print 'BDR', boundary
    #print 'PRS', pinch_ratios(boundary)
    #print 'ORD', ordering
    pinch_ratios, clusters = self._find_clusters(ordering, boundary)
    labels = np.zeros(N, dtype=int)
    for i, cluster in enumerate(clusters):
        labels[cluster] = i
    return ordering, boundary, labels, pinch_ratios
Developer: rsbowman, Project: sklearn-prc, Lines: 25, Source: cluster.py
Example 7: system_values
def system_values(self, when: Union[Real, Sequence[Real]], which: Union[str, Sequence[str]] = None):
    which = self._observable_names if which is None else which
    max_when = when if isinstance(when, Real) else max(when)
    self.integrate_to(max_when)
    if len(self.solution_times) == 1:
        # Handle scipy bug when there is only one time point
        # TODO (drhagen): super hacky solution here
        state_interpolator = lambda t: self.solution_states[0]
    else:
        state_interpolator = interp1d(self.solution_times, self.solution_states, axis=0, assume_sorted=True,
                                      copy=False)

    # Extract values from solution
    output_fun = self.ode_system.outputs
    if isinstance(which, str) and isinstance(when, Real):
        states = state_interpolator(when)
        return output_fun(which, when, states)
    elif isinstance(which, str):
        return np.fromiter((output_fun(which, when_i, state_interpolator(when_i)) for when_i in when),
                           'float', count=len(when))
    elif isinstance(when, Real):
        states = state_interpolator(when)
        return np.fromiter((output_fun(which_i, when, states) for which_i in which),
                           'float', count=len(which))
    else:
        def values():
            for when_i in when:
                states = state_interpolator(when_i)
                for which_i in which:
                    yield output_fun(which_i, when_i, states)

        values = np.fromiter(values(), 'float', count=len(which)*len(when))
        return np.reshape(values, [len(when), len(which)])
Developer: drhagen, Project: biolucia, Lines: 35, Source: experiment.py
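The final branch shows a common idiom for producing a 2-D result with fromiter: yield the values in row-major order from a nested loop, pass the exact count, and reshape afterwards. A stripped-down version of the same idiom, using hypothetical data:

import numpy as np

rows, cols = [1, 2, 3], [10, 20]

def cells():
    for r in rows:
        for c in cols:
            yield r * c  # row-major order: all columns of a row before the next row

table = np.fromiter(cells(), 'float', count=len(rows) * len(cols))
table = np.reshape(table, [len(rows), len(cols)])  # shape (3, 2)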
Example 8: _computeNormalizations
def _computeNormalizations(self):
    # Use a generator instead of a list to gain speed
    generator1 = (x.value for x in self.parameters.values()[1::2])
    self.alphas = numpy.fromiter(generator1, float)
    #alphasDiff = self.alphas[:-1]-self.alphas[1:]
    generator2 = (x.value for x in self.parameters.values()[2::2])
    self.betas = numpy.fromiter(generator2, float)
    #betasDiff = self.betas[:-1]-self.betas[1:]
    #bLogEpivot = self.betas*self.logPivotEnergies
    #bLogEpivotDiff = bLogEpivot[1:]-bLogEpivot[:-1]
    self.normalizations[0] = self.parameters['K'].value
    self.normalizations[1:-1] = (self._logP(self.energyBreaks, self.alphas[:-1], self.betas[:-1], self.pivotEnergies[:-1]) /
                                 self._logP(self.energyBreaks, self.alphas[1:], self.betas[1:], self.pivotEnergies[1:]))
    self.normalizations[-1] = 1.0
    # This computes the cumulative product of the array
    # (i.e., the first element is a0, the second a0*a1,
    # the third a0*a1*a2, and so on...)
    self.products = numpy.cumprod(self.normalizations)
Developer: cdr397, Project: 3ML, Lines: 25, Source: manylogporabolas.py
Example 9: get_world_endpoints
def get_world_endpoints(edges, pos, scale):
    """Returns the edge endpoints in homogeneous world coordinates

    Parameters
    ----------
    edges : iterable of Edge
    pos : numpy array
    scale : float

    Returns
    -------
    tuple of iterable of points
        a value in the form `(start_points, end_points)`, where
        `start_points` and `end_points` are in the form of a numpy matrix
    """
    edge_starts = (coord
                   for edge in edges
                   for coord in chain(scale * edge.start + pos, (1.0, )))
    edge_ends = (coord
                 for edge in edges
                 for coord in chain(scale * edge.end + pos, (1.0, )))
    homo_starts = np.fromiter(edge_starts, np.float, count=4 * len(edges))
    homo_ends = np.fromiter(edge_ends, np.float, count=4 * len(edges))
    homo_starts = homo_starts.reshape((len(edges), 4))
    homo_ends = homo_ends.reshape((len(edges), 4))
    return homo_starts, homo_ends
Developer: lyze, Project: pygame-3d-wireframes, Lines: 29, Source: viewport.py
Example 10: test_vector
def test_vector(self):
    v1 = Vector(self.list1)
    v2 = Vector(2*x for x in self.list1)
    self.assertEqual(2*v1, v2)
    n1 = np.fromiter(v1, int)
    n2 = np.fromiter(v2, int)
    self.assertEqual(v1.dot(v2), np.dot(n1, n2))
Developer: radovankavicky, Project: econpy, Lines: 7, Source: test_pytrix.py
Example 11: get_charge_resolution
def get_charge_resolution(self):
    """
    Calculate and obtain the charge resolution graph arrays.

    Returns
    -------
    true_charge : ndarray
        The X axis true charges.
    chargeres : ndarray
        The Y axis charge resolution values.
    chargeres_error : ndarray
        The error on the charge resolution.
    scaled_chargeres : ndarray
        The Y axis charge resolution divided by the Goal.
    scaled_chargeres_error : ndarray
        The error on the charge resolution divided by the Goal.
    """
    log.debug('[chargeres] Calculating charge resolution')
    true_charge = np.fromiter(iter(self.sum_dict.keys()), dtype=int)
    summed_charge = np.fromiter(iter(self.sum_dict.values()), dtype=float)
    num = np.fromiter(iter(self.n_dict.values()), dtype=int)
    chargeres = np.sqrt((summed_charge / num) + true_charge) / true_charge
    chargeres_error = chargeres * (1 / np.sqrt(2 * num))
    scale = self.goal(true_charge)
    scaled_chargeres = chargeres/scale
    scaled_chargeres_error = chargeres_error/scale
    return true_charge, chargeres, chargeres_error, \
        scaled_chargeres, scaled_chargeres_error
Developer: ctrichard, Project: ctapipe, Lines: 31, Source: chargeresolution.py
Example 12: __init__
def __init__(self, image):
    # number of points
    self.nx = int(image.shape[0])
    self.ny = int(image.shape[1])
    # spacing
    self.dx = 1.0
    self.dy = 1.0
    # limits
    self.xmin = 0
    self.ymin = 0
    self.xmax = float(self.nx)
    self.ymax = float(self.ny)
    # lengths
    self.lx = abs(self.xmax - self.xmin)
    self.ly = abs(self.ymax - self.ymin)
    # mesh
    self.y, self.x = np.meshgrid(
        np.fromiter(((0.5 + i) * self.dx for i in range(self.nx)),
                    dtype=np.float64, count=self.nx),
        np.fromiter(((0.5 + i) * self.dy for i in range(self.ny)),
                    dtype=np.float64, count=self.ny))
Developer: JoshuaSBrown, Project: langmuir, Lines: 26, Source: fft2D.py
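The two fromiter calls build cell-centered coordinates, offsetting each index by half a step. The same arrays can be produced without a Python-level loop; a vectorized equivalent, shown only for comparison:

import numpy as np

nx, dx = 4, 1.0
centers = (np.arange(nx, dtype=np.float64) + 0.5) * dx
# same result as np.fromiter(((0.5 + i) * dx for i in range(nx)), dtype=np.float64, count=nx)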
Example 13: extract_surf
def extract_surf(jpgfile):
    start = time.time()
    out = os.path.join(os.path.dirname(jpgfile), os.path.basename(jpgfile)[:-4] + 'surf.npy')
    if os.path.exists(out):
        INFO('%s already exists' % out)
        return
    im = cv.LoadImageM(jpgfile, cv.CV_LOAD_IMAGE_GRAYSCALE)
    INFO('cv loaded %dx%d image' % (im.rows, im.cols))
    g, features = cv.ExtractSURF(im, None, cv.CreateMemStorage(), (0, 500, 3, 4))
    data = np.ndarray(len(features), SURFReader.surf_dtype)
    for i in range(len(features)):
        data[i]['vec'] = np.fromiter(features[i], np.float32)
        data[i]['geom'] = np.fromiter([g[i][0][0], g[i][0][1], g[i][2]], np.uint16)
        data[i]['index'] = 0
    ## Simple Quantization into bytes
    #for i in range(len(features)):
    #    surfvalues = np.fromiter(features[i], np.float)
    #
    #    assert max(surfvalues) <= 1.0
    #    assert min(surfvalues) >= -1.0
    #
    #    data[i]['vec'] = np.int8(127*surfvalues)
    #    data[i]['geom'] = np.fromiter([g[i][0][0], g[i][0][1], g[i][2]], np.uint16)
    #    data[i]['index'] = 0
    save_atomic(lambda d: np.save(d, data), out)
    INFO('cv wrote %d features' % len(features))
    INFO_TIMING('took %f seconds' % (time.time() - start))
Developer: jasonzliang, Project: image_retreival, Lines: 32, Source: surf.py
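Here fromiter fills individual fields of a structured array row by row. A minimal sketch of that pattern; the dtype below is a guess standing in for SURFReader.surf_dtype, purely for illustration:

import numpy as np

# Hypothetical layout: a 64-float descriptor, a 3-value geometry, and an index.
surf_dtype = np.dtype([('vec', np.float32, (64,)),
                       ('geom', np.uint16, (3,)),
                       ('index', np.uint16)])
data = np.zeros(1, dtype=surf_dtype)
data[0]['vec'] = np.fromiter((0.0 for _ in range(64)), np.float32, count=64)
data[0]['geom'] = np.fromiter([10, 20, 3], np.uint16)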
Example 14: decodePacket
def decodePacket(bin_data, data_size=None, packet_mode='i64u', track_t0=False):
    # Works only for i64bit unpacked mode
    global compressed_t0
    #assert (packet_mode == 'i64u')
    if packet_mode == 'i64u':
        data_size = len(bin_data)//ctypes.sizeof(Timetag_I64)
        t = ctypes.cast(bin_data, timetag_I64_p)
        time = np.fromiter((i.time for i in t), np.int64, data_size)
        channel = np.fromiter((i.channel for i in t), np.int8, data_size)
    if packet_mode == 'i64c':
        data_size = len(bin_data)//ctypes.sizeof(Timetag_I64c)
        t = ctypes.cast(bin_data, timetag_I64c_p)
        #if t[0].highlow == 0:
        #    ctypes.cast(bin_data, timetag_I64c_p)
        highlow = np.fromiter((i.highlow for i in t), np.uint64, data_size)
        time = np.fromiter((i.timehigh for i in t), np.uint64, data_size) + (cumsum(highlow))*2**27
        channel = np.fromiter((i.channel for i in t), np.uint8, data_size)
        time = time[highlow == 0]
        channel = channel[highlow == 0]
        if track_t0:
            time = time + compressed_t0
            compressed_t0 += sum(highlow)*2**27
        else:
            track_t0 = 0
    return (time, channel)
Developer: actionfarsi, Project: farsilab, Lines: 30, Source: ttm.py
Example 15: token_type
def token_type(disc_clsdict, wrd_corpus, fragments_within, fragments_cross,
               dest, verbose, n_jobs):
    if verbose:
        print banner('TOKEN/TYPE')
    ptoc, rtoc, ptyc, rtyc = _token_type_sub(disc_clsdict, wrd_corpus,
                                             fragments_cross, 'cross',
                                             verbose, n_jobs)
    ftoc = np.fromiter((fscore(ptoc[i], rtoc[i]) for i in xrange(ptoc.shape[0])),
                       dtype=np.double)
    ftyc = np.fromiter((fscore(ptyc[i], rtyc[i]) for i in xrange(ptyc.shape[0])),
                       dtype=np.double)
    ptow, rtow, ptyw, rtyw = _token_type_sub(disc_clsdict, wrd_corpus,
                                             fragments_within, 'within',
                                             verbose, n_jobs)
    ftow = np.fromiter((fscore(ptow[i], rtow[i]) for i in xrange(ptow.shape[0])),
                       dtype=np.double)
    ftyw = np.fromiter((fscore(ptyw[i], rtyw[i]) for i in xrange(rtyw.shape[0])),
                       dtype=np.double)
    with open(path.join(dest, 'token_type'), 'w') as fid:
        fid.write(pretty_score_f(ptoc, rtoc, ftoc, 'token total',
                                 len(fragments_cross),
                                 sum(map(len, fragments_cross))))
        fid.write('\n')
        fid.write(pretty_score_f(ptyc, rtyc, ftyc, 'type total',
                                 len(fragments_cross),
                                 sum(map(len, fragments_cross))))
        fid.write('\n')
        fid.write(pretty_score_f(ptow, rtow, ftow, 'token within-speaker only',
                                 len(fragments_within),
                                 sum(map(len, fragments_within))))
        fid.write('\n')
        fid.write(pretty_score_f(ptyw, rtyw, ftyw, 'type within-speaker only',
                                 len(fragments_within),
                                 sum(map(len, fragments_within))))
Developer: bootphon, Project: tde, Lines: 35, Source: sample_eval2.py
Example 16: SNrest
def SNrest():
    path = "../data/restframe/"
    objnames, band, mjd, mag, magerr, stype = [], [], [], [], [], []
    formatcode = ('|S16,'.rstrip('#') + 'f8,'*6 + '|S16,' + 4 * 'f8,' + '|S16,' * 3 + 'f8,' * 2 + '|S16,' + 'f8,' * 2)
    filenames = os.listdir(path)
    for filename in filenames:
        data = np.recfromtxt(os.path.join(path, filename), usecols=(0, 1, 2, 3, 4), dtype=formatcode, names=True, skip_header=13, case_sensitive='lower', invalid_raise=False)
        name = np.empty(len(data.band), dtype='S20')
        name.fill(filename)
        objnames.append(name)
        data.band = [x.lower() for x in data.band]
        band.append(data.band)
        mjd.append(data.phase)
        mag.append(data.mag)
        magerr.append(data.err)
    objnames = np.fromiter(itertools.chain.from_iterable(objnames), dtype='S20')
    band = np.fromiter(itertools.chain.from_iterable(band), dtype='S16')
    mjd = np.fromiter(itertools.chain.from_iterable(mjd), dtype='float')
    mag = np.fromiter(itertools.chain.from_iterable(mag), dtype='float')
    magerr = np.fromiter(itertools.chain.from_iterable(magerr), dtype='float')
    stype = np.full(len(objnames), 1)
    LC = Lightcurve(objnames, band, mjd, mag, magerr, stype)
    return LC
Developer: tayebzaidi, Project: snova_analysis, Lines: 25, Source: readin.py
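The closing lines show a useful combination: itertools.chain.from_iterable flattens the per-file lists lazily, and fromiter materializes each flattened sequence in one pass. A self-contained illustration of that step:

import itertools
import numpy as np

per_file_mags = [[19.1, 18.7], [20.3], [17.9, 18.2]]
mag = np.fromiter(itertools.chain.from_iterable(per_file_mags), dtype='float')
# array([19.1, 18.7, 20.3, 17.9, 18.2])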
Example 17: fit
def fit(self, X):
    adj_matrix = self.adj_matrix_strategy(X)
    if self.initial_ordering is None:
        ordering = np.arange(len(X), dtype=int)
    else:
        ordering = self.initial_ordering
        assert len(ordering) == len(X), \
            "initial_ordering has wrong length"
    order = prc.createOrder(ordering)
    labels = prc.ivec([0]*len(X))
    policy = prc.iprPolicyStruct()
    policy.iprNumberOfClustering = self.number_of_clustering
    policy.iprMaxIterations = self.max_iterations
    policy.iprConvergenceThreshold = self.convergence_threshold
    res = prc.ipr(adj_matrix, order, labels,
                  self.n_clusters, policy)
    self._ordering = np.fromiter(order.vdata, dtype=int)
    ## calculate boundaries of original matrix w/ new ordering
    prc.calcBoundaries(adj_matrix, order)
    raw_boundaries = order.b.b
    self._boundary = np.fromiter(raw_boundaries,
                                 dtype=float)[:-1]  # XXX: slice needed?
    self._width = np.sort(self._boundary)[::-1]
    self.labels_ = np.fromiter(labels, dtype=int)
    self._pinch_ratios, _ = compute_pr_cluster_indices(
        self._ordering, self._boundary, self.n_clusters,
        compute_thick_part_PR)
Developer: rsbowman, Project: sklearn-prc, Lines: 27, Source: cluster.py
Example 18: CalculateMASE
def CalculateMASE(train_guess, train_correct, test_guess, test_correct):
    """Calculates the Mean Absolute Scaled Error"""

    def CalculateNaive(train_correct):
        error = 0
        c = 0
        for t1, t2 in zip(train_correct[1:], train_correct):
            res = abs(t1-t2)
            if not numpy.isnan(res):
                error += res
                c += 1
        return error/c

    try:
        abs_error_train = abs(train_guess - train_correct)
        abs_error_test = abs(test_guess - test_correct)
    except TypeError:
        # if they're the wrong type then convert them accordingly
        train_guess = numpy.fromiter(train_guess, numpy.float)
        train_correct = numpy.fromiter(train_correct, numpy.float)
        test_guess = numpy.fromiter(test_guess, numpy.float)
        test_correct = numpy.fromiter(test_correct, numpy.float)
        abs_error_train = abs(train_guess - train_correct)
        abs_error_test = abs(test_guess - test_correct)

    naive_scale = CalculateNaive(train_correct)
    train_scaled_errors = abs_error_train/naive_scale
    test_scaled_errors = abs_error_test/naive_scale
    train_mase = nanmean(train_scaled_errors)
    test_mase = nanmean(test_scaled_errors)
    return train_mase, test_mase
Developer: JudoWill, Project: wills-kaggle-comp, Lines: 35, Source: process.py
Example 19: __init__
def __init__(self, parsed_mesh, borders=None, default_border="land",
             ignore_given_edges=False, projection=None):
    if borders is None:
        borders = {}
    self.elements = parsed_mesh.elements
    self.nodes = meshtools.project_nodes(projection, parsed_mesh.elements,
                                         parsed_mesh.nodes,
                                         attempt_flatten=True)
    self.edge_collections = \
        meshtools.organize_edges(parsed_mesh.edges, borders=borders,
                                 default_border=default_border)
    if max(map(len, self.edge_collections.values())) == 0 \
            or ignore_given_edges:
        self.edge_collections = {default_border:
                                 set(meshtools.extract_boundary_edges(self.elements))}
    if len(np.unique(self.elements)) != self.nodes.shape[0]:
        self._fix_unused_nodes()
    self.boundary_nodes = {}
    interior_nodes = set(range(1, len(self.nodes)+1))
    for name, edge_collection in self.edge_collections.items():
        self.boundary_nodes[name] = \
            np.fromiter(set(node for edge in edge_collection
                            for node in edge[0:-1]), int)
        interior_nodes -= set(self.boundary_nodes[name])
    self.interior_nodes = np.fromiter(interior_nodes, int)
    self.order = _element_order(self.elements.shape[1])
    self.mean_stepsize = self._get_stepsize()
Developer: VT-ICAM, Project: ArgyrisPack, Lines: 31, Source: meshes.py
Example 20: _join_staves
def _join_staves(staff_dist, *sections):
    staff_dict = dict((s, np.array([s])) for s in sections[0])
    for i, cur_staves in enumerate(sections[1:]):
        last_staves = np.sort(np.fromiter(staff_dict.keys(), int))
        dist = np.abs(last_staves[None, :] - cur_staves[:, None])
        did_match = dist.min(axis=1) < staff_dist
        new_matches = dict()
        matching_staves = cur_staves[did_match]
        matches = np.argmin(dist[did_match, :], axis=1)
        matches, idx = np.unique(matches, return_index=True)
        matching_staves = matching_staves[idx]
        for staff_ind, new_point in zip(matches, matching_staves):
            prev_staff = staff_dict[last_staves[staff_ind]]
            new_matches[new_point] = np.concatenate([prev_staff, [new_point]])
        non_matches = cur_staves[~did_match]
        for non_match in non_matches:
            new_matches[non_match] = np.asarray([-non_match] * (i + 1) + [non_match])
        skipped = set(staff_dict.keys()).difference(s[-2] for s in new_matches.values())
        for s in skipped:
            new_matches[s] = np.concatenate([staff_dict[s], [-s]])
        staff_dict = new_matches
    return np.asarray([staff_dict[s] for s in np.sort(np.fromiter(staff_dict.keys(), int))])
Developer: ringw, Project: MetaOMR, Lines: 26, Source: base.py
Note: The numpy.fromiter examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution must follow the corresponding project's license. Do not reproduce without permission.