This article collects typical usage examples of the Python six.moves.zip function. If you have been wondering what exactly zip does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Twenty zip code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
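Before diving in, here is a minimal sketch of what six.moves.zip provides: on Python 2 it resolves to itertools.izip and on Python 3 to the built-in zip, so on both interpreters you get a lazy, one-shot iterator rather than a list. The toy values below are purely for illustration:

from six.moves import zip

pairs = zip([1, 2, 3], ['a', 'b', 'c'])  # lazy iterator on Python 2 and 3 alike
print(list(pairs))  # [(1, 'a'), (2, 'b'), (3, 'c')]
print(list(pairs))  # [] -- the iterator is exhausted after one pass

This one-shot behavior is why several examples below wrap zip(...) in list(...) whenever the result is indexed or traversed more than once.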
Example 1: __to_dp_matrix_mt
def __to_dp_matrix_mt(self, value_matrix):
    from concurrent import futures

    col_data_map = {}
    try:
        with futures.ProcessPoolExecutor(self.max_workers) as executor:
            # one worker task per column: zip(*value_matrix) transposes rows into columns
            future_list = [
                executor.submit(
                    _to_dp_list_helper,
                    self,
                    col_idx,
                    values,
                    self.__get_col_type_hint(col_idx),
                    self.strip_str_value,
                )
                for col_idx, values in enumerate(zip(*value_matrix))
            ]
            for future in futures.as_completed(future_list):
                col_idx, value_dp_list = future.result()
                col_data_map[col_idx] = value_dp_list
    finally:
        logger.debug("shutdown ProcessPoolExecutor: workers={}".format(self.max_workers))
        executor.shutdown()

    # transpose the column-ordered results back into rows
    return list(zip(*[col_data_map[col_idx] for col_idx in sorted(col_data_map)]))
Author: thombashi | Project: DataProperty | Lines: 27 | Source: _extractor.py
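The workhorse idiom in Example 1 is zip(*value_matrix): splatting a row-oriented matrix into zip transposes it into columns, and applying the same trick to the columns transposes it back. A self-contained sketch with made-up data:

from six.moves import zip

rows = [(1, 'a'), (2, 'b'), (3, 'c')]
cols = list(zip(*rows))   # [(1, 2, 3), ('a', 'b', 'c')] -- columns
back = list(zip(*cols))   # [(1, 'a'), (2, 'b'), (3, 'c')] -- rows again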
Example 2: reverse_points_if_backwards
def reverse_points_if_backwards(xy, xy_next):
    """
    This function aligns xy_next so that it is in the same direction as xy.
    Nothing occurs if they are already aligned.

    inputs:
    xy, xy_next - lists of tuples [(x1, y1), (x2, y2) ...]
    xy and xy_next are separated by one timestep.

    The function returns the (possibly reversed) spine and a flag indicating
    whether it was reversed.
    """
    x, y = zip(*xy)
    xnext, ynext = zip(*xy_next)
    xnext_rev = xnext[::-1]
    ynext_rev = ynext[::-1]
    distance_original = 0.
    distance_rev = 0.
    for k in range(len(x)):
        distance_original += (x[k] - xnext[k]) ** 2 + (y[k] - ynext[k]) ** 2
        distance_rev += (x[k] - xnext_rev[k]) ** 2 + (y[k] - ynext_rev[k]) ** 2
    if distance_original > distance_rev:
        #print "reversed", index, distance_rev, distance_original
        newxy = list(zip(xnext_rev, ynext_rev))
        return (newxy, True)
    else:
        #print "ok", index
        return (xy_next, False)
Author: amarallab | Project: waldo | Lines: 28 | Source: create_spine.py
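A hypothetical call to the function above, using two made-up three-point spines where the second is stored back to front:

xy      = [(0, 0), (1, 0), (2, 0)]
xy_next = [(2, 1), (1, 1), (0, 1)]  # same spine shape, opposite point order
new_xy, was_reversed = reverse_points_if_backwards(xy, xy_next)
# was_reversed == True and new_xy == [(0, 1), (1, 1), (2, 1)]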
Example 3: _write_atoms
def _write_atoms(self, atoms):
    self.f.write('\n')
    self.f.write('Atoms\n')
    self.f.write('\n')

    try:
        charges = atoms.charges
    except (NoDataError, AttributeError):
        has_charges = False
    else:
        has_charges = True

    indices = atoms.indices + 1
    types = atoms.types.astype(np.int32)

    if self.convert_units:
        coordinates = self.convert_pos_to_native(atoms.positions, inplace=False)

    if has_charges:
        for index, atype, charge, coords in zip(indices, types, charges,
                                                coordinates):
            self.f.write('{i:d} 0 {t:d} {c:f} {x:f} {y:f} {z:f}\n'.format(
                i=index, t=atype, c=charge, x=coords[0],
                y=coords[1], z=coords[2]))
    else:
        for index, atype, coords in zip(indices, types, coordinates):
            self.f.write('{i:d} 0 {t:d} {x:f} {y:f} {z:f}\n'.format(
                i=index, t=atype, x=coords[0], y=coords[1],
                z=coords[2]))
Author: alejob | Project: mdanalysis | Lines: 29 | Source: LAMMPS.py
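A detail that makes Example 3 work: zip iterates NumPy arrays along their first axis, so zipping the per-atom (N,) index/type/charge arrays with an (N, 3) coordinate array yields one coordinate row per atom. A small sketch with invented values:

import numpy as np
from six.moves import zip

indices = np.array([1, 2])
coords = np.array([[0.0, 0.0, 1.0],
                   [0.5, 0.5, 0.5]])
for i, xyz in zip(indices, coords):
    print(i, xyz[0], xyz[1], xyz[2])  # one (index, coordinate-row) pair per atom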
Example 4: save_weights_to_hdf5_group
def save_weights_to_hdf5_group(f, layers):
    from tensorflow.python.keras._impl.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

    save_attributes_to_hdf5_group(
        f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
    f.attrs['backend'] = K.backend().encode('utf8')
    f.attrs['keras_version'] = str(keras_version).encode('utf8')

    for layer in layers:
        g = f.create_group(layer.name)
        symbolic_weights = layer.weights
        weight_values = K.batch_get_value(symbolic_weights)
        weight_names = []
        for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
            if hasattr(w, 'name') and w.name:
                name = str(w.name)
            else:
                name = 'param_' + str(i)
            weight_names.append(name.encode('utf8'))
        save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
        for name, val in zip(weight_names, weight_values):
            param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
            if not val.shape:
                # scalar
                param_dset[()] = val
            else:
                param_dset[:] = val
Author: Jackiefan | Project: tensorflow | Lines: 27 | Source: saving.py
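The enumerate(zip(...)) pattern in Example 4 walks two parallel sequences while keeping a running index, which supplies the fallback name when a weight is anonymous. A stripped-down sketch (the names and values are invented):

from six.moves import zip

weight_names  = ['kernel', '', 'bias']
weight_values = [1.0, 2.0, 3.0]
for i, (name, val) in enumerate(zip(weight_names, weight_values)):
    label = name if name else 'param_' + str(i)
    print(label, val)  # kernel 1.0, then param_1 2.0, then bias 3.0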
Example 5: staged_predict_proba
def staged_predict_proba(self, X, vote_function=None):
    """
    Predict probabilities at each stage. To get unbiased predictions, you can pass the
    training dataset (with the same order of events) and vote_function=None.

    :param X: pandas.DataFrame of shape [n_samples, n_features]
    :param vote_function: function to combine the predictions of the folds' estimators.
        If None, then self.vote_function is used.
    :type vote_function: None or function

    :return: iterator over numpy.array of shape [n_samples, n_classes] with probabilities
    """
    if vote_function is not None:
        print('Using voting KFold prediction')
        X = self._get_train_features(X)
        iterators = [estimator.staged_predict_proba(X) for estimator in self.estimators]
        for fold_prob in zip(*iterators):
            probabilities = numpy.array(fold_prob)
            yield vote_function(probabilities)
    else:
        print('Default prediction')
        X = self._get_train_features(X)
        folds_column = self._get_folds_column(len(X))
        iterators = [self.estimators[fold].staged_predict_proba(X.iloc[folds_column == fold, :])
                     for fold in range(self.n_folds)]
        for fold_prob in zip(*iterators):
            probabilities = numpy.zeros(shape=(len(X), 2))
            for fold in range(self.n_folds):
                probabilities[folds_column == fold] = fold_prob[fold]
            yield probabilities
Author: Afey | Project: rep | Lines: 30 | Source: folding.py
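The zip(*iterators) call is doing the real work in Example 5: it advances every fold's staged-prediction generator in lockstep, yielding one tuple per stage with one entry per fold. A toy version with plain generators:

from six.moves import zip

def staged(start):
    for step in range(3):
        yield start + step

folds = [staged(0), staged(10)]
for stage_outputs in zip(*folds):
    print(stage_outputs)  # (0, 10), then (1, 11), then (2, 12)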
Example 6: _reduce
def _reduce(results, dataset_out, data_name, dtype, shuffle, rng):
    if len(results) > 0 and (len(data_name) != len(results[0]) or
                             len(dtype) != len(results[0])):
        raise ValueError('Returned [{}] results but only given [{}] name and'
                         ' [{}] dtype'.format(
                             len(results[0]), len(data_name), len(dtype)))
    final = [[] for i in range(len(results[0]))]
    for res in results:
        for i, j in zip(res, final):
            j.append(i)
    final = [np.vstack(i)
             if isinstance(i[0], np.ndarray)
             else np.asarray(reduce(lambda x, y: x + y, i))
             for i in final]
    # shuffle features
    if shuffle > 2:
        permutation = rng.permutation(final[0].shape[0])
        final = [i[permutation] for i in final]
    # save to dataset
    for i, name, dt in zip(final, data_name, dtype):
        shape = i.shape
        dt = np.dtype(dt)
        x = dataset_out.get_data(name, dtype=dt, shape=shape, value=i)
        x.flush()
    return None
Author: trungnt13 | Project: blocks | Lines: 26 | Source: feature_recipes.py
Example 7: make_factor_text
def make_factor_text(factor, name):
    collapse_uniform = True
    if collapse_uniform and ut.almost_allsame(factor.values):
        # Reduce uniform text
        ftext = name + ':\nuniform(%.3f)' % (factor.values[0],)
    else:
        values = factor.values
        try:
            rowstrs = ['p(%s)=%.3f' % (','.join(n), v,)
                       for n, v in zip(zip(*factor.statenames), values)]
        except Exception:
            rowstrs = ['p(%s)=%.3f' % (','.join(n), v,)
                       for n, v in zip(factor._row_labels(False), values)]
        idxs = ut.list_argmaxima(values)
        for idx in idxs:
            rowstrs[idx] += '*'
        thresh = 4
        always_sort = True
        if len(rowstrs) > thresh:
            sortx = factor.values.argsort()[::-1]
            rowstrs = ut.take(rowstrs, sortx[0:(thresh - 1)])
            rowstrs += ['... %d more' % ((len(values) - len(rowstrs)),)]
        elif always_sort:
            sortx = factor.values.argsort()[::-1]
            rowstrs = ut.take(rowstrs, sortx)
        ftext = name + ': \n' + '\n'.join(rowstrs)
    return ftext
Author: heroinlin | Project: ibeis | Lines: 27 | Source: pgm_viz.py
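Example 7 nests zip twice: zip(*factor.statenames) transposes the per-variable state-name lists into joint assignments, and the outer zip pairs each assignment with its probability. With invented names and values:

from six.moves import zip

statenames = [('A', 'B'), ('x', 'y')]  # one tuple of states per variable
values = [0.25, 0.75]
rows = ['p(%s)=%.3f' % (','.join(n), v)
        for n, v in zip(zip(*statenames), values)]
# rows == ['p(A,x)=0.250', 'p(B,y)=0.750']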
Example 8: append
def append(self, *arrays):
    if self.read_only:
        raise RuntimeError("This Data is set in read-only mode")
    accepted_arrays = []
    add_size = 0
    # ====== check if shape[1:] matching ====== #
    for a, d in zip(arrays, self._data):
        if hasattr(a, 'shape'):
            if a.shape[1:] == d.shape[1:]:
                accepted_arrays.append(a)
                add_size += a.shape[0]
            else:
                accepted_arrays.append(None)
    # ====== resize ====== #
    old_size = self.__len__()
    # special case, Mmap is init with temporary size = 1 (all zeros),
    # NOTE: risky to calculate sum of big array here
    if old_size == 1 and \
            sum(np.sum(np.abs(d[:])) for d in self._data) == 0.:
        old_size = 0
    # resize and append data
    self.resize(old_size + add_size)  # resize only once will be faster
    # ====== update values ====== #
    for a, d in zip(accepted_arrays, self._data):
        if a is not None:
            d[old_size:old_size + a.shape[0]] = a
    return self
Author: imito | Project: odin | Lines: 27 | Source: data.py
Example 9: test_format_1_converter
def test_format_1_converter(self):
    filename = os.path.join(self.tempdir, 'svhn_format_1.hdf5')
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    subparser = subparsers.add_parser('svhn')
    svhn.fill_subparser(subparser)
    subparser.set_defaults(directory=self.tempdir, output_file=filename)
    args = parser.parse_args(['svhn', '1'])
    args_dict = vars(args)
    func = args_dict.pop('func')
    func(**args_dict)
    h5file = h5py.File(filename, mode='r')
    expected_features = sum((self.f1_mock[split]['image']
                             for split in ('train', 'test', 'extra')), [])
    for val, truth in zip(h5file['features'][...], expected_features):
        assert_equal(val, truth.transpose(2, 0, 1).flatten())
    expected_labels = sum((self.f1_mock[split]['label']
                           for split in ('train', 'test', 'extra')), [])
    for val, truth in zip(h5file['bbox_labels'][...], expected_labels):
        truth[truth == 10] = 0
        assert_equal(val, truth)
    expected_lefts = sum((self.f1_mock[split]['left']
                          for split in ('train', 'test', 'extra')), [])
    for val, truth in zip(h5file['bbox_lefts'][...], expected_lefts):
        assert_equal(val, truth)
Author: mohseniaref | Project: fuel | Lines: 28 | Source: test_converters.py
Example 10: write_card
def write_card(self, size=8, is_double=False):
    msg = '\n$' + '-' * 80
    msg += '\n$ %s Matrix %s\n' % ('DMI', self.name)
    list_fields = ['DMI', self.name, 0, self.form, self.tin,
                   self.tout, None, self.nRows, self.nCols]
    if size == 8:
        msg += print_card_8(list_fields)
    #elif is_double:
        #msg += print_card_double(list_fields)
    else:
        msg += print_card_16(list_fields)
        #msg += self.print_card(list_fields, size=16, isD=False)

    if self.is_complex():
        for (gci, gcj, reali, imagi) in zip(self.GCi, self.GCj, self.Real, self.Complex):
            list_fields = ['DMI', self.name, gcj, gci, reali, imagi]
            if size == 8:
                msg += print_card_8(list_fields)
            elif is_double:
                msg += print_card_double(list_fields)
            else:
                msg += print_card_16(list_fields)
    else:
        for (gci, gcj, reali) in zip(self.GCi, self.GCj, self.Real):
            list_fields = ['DMI', self.name, gcj, gci, reali]
            if size == 8:
                msg += print_card_8(list_fields)
            elif is_double:
                msg += print_card_double(list_fields)
            else:
                msg += print_card_16(list_fields)
    return msg
Author: ClaesFredo | Project: pyNastran | Lines: 32 | Source: dmig.py
Example 11: _write_sort1_as_sort2
def _write_sort1_as_sort2(self, f, page_num, page_stamp, header, words):
    element = self.element
    element_type = self.element_data_type
    times = self._times
    node_id = 0  ## TODO: fix the node id
    for inode, (eid, etypei) in enumerate(zip(element, element_type)):
        t1 = self.data[:, inode, 0].ravel()
        t2 = self.data[:, inode, 1].ravel()
        t3 = self.data[:, inode, 2].ravel()
        r1 = self.data[:, inode, 3].ravel()
        r2 = self.data[:, inode, 4].ravel()
        r3 = self.data[:, inode, 5].ravel()

        header[1] = ' POINT-ID = %10i\n' % node_id
        f.write(''.join(header + words))
        for dt, t1i, t2i, t3i, r1i, r2i, r3i in zip(times, t1, t2, t3, r1, r2, r3):
            vals = [t1i, t2i, t3i, r1i, r2i, r3i]
            vals2 = write_floats_13e(vals)
            (dx, dy, dz, rx, ry, rz) = vals2
            f.write('%14s %6s %-13s %-13s %-13s %-13s %-13s %s\n' % (
                write_float_12E(dt), etypei, dx, dy, dz, rx, ry, rz))
        f.write(page_stamp % page_num)
        page_num += 1
    return page_num
Author: EmanueleCannizzaro | Project: pyNastran | Lines: 25 | Source: op2_result_element_table_object.py
Example 12: detect_gid_list
def detect_gid_list(ibs, gid_list, tree_path_list, downsample=True, **kwargs):
    """
    Args:
        gid_list (list of int): the list of IBEIS image_rowids that need detection
        tree_path_list (list of str): the list of trees to load for detection
        downsample (bool, optional): a flag to indicate if the original image
            sizes should be used; defaults to True
            True:  ibs.get_image_detectpaths() is used
            False: ibs.get_image_paths() is used

    Kwargs (optional): refer to the PyRF documentation for configuration settings

    Yields:
        results (list of dict)
    """
    # Get new gpaths if downsampling
    if downsample:
        gpath_list = ibs.get_image_detectpaths(gid_list)
        neww_list = [vt.open_image_size(gpath)[0] for gpath in gpath_list]
        oldw_list = [oldw for (oldw, oldh) in ibs.get_image_sizes(gid_list)]
        downsample_list = [oldw / neww for oldw, neww in zip(oldw_list, neww_list)]
    else:
        gpath_list = ibs.get_image_paths(gid_list)
        downsample_list = [None] * len(gpath_list)
    # Run detection
    results_iter = detect(ibs, gpath_list, tree_path_list, **kwargs)
    # Upscale the results
    for gid, downsample, (gpath, result_list) in zip(gid_list, downsample_list, results_iter):
        # Upscale the results back up to the original image size
        if downsample is not None and downsample != 1.0:
            for result in result_list:
                for key in ["centerx", "centery", "xtl", "ytl", "width", "height"]:
                    result[key] = int(result[key] * downsample)
        yield gid, gpath, result_list
Author: Erotemic | Project: ibeis | Lines: 35 | Source: randomforest.py
Example 13: reorder
def reorder(outcomes, pmf, sample_space, index=None):
    """
    Helper function to reorder outcomes and pmf to match sample_space.
    """
    try:
        order = [(sample_space.index(outcome), i)
                 for i, outcome in enumerate(outcomes)]
    except ValueError:
        # Let's identify which outcomes were not in the sample space.
        bad = []
        for outcome in outcomes:
            try:
                sample_space.index(outcome)
            except ValueError:
                bad.append(outcome)
        if len(bad) == 1:
            single = True
        else:
            single = False
        raise InvalidOutcome(bad, single=single)

    order.sort()
    _, order = zip(*order)

    if index is None:
        index = dict(zip(outcomes, range(len(outcomes))))

    outcomes = [outcomes[i] for i in order]
    pmf = [pmf[i] for i in order]
    new_index = dict(zip(outcomes, range(len(outcomes))))
    return outcomes, pmf, new_index
Author: chebee7i | Project: dit | Lines: 32 | Source: helpers.py
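Example 13 packs in two more zip idioms worth knowing: zip(*pairs) "unzips" a list of tuples back into parallel tuples, and dict(zip(keys, range(len(keys)))) builds a value-to-position index. In isolation:

from six.moves import zip

order = [(2, 0), (0, 1), (1, 2)]   # (position in sample space, original index)
order.sort()
_, positions = zip(*order)         # positions == (1, 2, 0)

outcomes = ['a', 'b', 'c']
index = dict(zip(outcomes, range(len(outcomes))))  # {'a': 0, 'b': 1, 'c': 2}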
Example 14: pr_dict
def pr_dict(dbpr):
    d = {dk: getattr(dbpr, sk) for sk, dk in zip(SRC_PR_KEYS, PR_KEYS)}
    dd = {dk: getattr(dbpr, sk) for sk, dk in zip(('name',), ('Label',))}
    d.update(dd)
    return d
Author: NMGRL | Project: pychron | Lines: 7 | Source: mass_spec_irradiation_exporter.py
Example 15: setup_plan
def setup_plan(plan):
    """Sets up a TensorFlow Fold plan for MNIST.

    The inputs are 28 x 28 images represented as 784-dimensional float32
    vectors (scaled to [0, 1]) and categorical digit labels in [0, 9].

    The training loss is softmax cross-entropy. There is only one
    metric, accuracy. In inference mode, the output is a class label.

    Dropout is applied before every layer (including on the inputs).

    Args:
      plan: A TensorFlow Fold plan to set up.
    """
    # Convert the input NumPy array into a tensor.
    model_block = td.Vector(INPUT_LENGTH)

    # Create a placeholder for dropout, if we are in train mode.
    keep_prob = (tf.placeholder_with_default(1.0, [], name='keep_prob')
                 if plan.mode == plan.mode_keys.TRAIN else None)

    # Add the fully connected hidden layers.
    for _ in xrange(FLAGS.num_layers):
        model_block >>= td.FC(FLAGS.num_units, input_keep_prob=keep_prob)

    # Add the linear output layer.
    model_block >>= td.FC(NUM_LABELS, activation=None, input_keep_prob=keep_prob)

    if plan.mode == plan.mode_keys.INFER:
        # In inference mode, we run the model directly on images.
        plan.compiler = td.Compiler.create(model_block)
        logits, = plan.compiler.output_tensors
    else:
        # In training/eval mode, we run the model on (image, label) pairs.
        plan.compiler = td.Compiler.create(
            td.Record((model_block, td.Scalar(tf.int64))))
        logits, y_ = plan.compiler.output_tensors

    y = tf.argmax(logits, 1)  # create the predicted output tensor

    datasets = tf.contrib.learn.datasets.mnist.load_mnist(FLAGS.logdir_base)
    if plan.mode == plan.mode_keys.INFER:
        plan.examples = datasets.test.images
        plan.outputs = [y]
    else:
        # Create loss and accuracy tensors, and add them to the plan.
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=y_)
        plan.losses['cross_entropy'] = loss
        accuracy = tf.reduce_mean(tf.cast(tf.equal(y, y_), tf.float32))
        plan.metrics['accuracy'] = accuracy
        if plan.mode == plan.mode_keys.TRAIN:
            plan.examples = zip(datasets.train.images, datasets.train.labels)
            plan.dev_examples = zip(datasets.validation.images,
                                    datasets.validation.labels)
            # Turn dropout on for training, off for validation.
            plan.train_feeds[keep_prob] = FLAGS.keep_prob
        else:
            assert plan.mode == plan.mode_keys.EVAL
            plan.examples = zip(datasets.test.images, datasets.test.labels)
Author: wangbosdqd | Project: fold | Lines: 60 | Source: mnist.py
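One caveat about Example 15: because six.moves.zip returns a one-shot iterator, plan.examples = zip(...) hands the Fold plan an iterable that can be traversed only once. Whether that is safe depends on how the plan consumes examples; if it needed multiple passes over the same data, materializing the pairs would be the defensive variant (a sketch, not what the original authors wrote):

plan.examples = list(zip(datasets.train.images, datasets.train.labels))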
Example 16: test_get_strain_state_dict
def test_get_strain_state_dict(self):
    strain_inds = [(0,), (1,), (2,), (1, 3), (1, 2, 3)]
    vecs = {}
    strain_states = []
    for strain_ind in strain_inds:
        ss = np.zeros(6)
        np.put(ss, strain_ind, 1)
        strain_states.append(tuple(ss))
        vec = np.zeros((4, 6))
        rand_values = np.random.uniform(0.1, 1, 4)
        for i in strain_ind:
            vec[:, i] = rand_values
        vecs[strain_ind] = vec
    all_strains = [Strain.from_voigt(v).zeroed() for vec in vecs.values()
                   for v in vec]
    random.shuffle(all_strains)
    all_stresses = [Stress.from_voigt(np.random.random(6)).zeroed()
                    for s in all_strains]
    strain_dict = {k.tostring(): v for k, v in zip(all_strains, all_stresses)}
    ss_dict = get_strain_state_dict(all_strains, all_stresses, add_eq=False)
    # Check length of ss_dict
    self.assertEqual(len(strain_inds), len(ss_dict))
    # Check sets of strain states are correct
    self.assertEqual(set(strain_states), set(ss_dict.keys()))
    for strain_state, data in ss_dict.items():
        # Check correspondence of strains/stresses
        for strain, stress in zip(data["strains"], data["stresses"]):
            self.assertArrayAlmostEqual(Stress.from_voigt(stress),
                                        strain_dict[Strain.from_voigt(strain).tostring()])
Author: czhengsci | Project: pymatgen | Lines: 29 | Source: test_elastic.py
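The strain_dict line in Example 16 is the dict-comprehension cousin of dict(zip(keys, values)), handy when the key must be transformed on the way in; here each strain array is serialized so it becomes hashable. A toy sketch with invented arrays (the original uses ndarray.tostring(), which modern NumPy spells tobytes()):

import numpy as np
from six.moves import zip

keys = [np.array([1, 0]), np.array([0, 1])]
vals = ['first', 'second']
lookup = {k.tobytes(): v for k, v in zip(keys, vals)}
print(lookup[np.array([1, 0]).tobytes()])  # 'first'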
Example 17: raw_method
def raw_method(self):
    list_fields = []
    if self.method in ['HESS', 'INV']:
        for (alphaA, omegaA, alphaB, omegaB, Lj, NEj, NDj) in zip(
                self.alphaAjs, self.omegaAjs, self.alphaBjs, self.omegaBjs,
                self.LJs, self.NEJs, self.NDJs):
            alphaA = set_blank_if_default(alphaA, 0.0)
            omegaA = set_blank_if_default(omegaA, 0.0)
            alphaB = set_blank_if_default(alphaB, 0.0)
            omegaB = set_blank_if_default(omegaB, 0.0)
            list_fields += [alphaA, omegaA, alphaB, omegaB, Lj, NEj, NDj, None]
    elif self.method == 'CLAN':
        assert len(self.alphaAjs) == len(self.omegaAjs)
        assert len(self.alphaAjs) == len(self.mblkszs)
        assert len(self.alphaAjs) == len(self.iblkszs)
        assert len(self.alphaAjs) == len(self.ksteps)
        assert len(self.alphaAjs) == len(self.NJIs)
        for (alphaA, omegaA, mblksz, iblksz, kstep, Nj) in zip(
                self.alphaAjs, self.omegaAjs, self.mblkszs, self.iblkszs,
                self.ksteps, self.NJIs):
            alphaA = set_blank_if_default(alphaA, 0.0)
            omegaA = set_blank_if_default(omegaA, 0.0)
            mblksz = set_blank_if_default(mblksz, 7)
            iblksz = set_blank_if_default(iblksz, 2)
            kstep = set_blank_if_default(kstep, 5)
            list_fields += [alphaA, omegaA, mblksz, iblksz,
                            kstep, None, Nj, None]
    else:
        msg = 'invalid EIGC method...method=%r' % self.method
        raise RuntimeError(msg)
    return list_fields
Author: EmanueleCannizzaro | Project: pyNastran | Lines: 33 | Source: methods.py
Example 18: sort_together
def sort_together(iterables, key_list=(0,), reverse=False):
    """Return the input iterables sorted together, with *key_list* as the
    priority for sorting. All iterables are trimmed to the length of the
    shortest one.

    This can be used like the sorting function in a spreadsheet. If each
    iterable represents a column of data, the key list determines which
    columns are used for sorting.

    By default, all iterables are sorted using the ``0``-th iterable::

        >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
        >>> sort_together(iterables)
        [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]

    Set a different key list to sort according to another iterable.
    Specifying multiple keys dictates how ties are broken::

        >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
        >>> sort_together(iterables, key_list=(1, 2))
        [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]

    Set *reverse* to ``True`` to sort in descending order.

        >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
        [(3, 2, 1), ('a', 'b', 'c')]

    """
    return list(zip(*sorted(zip(*iterables),
                            key=itemgetter(*key_list),
                            reverse=reverse)))
Author: jtviegas | Project: codepit | Lines: 31 | Source: more.py
Example 19: compile_gem
def compile_gem(return_variables, expressions, prefix_ordering, remove_zeros=False):
    """Compiles GEM to Impero.

    :arg return_variables: return variables for each root (type: GEM expressions)
    :arg expressions: multi-root expression DAG (type: GEM expressions)
    :arg prefix_ordering: outermost loop indices
    :arg remove_zeros: remove zero assignment to return variables
    """
    expressions = optimise.remove_componenttensors(expressions)

    # Remove zeros
    if remove_zeros:
        rv = []
        es = []
        for var, expr in zip(return_variables, expressions):
            if not isinstance(expr, gem.Zero):
                rv.append(var)
                es.append(expr)
        return_variables, expressions = rv, es

    # Collect indices in a deterministic order
    indices = OrderedSet()
    for node in traversal(expressions):
        if isinstance(node, gem.Indexed):
            for index in node.multiindex:
                if isinstance(index, gem.Index):
                    indices.add(index)
        elif isinstance(node, gem.FlexiblyIndexed):
            for offset, idxs in node.dim2idxs:
                for index, stride in idxs:
                    if isinstance(index, gem.Index):
                        indices.add(index)

    # Build ordered index map
    index_ordering = make_prefix_ordering(indices, prefix_ordering)
    apply_ordering = make_index_orderer(index_ordering)
    get_indices = lambda expr: apply_ordering(expr.free_indices)

    # Build operation ordering
    ops = scheduling.emit_operations(list(zip(return_variables, expressions)), get_indices)

    # Empty kernel
    if len(ops) == 0:
        raise NoopError()

    # Drop unnecessary temporaries
    ops = inline_temporaries(expressions, ops)

    # Build Impero AST
    tree = make_loop_tree(ops, get_indices)

    # Collect temporaries
    temporaries = collect_temporaries(ops)

    # Determine declarations
    declare, indices = place_declarations(ops, tree, temporaries, get_indices)

    # Prepare ImperoC (Impero AST + other data for code generation)
    return ImperoC(tree, temporaries, declare, indices)
Author: firedrakeproject | Project: tsfc | Lines: 60 | Source: impero_utils.py
Example 20: evaluate_srl_classify
def evaluate_srl_classify(no_repeat=False, gold_file=None):
    """Evaluates the performance of the network on the SRL classifying task."""
    # load data
    md = Metadata.load_from_file('srl_classify')
    nn = taggers.load_network(md)
    r = taggers.create_reader(md, gold_file)
    r.create_converter()
    r.codify_sentences()

    hits = 0
    total_args = 0

    for sentence, tags, predicates, args in zip(r.sentences, r.tags,
                                                r.predicates, r.arg_limits):
        # the answer includes all predicates
        answer = nn.tag_sentence(sentence, predicates, args,
                                 allow_repeats=not no_repeat)
        for pred_answer, pred_gold in zip(answer, tags):
            for net_tag, gold_tag in zip(pred_answer, pred_gold):
                if net_tag == gold_tag:
                    hits += 1
            total_args += len(pred_gold)

    print('Accuracy: %f' % (float(hits) / total_args))
Author: erickrf | Project: nlpnet | Lines: 28 | Source: nlpnet-test.py
Note: the six.moves.zip examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's License before distributing or reusing the code. Do not republish without permission.