This article collects typical usage examples of the six.itervalues function in Python. If you have been wondering what itervalues does, how to call it, or what real-world uses of it look like, the curated examples below may help.
A total of 20 code examples of itervalues are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
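Before the project examples, here is a minimal sketch of the basic pattern (this snippet is illustrative and not taken from any of the projects below): six.itervalues(d) returns an iterator over a dict's values, mapping to d.itervalues() on Python 2 and iter(d.values()) on Python 3, so the same loop runs unchanged on both.

import six

totals = {'a': 1, 'b': 2, 'c': 3}

# Iterate lazily over the values; no intermediate list is built on either
# Python 2 or Python 3.
for value in six.itervalues(totals):
    print(value)

# The values can also be fed straight into an aggregate, as several of the
# examples below do.
print(sum(six.itervalues(totals)))  # prints 6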
Example 1: _cross_reference_materials
def _cross_reference_materials(self):
    """
    Links the materials to materials (e.g. MAT1, CREEP)
    often this is a pass statement
    """
    for mat in itervalues(self.materials):  # MAT1
        try:
            mat.cross_reference(self)
        except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
            self._ixref_errors += 1
            var = traceback.format_exception_only(type(e), e)
            self._stored_xref_errors.append((mat, var))
            if self._ixref_errors > self._nxref_errors:
                self.pop_xref_errors()

    # CREEP - depends on MAT1
    data = [self.MATS1, self.MATS3, self.MATS8,
            self.MATT1, self.MATT2, self.MATT3, self.MATT4, self.MATT5,
            self.MATT8, self.MATT9]
    for material_deps in data:
        for mat in itervalues(material_deps):
            try:
                mat.cross_reference(self)
            except (SyntaxError, RuntimeError, AssertionError, KeyError, ValueError) as e:
                self._ixref_errors += 1
                var = traceback.format_exception_only(type(e), e)
                self._stored_xref_errors.append((mat, var))
                if self._ixref_errors > self._nxref_errors:
                    self.pop_xref_errors()
Developer: hurlei | Project: pyNastran | Lines: 29 | Source: cross_reference.py
Example 2: _convert_aero
def _convert_aero(model, xyz_scale, time_scale, weight_scale):
    """
    Converts the aero cards
      - CAEROx, PAEROx, SPLINEx, AECOMP, AELIST, AEPARAM, AESTAT, AESURF, AESURFS
    """
    area_scale = xyz_scale ** 2
    velocity_scale = xyz_scale / time_scale
    pressure_scale = weight_scale / xyz_scale ** 2
    density_scale = weight_scale / xyz_scale ** 3

    for aero in itervalues(model.aero):
        #if hasattr(model, 'aero'):
        #aero = model.aero
        print(aero.object_attributes())
        aero.refc *= xyz_scale
        aero.refb *= xyz_scale
        aero.sref *= area_scale
        aero.velocity *= velocity_scale
        assert np.allclose(aero.density, 1.0), aero
    for aeros in itervalues(model.aeros):
        #print(aeros)
        #print(aeros.object_attributes())
        aeros.cref *= xyz_scale
        aeros.bref *= xyz_scale
        aeros.sref *= area_scale

    for caero in itervalues(model.caeros):
        if caero.type in ['CAERO1']:
            caero.p1 *= xyz_scale
            caero.p4 *= xyz_scale
            caero.x12 *= xyz_scale
            caero.x43 *= xyz_scale
        else:
            raise NotImplementedError(caero)
    #for paero in itervalues(model.paeros):
        #paero.cross_reference(model)
    for trim in itervalues(model.trims):
        trim.q *= pressure_scale
    #for spline in itervalues(model.splines):
        #spline.convert(model)
    #for aecomp in itervalues(model.aecomps):
        #aecomp.cross_reference(model)
    #for aelist in itervalues(model.aelists):
        #aelist.cross_reference(model)
    #for aeparam in itervalues(model.aeparams):
        #aeparam.cross_reference(model)
    #for aestat in itervalues(model.aestats):
        #aestat.cross_reference(model)
    #for aesurf in itervalues(model.aesurf):
        #aesurf.cross_reference(model)
    #for aesurfs in itervalues(model.aesurfs):
        #aesurfs.cross_reference(model)

    # update only the FLFACTs corresponding to density
    flfact_ids = set([])
    for flutter in itervalues(model.flutters):
        flfact = flutter.density
        flfact_ids.add(flfact.sid)
    for flfact_id in flfact_ids:  # density
        flfact = model.flfacts[flfact_id]
        flfact.factors *= density_scale
Developer: marcinch18 | Project: pyNastran | Lines: 60 | Source: convert.py
Example 3: _setup_communicators
def _setup_communicators(self, comm, parent_dir):
    """
    Assign communicator to this `Component`.

    Args
    ----
    comm : an MPI communicator (real or fake)
        The communicator being offered by the parent system.

    parent_dir : str
        The absolute directory of the parent, or '' if unspecified. Used to
        determine the absolute directory of all FileRefs.
    """
    super(Component, self)._setup_communicators(comm, parent_dir)

    # set absolute directories of any FileRefs
    for meta in chain(itervalues(self._init_unknowns_dict),
                      itervalues(self._init_params_dict)):
        val = meta['val']
        #if var is a FileRef, set its absolute directory
        if isinstance(val, FileRef):
            self._fileref_setup(val)

    if not self.is_active():
        for meta in itervalues(self._init_params_dict):
            meta['remote'] = True
        for meta in itervalues(self._init_unknowns_dict):
            meta['remote'] = True
Developer: kiranhegde | Project: OpenMDAO | Lines: 29 | Source: component.py
Example 4: __init__
def __init__(self, wf_dict, gi=None):
    super(Workflow, self).__init__(wf_dict, gi=gi)
    missing_ids = []
    if gi:
        tools_list_by_id = [t.id for t in gi.tools.get_previews()]
    else:
        tools_list_by_id = []
    for k, v in six.iteritems(self.steps):
        # convert step ids to str for consistency with outer keys
        v['id'] = str(v['id'])
        for i in six.itervalues(v['input_steps']):
            i['source_step'] = str(i['source_step'])
        step = self._build_step(v, self)
        self.steps[k] = step
        if step.type == 'tool':
            if not step.tool_inputs or step.tool_id not in tools_list_by_id:
                missing_ids.append(k)
    input_labels_to_ids = {}
    for id_, d in six.iteritems(self.inputs):
        input_labels_to_ids.setdefault(d['label'], set()).add(id_)
    tool_labels_to_ids = {}
    for s in six.itervalues(self.steps):
        if s.type == 'tool':
            tool_labels_to_ids.setdefault(s.tool_id, set()).add(s.id)
    object.__setattr__(self, 'input_labels_to_ids', input_labels_to_ids)
    object.__setattr__(self, 'tool_labels_to_ids', tool_labels_to_ids)
    dag, inv_dag = self._get_dag()
    heads, tails = set(dag), set(inv_dag)
    object.__setattr__(self, 'dag', dag)
    object.__setattr__(self, 'inv_dag', inv_dag)
    object.__setattr__(self, 'source_ids', heads - tails)
    assert self.data_input_ids == set(self.inputs)
    object.__setattr__(self, 'sink_ids', tails - heads)
    object.__setattr__(self, 'missing_ids', missing_ids)
Developer: gmauro | Project: bioblend | Lines: 34 | Source: wrappers.py
Example 5: test_multiple_fonts
def test_multiple_fonts(self):
    vera = os.path.join(os.path.dirname(__file__), "..", "fonts", "Vera.ttf")
    __current_test_mode_setting = settings.CAPTCHA_FONT_PATH
    settings.CAPTCHA_FONT_PATH = vera

    for key in [store.hashkey for store in six.itervalues(self.stores)]:
        response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response._headers.get("content-type"), ("Content-Type", "image/png"))

    settings.CAPTCHA_FONT_PATH = [vera, vera, vera]
    for key in [store.hashkey for store in six.itervalues(self.stores)]:
        response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response._headers.get("content-type"), ("Content-Type", "image/png"))

    settings.CAPTCHA_FONT_PATH = False
    for key in [store.hashkey for store in six.itervalues(self.stores)]:
        try:
            response = self.client.get(reverse("captcha-image", kwargs=dict(key=key)))
            self.fail()
        except ImproperlyConfigured:
            pass

    settings.CAPTCHA_FONT_PATH = __current_test_mode_setting
Developer: nidhibarthwal1 | Project: django-simple-captcha | Lines: 25 | Source: tests.py
Example 6: mergedirs
def mergedirs(listing):
    # type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
    r = []  # type: List[Dict[Text, Any]]
    ents = {}  # type: Dict[Text, Any]
    collided = set()  # type: Set[Text]
    for e in listing:
        if e["basename"] not in ents:
            ents[e["basename"]] = e
        elif e["class"] == "Directory":
            if e.get("listing"):
                ents[e["basename"]].setdefault("listing", []).extend(e["listing"])
            if ents[e["basename"]]["location"].startswith("_:"):
                ents[e["basename"]]["location"] = e["location"]
        elif e["location"] != ents[e["basename"]]["location"]:
            # same basename, different location, collision,
            # rename both.
            collided.add(e["basename"])
            e2 = ents[e["basename"]]

            e["basename"] = urllib.parse.quote(e["location"], safe="")
            e2["basename"] = urllib.parse.quote(e2["location"], safe="")

            e["nameroot"], e["nameext"] = os.path.splitext(e["basename"])
            e2["nameroot"], e2["nameext"] = os.path.splitext(e2["basename"])

            ents[e["basename"]] = e
            ents[e2["basename"]] = e2
    for c in collided:
        del ents[c]
    for e in itervalues(ents):
        if e["class"] == "Directory" and "listing" in e:
            e["listing"] = mergedirs(e["listing"])
    r.extend(itervalues(ents))
    return r
Developer: pvanheus | Project: cwltool | Lines: 34 | Source: process.py
Example 7: start
def start(self, register=True):
    self.running = True
    logger.info('starting %s at %s (pid=%s)', ', '.join(self.service_types), self.endpoint, os.getpid())

    self.recv_loop_greenlet = self.spawn(self.recv_loop)
    self.monitor.start()

    self.service_registry.on_start()
    self.event_system.on_start()

    for service in six.itervalues(self.installed_services):
        service.on_start()
        service.configure({})

    if register:
        for service_type, service in six.iteritems(self.installed_services):
            if not service.register_with_coordinator:
                continue
            try:
                self.service_registry.register(self, service_type)
            except RegistrationFailure:
                logger.info("registration failed %s, %s", service_type, service)
                self.stop()

    for interface in six.itervalues(self.installed_services):
        for pattern, handler in type(interface).event_dispatcher:
            self.subscribe(pattern)
Developer: castillo | Project: lymph | Lines: 25 | Source: container.py
Example 8: get_signals_to_object
def get_signals_to_object(self, sink_object):
    """Get the signals received by a sink object.

    Returns
    -------
    {port : [ReceptionSpec, ...], ...}
        Dictionary mapping ports to the lists of objects specifying
        incoming signals.
    """
    signals = collections.defaultdict(list)

    # For all connections we have reference to identify those which
    # terminate at the given object. For those that do add a new entry to
    # the signal dictionary.
    params_and_sinks = chain(*chain(*(itervalues(x) for x in
                                      itervalues(self._connections))))
    for param_and_sinks in params_and_sinks:
        # tp_sinks are pairs of transmission parameters and sinks
        # Extract the transmission parameters
        sig_params, _ = param_and_sinks.parameters

        # For each sink, if the sink object is the specified object
        # then add signal to the list.
        for sink in param_and_sinks.sinks:
            if sink.sink_object is sink_object:
                # This is the desired sink object, so remember the
                # signal. First construction the reception
                # specification.
                signals[sink.port].append(
                    ReceptionSpec(sig_params, sink.reception_parameters)
                )

    return signals
Developer: mahmoodalmansooei | Project: nengo_spinnaker | Lines: 33 | Source: model.py
Example 9: build_update
def build_update(self):
    """
    Simulate Langevin dynamics using a discretized integrator. Its
    discretization error goes to zero as the learning rate decreases.
    """
    old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                  for z, qz in six.iteritems(self.latent_vars)}

    # Simulate Langevin dynamics.
    learning_rate = self.step_size / tf.cast(self.t + 1, tf.float32)
    grad_log_joint = tf.gradients(self._log_joint(old_sample),
                                  list(six.itervalues(old_sample)))
    sample = {}
    for z, qz, grad_log_p in \
            zip(six.iterkeys(self.latent_vars),
                six.itervalues(self.latent_vars),
                grad_log_joint):
        event_shape = qz.get_event_shape()
        normal = Normal(mu=tf.zeros(event_shape),
                        sigma=learning_rate * tf.ones(event_shape))
        sample[z] = old_sample[z] + 0.5 * learning_rate * grad_log_p + \
            normal.sample()

    # Update Empirical random variables.
    assign_ops = []
    variables = {x.name: x for x in
                 tf.get_default_graph().get_collection(tf.GraphKeys.VARIABLES)}
    for z, qz in six.iteritems(self.latent_vars):
        variable = variables[qz.params.op.inputs[0].op.inputs[0].name]
        assign_ops.append(tf.scatter_update(variable, self.t, sample[z]))

    # Increment n_accept.
    assign_ops.append(self.n_accept.assign_add(1))
    return tf.group(*assign_ops)
Developer: blei-lab | Project: edward | Lines: 34 | Source: sgld.py
Example 10: _Net_batch
def _Net_batch(self, blobs):
    """
    Batch blob lists according to net's batch size.

    Take
    blobs: Keys blob names and values are lists of blobs (of any length).
           Naturally, all the lists should have the same length.

    Give (yield)
    batch: {blob name: list of blobs} dict for a single batch.
    """
    num = len(six.next(six.itervalues(blobs)))
    batch_size = six.next(six.itervalues(self.blobs)).num
    remainder = num % batch_size
    num_batches = num // batch_size

    # Yield full batches.
    for b in range(num_batches):
        i = b * batch_size
        yield {name: blobs[name][i:i + batch_size] for name in blobs}

    # Yield last padded batch, if any.
    if remainder > 0:
        padded_batch = {}
        for name in blobs:
            padding = np.zeros((batch_size - remainder,)
                               + blobs[name].shape[1:])
            padded_batch[name] = np.concatenate([blobs[name][-remainder:],
                                                 padding])
        yield padded_batch
Developer: mickaelmaillard | Project: caffe | Lines: 30 | Source: pycaffe.py
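A small idiom worth calling out in Example 10 (and again in Example 12) is six.next(six.itervalues(blobs)): it peeks at one arbitrary value of a dict, which is handy when every value is known to have the same length or shape. A minimal sketch with made-up data follows; the variable names are illustrative and not taken from the caffe source.

import six

blobs = {'data': [0.1, 0.2, 0.3, 0.4], 'label': [0, 1, 0, 1]}

# All lists are assumed to have the same length, so peeking at any one of
# them is enough to find the number of items.
num = len(six.next(six.itervalues(blobs)))
print(num)  # prints 4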
Example 11: test_pre_fit
def test_pre_fit():
    y0 = synthetic_spectrum()
    x0 = np.arange(len(y0))
    # the following items should appear
    item_list = ['Ar_K', 'Fe_K', 'compton', 'elastic']

    param = get_para()

    # fit without weights
    x, y_total, area_v = linear_spectrum_fitting(x0, y0, param, weights=None)
    for v in item_list:
        assert_true(v in y_total)
    sum1 = np.sum(six.itervalues(y_total))
    # r squares as a measurement
    r1 = 1 - np.sum((sum1 - y0)**2) / np.sum((y0 - np.mean(y0))**2)
    assert_true(r1 > 0.85)

    # fit with weights
    w = 1 / np.sqrt(y0)
    x, y_total, area_v = linear_spectrum_fitting(x0, y0, param, weights=1/np.sqrt(y0))
    for v in item_list:
        assert_true(v in y_total)
    sum2 = np.sum(six.itervalues(y_total))
    # r squares as a measurement
    r2 = 1 - np.sum((sum2 - y0)**2) / np.sum((y0 - np.mean(y0))**2)
    assert_true(r2 > 0.85)
Developer: iTerminate | Project: scikit-xray | Lines: 26 | Source: test_xrf_fit.py
Example 12: _Net_forward_all
def _Net_forward_all(self, blobs=None, **kwargs):
    """
    Run net forward in batches.

    Take
    blobs: list of blobs to extract as in forward()
    kwargs: Keys are input blob names and values are blob ndarrays.
            Refer to forward().

    Give
    all_outs: {blob name: list of blobs} dict.
    """
    # Collect outputs from batches
    all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
    for batch in self._batch(kwargs):
        outs = self.forward(blobs=blobs, **batch)
        for out, out_blob in six.iteritems(outs):
            all_outs[out].extend(out_blob.copy())
    # Package in ndarray.
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])
    # Discard padding.
    pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
    if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]
    return all_outs
Developer: mickaelmaillard | Project: caffe | Lines: 27 | Source: pycaffe.py
Example 13: __init__
def __init__(self, history_specs, initial_sids, initial_dt):
    # History specs to be served by this container.
    self.history_specs = history_specs
    self.frequency_groups = \
        group_by_frequency(itervalues(self.history_specs))

    # The set of fields specified by all history specs
    self.fields = set(spec.field for spec in itervalues(history_specs))

    # This panel contains raw minutes for periods that haven't been fully
    # completed. When a frequency period rolls over, these minutes are
    # digested using some sort of aggregation call on the panel (e.g. `sum`
    # for volume, `max` for high, `min` for low, etc.).
    self.buffer_panel = self.create_buffer_panel(
        initial_sids,
        initial_dt,
    )

    # Dictionaries with Frequency objects as keys.
    self.digest_panels, self.cur_window_starts, self.cur_window_closes = \
        self.create_digest_panels(initial_sids, initial_dt)

    # Populating initial frames here, so that the cost of creating the
    # initial frames does not show up when profiling. These frames are
    # cached since mid-stream creation of containing data frames on every
    # bar is expensive.
    self.create_return_frames(initial_dt)

    # Helps prop up the prior day panel against having a nan, when the data
    # has been seen.
    self.last_known_prior_values = {field: {} for field in self.fields}
Developer: ellisonbg | Project: zipline | Lines: 32 | Source: history_container.py
Example 14: stop_all
def stop_all(self):
    for p in itervalues(self._programs):
        p.log_cpu_times()
    for p in itervalues(self._programs):
        p.stop()
    for p in itervalues(self._programs):
        p.wait_or_kill()
Developer: Nyrio | Project: cms | Lines: 7 | Source: programstarter.py
Example 15: _iter_vars
def _iter_vars(self):
    for var in itervalues(self.vars):
        if var.is_expression() or not var.is_indexed():
            yield var
        else:
            for v in itervalues(var):
                yield v
Developer: qtothec | Project: pyomo | Lines: 7 | Source: connector.py
Example 16: _promoted
def _promoted(self, name):
    """Determine if the given variable name is being promoted from this
    `System`.

    Args
    ----
    name : str
        The name of a variable, relative to this `System`.

    Returns
    -------
    bool
        True if the named variable is being promoted from this `System`.

    Raises
    ------
    TypeError
        if the promoted variable specifications are not in a valid format
    """
    if isinstance(self._promotes, string_types):
        raise TypeError("'%s' promotes must be specified as a list, "
                        "tuple or other iterator of strings, but '%s' was specified" %
                        (self.name, self._promotes))

    for prom in self._promotes:
        if fnmatch(name, prom):
            for meta in chain(itervalues(self._params_dict),
                              itervalues(self._unknowns_dict)):
                if name == meta.get('promoted_name'):
                    return True

    return False
Developer: kishenr12 | Project: OpenMDAO | Lines: 32 | Source: system.py
Example 17: open
def open(self, filename=None):
    """ Open config file and read it.
    """
    logging.debug(__name__ + ": open")
    if filename != None:
        self._filename = str(filename)
    global imported_configs
    self._isReplaceConfig = False
    self._history=None

    # import input-config and make list of all imported configs
    for i in imported_configs.iterkeys():
        if i in sys.modules.keys():
            del sys.modules[i]
    sys.path.insert(0, os.path.dirname(self._filename))
    common_imports = sys.modules.copy()

    import imp
    theFile = open(self._filename)
    self._file = imp.load_module(os.path.splitext(os.path.basename(self._filename))[0].replace(".", "_"), theFile, self._filename, ("py", "r", 1))
    theFile.close()

    imported_configs = sys.modules.copy()
    for i in common_imports.iterkeys():
        del imported_configs[i]

    # make dictionary that connects every cms-object with the file in which it is defined
    for j in six.itervalues(imported_configs):
        setj = set(dir(j))
        for entry in setj:
            if entry[0] != "_" and entry != "cms":
                source = 1
                for k in six.itervalues(imported_configs):
                    if hasattr(k, entry):
                        setk = set(dir(k))
                        if len(setk) < len(setj) and setk < setj:
                            source = 0
                if source == 1:
                    filen = self._filename
                    if hasattr(j, "__file__"):
                        filen = j.__file__
                    file_dict[entry] = filen

    # collect all path/sequences/modules of the input-config in a list
    if self.process():
        self.setProcess(self.process())
        self._readHeaderInfo()
        self._history=self.process().dumpHistory()
        if not self._isReplaceConfig and hasattr(self.process(),"resetHistory"):
            self.process().resetHistory()
    else:
        self._initLists()
        for entry in dir(self._file):
            o=getattr(self._file, entry)
            if entry[0] != "_" and entry != "cms" and hasattr(o, "label_"):
                getattr(self._file, entry).setLabel(entry)
                text = os.path.splitext(os.path.basename(file_dict[o.label_()]))[0]
                if text == os.path.splitext(os.path.basename(self._filename))[0] and not o in self._allObjects:
                    self._readRecursive(None, o)
    return True
Developer: hotdrinkbrian | Project: cmssw | Lines: 60 | Source: ConfigDataAccessor.py
Example 18: __init__
def __init__(self, iterator, optimizer, converter=convert.concat_examples,
             device=None, loss_func=None, loss_scale=None,
             auto_new_epoch=True):
    if device is not None:
        device = backend._get_device_compat(device)

    if isinstance(iterator, iterator_module.Iterator):
        iterator = {'main': iterator}
    self._iterators = iterator

    if not isinstance(optimizer, dict):
        optimizer = {'main': optimizer}
    self._optimizers = optimizer

    if device is not None:
        for optimizer in six.itervalues(self._optimizers):
            optimizer.target._to_device(
                device, skip_between_cupy_devices=True)

    self.converter = converter
    self.loss_func = loss_func
    self.device = device
    self.iteration = 0

    self.loss_scale = loss_scale
    if loss_scale is not None:
        for optimizer in six.itervalues(self._optimizers):
            optimizer.set_loss_scale(loss_scale)

    self.auto_new_epoch = auto_new_epoch
    if auto_new_epoch:
        for o in six.itervalues(self._optimizers):
            o.use_auto_new_epoch = True
Developer: hvy | Project: chainer | Lines: 33 | Source: standard_updater.py
Example 19: metric_variables
def metric_variables(self):
    v = []
    for metric in six.itervalues(self._metrics):
        v += metric.variables
    for evaluator in six.itervalues(self._evaluators):
        v += evaluator.metric_variables
    return v
Developer: ChengYuXiang | Project: tensorflow | Lines: 7 | Source: evaluator.py
Example 20: record_iteration
def record_iteration(self, params, unknowns, resids, metadata):
    """Record the current iteration.

    Args
    ----
    params : `VecWrapper`
        `VecWrapper` containing parameters. (p)

    unknowns : `VecWrapper`
        `VecWrapper` containing outputs and states. (u)

    resids : `VecWrapper`
        `VecWrapper` containing residuals. (r)

    metadata : dict
        Dictionary containing execution metadata (e.g. iteration coordinate).
    """
    iteration_coordinate = metadata['coord']

    if self.options['record_params']:
        params = self._filter_vector(params, 'p', iteration_coordinate)
    else:
        params = None
    if self.options['record_unknowns']:
        unknowns = self._filter_vector(unknowns, 'u', iteration_coordinate)
    else:
        unknowns = None
    if self.options['record_resids']:
        resids = self._filter_vector(resids, 'r', iteration_coordinate)
    else:
        resids = None

    if self._wrote_header is False:
        header = []
        if params is not None:
            header.extend(params)
        if unknowns is not None:
            header.extend(unknowns)
        if resids is not None:
            header.extend(resids)
        if self.options['record_derivs']:
            header.append('Derivatives')
        self.ncol = len(header)
        self.writer.writerow(header)
        self._wrote_header = True

    row = []
    if params is not None:
        row.extend(serialize(value) for value in itervalues(params))
    if unknowns is not None:
        row.extend(serialize(value) for value in itervalues(unknowns))
    if resids is not None:
        row.extend(serialize(value) for value in itervalues(resids))
    if self.options['record_derivs']:
        row.append(None)
    self.writer.writerow(row)

    if self.out:
        self.out.flush()
Developer: alexanderzimmerman | Project: OpenMDAO | Lines: 60 | Source: csv_recorder.py
Note: The six.itervalues examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or reuse should follow the license of the corresponding project. Do not reproduce without permission.