本文整理汇总了Python中sahara.context.ctx函数的典型用法代码示例。如果您正苦于以下问题:Python ctx函数的具体用法?Python ctx怎么用?Python ctx使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ctx函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_verification_start
def test_verification_start(self, get_health_checks):
    """Each START verification replaces the previous result and recomputes
    the aggregate status from the individual health checks."""
    cluster = self._cluster_sample()

    # One green check -> aggregate GREEN.
    get_health_checks.return_value = [Check]
    verification_base.handle_verification(
        cluster, {'verification': {'status': 'START'}})
    cluster = self.api.cluster_get(context.ctx(), cluster)
    ver = cluster.verification
    self.assertEqual('GREEN', ver['status'])
    self.assertEqual(1, len(ver['checks']))
    self.assertEqual('No criminality', ver.checks[0]['description'])
    first_ver_id = ver['id']

    # Yellow + green checks -> aggregate YELLOW and a brand new verification.
    get_health_checks.return_value = [YellowCheck, Check, Check]
    verification_base.handle_verification(
        cluster, {'verification': {'status': 'START'}})
    cluster = self.api.cluster_get(context.ctx(), cluster)
    ver = cluster.verification
    self.assertEqual('YELLOW', ver['status'])
    self.assertEqual(3, len(ver['checks']))
    self.assertNotEqual(ver['id'], first_ver_id)

    # Any red check dominates -> aggregate RED.
    get_health_checks.return_value = [RedCheck, YellowCheck]
    verification_base.handle_verification(
        cluster, {'verification': {'status': 'START'}})
    cluster = self.api.cluster_get(context.ctx(), cluster)
    ver = cluster.verification
    self.assertEqual('RED', ver['status'])
    self.assertEqual(2, len(ver['checks']))
    self.assertNotEqual(ver['id'], first_ver_id)
    self.assertEqual("James bond check", ver['checks'][0]['name'])
开发者ID:Imperat,项目名称:sahara,代码行数:35,代码来源:test_verification_base.py
示例2: _generate_heat_stack_name
def _generate_heat_stack_name(cluster):
    """Derive a heat stack name for *cluster*, persist it in the cluster's
    extra data, and return the refreshed cluster object."""
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, cluster)
    # Stack name combines the user-visible name with a short id suffix.
    stack_name = cluster.name + cluster.id[:8]
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra['heat_stack_name'] = stack_name
    conductor.cluster_update(ctx, cluster, {'extra': extra})
    return conductor.cluster_get(ctx, cluster)
开发者ID:egafford,项目名称:sahara,代码行数:7,代码来源:heat_engine.py
示例3: job_execution_requires_proxy_user
def job_execution_requires_proxy_user(job_execution):
    '''Returns True if the job execution requires a proxy user.'''

    def _any_swift_path(values):
        # True when any string value points into Swift's internal store.
        return any(value.startswith(su.SWIFT_INTERNAL_PREFIX)
                   for value in values
                   if isinstance(value, six.string_types))

    if CONF.use_domain_for_proxy_users is False:
        return False

    # Direct input/output data sources.
    paths = [conductor.data_source_get(context.ctx(), job_execution.output_id),
             conductor.data_source_get(context.ctx(), job_execution.input_id)]
    if _any_swift_path(ds.url for ds in paths if ds):
        return True

    # Swift URLs embedded in the job configuration values.
    job_configs = job_execution.job_configs
    if _any_swift_path(six.itervalues(job_configs.get('configs', {}))):
        return True
    if _any_swift_path(six.itervalues(job_configs.get('params', {}))):
        return True
    if _any_swift_path(job_configs.get('args', [])):
        return True

    # Swift URLs in the job's main and library binaries.
    job = conductor.job_get(context.ctx(), job_execution.job_id)
    if _any_swift_path(main.url for main in job.mains):
        return True
    if _any_swift_path(lib.url for lib in job.libs):
        return True

    # We did the simple checks; now, if data_source referencing is enabled
    # and we have values that could be a name or uuid, query for
    # data_sources that match and contain a swift path.
    by_name, by_uuid = job_utils.may_contain_data_source_refs(job_configs)
    if by_name:
        names = tuple(
            job_utils.find_possible_data_source_refs_by_name(job_configs))
        # Query for name in names and url starting with the swift prefix.
        if names and conductor.data_source_count(
                context.ctx(),
                name=names,
                url=su.SWIFT_INTERNAL_PREFIX + '%') > 0:
            return True
    if by_uuid:
        uuids = tuple(
            job_utils.find_possible_data_source_refs_by_uuid(job_configs))
        # Query for id in uuids and url starting with the swift prefix.
        if uuids and conductor.data_source_count(
                context.ctx(),
                id=uuids,
                url=su.SWIFT_INTERNAL_PREFIX + '%') > 0:
            return True
    return False
开发者ID:Chinmoy-Dey,项目名称:sahara,代码行数:60,代码来源:proxy.py
示例4: _write_result
def _write_result(self, status, description):
    """Persist the final status of this health check and notify listeners."""
    ctx = context.ctx()
    cond.cluster_health_check_update(
        ctx, self.health_check_id,
        {'status': status, 'description': description})
    # Re-read so self.health_check reflects the stored record.
    self.health_check = cond.cluster_health_check_get(
        ctx, self.health_check_id)
    sender.health_notify(self.cluster, self.health_check)
开发者ID:openstack,项目名称:sahara,代码行数:7,代码来源:health_check_base.py
示例5: test_get_hadoop_ssh_keys
def test_get_hadoop_ssh_keys(self):
    """SSH keys are stored per cluster and regenerated for new clusters."""
    cluster_dict = {
        'name': 'cluster1',
        'plugin_name': 'mock_plugin',
        'hadoop_version': 'mock_version',
        'default_image_id': 'initial',
        'node_groups': [tu.make_ng_dict("ng1", "f1", ["s1"], 1)]}
    cluster1 = conductor.cluster_create(context.ctx(), cluster_dict)
    private_key1, public_key1 = c_h.get_hadoop_ssh_keys(cluster1)

    # Keys must be persisted: a re-fetched cluster yields the same pair.
    cluster1 = conductor.cluster_get(context.ctx(), cluster1)
    private_key2, public_key2 = c_h.get_hadoop_ssh_keys(cluster1)
    self.assertEqual(public_key1, public_key2)
    self.assertEqual(private_key1, private_key2)

    # A different cluster gets a freshly generated pair.
    cluster_dict.update({'name': 'cluster2'})
    cluster2 = conductor.cluster_create(context.ctx(), cluster_dict)
    private_key3, public_key3 = c_h.get_hadoop_ssh_keys(cluster2)
    self.assertNotEqual(public_key1, public_key3)
    self.assertNotEqual(private_key1, private_key3)
开发者ID:AspirinSJL,项目名称:sahara,代码行数:25,代码来源:test_plugin.py
示例6: execute_job
def execute_job(job_id, data):
    """Create a job execution record and hand it off to the EDP engine.

    :param job_id: id of the job to run.
    :param data: request payload containing ``cluster_id`` plus optional
        ``job_configs``, ``interface`` and (for non-Java job types)
        ``input_id``/``output_id``.
    :returns: the created job execution object.
    :raises ex.SaharaException: if the required proxy user cannot be created
        (the pending execution record is destroyed first).
    """
    # Elements common to all job types
    cluster_id = data['cluster_id']
    configs = data.get('job_configs', {})
    interface = data.get('interface', {})

    # Not in Java job types but present for all others
    input_id = data.get('input_id', None)
    output_id = data.get('output_id', None)

    # Since we will use a unified class in the database, we pass
    # a superset for all job types
    job_ex_dict = {'input_id': input_id, 'output_id': output_id,
                   'job_id': job_id, 'cluster_id': cluster_id,
                   'info': {'status': edp.JOB_STATUS_PENDING},
                   'job_configs': configs, 'extra': {},
                   'interface': interface}
    job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict)
    context.set_current_job_execution_id(job_execution.id)

    # check to use proxy user
    if p.job_execution_requires_proxy_user(job_execution):
        try:
            p.create_proxy_user_for_job_execution(job_execution)
        except ex.SaharaException as e:
            LOG.error(_LE("Can't run job execution. "
                          "(Reasons: {reason})").format(reason=e))
            conductor.job_execution_destroy(context.ctx(), job_execution)
            # Bare "raise" keeps the original traceback; "raise e" would
            # discard it on Python 2.
            raise

    OPS.run_edp_job(job_execution.id)
    return job_execution
开发者ID:rsaha,项目名称:sahara,代码行数:33,代码来源:api.py
示例7: get_oozie_password
def get_oozie_password(cluster):
    """Return the Oozie password for *cluster*, creating and storing the
    secret on first use."""
    cluster = conductor.cluster_get(context.ctx(), cluster)
    cluster_extra = cluster.extra.to_dict()
    if 'oozie_pass_id' not in cluster_extra:
        # First access: generate the secret and remember its castellan id.
        cluster_extra['oozie_pass_id'] = u.generate_random_password()
        conductor.cluster_update(context.ctx(), cluster,
                                 {'extra': cluster_extra})
    return castellan.get_secret(cluster_extra['oozie_pass_id'])
开发者ID:Imperat,项目名称:sahara,代码行数:7,代码来源:utils.py
示例8: update_plugin
def update_plugin(self, plugin_name, values):
    """Update the stored label data for a plugin.

    Creates the plugin record from the current defaults on first update.
    A falsy/absent scope in *values* removes that scope from the update
    payload; otherwise its labels are merged into the current data.

    :param plugin_name: name of the plugin to update.
    :param values: dict possibly containing PLUGIN_LABELS_SCOPE and/or
        VERSION_LABELS_SCOPE label overrides.
    """
    ctx = context.ctx()
    current = self.get_label_details(plugin_name)
    if not conductor.plugin_get(ctx, plugin_name):
        # First update for this plugin: persist the defaults first.
        current['name'] = plugin_name
        conductor.plugin_create(ctx, current)
        del current['name']

    plugin_labels = values.get(PLUGIN_LABELS_SCOPE)
    if plugin_labels:
        for label, data in plugin_labels.items():
            current[PLUGIN_LABELS_SCOPE][label].update(data)
    else:
        del current[PLUGIN_LABELS_SCOPE]

    version_labels = values.get(VERSION_LABELS_SCOPE)
    if version_labels:
        for version, labels in version_labels.items():
            for label, data in labels.items():
                current[VERSION_LABELS_SCOPE][version][label].update(data)
    else:
        del current[VERSION_LABELS_SCOPE]

    # Reuse the context cached above (the original inconsistently called
    # context.ctx() again here).
    conductor.plugin_update(ctx, plugin_name, current)
开发者ID:Imperat,项目名称:sahara,代码行数:25,代码来源:labels.py
示例9: test_get_instances
def test_get_instances(self):
    """get_instances returns all instances, or the subset matching ids."""
    cluster = self._make_sample()
    ctx = context.ctx()
    total = 0
    instance_ids = []
    for ng in cluster.node_groups:
        for _ in range(ng.count):
            total += 1
            instance_ids.append(self.api.instance_add(ctx, ng, {
                'instance_id': str(total),
                'instance_name': str(total),
            }))
    cluster = self.api.cluster_get(ctx, cluster)

    # Filtered by ids: every created instance must come back.
    instances = general.get_instances(cluster, instance_ids)
    found = set()
    for inst in instances:
        found.add(inst.instance_id)
    self.assertEqual(total, len(found))
    # The original loop used range(1, total), which never asserted the
    # last instance id; range(1, total + 1) covers all of them.
    for i in range(1, total + 1):
        self.assertIn(str(i), found)

    # Without ids: all instances in the cluster are returned.
    instances = general.get_instances(cluster)
    found = set()
    for inst in instances:
        found.add(inst.instance_id)
    self.assertEqual(total, len(found))
    for i in range(1, total + 1):
        self.assertIn(str(i), found)
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:28,代码来源:test_general.py
示例10: clean_verification_data
def clean_verification_data(cluster):
    """Remove stored verification results for *cluster*, if any exist."""
    cluster = cond.cluster_get(context.ctx(), cluster)
    if not verification_exists(cluster):
        return
    try:
        vid = cluster.verification.id
        cond.cluster_verification_delete(context.ctx(), vid)
    except exceptions.NotFoundException:
        # Another actor already removed it; nothing left to do.
        LOG.debug("Verification data already cleaned")
开发者ID:Imperat,项目名称:sahara,代码行数:8,代码来源:verification_base.py
示例11: _indicate_start
def _indicate_start(self):
    """Register this health check in CHECKING state and notify listeners."""
    ctx = context.ctx()
    verification_id = self.cluster.verification.id
    check = cond.cluster_health_check_add(
        ctx, verification_id,
        {'status': common.HEALTH_STATUS_CHECKING,
         'name': self.get_health_check_name()})
    self.health_check_id = check.id
    self.health_check = cond.cluster_health_check_get(
        ctx, self.health_check_id)
    sender.health_notify(self.cluster, self.health_check)
开发者ID:openstack,项目名称:sahara,代码行数:8,代码来源:health_check_base.py
示例12: check_data_sources_are_different
def check_data_sources_are_different(data_source_1_id, data_source_2_id):
    """Raise InvalidDataException when both data sources have the same
    type and URL (i.e. reference the same location)."""
    first = conductor.data_source_get(context.ctx(), data_source_1_id)
    second = conductor.data_source_get(context.ctx(), data_source_2_id)
    same_type = first.type == second.type
    same_url = first.url == second.url
    if same_type and same_url:
        raise ex.InvalidDataException(_('Provided input and output '
                                        'DataSources reference the same '
                                        'location: %s') % first.url)
开发者ID:crobby,项目名称:sahara,代码行数:8,代码来源:base.py
示例13: test_apply_recommended_configs
def test_apply_recommended_configs(self, cond_cluster, cond_node_group,
                                   fake_flavor):
    """Recommended configs are pushed through the conductor for both the
    cluster and its node group."""
    fake_flavor.return_value = FakeObject(ram=2048, vcpus=1)
    configs_to_tune = {
        'cluster_configs': {
            'dfs.replication': ('dfs', 'replica')
        },
        'node_configs': {
            'mapreduce.task.io.sort.mb': ('bond', 'extra_name')
        }
    }
    plugin_configs = [
        FakeObject(applicable_target='dfs', name='replica',
                   default_value=3)]
    node_group = FakeObject(
        use_autoconfig=True,
        count=2,
        node_processes=['dog_datanode'],
        flavor_id='fake_id',
        node_configs=Configs({
            'bond': {
                'name': 'james'
            }
        })
    )
    fake_cluster = FakeObject(
        cluster_configs=Configs({
            'cat': {
                'talk': 'meow',
            }
        }),
        node_groups=[node_group],
        use_autoconfig=True,
    )

    provider = ru.HadoopAutoConfigsProvider(
        configs_to_tune, plugin_configs, fake_cluster,
        {'datanode_process_name': "dog_datanode"})
    provider.apply_recommended_configs()

    # Existing cluster configs are kept; the recommended 'dfs' section is
    # merged in.
    expected_cluster_call = mock.call(context.ctx(), fake_cluster, {
        'cluster_configs': {
            'cat': {
                'talk': 'meow'
            },
            'dfs': {
                'replica': 2
            }
        }
    })
    self.assertEqual([expected_cluster_call], cond_cluster.call_args_list)

    # Node group keeps its own configs and gains the recommended value.
    expected_ng_call = mock.call(context.ctx(), node_group, {
        'node_configs': {
            'bond': {
                'name': 'james',
                'extra_name': 102
            }
        }
    })
    self.assertEqual([expected_ng_call], cond_node_group.call_args_list)
开发者ID:rsaha,项目名称:sahara,代码行数:58,代码来源:test_provide_recommendations.py
示例14: update_cluster
def update_cluster(id, values):
    """Apply *values* to a cluster, dispatching special pseudo-fields.

    'update_keypair' triggers a keypair refresh instead of a field update;
    verification-related values are routed to the verification handler.
    """
    if "update_keypair" in values:
        # Consume the pseudo-field so it is never written to the DB.
        if values.pop("update_keypair"):
            api.OPS.update_keypair(id)
    if verification_base.update_verification_required(values):
        api.OPS.handle_verification(id, values)
        return conductor.cluster_get(context.ctx(), id)
    return conductor.cluster_update(context.ctx(), id, values)
开发者ID:openstack,项目名称:sahara,代码行数:9,代码来源:clusters.py
示例15: get_raw_binary
def get_raw_binary(job_binary):
    """Fetch the raw data for a job binary based on its URL scheme.

    :param job_binary: job binary object with a ``url`` attribute.
    :returns: the raw binary data from the internal DB or Swift.
    :raises ValueError: if the URL scheme is not recognized (the original
        code crashed with an opaque UnboundLocalError in that case).
    """
    url = job_binary.url
    if url.startswith("internal-db://"):
        return db.get_raw_data(context.ctx(), job_binary)
    # TODO(mattf): remove support for OLD_SWIFT_INTERNAL_PREFIX
    if url.startswith((su.SWIFT_INTERNAL_PREFIX,
                       su.OLD_SWIFT_INTERNAL_PREFIX)):
        return i_swift.get_raw_data(context.ctx(), job_binary)
    raise ValueError("Unsupported job binary url: %s" % url)
开发者ID:AspirinSJL,项目名称:sahara,代码行数:11,代码来源:dispatch.py
示例16: _await_networks
def _await_networks(self, cluster, instances):
    """Wait until every instance has an IP and is reachable over SSH."""
    if not instances:
        return

    cpo.add_provisioning_step(cluster.id, _("Assign IPs"), len(instances))
    assigned_ids = set()
    self._ips_assign(assigned_ids, cluster, instances)
    LOG.info(
        _LI("Cluster {cluster_id}: all instances have IPs assigned")
        .format(cluster_id=cluster.id))

    # Refresh so we operate on the instances as stored in the DB.
    cluster = conductor.cluster_get(context.ctx(), cluster)
    instances = g.get_instances(cluster, assigned_ids)

    cpo.add_provisioning_step(
        cluster.id, _("Wait for instance accessibility"), len(instances))
    with context.ThreadGroup() as tg:
        for instance in instances:
            tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                     self._wait_until_accessible, instance)
    LOG.info(_LI("Cluster {cluster_id}: all instances are accessible")
             .format(cluster_id=cluster.id))
开发者ID:YongchaoTIAN,项目名称:sahara,代码行数:26,代码来源:engine.py
示例17: test_transient_cluster_terminate
def test_transient_cluster_terminate(self, terminate_cluster,
                                     use_os_admin_auth_token):
    """Only clusters whose every job execution has finished get terminated."""
    timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 0))
    ctx = context.ctx()
    job = self.api.job_create(ctx, te.SAMPLE_JOB)
    ds = self.api.data_source_create(ctx, te.SAMPLE_DATA_SOURCE)
    self._make_cluster('1')
    self._make_cluster('2')

    # Cluster '1' has one finished execution; cluster '2' has two running.
    executions = [
        {"end_time": timeutils.utcnow(), "id": 1, "cluster_id": "1"},
        {"end_time": None, "id": 2, "cluster_id": "2"},
        {"end_time": None, "id": 3, "cluster_id": "2"},
    ]
    for execution in executions:
        self._create_job_execution(execution, job, ds, ds)

    # Advance time and run the periodic cleanup.
    timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 1))
    p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)

    self.assertEqual(1, terminate_cluster.call_count)
    terminate_cluster.assert_has_calls([mock.call(u'1')])
    self.assertEqual(1, use_os_admin_auth_token.call_count)
开发者ID:uladz,项目名称:sahara,代码行数:31,代码来源:test_periodic.py
示例18: _await_networks
def _await_networks(self, cluster, instances):
    """Poll until all instances have IPs, then wait for SSH accessibility."""
    if not instances:
        return

    assigned_ids = set()
    while len(assigned_ids) != len(instances):
        # Bail out if the cluster was deleted while we were waiting.
        if not g.check_cluster_exists(cluster):
            return
        for instance in instances:
            if instance.id in assigned_ids:
                continue
            if networks.init_instances_ips(instance):
                assigned_ids.add(instance.id)
        context.sleep(1)
    LOG.info(
        _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)

    # Refresh the cluster and wait for SSH on each instance in parallel.
    cluster = conductor.cluster_get(context.ctx(), cluster)
    instances = g.get_instances(cluster, assigned_ids)
    with context.ThreadGroup() as tg:
        for instance in instances:
            tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                     self._wait_until_accessible, instance)
    LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)
开发者ID:degorenko,项目名称:sahara,代码行数:27,代码来源:engine.py
示例19: apply_cluster_configs
def apply_cluster_configs(self):
    """Apply recommended cluster-level configs via the conductor API.

    Values were calculated earlier by recommend_cluster_configs.
    :return: None.
    """
    cluster = self.cluster
    if not cluster.use_autoconfig:
        return
    recommended = self._get_recommended_cluster_configs()
    if not recommended:
        # Nothing to configure
        return

    # Translate each recommended value into its target section/name.
    proposed = {}
    for cfg_key, (section, name) in six.iteritems(
            self.cluster_configs_to_update):
        if cfg_key in recommended:
            proposed.setdefault(section, {})[name] = recommended[cfg_key]

    merged = self._merge_configs(cluster.cluster_configs.to_dict(), proposed)
    conductor.cluster_update(context.ctx(), cluster,
                             {'cluster_configs': merged})
开发者ID:Imperat,项目名称:sahara,代码行数:28,代码来源:recommendations_utils.py
示例20: _set_cluster_info
def _set_cluster_info(self, cluster):
    """Record service endpoint URLs (MapReduce, HDFS, Oozie) in cluster info."""
    namenode = vu.get_namenode(cluster)
    jobtracker = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if jobtracker:
        ui_port = c_helper.get_port_from_config(
            "MapReduce", "mapred.job.tracker.http.address", cluster)
        jt_port = c_helper.get_port_from_config(
            "MapReduce", "mapred.job.tracker", cluster)
        info["MapReduce"] = {
            "Web UI": "http://%s:%s" % (jobtracker.management_ip, ui_port),
            "JobTracker": "%s:%s" % (jobtracker.hostname(), jt_port),
        }
    if namenode:
        ui_port = c_helper.get_port_from_config(
            "HDFS", "dfs.http.address", cluster)
        nn_port = c_helper.get_port_from_config(
            "HDFS", "fs.default.name", cluster)
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (namenode.management_ip, ui_port),
            "NameNode": "hdfs://%s:%s" % (namenode.hostname(), nn_port),
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info["JobFlow"] = {
            "Oozie": "http://%s:11000" % oozie.management_ip}

    conductor.cluster_update(context.ctx(), cluster, {"info": info})
开发者ID:metasensus,项目名称:sahara,代码行数:30,代码来源:versionhandler.py
注:本文中的sahara.context.ctx函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论