本文整理汇总了Python中savanna.context.ctx函数的典型用法代码示例。如果您正苦于以下问题:Python ctx函数的具体用法?Python ctx怎么用?Python ctx使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ctx函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_get_hadoop_ssh_keys
def test_get_hadoop_ssh_keys(self):
    """get_hadoop_ssh_keys must persist one key pair per cluster and
    generate a distinct pair for each newly created cluster."""
    cluster_dict = {
        'name': 'cluster1',
        'plugin_name': 'mock_plugin',
        'hadoop_version': 'mock_version',
        'default_image_id': 'initial',
        'node_groups': [tu._make_ng_dict("ng1", "f1", ["s1"], 1)]}
    cluster1 = conductor.cluster_create(context.ctx(), cluster_dict)
    (private_key1, public_key1) = c_h.get_hadoop_ssh_keys(cluster1)

    # Re-reading the same cluster must return the stored key pair,
    # not a freshly generated one.
    cluster1 = conductor.cluster_get(context.ctx(), cluster1)
    (private_key2, public_key2) = c_h.get_hadoop_ssh_keys(cluster1)
    self.assertEqual(public_key1, public_key2)
    self.assertEqual(private_key1, private_key2)

    # A different cluster must get its own, different key pair.
    cluster_dict.update({'name': 'cluster2'})
    cluster2 = conductor.cluster_create(context.ctx(), cluster_dict)
    (private_key3, public_key3) = c_h.get_hadoop_ssh_keys(cluster2)
    self.assertNotEqual(public_key1, public_key3)
    self.assertNotEqual(private_key1, private_key3)
开发者ID:hguemar,项目名称:sahara,代码行数:25,代码来源:test_plugin.py
示例2: get_raw_binary
def get_raw_binary(job_binary):
    """Fetch the raw bytes of a job binary from its backing store.

    Dispatches on the URL scheme: the internal savanna database or
    Swift internal storage.

    :param job_binary: object with a ``url`` attribute naming the store.
    :returns: the raw binary data from the matching backend.
    :raises RuntimeError: if the URL scheme is not recognized.
    """
    url = job_binary.url
    if url.startswith("savanna-db://"):
        res = db.get_raw_data(context.ctx(), job_binary)
    elif url.startswith(su.SWIFT_INTERNAL_PREFIX):
        res = i_swift.get_raw_data(context.ctx(), job_binary)
    else:
        # Previously an unrecognized scheme left 'res' unbound and the
        # final 'return res' crashed with UnboundLocalError; fail with
        # an explicit, diagnosable error instead.
        raise RuntimeError("Unknown job binary URL scheme: %s" % url)
    return res
开发者ID:hguemar,项目名称:sahara,代码行数:9,代码来源:dispatch.py
示例3: test_ip_assignment_use_no_floating
def test_ip_assignment_use_no_floating(self, cfg, novaclient):
    """With use_floating_ips disabled, floating IPs are created only for
    node groups that declare a floating-IP pool.

    (cfg and novaclient are injected mocks — presumably via
    @mock.patch decorators outside this excerpt.)
    """
    cfg.CONF.use_floating_ips = False
    nova = _create_nova_mock(novaclient)
    # Only the first group (2 instances) names a floating pool 'pool'.
    node_groups = [_make_ng_dict("test_group_1", "test_flavor",
                                 ["data node", "test tracker"], 2, 'pool'),
                   _make_ng_dict("test_group_2", "test_flavor",
                                 ["name node", "test tracker"], 1)]
    ctx = context.ctx()
    cluster = _create_cluster_mock(node_groups, ["data node"])
    instances._create_instances(cluster)
    cluster = conductor.cluster_get(ctx, cluster)
    instances_list = instances._get_instances(cluster)
    instances._assign_floating_ips(instances_list)
    # Two instances in the pool-backed group -> exactly two create calls.
    nova.floating_ips.create.assert_has_calls(
        [mock.call("pool"),
         mock.call("pool")],
        any_order=False
    )
    self.assertEqual(nova.floating_ips.create.call_count, 2,
                     "Not expected floating IPs number found.")
开发者ID:rrader,项目名称:savanna,代码行数:27,代码来源:test_instances.py
示例4: test_crud_operation_create_list_delete_update
def test_crud_operation_create_list_delete_update(self):
    """Job-execution CRUD round trip: create, list, update, destroy."""
    ctx = context.ctx()
    job = self.api.job_create(ctx, SAMPLE_JOB)
    ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE)
    SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE)
    SAMPLE_DATA_OUTPUT['name'] = 'output'
    ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT)

    # NOTE(review): this mutates the module-level SAMPLE_JOB_EXECUTION
    # dict in place; fine only if no other test relies on its original
    # contents — confirm.
    SAMPLE_JOB_EXECUTION['job_id'] = job['id']
    SAMPLE_JOB_EXECUTION['input_id'] = ds_input['id']
    SAMPLE_JOB_EXECUTION['output_id'] = ds_output['id']
    self.api.job_execution_create(ctx, SAMPLE_JOB_EXECUTION)

    lst = self.api.job_execution_get_all(ctx)
    self.assertEqual(len(lst), 1)
    job_ex_id = lst[0]['id']

    # 'progress' comes back as a float even when updated with a string.
    self.assertEqual(lst[0]['progress'], 0.1)
    self.api.job_execution_update(ctx, job_ex_id, {'progress': '0.2'})
    updated_job = self.api.job_execution_get(ctx, job_ex_id)
    self.assertEqual(updated_job['progress'], 0.2)

    self.api.job_execution_destroy(ctx, job_ex_id)
    lst = self.api.job_execution_get_all(ctx)
    self.assertEqual(len(lst), 0)
开发者ID:simedcn,项目名称:savanna,代码行数:28,代码来源:test_edp.py
示例5: _await_networks
def _await_networks(cluster, instances):
    """Block until every instance has its IPs assigned and is reachable
    over SSH.

    Polls once per second and bails out early if the cluster has been
    deleted in the meantime.
    """
    if not instances:
        return

    ips_assigned = set()
    while len(ips_assigned) != len(instances):
        # Stop waiting if the cluster disappeared (e.g. was deleted).
        if not g.check_cluster_exists(instances[0].node_group.cluster):
            return
        for instance in instances:
            if instance.id not in ips_assigned:
                if networks.init_instances_ips(instance):
                    ips_assigned.add(instance.id)
        context.sleep(1)

    LOG.info("Cluster '%s': all instances have IPs assigned" % cluster.id)

    # Re-read the cluster so the instance objects carry the fresh IPs.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, instances[0].node_group.cluster)
    instances = _get_instances(cluster, ips_assigned)

    # Wait for SSH accessibility concurrently, one thread per instance.
    with context.ThreadGroup() as tg:
        for instance in instances:
            tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                     _wait_until_accessible, instance)

    LOG.info("Cluster '%s': all instances are accessible" % cluster.id)
开发者ID:rrader,项目名称:savanna,代码行数:27,代码来源:instances.py
示例6: test_one_node_groups_and_no_affinity_group
def test_one_node_groups_and_no_affinity_group(self, novaclient):
    """Without an anti-affinity group, instances are created in order
    with scheduler_hints=None."""
    node_groups = [_make_ng_dict("test_group", "test_flavor", ["data node", "task tracker"], 2)]
    cluster = _create_cluster_mock(node_groups, [])
    nova = _create_nova_mock(novaclient)
    instances._create_instances(cluster)
    userdata = _generate_user_data_script(cluster)
    # Two servers named <cluster>-<group>-NNN, both without hints.
    nova.servers.create.assert_has_calls(
        [
            mock.call(
                "test_cluster-test_group-001",
                "initial",
                "test_flavor",
                scheduler_hints=None,
                userdata=userdata,
                key_name="user_keypair",
            ),
            mock.call(
                "test_cluster-test_group-002",
                "initial",
                "test_flavor",
                scheduler_hints=None,
                userdata=userdata,
                key_name="user_keypair",
            ),
        ],
        any_order=False,
    )
    # Both instances must have been persisted on the node group.
    ctx = context.ctx()
    cluster_obj = conductor.cluster_get_all(ctx)[0]
    self.assertEqual(len(cluster_obj.node_groups[0].instances), 2)
开发者ID:hanjinze,项目名称:savanna,代码行数:33,代码来源:test_instances.py
示例7: test_remove_instance
def test_remove_instance(self):
    """instance_remove decrements the node-group count, and removing
    the same instance twice raises RuntimeError."""
    ctx = context.ctx()
    cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
    _id = cluster_db_obj["id"]
    ng_id = cluster_db_obj["node_groups"][-1]["id"]
    count = cluster_db_obj["node_groups"][-1]["count"]

    node_group = self._add_instance(ctx, ng_id)
    instance_id = node_group["instances"][-1]["id"]

    # Adding an instance bumps the target group's count by one.
    cluster_db_obj = self.api.cluster_get(ctx, _id)
    for ng in cluster_db_obj["node_groups"]:
        if ng["id"] != ng_id:
            continue
        self.assertEqual(count + 1, ng["count"])

    self.api.instance_remove(ctx, instance_id)

    # Removal restores the original count.
    cluster_db_obj = self.api.cluster_get(ctx, _id)
    for ng in cluster_db_obj["node_groups"]:
        if ng["id"] != ng_id:
            continue
        self.assertEqual(count, ng["count"])

    # Removing an already-removed instance must fail.
    with self.assertRaises(RuntimeError):
        self.api.instance_remove(ctx, instance_id)
开发者ID:rnirmal,项目名称:savanna,代码行数:29,代码来源:test_clusters.py
示例8: test_cluster_terminate
def test_cluster_terminate(self, terminate_cluster, get_job_status):
    """terminate_unneeded_clusters terminates only clusters whose job
    executions have all ended (cluster "1"), not clusters that still
    have running jobs (cluster "2").

    (terminate_cluster and get_job_status are injected mocks; the
    @mock.patch decorators are outside this excerpt.)
    """
    cfg.CONF.set_override("use_identity_api_v3", True)
    try:
        ctx = context.ctx()
        job = self.api.job_create(ctx, te.SAMPLE_JOB)
        ds = self.api.data_source_create(ctx, te.SAMPLE_DATA_SOURCE)

        # Two active clusters with identical configs but distinct ids.
        c = tc.SAMPLE_CLUSTER.copy()
        c["status"] = "Active"
        c["id"] = "1"
        c["name"] = "1"
        self.api.cluster_create(ctx, c)
        c["id"] = "2"
        c["name"] = "2"
        self.api.cluster_create(ctx, c)

        # Cluster "1": its only job has ended -> eligible for termination.
        self._create_job_execution({"end_time": datetime.datetime.now(),
                                    "id": 1,
                                    "cluster_id": "1"},
                                   job, ds, ds)
        # Cluster "2": two jobs still running -> must be kept alive.
        self._create_job_execution({"end_time": None,
                                    "id": 2,
                                    "cluster_id": "2"},
                                   job, ds, ds)
        self._create_job_execution({"end_time": None,
                                    "id": 3,
                                    "cluster_id": "2"},
                                   job, ds, ds)

        p.SavannaPeriodicTasks().terminate_unneeded_clusters(None)
        self.assertEqual(terminate_cluster.call_count, 1)
        terminate_cluster.assert_has_calls([mock.call(u'1')])
    finally:
        # Always restore the config override, even if assertions fail.
        cfg.CONF.clear_override("use_identity_api_v3")
开发者ID:hguemar,项目名称:sahara,代码行数:31,代码来源:test_periodic.py
示例9: _provision_nodes
def _provision_nodes(id, node_group_id_map):
    """Scale the cluster identified by *id* to the counts given in
    *node_group_id_map*, driving the status through
    Scaling -> Configuring -> Active (or Error on plugin failure).

    NOTE(review): the parameter name 'id' shadows the builtin; renaming
    it would break keyword callers, so it is only flagged here.
    """
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    cluster = conductor.cluster_update(ctx, cluster, {"status": "Scaling"})
    LOG.info(g.format_cluster_status(cluster))
    instances = i.scale_cluster(cluster, node_group_id_map, plugin)

    # Only run plugin-side configuration if new instances were created.
    if instances:
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Configuring"})
        LOG.info(g.format_cluster_status(cluster))
        try:
            plugin.scale_cluster(cluster, i.get_instances(cluster, instances))
        except Exception as ex:
            # Plugin failure leaves the cluster in Error; do not proceed.
            LOG.exception("Can't scale cluster '%s' (reason: %s)",
                          cluster.name, ex)
            conductor.cluster_update(ctx, cluster, {"status": "Error"})
            LOG.info(g.format_cluster_status(cluster))
            return

    # cluster is now up and ready
    cluster = conductor.cluster_update(ctx, cluster, {"status": "Active"})
    LOG.info(g.format_cluster_status(cluster))
开发者ID:joelmathew,项目名称:savanna,代码行数:25,代码来源:api.py
示例10: _set_cluster_info
def _set_cluster_info(self, cluster):
    """Collect the web endpoints of the cluster services and store them
    in the cluster's 'info' field via the conductor."""
    manager = u.get_instances(cluster, 'manager')[0]
    namenode = u.get_namenode(cluster)
    jobtracker = u.get_jobtracker(cluster)
    oozie = u.get_oozie(cluster)

    #TODO(alazarev) make port configurable (bug #1262895)
    info = {
        'IDH Manager': {
            'Web UI': 'https://%s:9443' % manager.management_ip
        }
    }
    if jobtracker:
        #TODO(alazarev) make port configurable (bug #1262895)
        info['MapReduce'] = {
            'Web UI': 'http://%s:50030' % jobtracker.management_ip,
            'JobTracker': '%s:54311' % jobtracker.hostname()
        }
    if namenode:
        #TODO(alazarev) make port configurable (bug #1262895)
        info['HDFS'] = {
            'Web UI': 'http://%s:50070' % namenode.management_ip,
            'NameNode': 'hdfs://%s:8020' % namenode.hostname()
        }
    if oozie:
        #TODO(alazarev) make port configurable (bug #1262895)
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }

    conductor.cluster_update(context.ctx(), cluster, {'info': info})
开发者ID:hguemar,项目名称:sahara,代码行数:34,代码来源:plugin.py
示例11: test_one_node_groups_and_no_affinity_group
def test_one_node_groups_and_no_affinity_group(self, novaclient):
    """Without an anti-affinity group, both servers are created in order
    with scheduler_hints=None, and both rows land in the DB."""
    node_groups = [m.NodeGroup("test_group", "test_flavor",
                               ["data node", "test tracker"], 2)]
    node_groups[0]._username = "root"
    cluster = _create_cluster_mock(node_groups, [])
    nova = _create_nova_mock(novaclient)
    instances._create_instances(cluster)
    userdata = _generate_user_data_script(cluster)
    nova.servers.create.assert_has_calls(
        [mock.call("test_cluster-test_group-001",
                   "initial",
                   "test_flavor",
                   scheduler_hints=None,
                   userdata=userdata,
                   key_name='user_keypair'),
         mock.call("test_cluster-test_group-002",
                   "initial",
                   "test_flavor",
                   scheduler_hints=None,
                   userdata=userdata,
                   key_name='user_keypair')],
        any_order=False)
    # NOTE(review): here 'ctx' is the imported context module
    # (ctx.ctx().session), unlike sibling tests that bind a local ctx.
    session = ctx.ctx().session
    with session.begin():
        self.assertEqual(session.query(m.Instance).count(), 2)
开发者ID:jfzhang1984,项目名称:savanna,代码行数:27,代码来源:test_instances.py
示例12: clean_cluster_from_empty_ng
def clean_cluster_from_empty_ng(cluster):
    """Remove node groups that no longer contain any instances.

    :param cluster: cluster object whose node groups are inspected.
    :returns: the cluster re-read from the conductor after removals.
    """
    ctx = context.ctx()
    # Snapshot the empty groups before deleting: removing a node group
    # while iterating cluster.node_groups directly could skip entries
    # if node_group_remove mutates the underlying collection
    # (TODO confirm whether node_groups is a live collection).
    empty_groups = [ng for ng in cluster.node_groups if ng.count == 0]
    for ng in empty_groups:
        conductor.node_group_remove(ctx, ng)
    return conductor.cluster_get(ctx, cluster)
开发者ID:StokesB1,项目名称:savanna,代码行数:7,代码来源:instances.py
示例13: convert
def convert(self, config, plugin_name, version, cluster_template_create):
    """Translate an Ambari cluster spec into a savanna cluster template
    and persist it through *cluster_template_create*."""
    normalized = clusterspec.ClusterSpec(config).normalize()

    # Flatten each normalized node group into the template dict shape.
    node_groups = [
        {
            "name": ng.name,
            "flavor_id": ng.flavor,
            "node_processes": ng.node_processes,
            "count": ng.count
        }
        for ng in normalized.node_groups
    ]

    # Group config entries by the service they apply to.
    cluster_configs = {}
    for entry in normalized.cluster_configs:
        target = entry.config.applicable_target
        cluster_configs.setdefault(target, {})[entry.config.name] = entry.value

    #TODO(jspeidel): can we get the name (first arg) from somewhere?
    template = {
        "name": uuidutils.generate_uuid(),
        "plugin_name": plugin_name,
        "hadoop_version": version,
        "node_groups": node_groups,
        "cluster_configs": cluster_configs
    }
    return cluster_template_create(context.ctx(), template)
开发者ID:StokesB1,项目名称:savanna,代码行数:31,代码来源:ambariplugin.py
示例14: create_cluster
def create_cluster(cluster):
    """Provision a cluster end to end: spawn instances, wait for them,
    attach volumes and configure; on failure mark the cluster Error,
    roll back the creation, and re-raise the original exception."""
    ctx = context.ctx()
    try:
        # create all instances
        conductor.cluster_update(ctx, cluster, {"status": "Spawning"})
        LOG.info(g.format_cluster_status(cluster))
        _create_instances(cluster)

        # wait until all instances are up and accessible
        cluster = conductor.cluster_update(ctx, cluster, {"status": "Waiting"})
        LOG.info(g.format_cluster_status(cluster))
        cluster = _await_instances(cluster)

        # attach volumes
        volumes.attach(cluster)

        # prepare all instances
        cluster = conductor.cluster_update(ctx, cluster,
                                           {"status": "Preparing"})
        LOG.info(g.format_cluster_status(cluster))
        _configure_instances(cluster)
    except Exception as ex:
        LOG.warn("Can't start cluster '%s' (reason: %s)", cluster.name, ex)
        # Record the failure and clean up, then re-raise the original
        # exception via save_and_reraise_exception.
        with excutils.save_and_reraise_exception():
            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Error",
                                                "status_description": str(ex)})
            LOG.info(g.format_cluster_status(cluster))
            _rollback_cluster_creation(cluster, ex)
开发者ID:StokesB1,项目名称:savanna,代码行数:30,代码来源:instances.py
示例15: _await_networks
def _await_networks(instances):
    """Poll until every instance has its IPs assigned and is reachable,
    returning early if the cluster disappears mid-wait."""
    if not instances:
        return

    # First pass: wait until every instance has IPs assigned.
    ips_assigned = set()
    while len(ips_assigned) != len(instances):
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for instance in instances:
            if instance.id not in ips_assigned:
                if networks.init_instances_ips(instance):
                    ips_assigned.add(instance.id)
        context.sleep(1)

    # Refresh instance objects so they carry the newly assigned IPs.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, instances[0].node_group.cluster)
    instances = get_instances(cluster, ips_assigned)

    # Second pass: wait until each instance is actually accessible.
    accessible_instances = set()
    while len(accessible_instances) != len(instances):
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for instance in instances:
            if instance.id not in accessible_instances:
                if _check_if_accessible(instance):
                    accessible_instances.add(instance.id)
        context.sleep(1)
开发者ID:chiehwen,项目名称:savanna,代码行数:29,代码来源:instances.py
示例16: _scale_cluster
def _scale_cluster(cluster, target_count):
    """Scale the cluster to *target_count* via Heat; on failure roll the
    node-group counts back and re-raise the original exception.

    :returns: ids of the instances launched by the scale operation.
    """
    ctx = context.ctx()
    # Remember current counts so a failed scale can be undone.
    rollback_count = _get_ng_counts(cluster)

    launcher = _ScaleLauncher()

    try:
        launcher.launch_instances(ctx, cluster, target_count)
    except Exception as ex:
        LOG.warn("Can't scale cluster '%s' (reason: %s)", cluster.name, ex)
        # Re-raise the launch failure after attempting a rollback.
        with excutils.save_and_reraise_exception():
            cluster = conductor.cluster_get(ctx, cluster)

            try:
                _rollback_cluster_scaling(ctx, cluster, rollback_count,
                                          target_count)
            except Exception:
                # if something fails during the rollback, we stop
                # doing anything further
                cluster = conductor.cluster_update(ctx, cluster,
                                                   {"status": "Error"})
                LOG.info(g.format_cluster_status(cluster))
                LOG.error("Unable to complete rollback, aborting")
                raise

            cluster = conductor.cluster_update(ctx, cluster,
                                               {"status": "Active"})
            LOG.info(g.format_cluster_status(cluster))
            LOG.warn("Rollback successful. Throwing off an initial exception.")
    finally:
        # Always drop node groups that ended up with zero instances.
        cluster = conductor.cluster_get(ctx, cluster)
        _clean_cluster_from_empty_ng(cluster)

    return launcher.inst_ids
开发者ID:rrader,项目名称:savanna,代码行数:32,代码来源:instances_heat.py
示例17: test_one_node_groups_and_one_affinity_group
def test_one_node_groups_and_one_affinity_group(self, novaclient):
    """With 'data node' in the anti-affinity list, the second instance
    is scheduled with a different_host hint against the first."""
    node_groups = [_make_ng_dict('test_group', 'test_flavor',
                                 ['data node'], 2)]
    cluster = _create_cluster_mock(node_groups, ["data node"])
    nova = _create_nova_mock(novaclient)
    instances._create_instances(cluster)
    userdata = _generate_user_data_script(cluster)
    # First server gets no hint; second must avoid the first's host.
    nova.servers.create.assert_has_calls(
        [mock.call("test_cluster-test_group-001",
                   "initial",
                   "test_flavor",
                   scheduler_hints=None,
                   userdata=userdata,
                   key_name='user_keypair'),
         mock.call("test_cluster-test_group-002",
                   "initial",
                   "test_flavor",
                   scheduler_hints={'different_host': ["1"]},
                   userdata=userdata,
                   key_name='user_keypair')],
        any_order=False)
    # Both instances must have been persisted on the node group.
    ctx = context.ctx()
    cluster_obj = conductor.cluster_get_all(ctx)[0]
    self.assertEqual(len(cluster_obj.node_groups[0].instances), 2)
开发者ID:rrader,项目名称:savanna,代码行数:26,代码来源:test_instances.py
示例18: test_cluster_create_from_templates
def test_cluster_create_from_templates(self):
    """A cluster built from a cluster template inherits the node-group
    template overrides (volumes_size, node_configs, node_processes)."""
    ctx = context.ctx()

    # create node_group_template with local overrides
    ng_tmpl = copy.deepcopy(test_templates.SAMPLE_NGT)
    ng_tmpl['volumes_size'] = 10
    ng_tmpl['node_configs']['service_1']['config_2'] = 'value_2'
    ng_tmpl = self.api.node_group_template_create(ctx, ng_tmpl)

    # create cluster template
    cl_tmpl = self.api.cluster_template_create(ctx,
                                               test_templates.SAMPLE_CLT)

    # create cluster referencing both templates
    cluster_val = copy.deepcopy(test_clusters.SAMPLE_CLUSTER)
    cluster_val['cluster_template_id'] = cl_tmpl['id']
    cluster_val['node_groups'][0]['node_group_template_id'] = ng_tmpl['id']
    cluster = self.api.cluster_create(ctx, cluster_val)

    # merged configs and per-group overrides must show up on the cluster
    self.assertEqual(CORRECT_CONF, cluster['cluster_configs'])
    for node_group in cluster['node_groups']:
        if node_group['name'] == 'ng_1':
            self.assertEqual(['p1', 'p2'], node_group['node_processes'])
            self.assertEqual(10, node_group['volumes_size'])
            self.assertEqual(CORRECT_CONF, node_group['node_configs'])
开发者ID:hguemar,项目名称:sahara,代码行数:25,代码来源:test_from_template.py
示例19: create_hadoop_ssh_keys
def create_hadoop_ssh_keys(cluster):
    """Generate a fresh SSH key pair and persist it in the cluster's
    'extra' field; returns the updated cluster object."""
    private_key, public_key = crypto.generate_key_pair()
    hadoop_keys = {
        'hadoop_private_ssh_key': private_key,
        'hadoop_public_ssh_key': public_key,
    }
    return conductor.cluster_update(
        context.ctx(), cluster, {'extra': hadoop_keys})
开发者ID:hguemar,项目名称:sahara,代码行数:7,代码来源:installer.py
示例20: init_instances_ips
def init_instances_ips(instance):
    """Extract and persist the internal and management IPs of an instance.

    The internal ip is the first 'fixed' address nova reports; the
    management ip is the first non-fixed one. When floating IPs are
    disabled, the internal ip doubles as the management ip.

    Returns a truthy value only when both ips were determined.
    """
    server = nova.get_instance_info(instance)

    internal_ip = None
    management_ip = None
    for _network_label, addresses in six.iteritems(server.addresses):
        for address in addresses:
            # Keep the first address seen of each kind.
            if address['OS-EXT-IPS:type'] == 'fixed':
                internal_ip = internal_ip or address['addr']
            else:
                management_ip = management_ip or address['addr']

    if not CONF.use_floating_ips:
        management_ip = internal_ip

    conductor.instance_update(context.ctx(), instance,
                              {"management_ip": management_ip,
                               "internal_ip": internal_ip})

    return internal_ip and management_ip
开发者ID:chiehwen,项目名称:savanna,代码行数:28,代码来源:networks.py
注:本文中的savanna.context.ctx函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论