本文整理汇总了Python中savanna.context.sleep函数的典型用法代码示例。如果您正苦于以下问题:Python sleep函数的具体用法?Python sleep怎么用?Python sleep使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sleep函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _configure_instances
def _configure_instances(cluster):
    """Configure every active instance in the cluster.

    For each instance: install the generated /etc/hosts file, wait for
    the user's id_rsa key to be generated, then fix its ownership and
    permissions for passwordless login between nodes.
    """
    etc_hosts_content = _generate_etc_hosts(cluster)
    for group in cluster.node_groups:
        for instance in group.instances:
            with remote.get_remote(instance) as conn:
                conn.write_file_to('etc-hosts', etc_hosts_content)
                conn.execute_command('sudo mv etc-hosts /etc/hosts')

                # Poll until the id_rsa key appears, giving up after
                # ten one-second attempts.
                attempts_left = 10
                key_found = False
                while attempts_left > 0:
                    exit_code, _ = conn.execute_command(
                        'ls .ssh/id_rsa', raise_when_error=False)
                    if not exit_code:
                        key_found = True
                        break
                    attempts_left -= 1
                    context.sleep(1)
                if not key_found:
                    raise RuntimeError("Error getting user private key")

                conn.execute_command('sudo chown $USER:$USER .ssh/id_rsa')
                conn.execute_command('chmod 400 .ssh/id_rsa')
开发者ID:jfzhang1984,项目名称:savanna,代码行数:30,代码来源:instances.py
示例2: _await_networks
def _await_networks(cluster, instances):
    """Block until every instance has an IP and answers over SSH."""
    if not instances:
        return

    # Phase 1: wait for an IP to be assigned to every instance.
    with_ips = set()
    while len(with_ips) != len(instances):
        if not g.check_cluster_exists(instances[0].node_group.cluster):
            return
        for instance in instances:
            if instance.id not in with_ips and \
                    networks.init_instances_ips(instance):
                with_ips.add(instance.id)
        context.sleep(1)

    LOG.info("Cluster '%s': all instances have IPs assigned" % cluster.id)

    # Phase 2: wait concurrently for SSH to come up on each instance.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, instances[0].node_group.cluster)
    instances = _get_instances(cluster, with_ips)
    with context.ThreadGroup() as pool:
        for instance in instances:
            pool.spawn("wait-for-ssh-%s" % instance.instance_name,
                       _wait_until_accessible, instance)

    LOG.info("Cluster '%s': all instances are accessible" % cluster.id)
开发者ID:rrader,项目名称:savanna,代码行数:27,代码来源:instances.py
示例3: _await_networks
def _await_networks(instances):
    """Wait until all instances have IPs assigned and are SSH-accessible."""
    if not instances:
        return

    # First pass: IP assignment.
    assigned = set()
    while len(assigned) != len(instances):
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id not in assigned and networks.init_instances_ips(inst):
                assigned.add(inst.id)
        context.sleep(1)

    # Re-read the instance list from the conductor before the SSH check.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, instances[0].node_group.cluster)
    instances = get_instances(cluster, assigned)

    # Second pass: SSH accessibility.
    reachable = set()
    while len(reachable) != len(instances):
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id not in reachable and _check_if_accessible(inst):
                reachable.add(inst.id)
        context.sleep(1)
开发者ID:chiehwen,项目名称:savanna,代码行数:29,代码来源:instances.py
示例4: _wait_for_host_registrations
def _wait_for_host_registrations(self, num_hosts, ambari_info):
    """Poll the Ambari server until num_hosts agents have registered."""
    LOG.info(
        'Waiting for all Ambari agents to register with server ...')
    url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
    response = None
    hosts_json = None

    #TODO(jspeidel): timeout
    while response is None or len(hosts_json['items']) < num_hosts:
        context.sleep(5)
        try:
            response = requests.get(url, auth=(ambari_info.user,
                                               ambari_info.password))
            hosts_json = json.loads(response.text)
            # TODO(jspeidel): just for debug
            LOG.info('Registered Hosts: {0} of {1}'.format(
                len(hosts_json['items']), num_hosts))
            for entry in hosts_json['items']:
                LOG.debug('Registered Host: {0}'.format(
                    entry['Hosts']['host_name']))
        except requests.ConnectionError:
            #TODO(jspeidel): max wait time
            LOG.info('Waiting to connect to ambari server ...')
开发者ID:hanjinze,项目名称:savanna,代码行数:25,代码来源:ambariplugin.py
示例5: decommission_dn
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    """Decommission datanodes and wait until HDFS confirms completion."""
    with remote.get_remote(nn) as r:
        r.write_file_to("/etc/hadoop/dn.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        # Poll the dfsadmin report (up to 100 attempts, 3s apart) until no
        # node slated for deletion is still short of "Decommissioned".
        for _ in range(100):
            report = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")
            nodes = parse_dfs_report(report[1])
            still_active = any(
                node["Name"].startswith(inst.internal_ip)
                and node["Decommission Status"] != "Decommissioned"
                for inst in inst_to_be_deleted
                for node in nodes)
            if not still_active:
                r.write_files_to({
                    "/etc/hadoop/dn.incl":
                        utils.generate_fqdn_host_names(survived_inst),
                    "/etc/hadoop/dn.excl": "",
                })
                break
            context.sleep(3)
        else:
            raise Exception("Cannot finish decommission")
开发者ID:rrader,项目名称:savanna,代码行数:27,代码来源:scaling.py
示例6: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    """Exclude tasktrackers slated for removal and refresh the jobtracker."""
    with remote.get_remote(jt) as conn:
        to_exclude = utils.generate_fqdn_host_names(inst_to_be_deleted)
        conn.write_file_to("/etc/hadoop/tt.excl", to_exclude)
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        to_include = utils.generate_fqdn_host_names(survived_inst)
        conn.write_files_to({"/etc/hadoop/tt.incl": to_include,
                             "/etc/hadoop/tt.excl": ""})
开发者ID:rrader,项目名称:savanna,代码行数:8,代码来源:scaling.py
示例7: _await_instances
def _await_instances(cluster):
    """Await all instances are in Active status and available."""
    while True:
        # Check every instance each pass (no short-circuit) so each one
        # gets probed even after the first failure is seen.
        statuses = [_check_if_up(instance)
                    for group in cluster.node_groups
                    for instance in group.instances]
        context.sleep(1)
        if all(statuses):
            break
开发者ID:akshayms,项目名称:savanna,代码行数:10,代码来源:instances.py
示例8: _await_attach_volume
def _await_attach_volume(instance, device_path):
    """Wait up to ~10 seconds for device_path to appear on the instance."""
    attempts_left = 10
    while attempts_left > 0:
        if device_path in _get_device_paths(instance):
            return
        context.sleep(1)
        attempts_left -= 1
    raise RuntimeError("Error attach volume to instance %s" %
                       instance.instance_name)
开发者ID:jfzhang1984,项目名称:savanna,代码行数:11,代码来源:volumes.py
示例9: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    """Exclude the given tasktrackers and refresh the jobtracker node list."""
    with jt.remote as r:
        excl_hosts = utils.generate_fqdn_host_names(inst_to_be_deleted)
        r.write_file_to('/etc/hadoop/tt.excl', excl_hosts)
        run.refresh_nodes(jt.remote, "mradmin")
        context.sleep(3)
        incl_hosts = utils.generate_fqdn_host_names(survived_inst)
        r.write_files_to({'/etc/hadoop/tt.incl': incl_hosts,
                          '/etc/hadoop/tt.excl': ""})
开发者ID:akshayms,项目名称:savanna,代码行数:11,代码来源:scaling.py
示例10: _await_attach_volumes
def _await_attach_volumes(instance, count_volumes):
    """Wait up to 10s (polling every 2s) for all volumes to become visible."""
    remaining = 10
    poll_interval = 2
    while remaining > 0:
        if len(_get_unmounted_devices(instance)) == count_volumes:
            return
        remaining -= poll_interval
        context.sleep(poll_interval)
    raise RuntimeError("Error attach volume to instance %s" %
                       instance.instance_name)
开发者ID:rrader,项目名称:savanna,代码行数:12,代码来源:volumes.py
示例11: _await_active
def _await_active(instances):
    """Await all instances are in Active status and available."""
    if not instances:
        return
    active = set()
    while len(active) != len(instances):
        # Abort quietly if the cluster vanished while we were waiting.
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id not in active and _check_if_active(inst):
                active.add(inst.id)
        context.sleep(1)
开发者ID:chiehwen,项目名称:savanna,代码行数:15,代码来源:instances.py
示例12: _create_attach_volume
def _create_attach_volume(instance, size, device_path, display_name=None,
                          volume_type=None):
    """Create a Cinder volume, wait until usable, attach it to the instance."""
    volume = cinder.client().volumes.create(size=size,
                                            display_name=display_name,
                                            volume_type=volume_type)
    instance.volumes.append(volume.id)

    # Poll until the volume leaves its transitional state.
    while True:
        if volume.status == 'available':
            break
        volume = cinder.get_volume(volume.id)
        if volume.status == 'error':
            raise RuntimeError("Volume %s has error status" % volume.id)
        context.sleep(1)

    nova.client().volumes.create_server_volume(instance.instance_id,
                                               volume.id, device_path)
开发者ID:jfzhang1984,项目名称:savanna,代码行数:16,代码来源:volumes.py
示例13: _wait_until_accessible
def _wait_until_accessible(instance):
    """Loop until the instance answers SSH, or its cluster disappears."""
    while True:
        try:
            # check if ssh is accessible and cloud-init
            # script is finished generating authorized_keys
            code, _ = instance.remote().execute_command(
                "ls .ssh/authorized_keys", raise_when_error=False)
            if code == 0:
                LOG.debug("Instance %s is accessible" % instance.instance_name)
                return
        except Exception as ex:
            LOG.debug("Can't login to node %s (%s), reason %s",
                      instance.instance_name, instance.management_ip, ex)

        context.sleep(5)

        # Stop waiting if the cluster was deleted in the meantime.
        if not g.check_cluster_exists(instance.node_group.cluster):
            return
开发者ID:rrader,项目名称:savanna,代码行数:17,代码来源:instances_heat.py
示例14: _wait_for_async_request
def _wait_for_async_request(self, request_url, auth):
    """Poll an async Ambari request until every task completes.

    Returns True when all tasks reach COMPLETED; False as soon as any
    task is FAILED or ABORTED.
    """
    while True:
        response = requests.get(request_url, auth=auth)
        LOG.debug(
            'async request ' + request_url + ' response:\n' + response.text)
        body = json.loads(response.text)
        all_done = True
        for task in body['items']:
            task_status = task['Tasks']['status']
            if task_status == 'FAILED' or task_status == 'ABORTED':
                return False
            if task_status != 'COMPLETED':
                all_done = False
        # Original behavior: sleep even on the final (successful) pass.
        context.sleep(5)
        if all_done:
            return True
开发者ID:hanjinze,项目名称:savanna,代码行数:18,代码来源:ambariplugin.py
示例15: run_in_subprocess
def run_in_subprocess(proc, func, args=(), kwargs=None):
    """Ship a callable to an already-started subprocess and return its result.

    ``func``, its positional ``args`` and keyword ``kwargs`` are pickled to
    the subprocess's stdin; the subprocess is expected to reply on stdout
    with a pickled dict containing either an 'output' or an 'exception' key.

    :param proc: subprocess with pickle-speaking stdin/stdout pipes
    :param func: picklable callable to execute in the subprocess
    :param args: positional arguments for func
    :param kwargs: keyword arguments for func (default: none)
    :returns: the value the subprocess reports under 'output'
    :raises SubprocessException: if the subprocess reports an exception
    """
    # Fix: the original used the mutable default 'kwargs={}', a classic
    # shared-state pitfall; a None sentinel is behaviorally equivalent here.
    if kwargs is None:
        kwargs = {}
    try:
        pickle.dump(func, proc.stdin)
        pickle.dump(args, proc.stdin)
        pickle.dump(kwargs, proc.stdin)
        proc.stdin.flush()
        result = pickle.load(proc.stdout)
        if 'exception' in result:
            raise SubprocessException(result['exception'])
        return result['output']
    finally:
        # NOTE(dmitryme): in openstack/common/processutils.py it
        # is suggested to sleep a little between calls to multiprocessing.
        # That should allow it make some necessary cleanup
        context.sleep(0)
开发者ID:chiehwen,项目名称:savanna,代码行数:18,代码来源:procutils.py
示例16: start
def start(self):
    """Issue a start command for the service and wait until it is running."""
    url = ('/cluster/%s/services/%s/commands/start'
           % (self.cluster_name, self.service))
    self.rest.post(url)

    #TODO(alazarev) make timeout configurable (bug #1262897)
    timeout = 600
    # Poll every 2 seconds; raise if the service never reaches 'running'.
    for _ in range(timeout // 2):
        context.sleep(2)
        if self.status() == 'running':
            break
    else:
        raise iex.IntelPluginException(
            "Service '%s' has failed to start in %s seconds"
            % (self.service, timeout))
开发者ID:hguemar,项目名称:sahara,代码行数:19,代码来源:services.py
示例17: _wait_for_async_request
def _wait_for_async_request(self, request_id, cluster_name, ambari_host):
    """Poll an Ambari request's tasks; True when all COMPLETED, False on failure."""
    request_url = ("http://{0}:8080/api/v1/clusters/{1}/requests/"
                   "{2}/tasks?fields=Tasks/status".format(
                       ambari_host.management_ip, cluster_name, request_id))
    while True:
        response = requests.get(request_url,
                                auth=(self.ambari_user, self.ambari_password))
        LOG.debug("async request " + request_url + " response:\n"
                  + response.text)
        body = json.loads(response.text)
        finished = True
        for task in body["items"]:
            state = task["Tasks"]["status"]
            if state == "FAILED" or state == "ABORTED":
                return False
            if state != "COMPLETED":
                finished = False
        # Original behavior: sleep even on the final (successful) pass.
        context.sleep(5)
        if finished:
            return True
开发者ID:ruben11,项目名称:savanna,代码行数:20,代码来源:ambariplugin.py
示例18: _await_datanodes
def _await_datanodes(self, cluster):
    """Wait until every datanode registers with the namenode."""
    expected = len(utils.get_datanodes(cluster))
    if expected < 1:
        return
    LOG.info("Waiting %s datanodes to start up" % expected)
    with remote.get_remote(utils.get_namenode(cluster)) as r:
        while not run.check_datanodes_count(r, expected):
            context.sleep(1)
            # Give up if the cluster was deleted while we waited.
            if not g.check_cluster_exists(cluster):
                LOG.info(
                    'Stop waiting datanodes on cluster %s since it has '
                    'been deleted' % cluster.name)
                return
        LOG.info('Datanodes on cluster %s has been started' % cluster.name)
开发者ID:rrader,项目名称:savanna,代码行数:21,代码来源:plugin.py
示例19: _wait_for_host_registrations
def _wait_for_host_registrations(self, num_hosts, ambari_host):
    """Poll the Ambari server until num_hosts agents have registered."""
    LOG.info("Waiting for all Ambari agents to register with server ...")
    url = "http://{0}:8080/api/v1/hosts".format(ambari_host.management_ip)
    response = None
    payload = None

    # TODO(jspeidel): timeout
    while response is None or len(payload["items"]) < num_hosts:
        context.sleep(5)
        try:
            response = requests.get(
                url, auth=(self.ambari_user, self.ambari_password))
            payload = json.loads(response.text)
            # TODO(jspeidel): just for debug
            LOG.info("Registered Hosts: {0} of {1}".format(
                len(payload["items"]), num_hosts))
            for entry in payload["items"]:
                LOG.debug("Registered Host: {0}".format(
                    entry["Hosts"]["host_name"]))
        except requests.ConnectionError:
            # TODO(jspeidel): max wait time
            LOG.info("Waiting to connect to ambari server ...")
开发者ID:ruben11,项目名称:savanna,代码行数:21,代码来源:ambariplugin.py
示例20: _wait_for_async_request
def _wait_for_async_request(self, request_id, cluster_name, ambari_host):
    """Poll Ambari request tasks; True when all COMPLETED, False on failure."""
    request_url = ('http://{0}:8080/api/v1/clusters/{1}/requests/'
                   '{2}/tasks?fields=Tasks/status'.format(
                       ambari_host.management_ip, cluster_name, request_id))
    while True:
        response = requests.get(request_url, auth=('admin', 'admin'))
        LOG.debug(
            'async request ' + request_url + ' response:\n' + response.text)
        body = json.loads(response.text)
        finished = True
        for task in body['items']:
            state = task['Tasks']['status']
            if state == 'FAILED' or state == 'ABORTED':
                return False
            if state != 'COMPLETED':
                finished = False
        # Original behavior: sleep even on the final (successful) pass.
        context.sleep(5)
        if finished:
            return True
开发者ID:jfzhang1984,项目名称:savanna,代码行数:21,代码来源:ambariplugin.py
注:本文中的savanna.context.sleep函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论