This page collects typical usage examples of the Python function savanna.utils.remote.get_remote. If you are wondering what get_remote does, how to call it, or what real-world usage looks like, the curated code examples below should help.
A total of 20 code examples of the get_remote function are shown below, sorted by popularity by default.
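Before diving into the individual examples, note the two call patterns that recur throughout them: get_remote is used either as a context manager, so that several commands and file writes share one connection, or as a one-off call whose return value is consumed immediately. The following minimal sketch is not taken from the Savanna source; the instance argument and the file path are placeholders, and it only restates the two patterns seen in the examples below:

from savanna.utils import remote

def remote_usage_sketch(instance):
    # Pattern 1: context manager -- reuse a single connection for
    # several operations on the same instance.
    with remote.get_remote(instance) as r:
        r.write_file_to('/tmp/example.conf', 'key=value\n')
        code, stdout = r.execute_command('cat /tmp/example.conf')

    # Pattern 2: one-off call -- the returned remote object is used once.
    exit_code, hostname = remote.get_remote(instance).execute_command('hostname')
    return exit_code, hostname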
Example 1: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_namenode(cluster)
    datanodes = utils.get_datanodes(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    tasktrackers = utils.get_tasktrackers(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_process(r, "namenode")

    snns = utils.get_secondarynamenodes(cluster)
    if snns:
        for snn in snns:
            run.start_process(remote.get_remote(snn), "secondarynamenode")

    for dn in datanodes:
        run.start_process(remote.get_remote(dn), "datanode")
    LOG.info("HDFS service at '%s' has been started",
             nn_instance.hostname)

    if jt_instance:
        run.start_process(remote.get_remote(jt_instance), "jobtracker")
        for tt in tasktrackers:
            run.start_process(remote.get_remote(tt), "tasktracker")
        LOG.info("MapReduce service at '%s' has been started",
                 jt_instance.hostname)

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
Developer ID: zuiwanyuan, Project: savanna, Lines: 28, Source: plugin.py
Example 2: decommission_dn
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        r.write_file_to("/etc/hadoop/dn.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        att_amount = 100
        while att_amount:
            cmd = r.execute_command("sudo su -c 'hadoop dfsadmin -report' hadoop")
            all_found = True
            datanodes_info = parse_dfs_report(cmd[1])
            for i in inst_to_be_deleted:
                for dn in datanodes_info:
                    if (dn["Name"].startswith(i.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        all_found = False
                        break

            if all_found:
                r.write_files_to(
                    {"/etc/hadoop/dn.incl":
                        utils.generate_fqdn_host_names(survived_inst),
                     "/etc/hadoop/dn.excl": ""}
                )
                break
            context.sleep(3)
            att_amount -= 1

        if not att_amount:
            raise Exception("Cannot finish decommission")
Developer ID: rrader, Project: savanna, Lines: 27, Source: scaling.py
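Example 2 above leans on a parse_dfs_report helper that is not shown on this page. Purely for illustration, here is a minimal sketch of what such a parser could look like, assuming the classic `hadoop dfsadmin -report` layout in which each datanode section starts with a Name line and also contains a Decommission Status line; the actual Savanna implementation may differ:

def parse_dfs_report_sketch(report_text):
    # Illustrative only: group the "key: value" lines of the report by datanode.
    datanodes = []
    current = None
    for line in report_text.splitlines():
        if ':' not in line:
            continue
        key, _, value = line.partition(':')
        key, value = key.strip(), value.strip()
        if key == 'Name':
            # every datanode section begins with its Name line
            current = {'Name': value}
            datanodes.append(current)
        elif current is not None:
            current[key] = value
    return datanodes

Given such a structure, the dn["Name"].startswith(i.internal_ip) and dn["Decommission Status"] checks in the loop above operate directly on the parsed dictionaries.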
Example 3: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with remote.get_remote(jt) as r:
        r.write_file_to("/etc/hadoop/tt.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to(
            {"/etc/hadoop/tt.incl":
                utils.generate_fqdn_host_names(survived_inst),
             "/etc/hadoop/tt.excl": ""}
        )
Developer ID: rrader, Project: savanna, Lines: 8, Source: scaling.py
Example 4: scale_cluster
def scale_cluster(self, cluster, instances):
    self._setup_instances(cluster, instances)

    run.refresh_nodes(remote.get_remote(
        utils.get_namenode(cluster)), "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")

    self._start_tt_dn_processes(instances)
Developer ID: rrader, Project: savanna, Lines: 10, Source: plugin.py
Example 5: _push_configs_to_nodes
def _push_configs_to_nodes(self, cluster, instances=None):
    extra = self._extract_configs_to_extra(cluster)

    if instances is None:
        instances = utils.get_instances(cluster)

    for inst in instances:
        ng_extra = extra[inst.node_group.id]
        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script']
        }
        with remote.get_remote(inst) as r:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.execute_command(
                'sudo chown -R $USER:$USER /opt/oozie/conf'
            )
            r.write_files_to(files)
            r.execute_command(
                'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
            )
            r.execute_command(
                'sudo /tmp/savanna-hadoop-init.sh '
                '>> /tmp/savanna-hadoop-init.log 2>&1')

    nn = utils.get_namenode(cluster)
    jt = utils.get_jobtracker(cluster)

    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.incl',
                        utils.generate_fqdn_host_names(
                            utils.get_datanodes(cluster)))
    if jt:
        with remote.get_remote(jt) as r:
            r.write_file_to('/etc/hadoop/tt.incl',
                            utils.generate_fqdn_host_names(
                                utils.get_tasktrackers(cluster)))

    oozie = utils.get_oozie(cluster)
    if oozie:
        with remote.get_remote(oozie) as r:
            r.write_file_to('/opt/oozie/conf/oozie-site.xml',
                            extra[oozie.node_group.id]['xml']['oozie-site'])
Developer ID: ruben11, Project: savanna, Lines: 49, Source: plugin.py
Example 6: _configure_instances
def _configure_instances(cluster):
    """Configure active instances.

    * generate /etc/hosts
    * setup passwordless login
    * etc.
    """
    hosts = _generate_etc_hosts(cluster)
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            with remote.get_remote(instance) as r:
                r.write_file_to('etc-hosts', hosts)
                r.execute_command('sudo mv etc-hosts /etc/hosts')

                # wait for the instance's id_rsa key to be generated
                timeout = 10
                cur_time = 0
                while cur_time < timeout:
                    code, _ = r.execute_command('ls .ssh/id_rsa',
                                                raise_when_error=False)
                    if code:
                        cur_time += 1
                        context.sleep(1)
                    else:
                        break
                else:
                    raise RuntimeError("Error getting user private key")

                r.execute_command('sudo chown $USER:$USER .ssh/id_rsa')
                r.execute_command('chmod 400 .ssh/id_rsa')
Developer ID: jfzhang1984, Project: savanna, Lines: 30, Source: instances.py
Example 7: _check_if_up
def _check_if_up(instance):
    if hasattr(instance, '_is_up'):
        return True

    server = nova.get_instance_info(instance)
    if server.status == 'ERROR':
        # TODO(slukjanov): replace with specific error
        raise RuntimeError("node %s has error status" % server.name)

    if server.status != 'ACTIVE':
        return False

    if len(server.networks) == 0:
        return False

    if not networks.init_instances_ips(instance, server):
        return False

    try:
        exit_code, _ = remote.get_remote(instance).execute_command("hostname")
        if exit_code:
            return False
    except Exception as ex:
        LOG.debug("Can't login to node %s (%s), reason %s",
                  server.name, instance.management_ip, ex)
        return False

    instance._is_up = True
    return True
Developer ID: jfzhang1984, Project: savanna, Lines: 29, Source: instances.py
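_check_if_up returns immediately rather than blocking, so callers normally poll it. A minimal sketch of such a polling loop, reusing the context.sleep helper that appears elsewhere on this page (the actual Savanna waiting code is more elaborate, with timeouts and cluster-existence checks):

def _await_active_sketch(instances):
    # Illustrative only: wait until every instance answers 'hostname' over SSH.
    while not all(_check_if_up(instance) for instance in instances):
        context.sleep(1)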
Example 8: create_workflow_dir
def create_workflow_dir(where, job, hdfs_user):
    constructed_dir = '/user/%s/' % hdfs_user
    constructed_dir = _add_postfix(constructed_dir)
    constructed_dir += '%s/%s' % (job.name, six.text_type(uuid.uuid4()))
    with remote.get_remote(where) as r:
        h.create_dir(r, constructed_dir, hdfs_user)

    return constructed_dir
Developer ID: hguemar, Project: sahara, Lines: 8, Source: job_manager.py
Example 9: create_workflow_dir
def create_workflow_dir(where, job):
    constructed_dir = '/user/hadoop/'
    constructed_dir = _add_postfix(constructed_dir)
    constructed_dir += '%s/%s' % (job.name, uuidutils.generate_uuid())
    with remote.get_remote(where) as r:
        h.create_dir(r, constructed_dir)

    return constructed_dir
Developer ID: StokesB1, Project: savanna, Lines: 8, Source: job_manager.py
Example 10: scale_cluster
def scale_cluster(self, cluster, instances):
    self._push_configs_to_nodes(cluster, instances=instances)
    self._write_hadoop_user_keys(cluster.private_key,
                                 instances)

    run.refresh_nodes(remote.get_remote(
        utils.get_namenode(cluster)), "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")

    for i in instances:
        with remote.get_remote(i) as r:
            if "datanode" in i.node_group.node_processes:
                run.start_process(r, "datanode")

            if "tasktracker" in i.node_group.node_processes:
                run.start_process(r, "tasktracker")
Developer ID: ruben11, Project: savanna, Lines: 17, Source: plugin.py
Example 11: upload_job_file
def upload_job_file(where, job_dir, job_origin, job):
    main_binary = conductor.job_binary_get_raw_data(context.ctx(),
                                                    job_origin.url)
    if job.type == 'Jar':
        job_dir += '/lib'

    with remote.get_remote(where) as r:
        h.put_file_to_hdfs(r, main_binary, main_res_names[job.type], job_dir)

    return "%s/%s" % (job_dir, main_res_names[job.type])
Developer ID: jfzhang2013, Project: savanna, Lines: 9, Source: job_manager.py
Example 12: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_namenode(cluster)
    datanodes = utils.get_datanodes(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    tasktrackers = utils.get_tasktrackers(cluster)
    oozie = utils.get_oozie(cluster)
    hive_server = utils.get_hiveserver(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_process(r, "namenode")

    snns = utils.get_secondarynamenodes(cluster)
    if snns:
        for snn in snns:
            run.start_process(remote.get_remote(snn), "secondarynamenode")

    for dn in datanodes:
        run.start_process(remote.get_remote(dn), "datanode")
    LOG.info("HDFS service at '%s' has been started",
             nn_instance.hostname)

    if jt_instance:
        run.start_process(remote.get_remote(jt_instance), "jobtracker")
        for tt in tasktrackers:
            run.start_process(remote.get_remote(tt), "tasktracker")
        LOG.info("MapReduce service at '%s' has been started",
                 jt_instance.hostname)

    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
            run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname)
            run.start_oozie(r)
            LOG.info("Oozie service at '%s' has been started",
                     nn_instance.hostname)

    if hive_server:
        with remote.get_remote(nn_instance) as r:
            run.hive_create_warehouse_dir(r)
        if c_helper.is_mysql_enable(cluster):
            with remote.get_remote(hive_server) as h:
                if not oozie or hive_server.hostname != oozie.hostname:
                    run.mysql_start(h, hive_server)
                run.hive_create_db(h)
                run.hive_metastore_start(h)
                LOG.info("Hive Metastore server at %s has been started",
                         hive_server.hostname)

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
Developer ID: vrovachev, Project: savanna, Lines: 52, Source: plugin.py
Example 13: _push_configs_to_nodes
def _push_configs_to_nodes(self, cluster, instances=None):
    if instances is None:
        instances = utils.get_instances(cluster)

    for inst in instances:
        files = {
            '/etc/hadoop/core-site.xml': inst.node_group.extra['xml'][
                'core-site'],
            '/etc/hadoop/mapred-site.xml': inst.node_group.extra['xml'][
                'mapred-site'],
            '/etc/hadoop/hdfs-site.xml': inst.node_group.extra['xml'][
                'hdfs-site'],
            '/tmp/savanna-hadoop-init.sh': inst.node_group.extra[
                'setup_script']
        }
        with remote.get_remote(inst) as r:
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.write_files_to(files)
            r.execute_command(
                'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
            )
            r.execute_command(
                'sudo /tmp/savanna-hadoop-init.sh '
                '>> /tmp/savanna-hadoop-init.log 2>&1')

    nn = utils.get_namenode(cluster)
    jt = utils.get_jobtracker(cluster)

    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.incl',
                        utils.generate_fqdn_host_names(
                            utils.get_datanodes(cluster)))
    if jt:
        with remote.get_remote(jt) as r:
            r.write_file_to('/etc/hadoop/tt.incl',
                            utils.generate_fqdn_host_names(
                                utils.get_tasktrackers(cluster)))
Developer ID: zuiwanyuan, Project: savanna, Lines: 39, Source: plugin.py
Example 14: _push_configs_to_existing_node
def _push_configs_to_existing_node(self, cluster, extra, instance):
    node_processes = instance.node_group.node_processes
    need_update = (c_helper.is_data_locality_enabled(cluster) or
                   'namenode' in node_processes or
                   'jobtracker' in node_processes or
                   'oozie' in node_processes or
                   'hiveserver' in node_processes)

    if not need_update:
        return

    with remote.get_remote(instance) as r:
        self._write_topology_data(r, cluster, extra)
        self._push_master_configs(r, cluster, extra, instance)
Developer ID: rrader, Project: savanna, Lines: 14, Source: plugin.py
Example 15: _mount_volume
def _mount_volume(instance, device_path, mount_point):
    codes = []
    with remote.get_remote(instance) as r:
        code, _ = r.execute_command('sudo mkdir -p %s' % mount_point)
        codes.append(code)
        code, _ = r.execute_command('sudo mkfs.ext4 %s' % device_path)
        codes.append(code)
        code, _ = r.execute_command('sudo mount %s %s' % (device_path,
                                                          mount_point))
        codes.append(code)

    if any(codes):
        raise RuntimeError("Error mounting volume to instance %s" %
                           instance.instance_id)
Developer ID: jfzhang1984, Project: savanna, Lines: 14, Source: volumes.py
Example 16: _write_hadoop_user_keys
def _write_hadoop_user_keys(self, instances, private_key, public_key):
    files = {
        'id_rsa': private_key,
        'authorized_keys': public_key
    }

    mv_cmd = 'sudo mkdir -p /home/hadoop/.ssh/; ' \
             'sudo mv id_rsa authorized_keys /home/hadoop/.ssh ; ' \
             'sudo chown -R hadoop:hadoop /home/hadoop/.ssh; ' \
             'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}'

    for instance in instances:
        with remote.get_remote(instance) as r:
            r.write_files_to(files)
            r.execute_command(mv_cmd)
Developer ID: vrovachev, Project: savanna, Lines: 15, Source: plugin.py
Example 17: upload_job_files
def upload_job_files(where, job_dir, job_origin):
    mains = job_origin.mains or []
    libs = job_origin.libs or []
    uploaded_paths = []

    with remote.get_remote(where) as r:
        for main in mains:
            raw_data = dispatch.get_raw_binary(main)
            h.put_file_to_hdfs(r, raw_data, main.name, job_dir)
            uploaded_paths.append(job_dir + '/' + main.name)
        for lib in libs:
            raw_data = dispatch.get_raw_binary(lib)
            h.put_file_to_hdfs(r, raw_data, lib.name, job_dir + "/lib")
            uploaded_paths.append(job_dir + '/lib/' + lib.name)

    return uploaded_paths
Developer ID: StokesB1, Project: savanna, Lines: 16, Source: job_manager.py
Example 18: _get_device_paths
def _get_device_paths(instance):
    code, part_info = remote.get_remote(instance).execute_command(
        'cat /proc/partitions')
    if code:
        raise RuntimeError("Unable get device paths info")

    out = part_info.split('\n')[1:]
    device_paths = []
    for line in out:
        spl = line.split()
        if len(spl) > 3:
            dev = spl[3]
            if not re.search('\d$', dev):
                device_paths.append('/dev/' + dev)

    return device_paths
Developer ID: jfzhang1984, Project: savanna, Lines: 16, Source: volumes.py
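The volume helpers in examples 15 and 18 are naturally used together. A minimal sketch, assuming the caller has already narrowed the device list down to freshly attached, unformatted volumes (for instance by diffing _get_device_paths() before and after attaching them), since _mount_volume runs mkfs and would wipe any device it is pointed at; the /volumes/diskN naming is a placeholder, not Savanna's actual scheme:

def _mount_new_volumes_sketch(instance, new_device_paths):
    # Illustrative only: format and mount each newly attached device.
    for idx, device_path in enumerate(new_device_paths):
        mount_point = '/volumes/disk%s' % (idx + 1)  # hypothetical mount-point naming
        _mount_volume(instance, device_path, mount_point)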
Example 19: _push_configs_to_new_node
def _push_configs_to_new_node(self, cluster, extra, instance):
    ng_extra = extra[instance.node_group.id]
    private_key, public_key = c_helper.get_hadoop_ssh_keys(cluster)

    files = {
        '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
        '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
        '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
        '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script'],
        'id_rsa': private_key,
        'authorized_keys': public_key
    }

    key_cmd = 'sudo mkdir -p /home/hadoop/.ssh/ && ' \
              'sudo mv id_rsa authorized_keys /home/hadoop/.ssh && ' \
              'sudo chown -R hadoop:hadoop /home/hadoop/.ssh && ' \
              'sudo chmod 600 /home/hadoop/.ssh/{id_rsa,authorized_keys}'

    with remote.get_remote(instance) as r:
        # TODO(aignatov): sudo chown is wrong solution. But it works.
        r.execute_command(
            'sudo chown -R $USER:$USER /etc/hadoop'
        )
        r.execute_command(
            'sudo chown -R $USER:$USER /opt/oozie/conf'
        )
        r.write_files_to(files)
        r.execute_command(
            'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
        )
        r.execute_command(
            'sudo /tmp/savanna-hadoop-init.sh '
            '>> /tmp/savanna-hadoop-init.log 2>&1')
        r.execute_command(key_cmd)

        if c_helper.is_data_locality_enabled(cluster):
            r.write_file_to(
                '/etc/hadoop/topology.sh',
                f.get_file_text(
                    'plugins/vanilla/resources/topology.sh'))
            r.execute_command(
                'sudo chmod +x /etc/hadoop/topology.sh'
            )

        self._write_topology_data(r, cluster, extra)
        self._push_master_configs(r, cluster, extra, instance)
Developer ID: rrader, Project: savanna, Lines: 47, Source: plugin.py
Example 20: _await_datanodes
def _await_datanodes(self, cluster):
    datanodes_count = len(utils.get_datanodes(cluster))
    if datanodes_count < 1:
        return

    LOG.info("Waiting %s datanodes to start up" % datanodes_count)
    with remote.get_remote(utils.get_namenode(cluster)) as r:
        while True:
            if run.check_datanodes_count(r, datanodes_count):
                LOG.info(
                    'Datanodes on cluster %s has been started' %
                    cluster.name)
                return

            context.sleep(1)

            if not g.check_cluster_exists(cluster):
                LOG.info(
                    'Stop waiting datanodes on cluster %s since it has '
                    'been deleted' % cluster.name)
                return
Developer ID: rrader, Project: savanna, Lines: 21, Source: plugin.py
Note: The savanna.utils.remote.get_remote examples on this page were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use must follow the corresponding project's license. Do not reproduce without permission.