本文整理汇总了Python中sahara.plugins.vanilla.utils.get_namenode函数的典型用法代码示例。如果您正苦于以下问题:Python get_namenode函数的具体用法?Python get_namenode怎么用?Python get_namenode使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_namenode函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_get_namenode
def test_get_namenode(self):
    """get_namenode returns the namenode instance, or None when absent."""
    cluster = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager, self.ng_namenode])
    self.assertEqual('nn1', u.get_namenode(cluster).instance_id)

    # A cluster whose node groups contain no namenode yields None.
    cluster = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                                [self.ng_manager])
    self.assertIsNone(u.get_namenode(cluster))
开发者ID:uladz,项目名称:sahara,代码行数:8,代码来源:test_utils.py
示例2: start_cluster
def start_cluster(self, cluster):
    """Bring up all Hadoop/YARN services on the cluster, in dependency order."""
    namenode = vu.get_namenode(cluster)
    run.format_namenode(namenode)
    run.start_hadoop_process(namenode, "namenode")

    for secondary in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(secondary, "secondarynamenode")

    resourcemanager = vu.get_resourcemanager(cluster)
    if resourcemanager:
        run.start_yarn_process(resourcemanager, "resourcemanager")

    # Datanodes must be up before dependent services are started.
    for datanode in vu.get_datanodes(cluster):
        run.start_hadoop_process(datanode, "datanode")
    run.await_datanodes(cluster)

    for nodemanager in vu.get_nodemanagers(cluster):
        run.start_yarn_process(nodemanager, "nodemanager")

    historyserver = vu.get_historyserver(cluster)
    if historyserver:
        run.start_historyserver(historyserver)

    oozie = vu.get_oozie(cluster)
    if oozie:
        run.start_oozie_process(oozie)

    self._set_cluster_info(cluster)
开发者ID:B-Rich,项目名称:sahara,代码行数:29,代码来源:versionhandler.py
示例3: start_cluster
def start_cluster(self, cluster):
    """Start all cluster services: HDFS, YARN, history server, Oozie, Hive."""
    namenode = vu.get_namenode(cluster)
    run.format_namenode(namenode)
    run.start_hadoop_process(namenode, 'namenode')

    for secondary_nn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(secondary_nn, 'secondarynamenode')

    resourcemanager = vu.get_resourcemanager(cluster)
    if resourcemanager:
        run.start_yarn_process(resourcemanager, 'resourcemanager')

    # Datanode and nodemanager processes are launched together per instance.
    run.start_dn_nm_processes(utils.get_instances(cluster))
    run.await_datanodes(cluster)

    historyserver = vu.get_historyserver(cluster)
    if historyserver:
        run.start_historyserver(historyserver)

    oozie = vu.get_oozie(cluster)
    if oozie:
        run.start_oozie_process(self.pctx, oozie)

    hiveserver = vu.get_hiveserver(cluster)
    if hiveserver:
        run.start_hiveserver_process(self.pctx, hiveserver)

    self._set_cluster_info(cluster)
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:29,代码来源:versionhandler.py
示例4: _set_cluster_info
def _set_cluster_info(self, cluster):
    """Publish MapReduce/HDFS/Oozie endpoint URLs into the cluster info."""
    namenode = vu.get_namenode(cluster)
    jobtracker = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if jobtracker:
        ui_port = c_helper.get_port_from_config(
            "MapReduce", "mapred.job.tracker.http.address", cluster)
        jt_port = c_helper.get_port_from_config(
            "MapReduce", "mapred.job.tracker", cluster)
        info["MapReduce"] = {
            "Web UI": "http://%s:%s" % (jobtracker.management_ip, ui_port),
            "JobTracker": "%s:%s" % (jobtracker.hostname(), jt_port),
        }
    if namenode:
        ui_port = c_helper.get_port_from_config(
            "HDFS", "dfs.http.address", cluster)
        nn_port = c_helper.get_port_from_config(
            "HDFS", "fs.default.name", cluster)
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (namenode.management_ip, ui_port),
            "NameNode": "hdfs://%s:%s" % (namenode.hostname(), nn_port),
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info["JobFlow"] = {"Oozie": "http://%s:11000" % oozie.management_ip}

    conductor.cluster_update(context.ctx(), cluster, {"info": info})
开发者ID:metasensus,项目名称:sahara,代码行数:30,代码来源:versionhandler.py
示例5: _set_cluster_info
def _set_cluster_info(self, cluster):
    """Record well-known service web endpoints in the cluster's info dict."""
    namenode = vu.get_namenode(cluster)
    resourcemanager = vu.get_resourcemanager(cluster)
    historyserver = vu.get_historyserver(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if resourcemanager:
        info['YARN'] = {
            'Web UI': 'http://%s:%s' % (resourcemanager.management_ip, '8088'),
            'ResourceManager': 'http://%s:%s' % (
                resourcemanager.management_ip, '8032')
        }
    if namenode:
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode.management_ip, '50070'),
            'NameNode': 'hdfs://%s:%s' % (namenode.hostname(), '9000')
        }
    if oozie:
        info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oozie.management_ip, '11000')
        }
    if historyserver:
        info['MapReduce JobHistory Server'] = {
            'Web UI': 'http://%s:%s' % (historyserver.management_ip, '19888')
        }

    conductor.cluster_update(context.ctx(), cluster, {'info': info})
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:32,代码来源:versionhandler.py
示例6: _set_cluster_info
def _set_cluster_info(self, cluster):
    """Store YARN/HDFS/Oozie/JobHistory endpoint URLs on the cluster."""
    namenode = vu.get_namenode(cluster)
    resourcemanager = vu.get_resourcemanager(cluster)
    historyserver = vu.get_historyserver(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if resourcemanager:
        info["YARN"] = {
            "Web UI": "http://%s:%s" % (resourcemanager.management_ip, "8088"),
            "ResourceManager": "http://%s:%s" % (
                resourcemanager.management_ip, "8032"),
        }
    if namenode:
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (namenode.management_ip, "50070"),
            "NameNode": "hdfs://%s:%s" % (namenode.hostname(), "9000"),
        }
    if oozie:
        info["JobFlow"] = {
            "Oozie": "http://%s:%s" % (oozie.management_ip, "11000")}
    if historyserver:
        info["MapReduce JobHistory Server"] = {
            "Web UI": "http://%s:%s" % (historyserver.management_ip, "19888")}

    conductor.cluster_update(context.ctx(), cluster, {"info": info})
开发者ID:B-Rich,项目名称:sahara,代码行数:28,代码来源:versionhandler.py
示例7: _get_hadoop_configs
def _get_hadoop_configs(node_group):
    """Assemble Hadoop/HDFS/YARN/MapReduce/Oozie/Swift configs for a node group.

    :param node_group: node group whose cluster topology and storage paths
        drive the generated configuration
    :returns: tuple of (per-service config dict, env configs)
    """
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            # Fixed: data directories belong to the datanode-side property
            # 'dfs.datanode.data.dir'. The previous key
            # 'dfs.namenode.data.dir' is not a valid HDFS property, so the
            # configured data dirs were silently ignored by Hadoop.
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }

    oozie = vu.get_oozie(cluster)
    if oozie:
        # Oozie submits jobs on behalf of users, so it needs proxy-user
        # rights in core-site plus its own oozie-site configs.
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)
        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.get_config_value(c_helper.ENABLE_SWIFT.applicable_target,
                                 c_helper.ENABLE_SWIFT.name, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    return confs, c_helper.get_env_configs()
开发者ID:hongbin,项目名称:sahara,代码行数:58,代码来源:config.py
示例8: scale_cluster
def scale_cluster(self, cluster, instances):
    """Configure new instances and refresh HDFS/MapReduce node lists."""
    self._setup_instances(cluster, instances)

    # Tell the namenode (and jobtracker, if any) to re-read include files.
    nn_remote = remote.get_remote(vu.get_namenode(cluster))
    run.refresh_nodes(nn_remote, "dfsadmin")
    jobtracker = vu.get_jobtracker(cluster)
    if jobtracker:
        run.refresh_nodes(remote.get_remote(jobtracker), "mradmin")

    self._start_tt_dn_processes(instances)
开发者ID:metasensus,项目名称:sahara,代码行数:9,代码来源:versionhandler.py
示例9: _start_oozie
def _start_oozie(self, cluster, oozie):
    """Prepare the Oozie database and sharelib, then start the service."""
    nn_instance = vu.get_namenode(cluster)
    with remote.get_remote(oozie) as r:
        if c_helper.is_mysql_enable(cluster):
            run.mysql_start(r, oozie)
            run.oozie_create_db(r)
        run.oozie_share_lib(r, nn_instance.hostname())
        run.start_oozie(r)
        # NOTE(review): the message reports the namenode host rather than
        # the Oozie host — preserved as-is from the original.
        LOG.info(_LI("Oozie service at {host} has been started").format(
            host=nn_instance.hostname()))
开发者ID:YongchaoTIAN,项目名称:sahara,代码行数:10,代码来源:versionhandler.py
示例10: _start_oozie
def _start_oozie(self, cluster, oozie):
    """Prepare the Oozie database and sharelib, then start the service."""
    nn_instance = vu.get_namenode(cluster)
    with remote.get_remote(oozie) as r:
        # Tag all remote operations with the Oozie instance id for logging.
        with context.set_current_instance_id(oozie.instance_id):
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r)
                run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname())
            run.start_oozie(r)
            LOG.info(_LI("Oozie service has been started"))
开发者ID:metasensus,项目名称:sahara,代码行数:11,代码来源:versionhandler.py
示例11: await_datanodes
def await_datanodes(cluster):
    """Block until every datanode reports in, honoring the startup timeout."""
    expected = len(vu.get_datanodes(cluster))
    if expected < 1:
        return
    message = _("Waiting on %s datanodes to start up") % expected
    with vu.get_namenode(cluster).remote() as r:
        poll_utils.plugin_option_poll(
            cluster, _check_datanodes_count,
            c_helper.DATANODES_STARTUP_TIMEOUT, message, 1,
            {'remote': r, 'count': expected})
开发者ID:Imperat,项目名称:sahara,代码行数:11,代码来源:run_scripts.py
示例12: get_datanodes_status
def get_datanodes_status(cluster):
    """Map each datanode hostname to its lower-cased decommission status.

    Parses the output of ``hdfs dfsadmin -report`` run on the namenode.
    """
    namenode = u.get_namenode(cluster)
    dfs_report = namenode.remote().execute_command(
        'sudo su - -c "hdfs dfsadmin -report" hadoop')[1]
    matcher = re.compile(r'^Hostname: (.*)\nDecommission Status : (.*)$',
                         re.MULTILINE)
    return {host: status.lower()
            for host, status in matcher.findall(dfs_report)}
开发者ID:Imperat,项目名称:sahara,代码行数:12,代码来源:utils.py
示例13: generate_sahara_configs
def generate_sahara_configs(cluster, node_group=None):
    """Produce core Hadoop config values derived from cluster topology.

    Adds MapReduce, Oozie and Hive settings only when the corresponding
    service instance exists in the cluster.
    """
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    jt_hostname = vu.get_instance_hostname(vu.get_jobtracker(cluster))
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    storage_path = node_group.storage_paths() if node_group else None

    # inserting common configs depends on provisioned VMs and HDFS placement
    # TODO(aignatov): should be moved to cluster context
    cfg = {
        'fs.default.name': 'hdfs://%s:8020' % nn_hostname,
        'dfs.name.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/namenode'),
        'dfs.data.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/datanode'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl',
    }

    if jt_hostname:
        cfg.update({
            'mapred.job.tracker': '%s:8021' % jt_hostname,
            'mapred.system.dir': extract_hadoop_path(storage_path,
                                                     '/mapred/mapredsystem'),
            'mapred.local.dir': extract_hadoop_path(storage_path,
                                                    '/lib/hadoop/mapred'),
            'mapred.hosts': '/etc/hadoop/tt.incl',
            'mapred.hosts.exclude': '/etc/hadoop/tt.excl',
        })

    if oozie_hostname:
        cfg.update({
            'hadoop.proxyuser.hadoop.hosts': "localhost," + oozie_hostname,
            'hadoop.proxyuser.hadoop.groups': 'hadoop',
        })
        LOG.debug('Applied Oozie configs for core-site.xml')
        cfg.update(o_h.get_oozie_required_xml_configs())
        LOG.debug('Applied Oozie configs for oozie-site.xml')

    if hive_hostname:
        cfg.update({
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        })
        LOG.debug('Applied Hive config for hive metastore server')

    return cfg
开发者ID:a9261,项目名称:sahara,代码行数:53,代码来源:config_helper.py
示例14: _await_datanodes
def _await_datanodes(self, cluster):
    """Poll the namenode until the expected number of datanodes is live."""
    expected = len(vu.get_datanodes(cluster))
    if expected < 1:
        return
    message = _("Waiting on %s datanodes to start up") % expected
    LOG.info(message)
    with remote.get_remote(vu.get_namenode(cluster)) as r:
        poll_utils.plugin_option_poll(
            cluster, run.check_datanodes_count,
            c_helper.DATANODES_STARTUP_TIMEOUT, message, 1,
            {'remote': r, 'count': expected})
开发者ID:al-indigo,项目名称:sahara,代码行数:13,代码来源:versionhandler.py
示例15: start_cluster
def start_cluster(self, cluster):
    # Start every service of the cluster in dependency order: namenode,
    # secondary namenodes, jobtracker, tasktracker/datanode processes,
    # then optional Oozie and Hive, finishing by publishing endpoints.
    nn_instance = vu.get_namenode(cluster)
    with remote.get_remote(nn_instance) as r:
        # Format HDFS before the first namenode start.
        run.format_namenode(r)
        run.start_processes(r, "namenode")
    for snn in vu.get_secondarynamenodes(cluster):
        run.start_processes(remote.get_remote(snn), "secondarynamenode")
    jt_instance = vu.get_jobtracker(cluster)
    if jt_instance:
        run.start_processes(remote.get_remote(jt_instance), "jobtracker")
    self._start_tt_dn_processes(utils.get_instances(cluster))
    # Wait for all datanodes to register before dependent services start.
    self._await_datanodes(cluster)
    LOG.info(_LI("Hadoop services in cluster %s have been started"),
             cluster.name)
    oozie = vu.get_oozie(cluster)
    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
                run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname())
            run.start_oozie(r)
            # NOTE(review): this log reports the namenode host, not the
            # Oozie host — looks intentional upstream but worth confirming.
            LOG.info(_LI("Oozie service at '%s' has been started"),
                     nn_instance.hostname())
    hive_server = vu.get_hiveserver(cluster)
    if hive_server:
        with remote.get_remote(hive_server) as r:
            run.hive_create_warehouse_dir(r)
            run.hive_copy_shared_conf(
                r, edp.get_hive_shared_conf_path('hadoop'))
            if c_helper.is_mysql_enable(cluster):
                # Reuse the MySQL instance already started for Oozie when
                # both services share the same host.
                if not oozie or hive_server.hostname() != oozie.hostname():
                    run.mysql_start(r, hive_server)
                run.hive_create_db(r)
                run.hive_metastore_start(r)
                LOG.info(_LI("Hive Metastore server at %s has been "
                             "started"),
                         hive_server.hostname())
    LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
    self._set_cluster_info(cluster)
开发者ID:stannie42,项目名称:sahara,代码行数:49,代码来源:versionhandler.py
示例16: await_datanodes
def await_datanodes(cluster):
    """Busy-wait until all datanodes have started or the cluster is deleted."""
    datanodes_count = len(vu.get_datanodes(cluster))
    if datanodes_count < 1:
        return
    LOG.info("Waiting %s datanodes to start up" % datanodes_count)
    with vu.get_namenode(cluster).remote() as r:
        while not _check_datanodes_count(r, datanodes_count):
            context.sleep(1)
            # Abort quietly if the cluster was deleted while we waited.
            if not g.check_cluster_exists(cluster):
                LOG.info("Stop waiting datanodes on cluster %s since it has "
                         "been deleted" % cluster.name)
                return
        LOG.info("Datanodes on cluster %s has been started" % cluster.name)
开发者ID:kevinshan,项目名称:sahara,代码行数:17,代码来源:run_scripts.py
示例17: decommission_nodes
def decommission_nodes(self, cluster, instances):
    """Decommission datanode/tasktracker processes on instances to remove."""
    tasktrackers = vu.get_tasktrackers(cluster)
    datanodes = vu.get_datanodes(cluster)

    # Drop the departing instances from the survivor lists and note which
    # process kinds actually need decommissioning.
    remove_dns = False
    remove_tts = False
    for instance in instances:
        processes = instance.node_group.node_processes
        if "datanode" in processes:
            datanodes.remove(instance)
            remove_dns = True
        if "tasktracker" in processes:
            tasktrackers.remove(instance)
            remove_tts = True

    namenode = vu.get_namenode(cluster)
    jobtracker = vu.get_jobtracker(cluster)
    if remove_tts:
        sc.decommission_tt(jobtracker, instances, tasktrackers)
    if remove_dns:
        sc.decommission_dn(namenode, instances, datanodes)
开发者ID:metasensus,项目名称:sahara,代码行数:21,代码来源:versionhandler.py
示例18: _await_datanodes
def _await_datanodes(self, cluster):
    """Busy-wait until all datanodes are up, aborting if cluster is deleted."""
    datanodes_count = len(vu.get_datanodes(cluster))
    if datanodes_count < 1:
        return
    LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count)
    with remote.get_remote(vu.get_namenode(cluster)) as r:
        while not run.check_datanodes_count(r, datanodes_count):
            context.sleep(1)
            # Stop silently if the cluster disappeared during the wait.
            if not g.check_cluster_exists(cluster):
                LOG.info(
                    _LI('Stop waiting datanodes on cluster %s since it has'
                        ' been deleted'), cluster.name)
                return
        LOG.info(
            _LI('Datanodes on cluster %s has been started'),
            cluster.name)
开发者ID:stannie42,项目名称:sahara,代码行数:21,代码来源:versionhandler.py
示例19: _set_cluster_info
def _set_cluster_info(self, cluster):
    """Publish MapReduce/HDFS/Oozie endpoints into the cluster info dict."""
    namenode = vu.get_namenode(cluster)
    jobtracker = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)

    info = {}
    if jobtracker:
        ui_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker.http.address', cluster)
        jt_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker', cluster)
        info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (jobtracker.management_ip, ui_port),
            'JobTracker': '%s:%s' % (jobtracker.hostname(), jt_port)
        }
    if namenode:
        ui_port = c_helper.get_port_from_config(
            'HDFS', 'dfs.http.address', cluster)
        nn_port = c_helper.get_port_from_config(
            'HDFS', 'fs.default.name', cluster)
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (namenode.management_ip, ui_port),
            'NameNode': 'hdfs://%s:%s' % (namenode.hostname(), nn_port)
        }
    if oozie:
        # TODO(yrunts) change from hardcode value
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }

    conductor.cluster_update(context.ctx(), cluster, {'info': info})
开发者ID:stannie42,项目名称:sahara,代码行数:36,代码来源:versionhandler.py
示例20: start_namenode
def start_namenode(cluster):
    """Locate the cluster's namenode instance and start the HDFS namenode."""
    _start_namenode(vu.get_namenode(cluster))
开发者ID:Imperat,项目名称:sahara,代码行数:3,代码来源:starting_scripts.py
注:本文中的sahara.plugins.vanilla.utils.get_namenode函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论