This article collects typical usage examples of the Python function sahara.plugins.vanilla.utils.get_oozie. If you are wondering what get_oozie does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following shows 20 code examples of the get_oozie function, sorted by popularity by default. You can upvote examples you like or find useful; your votes help the system recommend better Python code samples.
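Before the examples, here is a minimal sketch of the pattern they all share: look up the Oozie instance on a cluster object and handle the case where it is absent. The helper name describe_oozie is a hypothetical illustration and not part of Sahara; the alias vu mirrors the examples below. What the examples do confirm is that get_oozie(cluster) returns the instance hosting the Oozie server, or None when the cluster has no Oozie process, and that the returned instance exposes attributes such as management_ip, hostname() and instance_id.

from sahara.plugins.vanilla import utils as vu

def describe_oozie(cluster):
    # Hypothetical helper, shown only to illustrate the lookup-then-check
    # pattern used throughout the examples below.
    oozie = vu.get_oozie(cluster)
    if oozie is None:
        return "cluster has no Oozie server"
    # The returned object is a regular Sahara instance, so accessors such as
    # management_ip, hostname() and instance_id are available.
    return "Oozie Web UI: http://%s:11000" % oozie.management_ip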
Example 1: test_get_oozie
def test_get_oozie(self):
    cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                           [self.ng_manager, self.ng_oozie])
    self.assertEqual('ooz1', u.get_oozie(cl).instance_id)

    cl = tu.create_cluster('cl1', 't1', 'vanilla', '2.6.0',
                           [self.ng_manager])
    self.assertIsNone(u.get_oozie(cl))
Author: uladz | Project: sahara | Lines: 8 | Source: test_utils.py

Example 2: _extract_configs_to_extra
def _extract_configs_to_extra(self, cluster):
    oozie = vu.get_oozie(cluster)
    hive = vu.get_hiveserver(cluster)

    extra = dict()

    if hive:
        extra['hive_mysql_passwd'] = six.text_type(uuid.uuid4())

    for ng in cluster.node_groups:
        extra[ng.id] = {
            'xml': c_helper.generate_xml_configs(
                cluster, ng, extra['hive_mysql_passwd'] if hive else None),
            'setup_script': c_helper.generate_setup_script(
                ng.storage_paths(),
                c_helper.extract_environment_confs(ng.configuration()),
                append_oozie=(
                    oozie and oozie.node_group.id == ng.id)
            )
        }

    if c_helper.is_data_locality_enabled(cluster):
        topology_data = th.generate_topology_map(
            cluster, CONF.enable_hypervisor_awareness)
        extra['topology_data'] = "\n".join(
            [k + " " + v for k, v in topology_data.items()]) + "\n"

    return extra
Author: stannie42 | Project: sahara | Lines: 28 | Source: versionhandler.py

Example 3: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    rm = vu.get_resourcemanager(cluster)
    hs = vu.get_historyserver(cluster)
    oo = vu.get_oozie(cluster)

    info = {}

    if rm:
        info["YARN"] = {
            "Web UI": "http://%s:%s" % (rm.management_ip, "8088"),
            "ResourceManager": "http://%s:%s" % (rm.management_ip, "8032"),
        }

    if nn:
        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (nn.management_ip, "50070"),
            "NameNode": "hdfs://%s:%s" % (nn.hostname(), "9000"),
        }

    if oo:
        info["JobFlow"] = {"Oozie": "http://%s:%s" % (oo.management_ip, "11000")}

    if hs:
        info["MapReduce JobHistory Server"] = {"Web UI": "http://%s:%s" % (hs.management_ip, "19888")}

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
Author: B-Rich | Project: sahara | Lines: 28 | Source: versionhandler.py

Example 4: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    jt = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)
    info = {}

    if jt:
        ui_port = c_helper.get_port_from_config("MapReduce", "mapred.job.tracker.http.address", cluster)
        jt_port = c_helper.get_port_from_config("MapReduce", "mapred.job.tracker", cluster)

        info["MapReduce"] = {
            "Web UI": "http://%s:%s" % (jt.management_ip, ui_port),
            "JobTracker": "%s:%s" % (jt.hostname(), jt_port),
        }

    if nn:
        ui_port = c_helper.get_port_from_config("HDFS", "dfs.http.address", cluster)
        nn_port = c_helper.get_port_from_config("HDFS", "fs.default.name", cluster)

        info["HDFS"] = {
            "Web UI": "http://%s:%s" % (nn.management_ip, ui_port),
            "NameNode": "hdfs://%s:%s" % (nn.hostname(), nn_port),
        }

    if oozie:
        # TODO(yrunts) change from hardcode value
        info["JobFlow"] = {"Oozie": "http://%s:11000" % oozie.management_ip}

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
Author: metasensus | Project: sahara | Lines: 30 | Source: versionhandler.py

Example 5: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    rm = vu.get_resourcemanager(cluster)
    hs = vu.get_historyserver(cluster)
    oo = vu.get_oozie(cluster)

    info = {}

    if rm:
        info['YARN'] = {
            'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
            'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
        }

    if nn:
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
        }

    if oo:
        info['JobFlow'] = {
            'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
        }

    if hs:
        info['MapReduce JobHistory Server'] = {
            'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
        }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
Author: AlexanderYAPPO | Project: sahara | Lines: 32 | Source: versionhandler.py

Example 6: start_cluster
def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, "namenode")

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, "secondarynamenode")

    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.start_yarn_process(rm, "resourcemanager")

    for dn in vu.get_datanodes(cluster):
        run.start_hadoop_process(dn, "datanode")
    run.await_datanodes(cluster)

    for nm in vu.get_nodemanagers(cluster):
        run.start_yarn_process(nm, "nodemanager")

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(oo)

    self._set_cluster_info(cluster)
Author: B-Rich | Project: sahara | Lines: 29 | Source: versionhandler.py

Example 7: start_cluster
def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, 'namenode')

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, 'secondarynamenode')

    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.start_yarn_process(rm, 'resourcemanager')

    run.start_dn_nm_processes(utils.get_instances(cluster))

    run.await_datanodes(cluster)

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(self.pctx, oo)

    hiveserver = vu.get_hiveserver(cluster)
    if hiveserver:
        run.start_hiveserver_process(self.pctx, hiveserver)

    self._set_cluster_info(cluster)
Author: AlexanderYAPPO | Project: sahara | Lines: 29 | Source: versionhandler.py

Example 8: _get_hadoop_configs
def _get_hadoop_configs(node_group):
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs['JobFlow'] = oozie_cfg

    if c_helper.get_config_value(c_helper.ENABLE_SWIFT.applicable_target,
                                 c_helper.ENABLE_SWIFT.name, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']

        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    return confs, c_helper.get_env_configs()
Author: hongbin | Project: sahara | Lines: 58 | Source: config.py

Example 9: generate_xml_configs
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        cfg = all_cfg
        cfg_filter = HIVE_DEFAULT
        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if CONF.use_identity_api_v3 and proxy_configs:
            cfg, cfg_filter = _inject_swift_trust_info(cfg,
                                                       cfg_filter,
                                                       proxy_configs)
        xml_configs.update({'hive-site':
                            x.create_hadoop_xml(cfg, cfg_filter)})
        LOG.debug('Generated hive-site.xml for hive {host}'.format(
            host=hive_hostname))

    if oozie_hostname:
        xml_configs.update({'oozie-site':
                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
            host=oozie_hostname))

    return xml_configs
Author: AlexanderYAPPO | Project: sahara | Lines: 55 | Source: config_helper.py

Example 10: generate_sahara_configs
def generate_sahara_configs(cluster, node_group=None):
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    jt_hostname = vu.get_instance_hostname(vu.get_jobtracker(cluster))
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    storage_path = node_group.storage_paths() if node_group else None

    # inserting common configs depends on provisioned VMs and HDFS placement
    # TODO(aignatov): should be moved to cluster context
    cfg = {
        'fs.default.name': 'hdfs://%s:8020' % nn_hostname,
        'dfs.name.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/namenode'),
        'dfs.data.dir': extract_hadoop_path(storage_path,
                                            '/lib/hadoop/hdfs/datanode'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl',
    }

    if jt_hostname:
        mr_cfg = {
            'mapred.job.tracker': '%s:8021' % jt_hostname,
            'mapred.system.dir': extract_hadoop_path(storage_path,
                                                     '/mapred/mapredsystem'),
            'mapred.local.dir': extract_hadoop_path(storage_path,
                                                    '/lib/hadoop/mapred'),
            'mapred.hosts': '/etc/hadoop/tt.incl',
            'mapred.hosts.exclude': '/etc/hadoop/tt.excl',
        }
        cfg.update(mr_cfg)

    if oozie_hostname:
        o_cfg = {
            'hadoop.proxyuser.hadoop.hosts': "localhost," + oozie_hostname,
            'hadoop.proxyuser.hadoop.groups': 'hadoop',
        }
        cfg.update(o_cfg)
        LOG.debug('Applied Oozie configs for core-site.xml')
        cfg.update(o_h.get_oozie_required_xml_configs())
        LOG.debug('Applied Oozie configs for oozie-site.xml')

    if hive_hostname:
        h_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
            'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }
        cfg.update(h_cfg)
        LOG.debug('Applied Hive config for hive metastore server')

    return cfg
Author: a9261 | Project: sahara | Lines: 53 | Source: config_helper.py

Example 11: _start_hiveserver
def _start_hiveserver(self, cluster, hive_server):
    oozie = vu.get_oozie(cluster)
    with remote.get_remote(hive_server) as r:
        run.hive_create_warehouse_dir(r)
        run.hive_copy_shared_conf(r, edp.get_hive_shared_conf_path("hadoop"))

        if c_helper.is_mysql_enable(cluster):
            if not oozie or hive_server.hostname() != oozie.hostname():
                run.mysql_start(r, hive_server)
            run.hive_create_db(r, cluster.extra["hive_mysql_passwd"])
            run.hive_metastore_start(r)
            LOG.info(_LI("Hive Metastore server at {host} has been "
                         "started").format(host=hive_server.hostname()))
Author: YongchaoTIAN | Project: sahara | Lines: 13 | Source: versionhandler.py

Example 12: start_cluster
def start_cluster(self, cluster):
    nn_instance = vu.get_namenode(cluster)
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_processes(remote.get_remote(snn), "secondarynamenode")

    jt_instance = vu.get_jobtracker(cluster)
    if jt_instance:
        run.start_processes(remote.get_remote(jt_instance), "jobtracker")

    self._start_tt_dn_processes(utils.get_instances(cluster))

    self._await_datanodes(cluster)

    LOG.info(_LI("Hadoop services in cluster %s have been started"),
             cluster.name)

    oozie = vu.get_oozie(cluster)
    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
            run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname())
            run.start_oozie(r)
            LOG.info(_LI("Oozie service at '%s' has been started"),
                     nn_instance.hostname())

    hive_server = vu.get_hiveserver(cluster)
    if hive_server:
        with remote.get_remote(hive_server) as r:
            run.hive_create_warehouse_dir(r)
            run.hive_copy_shared_conf(
                r, edp.get_hive_shared_conf_path('hadoop'))

            if c_helper.is_mysql_enable(cluster):
                if not oozie or hive_server.hostname() != oozie.hostname():
                    run.mysql_start(r, hive_server)
                run.hive_create_db(r)
                run.hive_metastore_start(r)
                LOG.info(_LI("Hive Metastore server at %s has been "
                             "started"),
                         hive_server.hostname())

    LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
    self._set_cluster_info(cluster)
Author: stannie42 | Project: sahara | Lines: 49 | Source: versionhandler.py

Example 13: _start_hiveserver
def _start_hiveserver(self, cluster, hive_server):
    oozie = vu.get_oozie(cluster)
    with remote.get_remote(hive_server) as r:
        with context.set_current_instance_id(hive_server.instance_id):
            run.hive_create_warehouse_dir(r)
            run.hive_copy_shared_conf(
                r, edp.get_hive_shared_conf_path('hadoop'))

            if c_helper.is_mysql_enable(cluster):
                if not oozie or hive_server.hostname() != oozie.hostname():
                    run.mysql_start(r)
                run.hive_create_db(r, cluster.extra['hive_mysql_passwd'])
                run.hive_metastore_start(r)
                LOG.info(_LI("Hive Metastore server has been started"))
Author: al-indigo | Project: sahara | Lines: 15 | Source: versionhandler.py

Example 14: start_hiveserver_process
def start_hiveserver_process(pctx, instance):
    with context.set_current_instance_id(instance.instance_id):
        with instance.remote() as r:
            _hive_create_warehouse_dir(r)
            _hive_copy_shared_conf(r, edp.get_hive_shared_conf_path("hadoop"))

            if c_helper.is_mysql_enabled(pctx, instance.cluster):
                oozie = vu.get_oozie(instance.node_group.cluster)
                if not oozie or instance.hostname() != oozie.hostname():
                    _start_mysql(r)

                sql_script = files.get_file_text(
                    "plugins/vanilla/hadoop2/resources/create_hive_db.sql")

                r.write_file_to("/tmp/create_hive_db.sql", sql_script)
                _hive_create_db(r)
                _hive_metastore_start(r)
                LOG.info(_LI("Hive Metastore server at {host} has been "
                             "started").format(host=instance.hostname()))
Author: egafford | Project: sahara | Lines: 17 | Source: run_scripts.py

Example 15: generate_xml_configs
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()

    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)

    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        "core-site": x.create_hadoop_xml(all_cfg, core_all),
        "mapred-site": x.create_hadoop_xml(all_cfg, mapred_all),
        "hdfs-site": x.create_hadoop_xml(all_cfg, HDFS_DEFAULT),
    }

    if hive_hostname:
        xml_configs.update({"hive-site": x.create_hadoop_xml(all_cfg, HIVE_DEFAULT)})
        LOG.debug("Generated hive-site.xml for hive %s", hive_hostname)

    if oozie_hostname:
        xml_configs.update({"oozie-site": x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug("Generated oozie-site.xml for oozie %s", oozie_hostname)

    return xml_configs
Author: JohannaMW | Project: sahara | Lines: 44 | Source: config_helper.py

Example 16: start_hiveserver_process
def start_hiveserver_process(pctx, instance):
    with instance.remote() as r:
        _hive_create_warehouse_dir(r)
        _hive_copy_shared_conf(
            r, edp.get_hive_shared_conf_path('hadoop'))

        if c_helper.is_mysql_enabled(pctx, instance.node_group.cluster):
            oozie = vu.get_oozie(instance.node_group.cluster)
            if not oozie or instance.hostname() != oozie.hostname():
                _start_mysql(r)

            sql_script = files.get_file_text(
                'plugins/vanilla/hadoop2/resources/create_hive_db.sql'
            )

            r.write_file_to('/tmp/create_hive_db.sql', sql_script)
            _hive_create_db(r)
            _hive_metastore_start(r)
            LOG.info(_LI("Hive Metastore server at %s has been "
                         "started"),
                     instance.hostname())
Author: ilivessevili | Project: sahara | Lines: 21 | Source: run_scripts.py

Example 17: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = vu.get_namenode(cluster)
    jt = vu.get_jobtracker(cluster)
    oozie = vu.get_oozie(cluster)
    info = {}

    if jt:
        ui_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker.http.address', cluster)
        jt_port = c_helper.get_port_from_config(
            'MapReduce', 'mapred.job.tracker', cluster)

        info['MapReduce'] = {
            'Web UI': 'http://%s:%s' % (jt.management_ip, ui_port),
            'JobTracker': '%s:%s' % (jt.hostname(), jt_port)
        }

    if nn:
        ui_port = c_helper.get_port_from_config('HDFS', 'dfs.http.address',
                                                cluster)
        nn_port = c_helper.get_port_from_config('HDFS', 'fs.default.name',
                                                cluster)

        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, ui_port),
            'NameNode': 'hdfs://%s:%s' % (nn.hostname(), nn_port)
        }

    if oozie:
        # TODO(yrunts) change from hardcode value
        info['JobFlow'] = {
            'Oozie': 'http://%s:11000' % oozie.management_ip
        }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
Author: stannie42 | Project: sahara | Lines: 36 | Source: versionhandler.py

Example 18: start_oozie
def start_oozie(self, cluster):
    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(self.pctx, oo)
Author: AlexanderYAPPO | Project: sahara | Lines: 4 | Source: versionhandler.py

Example 19: get_oozie_server
def get_oozie_server(self, cluster):
    return vu.get_oozie(cluster)
Author: stannie42 | Project: sahara | Lines: 2 | Source: edp_engine.py

Example 20: _get_hadoop_configs
def _get_hadoop_configs(pctx, instance):
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        "Hadoop": {"fs.defaultFS": "hdfs://%s:9000" % nn_hostname},
        "HDFS": {
            "dfs.namenode.name.dir": ",".join(dirs["hadoop_name_dirs"]),
            "dfs.datanode.data.dir": ",".join(dirs["hadoop_data_dirs"]),
            "dfs.hosts": "%s/dn-include" % HADOOP_CONF_DIR,
            "dfs.hosts.exclude": "%s/dn-exclude" % HADOOP_CONF_DIR,
        },
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs["YARN"] = {
            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
            "yarn.resourcemanager.hostname": "%s" % res_hostname,
            "yarn.resourcemanager.nodes.include-path": "%s/nm-include" % (HADOOP_CONF_DIR),
            "yarn.resourcemanager.nodes.exclude-path": "%s/nm-exclude" % (HADOOP_CONF_DIR),
        }
        confs["MapReduce"] = {"mapreduce.framework.name": "yarn"}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs["MapReduce"]["mapreduce.jobhistory.address"] = "%s:10020" % hs_hostname

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {"hadoop.proxyuser.hadoop.hosts": "*", "hadoop.proxyuser.hadoop.groups": "hadoop"}
        confs["Hadoop"].update(hadoop_cfg)

        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())

        confs["JobFlow"] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config["name"]] = config["value"]

        confs["Hadoop"].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs["Hadoop"].update(th.TOPOLOGY_CONFIG)
        confs["Hadoop"].update({"topology.script.file.name": HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            "hive.warehouse.subdir.inherit.perms": True,
            "javax.jdo.option.ConnectionURL": "jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true",
        }

        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update(
                {
                    "javax.jdo.option.ConnectionURL": "jdbc:mysql://%s/metastore" % hive_hostname,
                    "javax.jdo.option.ConnectionDriverName": "com.mysql.jdbc.Driver",
                    "javax.jdo.option.ConnectionUserName": "hive",
                    "javax.jdo.option.ConnectionPassword": "pass",
                    "datanucleus.autoCreateSchema": "false",
                    "datanucleus.fixedDatastore": "true",
                    "hive.metastore.uris": "thrift://%s:9083" % hive_hostname,
                }
            )

        proxy_configs = cluster.cluster_configs.get("proxy_configs")
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            key = key_manager.API().get(context.current(), proxy_configs["proxy_password"])
            password = key.get_encoded()
            hive_cfg.update(
                {
                    swift.HADOOP_SWIFT_USERNAME: proxy_configs["proxy_username"],
                    swift.HADOOP_SWIFT_PASSWORD: password,
                    swift.HADOOP_SWIFT_TRUST_ID: proxy_configs["proxy_trust_id"],
                    swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name,
                }
            )

        confs["Hive"] = hive_cfg

    return confs
Author: uladz | Project: sahara | Lines: 85 | Source: config.py
Note: The sahara.plugins.vanilla.utils.get_oozie examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Please refer to each project's license before redistributing or using the code, and do not reproduce this article without permission.