本文整理汇总了Python中sahara.swift.swift_helper.get_swift_configs函数的典型用法代码示例。如果您正苦于以下问题:Python get_swift_configs函数的具体用法?Python get_swift_configs怎么用?Python get_swift_configs使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_swift_configs函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _configure_swift
def _configure_swift(client, cluster):
    """Add Swift filesystem properties to the Hadoop client params.

    Does nothing unless the cluster's 'general' config section enables
    Swift (c_helper.ENABLE_SWIFT).
    """
    swift_enable = c_helper.get_config_value(
        cluster.cluster_configs.get('general'), c_helper.ENABLE_SWIFT)
    if swift_enable:
        swift_configs = swift.get_swift_configs()
        for conf in swift_configs:
            client.params.hadoop.add(conf['name'], conf['value'])
开发者ID:qinweiwei,项目名称:sahara,代码行数:7,代码来源:installer.py
示例2: test_generate_xml_configs
def test_generate_xml_configs(self, auth_url):
    """core-site generation: swift defaults appear, user values win over
    defaults, and swift configs are omitted when Swift is disabled.
    """
    auth_url.return_value = "http://localhost:5000/v2/"

    # Make a dict of swift configs to verify generated values
    swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

    # Make sure that all the swift configs are in core-site
    c = c_helper.generate_xml_configs({}, ['/mnt/one'], 'localhost', None)
    doc = xml.parseString(c['core-site'])
    configuration = doc.getElementsByTagName('configuration')
    properties = xmlutils.get_property_dict(configuration[0])
    self.assertDictContainsSubset(swift_vals, properties)

    # Make sure that user values have precedence over defaults
    c = c_helper.generate_xml_configs(
        {'HDFS': {'fs.swift.service.sahara.tenant': 'fred'}},
        ['/mnt/one'], 'localhost', None)
    doc = xml.parseString(c['core-site'])
    configuration = doc.getElementsByTagName('configuration')
    properties = xmlutils.get_property_dict(configuration[0])
    mod_swift_vals = copy.copy(swift_vals)
    mod_swift_vals['fs.swift.service.sahara.tenant'] = 'fred'
    self.assertDictContainsSubset(mod_swift_vals, properties)

    # Make sure that swift configs are left out if not enabled
    c = c_helper.generate_xml_configs(
        {'HDFS': {'fs.swift.service.sahara.tenant': 'fred'},
         'general': {'Enable Swift': False}},
        ['/mnt/one'], 'localhost', None)
    doc = xml.parseString(c['core-site'])
    configuration = doc.getElementsByTagName('configuration')
    properties = xmlutils.get_property_dict(configuration[0])
    for key in mod_swift_vals.keys():
        self.assertNotIn(key, properties)
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:34,代码来源:test_config_helper.py
示例3: _get_hadoop_configs
def _get_hadoop_configs(node_group):
    """Build the per-service Hadoop config dicts for a node group.

    Returns a tuple of (confs, env_configs) where confs maps service
    names ('Hadoop', 'HDFS', 'YARN', 'MapReduce', 'JobFlow') to their
    key/value settings.
    """
    cluster = node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(node_group)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            # NOTE(review): the original wrote these data dirs to
            # 'dfs.namenode.data.dir'; the HDFS property for datanode
            # storage is 'dfs.datanode.data.dir' (as the other variants
            # of this function use) — confirm against the plugin's
            # supported Hadoop version.
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)
        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.get_config_value(c_helper.ENABLE_SWIFT.applicable_target,
                                 c_helper.ENABLE_SWIFT.name, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    return confs, c_helper.get_env_configs()
开发者ID:hongbin,项目名称:sahara,代码行数:58,代码来源:config.py
示例4: _get_core_site_props
def _get_core_site_props(self, context):
    """Collect core-site properties: node-awareness (if enabled),
    swift settings, and impersonation settings.
    """
    result = {}
    if context.is_node_aware:
        result.update(self._get_core_site_node_aware_props())
    for conf in swift_helper.get_swift_configs():
        result[conf['name']] = conf['value']
    for conf in self._get_impersonation_props():
        result[conf['name']] = conf['value']
    return result
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:9,代码来源:mapreduce.py
示例5: test_get_swift_configs
def test_get_swift_configs(self):
    """get_swift_configs picks up tenant, auth port and region."""
    self.setup_context(tenant_name="test_tenant",
                       auth_uri="http://localhost:8080/v2.0/")
    self.override_config("os_region_name", "regionOne")
    result = h.get_swift_configs()
    self.assertEqual(8, len(result))
    self.assertIn({"name": "fs.swift.service.sahara.tenant",
                   "value": "test_tenant", "description": ""}, result)
    self.assertIn({"name": "fs.swift.service.sahara.http.port",
                   "value": "8080", "description": ""}, result)
    self.assertIn({"name": "fs.swift.service.sahara.region",
                   "value": "regionOne", "description": ""}, result)
开发者ID:JohannaMW,项目名称:sahara,代码行数:9,代码来源:test_swift_helper.py
示例6: _get_core_site_props
def _get_core_site_props(self, context):
    """Collect core-site properties: MapR proxy-user settings,
    node-awareness (if enabled), and swift settings.
    """
    result = {
        'hadoop.proxyuser.mapr.groups': '*',
        'hadoop.proxyuser.mapr.hosts': '*',
    }
    if context.is_node_aware:
        result.update(self._get_core_site_node_aware_props())
    for conf in swift_helper.get_swift_configs():
        result[conf['name']] = conf['value']
    return result
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:10,代码来源:yarn.py
示例7: generate_xml_configs
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    """Render the Hadoop XML config files for a node group.

    Returns a dict mapping file names ('core-site', 'mapred-site',
    'hdfs-site', plus 'hive-site'/'oozie-site' when those services are
    deployed) to their XML content.
    """
    oozie_hostname = vu.get_instance_hostname(vu.get_oozie(cluster))
    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))

    ng_configs = node_group.configuration()
    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)
    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        cfg = all_cfg
        cfg_filter = HIVE_DEFAULT
        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        # Swift trust info only applies with Keystone v3 + proxy users.
        if CONF.use_identity_api_v3 and proxy_configs:
            cfg, cfg_filter = _inject_swift_trust_info(cfg,
                                                       cfg_filter,
                                                       proxy_configs)
        xml_configs.update({'hive-site':
                            x.create_hadoop_xml(cfg, cfg_filter)})
        LOG.debug('Generated hive-site.xml for hive {host}'.format(
            host=hive_hostname))

    if oozie_hostname:
        xml_configs.update({'oozie-site':
                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie {host}'.format(
            host=oozie_hostname))

    return xml_configs
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:55,代码来源:config_helper.py
示例8: _configure_swift_to_inst
def _configure_swift_to_inst(instance):
    """Install the hadoop-openstack jar on an instance and merge swift
    properties into its core-site.xml.
    """
    cluster = instance.node_group.cluster
    with instance.remote() as r:
        r.execute_command('sudo curl %s -o %s/hadoop-openstack.jar' % (
            c_helper.get_swift_lib_url(cluster), HADOOP_LIB_DIR))
        core_site = r.read_file_from(PATH_TO_CORE_SITE_XML)
        configs = xmlutils.parse_hadoop_xml_with_name_and_value(core_site)
        configs.extend(swift_helper.get_swift_configs())
        # Later duplicates win, so swift values override existing ones.
        confs = dict((c['name'], c['value']) for c in configs)
        new_core_site = xmlutils.create_hadoop_xml(confs)
        r.write_file_to(PATH_TO_CORE_SITE_XML, new_core_site,
                        run_as_root=True)
开发者ID:viplav,项目名称:sahara,代码行数:11,代码来源:deploy.py
示例9: get_cluster_params
def get_cluster_params(cluster):
    """Build serialized Ambari configs for a cluster, merging in swift
    properties and (when Ranger Admin is deployed) its DB password.
    """
    configs = _create_ambari_configs(cluster.cluster_configs,
                                     cluster.hadoop_version)
    swift_configs = {x["name"]: x["value"]
                     for x in swift_helper.get_swift_configs()}
    configs.setdefault("core-site", {})
    configs["core-site"].update(swift_configs)
    if utils.get_instance(cluster, common.RANGER_ADMIN):
        configs.setdefault("admin-properties", {})
        configs["admin-properties"]["db_root_password"] = (
            cluster.extra["ranger_db_password"])
    return _serialize_ambari_configs(configs)
开发者ID:jfrodriguez,项目名称:sahara,代码行数:12,代码来源:configs.py
示例10: test_get_swift_configs
def test_get_swift_configs(self, authUrlConfig):
    """get_swift_configs picks up tenant and auth protocol/port."""
    self.setup_context(tenant_name='test_tenant')
    self.override_config("os_auth_protocol", 'http')
    self.override_config("os_auth_port", '8080')
    authUrlConfig.return_value = "http://localhost:8080/v2.0/"

    result = h.get_swift_configs()
    self.assertEqual(7, len(result))
    self.assertIn({'name': "fs.swift.service.sahara.tenant",
                   'value': 'test_tenant', 'description': ''}, result)
    self.assertIn({'name': "fs.swift.service.sahara.http.port",
                   'value': '8080', 'description': ''}, result)
开发者ID:qinweiwei,项目名称:sahara,代码行数:12,代码来源:test_swift_helper.py
示例11: test_get_swift_configs
def test_get_swift_configs(self, url_for_mock):
    """get_swift_configs picks up tenant, port and region from a mocked
    Keystone endpoint URL.
    """
    url_for_mock.return_value = 'http://localhost:5000/v2.0'
    self.setup_context(tenant_name='test_tenant')
    self.override_config("os_region_name", 'regionOne')

    result = h.get_swift_configs()
    self.assertEqual(8, len(result))
    self.assertIn({'name': "fs.swift.service.sahara.tenant",
                   'value': 'test_tenant', 'description': ''}, result)
    self.assertIn({'name': "fs.swift.service.sahara.http.port",
                   'value': '8080', 'description': ''}, result)
    self.assertIn({'name': "fs.swift.service.sahara.region",
                   'value': 'regionOne', 'description': ''}, result)
开发者ID:AlexanderYAPPO,项目名称:sahara,代码行数:13,代码来源:test_swift_helper.py
示例12: generate_xml_configs
def generate_xml_configs(cluster, node_group, hive_mysql_passwd):
    """Render the Hadoop XML config files for a node group.

    Returns a dict mapping file names ('core-site', 'mapred-site',
    'hdfs-site', plus 'hive-site'/'oozie-site' when those services are
    deployed) to their XML content.
    """
    oozie_hostname = _get_hostname(utils.get_oozie(cluster))
    hive_hostname = _get_hostname(utils.get_hiveserver(cluster))

    ng_configs = node_group.configuration()
    general_cfg = get_general_configs(hive_hostname, hive_mysql_passwd)
    all_cfg = generate_sahara_configs(cluster, node_group)

    # inserting user-defined configs
    for key, value in extract_xml_confs(ng_configs):
        all_cfg[key] = value

    # applying swift configs if user enabled it
    swift_xml_confs = swift.get_swift_configs()
    all_cfg = generate_cfg_from_general(all_cfg, ng_configs, general_cfg)

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swift_xml_confs
    mapred_all = MAPRED_DEFAULT

    if CONF.enable_data_locality:
        all_cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()
        mapred_all += topology.vm_awareness_mapred_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(all_cfg, core_all),
        'mapred-site': x.create_hadoop_xml(all_cfg, mapred_all),
        'hdfs-site': x.create_hadoop_xml(all_cfg, HDFS_DEFAULT)
    }

    if hive_hostname:
        xml_configs.update({'hive-site':
                            x.create_hadoop_xml(all_cfg, HIVE_DEFAULT)})
        LOG.debug('Generated hive-site.xml for hive % s', hive_hostname)

    if oozie_hostname:
        xml_configs.update({'oozie-site':
                            x.create_hadoop_xml(all_cfg, o_h.OOZIE_DEFAULT)})
        LOG.debug('Generated oozie-site.xml for oozie % s', oozie_hostname)

    return xml_configs
开发者ID:qinweiwei,项目名称:sahara,代码行数:46,代码来源:config_helper.py
示例13: generate_xml_configs
def generate_xml_configs(configs, storage_path, nn_hostname, hadoop_port):
    """Render core-site and hdfs-site XML for a single-namenode layout.

    hadoop_port defaults to 8020 when None. User-supplied configs take
    precedence over the swift defaults.
    """
    if hadoop_port is None:
        hadoop_port = 8020

    cfg = {
        'fs.defaultFS': 'hdfs://%s:%s' % (nn_hostname, str(hadoop_port)),
        'dfs.namenode.name.dir': extract_hadoop_path(storage_path,
                                                     '/dfs/nn'),
        'dfs.datanode.data.dir': extract_hadoop_path(storage_path,
                                                     '/dfs/dn'),
        'hadoop.tmp.dir': extract_hadoop_path(storage_path,
                                              '/dfs'),
        'dfs.hosts': '/etc/hadoop/dn.incl',
        'dfs.hosts.exclude': '/etc/hadoop/dn.excl'
    }

    # inserting user-defined configs
    for key, value in extract_hadoop_xml_confs(configs):
        cfg[key] = value

    # Add the swift defaults if they have not been set by the user
    swft_def = []
    if is_swift_enabled(configs):
        swft_def = SWIFT_DEFAULTS
        swift_configs = extract_name_values(swift.get_swift_configs())
        for key, value in six.iteritems(swift_configs):
            if key not in cfg:
                cfg[key] = value

    # invoking applied configs to appropriate xml files
    core_all = CORE_DEFAULT + swft_def

    if CONF.enable_data_locality:
        cfg.update(topology.TOPOLOGY_CONFIG)
        # applying vm awareness configs
        core_all += topology.vm_awareness_core_config()

    xml_configs = {
        'core-site': x.create_hadoop_xml(cfg, core_all),
        'hdfs-site': x.create_hadoop_xml(cfg, HDFS_DEFAULT)
    }

    return xml_configs
开发者ID:AllenFromMinneapolis,项目名称:sahara,代码行数:43,代码来源:config_helper.py
示例14: get_general_configs
def get_general_configs(hive_hostname, passwd_hive_mysql):
    """Map 'general' feature toggles (swift, mysql, data locality) to
    their default values and the concrete configs each one implies.
    """
    config = {
        ENABLE_SWIFT.name: {
            'default_value': ENABLE_SWIFT.default_value,
            'conf': extract_name_values(swift.get_swift_configs())
        },
        ENABLE_MYSQL.name: {
            'default_value': ENABLE_MYSQL.default_value,
            'conf': m_h.get_required_mysql_configs(
                hive_hostname, passwd_hive_mysql)
        }
    }
    if CONF.enable_data_locality:
        config.update({
            ENABLE_DATA_LOCALITY.name: {
                'default_value': ENABLE_DATA_LOCALITY.default_value,
                'conf': extract_name_values(topology.vm_awareness_all_config())
            }
        })
    return config
开发者ID:a9261,项目名称:sahara,代码行数:20,代码来源:config_helper.py
示例15: test_generate_xml_configs
def test_generate_xml_configs(self, auth_url):
    """core-site generation: swift defaults appear, user values win over
    defaults, and swift configs are omitted when Swift is disabled.
    """
    auth_url.return_value = "http://localhost:5000/v2/"

    # Make a dict of swift configs to verify generated values
    swift_vals = c_helper.extract_name_values(swift.get_swift_configs())

    # Make sure that all the swift configs are in core-site
    c = c_helper.generate_xml_configs({}, ["/mnt/one"], "localhost", None)
    doc = xml.parseString(c["core-site"])
    configuration = doc.getElementsByTagName("configuration")
    properties = xmlutils.get_property_dict(configuration[0])
    self.assertDictContainsSubset(swift_vals, properties)

    # Make sure that user values have precedence over defaults
    c = c_helper.generate_xml_configs(
        {"HDFS": {"fs.swift.service.sahara.tenant": "fred"}},
        ["/mnt/one"], "localhost", None
    )
    doc = xml.parseString(c["core-site"])
    configuration = doc.getElementsByTagName("configuration")
    properties = xmlutils.get_property_dict(configuration[0])
    mod_swift_vals = copy.copy(swift_vals)
    mod_swift_vals["fs.swift.service.sahara.tenant"] = "fred"
    self.assertDictContainsSubset(mod_swift_vals, properties)

    # Make sure that swift configs are left out if not enabled
    c = c_helper.generate_xml_configs(
        {"HDFS": {"fs.swift.service.sahara.tenant": "fred"},
         "general": {"Enable Swift": False}},
        ["/mnt/one"],
        "localhost",
        None,
    )
    doc = xml.parseString(c["core-site"])
    configuration = doc.getElementsByTagName("configuration")
    properties = xmlutils.get_property_dict(configuration[0])
    for key in mod_swift_vals.keys():
        self.assertNotIn(key, properties)
开发者ID:egafford,项目名称:sahara,代码行数:36,代码来源:test_config_helper.py
示例16: _get_hadoop_configs
def _get_hadoop_configs(pctx, instance):
    """Build the per-service Hadoop config dicts for an instance.

    Returns a dict mapping service names ('Hadoop', 'HDFS', 'YARN',
    'MapReduce', 'JobFlow', 'Hive') to their key/value settings,
    according to which services are deployed on the cluster.
    """
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        "Hadoop": {"fs.defaultFS": "hdfs://%s:9000" % nn_hostname},
        "HDFS": {
            "dfs.namenode.name.dir": ",".join(dirs["hadoop_name_dirs"]),
            "dfs.datanode.data.dir": ",".join(dirs["hadoop_data_dirs"]),
            "dfs.hosts": "%s/dn-include" % HADOOP_CONF_DIR,
            "dfs.hosts.exclude": "%s/dn-exclude" % HADOOP_CONF_DIR,
        },
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs["YARN"] = {
            "yarn.nodemanager.aux-services": "mapreduce_shuffle",
            "yarn.resourcemanager.hostname": "%s" % res_hostname,
            "yarn.resourcemanager.nodes.include-path": "%s/nm-include" % (
                HADOOP_CONF_DIR),
            "yarn.resourcemanager.nodes.exclude-path": "%s/nm-exclude" % (
                HADOOP_CONF_DIR),
        }
        confs["MapReduce"] = {"mapreduce.framework.name": "yarn"}
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs["MapReduce"]["mapreduce.jobhistory.address"] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {"hadoop.proxyuser.hadoop.hosts": "*",
                      "hadoop.proxyuser.hadoop.groups": "hadoop"}
        confs["Hadoop"].update(hadoop_cfg)
        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs["JobFlow"] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config["name"]] = config["value"]
        confs["Hadoop"].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs["Hadoop"].update(th.TOPOLOGY_CONFIG)
        confs["Hadoop"].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            "hive.warehouse.subdir.inherit.perms": True,
            "javax.jdo.option.ConnectionURL":
                "jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true",
        }
        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update(
                {
                    "javax.jdo.option.ConnectionURL":
                        "jdbc:mysql://%s/metastore" % hive_hostname,
                    "javax.jdo.option.ConnectionDriverName":
                        "com.mysql.jdbc.Driver",
                    "javax.jdo.option.ConnectionUserName": "hive",
                    "javax.jdo.option.ConnectionPassword": "pass",
                    "datanucleus.autoCreateSchema": "false",
                    "datanucleus.fixedDatastore": "true",
                    "hive.metastore.uris": "thrift://%s:9083" % hive_hostname,
                }
            )

        proxy_configs = cluster.cluster_configs.get("proxy_configs")
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            # Proxy password is stored in the key manager; resolve it here.
            key = key_manager.API().get(context.current(),
                                        proxy_configs["proxy_password"])
            password = key.get_encoded()
            hive_cfg.update(
                {
                    swift.HADOOP_SWIFT_USERNAME: proxy_configs["proxy_username"],
                    swift.HADOOP_SWIFT_PASSWORD: password,
                    swift.HADOOP_SWIFT_TRUST_ID: proxy_configs["proxy_trust_id"],
                    swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name,
                }
            )
        confs["Hive"] = hive_cfg

    return confs
开发者ID:uladz,项目名称:sahara,代码行数:85,代码来源:config.py
示例17: _get_configs
def _get_configs(self, service, cluster=None, node_group=None):
def get_hadoop_dirs(mount_points, suffix):
return ','.join([x + suffix for x in mount_points])
all_confs = {}
if cluster:
zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
hbm_count = v._get_inst_count(cluster, 'HBASE_MASTER')
snt_count = v._get_inst_count(cluster, 'SENTRY_SERVER')
ks_count = v._get_inst_count(cluster, 'KEY_VALUE_STORE_INDEXER')
imp_count = v._get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
hive_count = v._get_inst_count(cluster, 'HIVE_METASTORE')
slr_count = v._get_inst_count(cluster, 'SOLR_SERVER')
sqp_count = v._get_inst_count(cluster, 'SQOOP_SERVER')
core_site_safety_valve = ''
if self.pu.c_helper.is_swift_enabled(cluster):
configs = swift_helper.get_swift_configs()
confs = {c['name']: c['value'] for c in configs}
core_site_safety_valve = xmlutils.create_elements_xml(confs)
all_confs = {
'HDFS': {
'zookeeper_service':
self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
'dfs_block_local_path_access_user':
'impala' if imp_count else '',
'core_site_safety_valve': core_site_safety_valve
},
'HIVE': {
'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
'sentry_service':
self.SENTRY_SERVICE_NAME if snt_count else '',
'zookeeper_service':
self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
},
'OOZIE': {
'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
'hive_service':
self.HIVE_SERVICE_NAME if hive_count else '',
'zookeeper_service':
self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
},
'YARN': {
'hdfs_service': self.HDFS_SERVICE_NAME,
'zookeeper_service':
self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
},
'HUE': {
'hive_service': self.HIVE_SERVICE_NAME,
'oozie_service': self.OOZIE_SERVICE_NAME,
'sentry_service':
self.SENTRY_SERVICE_NAME if snt_count else '',
'solr_service':
self.SOLR_SERVICE_NAME if slr_count else '',
'zookeeper_service':
self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
'hbase_service':
self.HBASE_SERVICE_NAME if hbm_count else '',
'impala_service':
self.IMPALA_SERVICE_NAME if imp_count else '',
'sqoop_service':
self.SQOOP_SERVICE_NAME if sqp_count else ''
},
'SPARK_ON_YARN': {
'yarn_service': self.YARN_SERVICE_NAME
},
'HBASE': {
'hdfs_service': self.HDFS_SERVICE_NAME,
'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME,
'hbase_enable_indexing': 'true' if ks_count else 'false',
'hbase_enable_replication':
'true' if ks_count else 'false'
},
'FLUME': {
'hdfs_service': self.HDFS_SERVICE_NAME,
'solr_service':
self.SOLR_SERVICE_NAME if slr_count else '',
'hbase_service':
self.HBASE_SERVICE_NAME if hbm_count else ''
},
'SENTRY': {
'hdfs_service': self.HDFS_SERVICE_NAME,
'sentry_server_config_safety_valve': (
c_helper.SENTRY_IMPALA_CLIENT_SAFETY_VALVE
if imp_count else '')
},
'SOLR': {
'hdfs_service': self.HDFS_SERVICE_NAME,
'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
},
'SQOOP': {
'mapreduce_yarn_service': self.YARN_SERVICE_NAME
},
'KS_INDEXER': {
'hbase_service': self.HBASE_SERVICE_NAME,
'solr_service': self.SOLR_SERVICE_NAME
},
'IMPALA': {
'hdfs_service': self.HDFS_SERVICE_NAME,
'hbase_service':
self.HBASE_SERVICE_NAME if hbm_count else '',
#.........这里部分代码省略.........
开发者ID:esikachev,项目名称:sahara-backup,代码行数:101,代码来源:cloudera_utils.py
示例18: _get_swift_properties
def _get_swift_properties(self):
    """Return the swift filesystem configs from the swift helper."""
    return h.get_swift_configs()
开发者ID:AspirinSJL,项目名称:sahara,代码行数:2,代码来源:services.py
示例19: _get_hadoop_configs
def _get_hadoop_configs(pctx, instance):
    """Build the per-service Hadoop config dicts for an instance.

    Returns a dict mapping service names ('Hadoop', 'HDFS', 'YARN',
    'MapReduce', 'JobFlow', 'Hive') to their key/value settings,
    according to which services are deployed on the cluster.
    """
    cluster = instance.node_group.cluster
    nn_hostname = vu.get_instance_hostname(vu.get_namenode(cluster))
    dirs = _get_hadoop_dirs(instance)
    confs = {
        'Hadoop': {
            'fs.defaultFS': 'hdfs://%s:9000' % nn_hostname
        },
        'HDFS': {
            'dfs.namenode.name.dir': ','.join(dirs['hadoop_name_dirs']),
            'dfs.datanode.data.dir': ','.join(dirs['hadoop_data_dirs']),
            'dfs.hosts': '%s/dn-include' % HADOOP_CONF_DIR,
            'dfs.hosts.exclude': '%s/dn-exclude' % HADOOP_CONF_DIR
        }
    }

    res_hostname = vu.get_instance_hostname(vu.get_resourcemanager(cluster))
    if res_hostname:
        confs['YARN'] = {
            'yarn.nodemanager.aux-services': 'mapreduce_shuffle',
            'yarn.resourcemanager.hostname': '%s' % res_hostname,
            'yarn.resourcemanager.nodes.include-path': '%s/nm-include' % (
                HADOOP_CONF_DIR),
            'yarn.resourcemanager.nodes.exclude-path': '%s/nm-exclude' % (
                HADOOP_CONF_DIR)
        }
        confs['MapReduce'] = {
            'mapreduce.framework.name': 'yarn'
        }
        hs_hostname = vu.get_instance_hostname(vu.get_historyserver(cluster))
        if hs_hostname:
            confs['MapReduce']['mapreduce.jobhistory.address'] = (
                "%s:10020" % hs_hostname)

    oozie = vu.get_oozie(cluster)
    if oozie:
        hadoop_cfg = {
            'hadoop.proxyuser.hadoop.hosts': '*',
            'hadoop.proxyuser.hadoop.groups': 'hadoop'
        }
        confs['Hadoop'].update(hadoop_cfg)
        oozie_cfg = o_helper.get_oozie_required_xml_configs(HADOOP_CONF_DIR)
        if c_helper.is_mysql_enabled(pctx, cluster):
            oozie_cfg.update(o_helper.get_oozie_mysql_configs())
        confs['JobFlow'] = oozie_cfg

    if c_helper.is_swift_enabled(pctx, cluster):
        swift_configs = {}
        for config in swift.get_swift_configs():
            swift_configs[config['name']] = config['value']
        confs['Hadoop'].update(swift_configs)

    if c_helper.is_data_locality_enabled(pctx, cluster):
        confs['Hadoop'].update(th.TOPOLOGY_CONFIG)
        confs['Hadoop'].update({"topology.script.file.name":
                                HADOOP_CONF_DIR + "/topology.sh"})

    hive_hostname = vu.get_instance_hostname(vu.get_hiveserver(cluster))
    if hive_hostname:
        hive_cfg = {
            'hive.warehouse.subdir.inherit.perms': True,
            'javax.jdo.option.ConnectionURL':
                'jdbc:derby:;databaseName=/opt/hive/metastore_db;create=true'
        }
        if c_helper.is_mysql_enabled(pctx, cluster):
            hive_cfg.update({
                'javax.jdo.option.ConnectionURL':
                    'jdbc:mysql://%s/metastore' % hive_hostname,
                'javax.jdo.option.ConnectionDriverName':
                    'com.mysql.jdbc.Driver',
                'javax.jdo.option.ConnectionUserName': 'hive',
                'javax.jdo.option.ConnectionPassword': 'pass',
                'datanucleus.autoCreateSchema': 'false',
                'datanucleus.fixedDatastore': 'true',
                'hive.metastore.uris': 'thrift://%s:9083' % hive_hostname,
            })

        proxy_configs = cluster.cluster_configs.get('proxy_configs')
        if proxy_configs and c_helper.is_swift_enabled(pctx, cluster):
            hive_cfg.update({
                swift.HADOOP_SWIFT_USERNAME: proxy_configs['proxy_username'],
                swift.HADOOP_SWIFT_PASSWORD: proxy_configs['proxy_password'],
                swift.HADOOP_SWIFT_TRUST_ID: proxy_configs['proxy_trust_id'],
                swift.HADOOP_SWIFT_DOMAIN_NAME: CONF.proxy_user_domain_name
            })
        confs['Hive'] = hive_cfg

    return confs
开发者ID:egafford,项目名称:sahara,代码行数:93,代码来源:config.py
示例20: _get_configs
def _get_configs(self, service, cluster=None, instance=None):
    """Assemble Cloudera Manager configs and return the slice for one
    service.

    Merges (in increasing precedence) plugin defaults, Hue/Hive wiring,
    user-supplied cluster configs, node-group user configs and
    per-instance defaults, then returns all_confs[service] (or {}).
    """
    def get_hadoop_dirs(mount_points, suffix):
        # e.g. ['/mnt/a', '/mnt/b'] + '/fs/dn' -> '/mnt/a/fs/dn,/mnt/b/fs/dn'
        return ','.join([x + suffix for x in mount_points])

    all_confs = {}
    if cluster:
        zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
        core_site_safety_valve = ''
        if self.pu.c_helper.is_swift_enabled(cluster):
            configs = swift_helper.get_swift_configs()
            confs = {c['name']: c['value'] for c in configs}
            core_site_safety_valve = xmlutils.create_elements_xml(confs)
        all_confs = {
            'HDFS': {
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else '',
                'core_site_safety_valve': core_site_safety_valve
            },
            'HIVE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'OOZIE': {
                'mapreduce_yarn_service': self.YARN_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'YARN': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'HUE': {
                'hive_service': self.HIVE_SERVICE_NAME,
                'oozie_service': self.OOZIE_SERVICE_NAME,
                'zookeeper_service':
                    self.ZOOKEEPER_SERVICE_NAME if zk_count else ''
            },
            'SPARK_ON_YARN': {
                'yarn_service': self.YARN_SERVICE_NAME
            },
            'HBASE': {
                'hdfs_service': self.HDFS_SERVICE_NAME,
                'zookeeper_service': self.ZOOKEEPER_SERVICE_NAME
            }
        }
        hive_confs = {
            'HIVE': {
                'hive_metastore_database_type': 'postgresql',
                'hive_metastore_database_host':
                    self.pu.get_manager(cluster).internal_ip,
                'hive_metastore_database_port': '7432',
                'hive_metastore_database_password':
                    self.pu.db_helper.get_hive_db_password(cluster)
            }
        }
        hue_confs = {
            'HUE': {
                'hue_webhdfs':
                    self.pu.get_role_name(self.pu.get_namenode(cluster),
                                          'NAMENODE')
            }
        }
        all_confs = s_cfg.merge_configs(all_confs, hue_confs)
        all_confs = s_cfg.merge_configs(all_confs, hive_confs)
        all_confs = s_cfg.merge_configs(all_confs, cluster.cluster_configs)

    if instance:
        paths = instance.storage_paths()
        instance_default_confs = {
            'NAMENODE': {
                'dfs_name_dir_list': get_hadoop_dirs(paths, '/fs/nn')
            },
            'SECONDARYNAMENODE': {
                'fs_checkpoint_dir_list':
                    get_hadoop_dirs(paths, '/fs/snn')
            },
            'DATANODE': {
                'dfs_data_dir_list': get_hadoop_dirs(paths, '/fs/dn'),
                'dfs_datanode_data_dir_perm': 755,
                'dfs_datanode_handler_count': 30
            },
            'NODEMANAGER': {
                'yarn_nodemanager_local_dirs':
                    get_hadoop_dirs(paths, '/yarn/local')
            },
            'SERVER': {
                'maxSessionTimeout': 60000
            }
        }
        ng_user_confs = self.pu.convert_process_configs(
            instance.node_group.node_configs)
        all_confs = s_cfg.merge_configs(all_confs, ng_user_confs)
        all_confs = s_cfg.merge_configs(all_confs, instance_default_confs)

    return all_confs.get(service, {})
开发者ID:egafford,项目名称:sahara,代码行数:100,代码来源:cloudera_utils.py
注:本文中的sahara.swift.swift_helper.get_swift_configs函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论