This article collects typical usage examples of the Python function sahara.utils.files.get_file_text. If you are unsure what get_file_text does, how to call it, or how it is used in practice, the curated examples below should help.
Twenty code examples of get_file_text are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python samples.
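For orientation: get_file_text reads a text file that ships inside the sahara package, resolving the given relative path against the installed package root. A minimal sketch of the idea, assuming a pkg_resources-based lookup (the default package argument here is an illustration, not a verbatim copy of sahara's source):

import pkg_resources


def get_file_text(file_name, package='sahara'):
    # Resolve a relative path such as 'plugins/resources/krb5_config'
    # against the installed package and return the file body as text.
    full_name = pkg_resources.resource_filename(package, file_name)
    with open(full_name) as fp:
        return fp.read()

The examples below all start from this call, then template, upload, or compare the fetched content.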
Example 1: get_config_files
def get_config_files(self, cluster_context, configs, instance=None):
    template = 'plugins/mapr/services/hue/resources/hue_%s.template'

    # hue.ini
    hue_ini = bcf.TemplateFile("hue.ini")
    hue_ini.remote_path = self.conf_dir(cluster_context)
    hue_ini.parse(files.get_file_text(template % self.version))
    hue_ini.add_properties(self._get_hue_ini_props(cluster_context))
    hue_ini.add_property("thrift_version",
                         configs[self.THRIFT_VERSION.name])

    # hue.sh
    hue_sh_template = 'plugins/mapr/services/hue/' \
                      'resources/hue_sh_%s.template'
    hue_sh = bcf.TemplateFile("hue.sh")
    hue_sh.remote_path = self.home_dir(cluster_context) + '/bin'
    hue_sh.parse(files.get_file_text(hue_sh_template % self.version))
    hue_sh.add_property('hadoop_version', cluster_context.hadoop_version)
    hue_sh.mode = 777

    hue_instances = cluster_context.get_instances(HUE)
    for instance in hue_instances:
        if instance not in cluster_context.changed_instances():
            cluster_context.should_be_restarted[self] += [instance]

    return [hue_ini, hue_sh]
Author: frgaudet | Project: sahara | Lines: 25 | Source: hue.py
Example 2: _post_configuration
def _post_configuration(pctx, instance):
    dirs = _get_hadoop_dirs(instance)
    args = {
        "hadoop_user": HADOOP_USER,
        "hadoop_group": HADOOP_GROUP,
        "hadoop_conf_dir": HADOOP_CONF_DIR,
        "oozie_conf_dir": OOZIE_CONF_DIR,
        "hadoop_name_dirs": " ".join(dirs["hadoop_name_dirs"]),
        "hadoop_data_dirs": " ".join(dirs["hadoop_data_dirs"]),
        "hadoop_log_dir": dirs["hadoop_log_dir"],
        "hadoop_secure_dn_log_dir": dirs["hadoop_secure_dn_log_dir"],
        "yarn_log_dir": dirs["yarn_log_dir"],
    }
    post_conf_script = f.get_file_text(
        "plugins/vanilla/hadoop2/resources/post_conf.template")
    post_conf_script = post_conf_script.format(**args)

    with instance.remote() as r:
        r.write_file_to("/tmp/post_conf.sh", post_conf_script)
        r.execute_command("chmod +x /tmp/post_conf.sh")
        r.execute_command("sudo /tmp/post_conf.sh")

        if c_helper.is_data_locality_enabled(pctx, instance.cluster):
            t_script = HADOOP_CONF_DIR + "/topology.sh"
            r.write_file_to(
                t_script,
                f.get_file_text(
                    "plugins/vanilla/hadoop2/resources/topology.sh"),
                run_as_root=True)
            r.execute_command("chmod +x " + t_script, run_as_root=True)
Author: uladz | Project: sahara | Lines: 27 | Source: config.py
Example 3: _post_configuration
def _post_configuration(instance):
    node_group = instance.node_group
    dirs = _get_hadoop_dirs(node_group)
    args = {
        'hadoop_user': HADOOP_USER,
        'hadoop_group': HADOOP_GROUP,
        'hadoop_conf_dir': HADOOP_CONF_DIR,
        'oozie_conf_dir': OOZIE_CONF_DIR,
        'hadoop_name_dirs': " ".join(dirs['hadoop_name_dirs']),
        'hadoop_data_dirs': " ".join(dirs['hadoop_data_dirs']),
        'hadoop_log_dir': dirs['hadoop_log_dir'],
        'hadoop_secure_dn_log_dir': dirs['hadoop_secure_dn_log_dir'],
        'yarn_log_dir': dirs['yarn_log_dir']
    }
    post_conf_script = f.get_file_text(
        'plugins/vanilla/v2_3_0/resources/post_conf.template')
    post_conf_script = post_conf_script.format(**args)

    with instance.remote() as r:
        r.write_file_to('/tmp/post_conf.sh', post_conf_script)
        r.execute_command('chmod +x /tmp/post_conf.sh')
        r.execute_command('sudo /tmp/post_conf.sh')

        if c_helper.is_data_locality_enabled(instance.node_group.cluster):
            t_script = HADOOP_CONF_DIR + '/topology.sh'
            r.write_file_to(t_script, f.get_file_text(
                'plugins/vanilla/v2_3_0/resources/topology.sh'),
                run_as_root=True)
            r.execute_command('chmod +x ' + t_script, run_as_root=True)
Author: B-Rich | Project: sahara | Lines: 29 | Source: config.py
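Examples 2 and 3 rely on the same two-step pattern: read a shell-script template with get_file_text, then fill its {placeholder} fields with str.format(**args). A self-contained sketch of just the substitution step (the template text is invented for illustration):

template = (
    "#!/bin/bash\n"
    "mkdir -p {hadoop_log_dir}\n"
    "chown {hadoop_user}:{hadoop_group} {hadoop_log_dir}\n"
)
args = {
    'hadoop_user': 'hadoop',
    'hadoop_group': 'hadoop',
    'hadoop_log_dir': '/var/log/hadoop',
}
post_conf_script = template.format(**args)
print(post_conf_script)

Note that .format(**args) raises KeyError if the template names a placeholder the args dict does not supply, so the two have to stay in sync.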
Example 4: _get_kdc_config
def _get_kdc_config(cluster, os):
    if os == "ubuntu":
        data = files.get_file_text('plugins/resources/kdc_conf')
    else:
        data = files.get_file_text('plugins/resources/kdc_conf_redhat')
    return data % {
        'realm_name': get_realm_name(cluster)
    }
Author: openstack | Project: sahara | Lines: 8 | Source: kerberos.py
Example 5: _edp_pig_test
def _edp_pig_test(self):
    pig_job = f.get_file_text(RESOURCES_PATH + 'edp-job.pig')
    pig_lib = f.get_file_text(RESOURCES_PATH + 'edp-lib.jar')
    self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                     job_data_list=[{'pig': pig_job}],
                     lib_data_list=[{'jar': pig_lib}],
                     swift_binaries=True,
                     hdfs_local_output=True)
Author: JohannaMW | Project: sahara | Lines: 8 | Source: test_vanilla_two_gating.py
Example 6: _edp_test
def _edp_test(self):
    path = 'tests/integration/tests/resources/'

    # check pig
    pig_job = f.get_file_text(path + 'edp-job.pig')
    pig_lib = f.get_file_text(path + 'edp-lib.jar')
    self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG,
                     job_data_list=[{'pig': pig_job}],
                     lib_data_list=[{'jar': pig_lib}],
                     swift_binaries=True,
                     hdfs_local_output=True)

    # check mapreduce
    mapreduce_jar = f.get_file_text(path + 'edp-mapreduce.jar')
    mapreduce_configs = {
        'configs': {
            'mapred.mapper.class': 'org.apache.oozie.example.SampleMapper',
            'mapred.reducer.class':
                'org.apache.oozie.example.SampleReducer'
        }
    }
    self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE,
                     job_data_list=[],
                     lib_data_list=[{'jar': mapreduce_jar}],
                     configs=mapreduce_configs,
                     swift_binaries=True,
                     hdfs_local_output=True)

    # check mapreduce streaming
    mapreduce_streaming_configs = {
        'configs': {
            'edp.streaming.mapper': '/bin/cat',
            'edp.streaming.reducer': '/usr/bin/wc'
        }
    }
    self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING,
                     job_data_list=[],
                     lib_data_list=[],
                     configs=mapreduce_streaming_configs)

    # check java
    java_jar = f.get_file_text(
        path + 'hadoop-mapreduce-examples-2.3.0.jar')
    java_configs = {
        'configs': {
            'edp.java.main_class':
                'org.apache.hadoop.examples.QuasiMonteCarlo'
        },
        'args': ['10', '10']
    }
    self.edp_testing(utils_edp.JOB_TYPE_JAVA,
                     job_data_list=[],
                     lib_data_list=[{'jar': java_jar}],
                     configs=java_configs)
Author: B-Rich | Project: sahara | Lines: 54 | Source: test_hdp2_gating.py
Example 7: _update_jackson_libs
def _update_jackson_libs(self, context, instances):
    hadoop_lib = context.hadoop_lib
    core_asl = f.get_file_text(JACKSON_CORE_ASL)
    mapper_asl = f.get_file_text(JACKSON_MAPPER_ASL)
    core_asl_path = '%s/%s' % (hadoop_lib, 'jackson-core-asl-1.9.13.jar')
    mapper_path = '%s/%s' % (hadoop_lib, 'jackson-mapper-asl-1.9.13.jar')
    libs = {
        core_asl_path: core_asl,
        mapper_path: mapper_asl
    }
    for instance in instances:
        with instance.remote() as r:
            r.execute_command('rm %s/jackson-*.jar' % hadoop_lib,
                              run_as_root=True)
            r.write_files_to(libs, run_as_root=True)
Author: AlexanderYAPPO | Project: sahara | Lines: 15 | Source: mapreduce.py
Example 8: test_load_template_with_anti_affinity_single_ng
def test_load_template_with_anti_affinity_single_ng(self):
    """This test checks Heat cluster template with Neutron enabled
    and anti-affinity feature enabled for single node process
    in single node group.
    """
    ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                          floating_ip_pool='floating', image_id=None,
                          volumes_per_node=0, volumes_size=0, id=1)
    ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 2,
                          floating_ip_pool='floating', image_id=None,
                          volumes_per_node=0, volumes_size=0, id=2)
    cluster = tu.create_cluster("cluster", "tenant1", "general",
                                "1.2.1", [ng1, ng2],
                                user_keypair_id='user_key',
                                neutron_management_network='private_net',
                                default_image_id='1',
                                anti_affinity=['datanode'], image_id=None)
    aa_heat_template = h.ClusterTemplate(cluster)
    aa_heat_template.add_node_group_extra(ng1['id'], 1,
                                          get_ud_generator('line1\nline2'))
    aa_heat_template.add_node_group_extra(ng2['id'], 2,
                                          get_ud_generator('line2\nline3'))

    self.override_config("use_neutron", True)
    main_template = h._load_template(
        'main.heat', {'resources':
                      aa_heat_template._serialize_resources()})

    self.assertEqual(
        json.loads(main_template),
        json.loads(f.get_file_text(
            "tests/unit/resources/"
            "test_serialize_resources_aa.heat")))
Author: phamtruong91 | Project: sahara | Lines: 34 | Source: test_heat.py
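The assertion in Example 8 parses both the rendered template and the stored fixture with json.loads before comparing, so whitespace and key-ordering differences cannot cause false failures. The trick in isolation:

import json

rendered = '{"resources": {"count": 2}}'
expected = '{\n    "resources": {"count": 2}\n}'
# Compare parsed structures, not raw strings.
assert json.loads(rendered) == json.loads(expected)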
Example 9: _install_swift_jar
def _install_swift_jar(self, context, instances):
    LOG.debug('Installing Swift jar')
    jar = f.get_file_text(Swift.HADOOP_SWIFT_JAR)
    path = '%s/swift.jar' % context.hadoop_lib
    for instance in instances:
        with instance.remote() as r:
            r.write_file_to(path, jar, run_as_root=True)
Author: AlexanderYAPPO | Project: sahara | Lines: 7 | Source: swift.py
Example 10: _get_krb5_config
def _get_krb5_config(cluster, server_fqdn):
    data = files.get_file_text('plugins/resources/krb5_config')
    return data % {
        'realm_name': get_realm_name(cluster),
        'server': server_fqdn,
        'node_domain': CONF.node_domain,
    }
Author: openstack | Project: sahara | Lines: 7 | Source: kerberos.py
Example 11: get_builtin_binaries
def get_builtin_binaries(job, configs):
    if job.type == JOB_TYPE_JAVA:
        if is_adapt_for_oozie_enabled(configs):
            path = 'service/edp/resources/edp-main-wrapper.jar'
            name = 'builtin-%s.jar' % six.text_type(uuid.uuid4())
            return [{'raw': files.get_file_text(path),
                     'name': name}]
    return []
Author: AllenFromMinneapolis | Project: sahara | Lines: 8 | Source: edp.py
Example 12: load_properties_file
def load_properties_file(path):
    predicate = fu.and_predicate(lambda i: len(i) != 0,
                                 lambda i: not i.isspace(),
                                 lambda i: not i.startswith('#'))
    mapper = fu.chain_function(lambda i: tuple(i.split('=')),
                               lambda i: (i[0].strip(), i[1].strip()))
    lines = f.get_file_text(path).splitlines()
    return dict(map(mapper, filter(predicate, lines)))
Author: a9261 | Project: sahara | Lines: 8 | Source: config_file_utils.py
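load_properties_file keeps only non-empty, non-comment lines, then splits each one on '=' and strips the key and value. The same logic in plain Python, without sahara's fu combinators (the sample input is invented):

def load_properties(text):
    # Drop blank lines and '#' comments.
    lines = [line for line in text.splitlines()
             if line and not line.isspace() and not line.startswith('#')]
    # Split on the first '=' and strip whitespace around key and value.
    return dict((key.strip(), value.strip())
                for key, sep, value in (line.partition('=') for line in lines)
                if sep)

sample = "# metastore\nhive.user = hive\nhive.password = secret\n"
print(load_properties(sample))
# {'hive.user': 'hive', 'hive.password': 'secret'}

Unlike the original, this sketch splits only on the first '=', so values may themselves contain '='.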
Example 13: _create_script_obj
def _create_script_obj(filename, template, **kwargs):
    script = cf.TemplateFile(filename)
    script.remote_path = '/tmp/'
    script.parse(f.get_file_text(
        'plugins/mapr/services/mysql/resources/%s' % template))
    for k, v in six.iteritems(kwargs):
        script.add_property(k, v)
    return script
Author: AllenFromMinneapolis | Project: sahara | Lines: 8 | Source: mysql.py
Example 14: _push_configs_to_new_node
def _push_configs_to_new_node(self, cluster, extra, instance):
    ng_extra = extra[instance.node_group.id]

    files_hadoop = {
        "/etc/hadoop/conf/core-site.xml": ng_extra["xml"]["core-site"],
        "/etc/hadoop/conf/hdfs-site.xml": ng_extra["xml"]["hdfs-site"],
    }

    sp_home = self._spark_home(cluster)
    files_spark = {
        os.path.join(sp_home, "conf/spark-env.sh"): ng_extra["sp_master"],
        os.path.join(sp_home, "conf/slaves"): ng_extra["sp_slaves"],
    }

    files_init = {
        "/tmp/sahara-hadoop-init.sh": ng_extra["setup_script"],
        "id_rsa": cluster.management_private_key,
        "authorized_keys": cluster.management_public_key,
    }

    # pietro: This is required because the (secret) key is not stored in
    # .ssh which hinders password-less ssh required by spark scripts
    key_cmd = (
        "sudo cp $HOME/id_rsa $HOME/.ssh/; "
        "sudo chown $USER $HOME/.ssh/id_rsa; "
        "sudo chmod 600 $HOME/.ssh/id_rsa"
    )

    for ng in cluster.node_groups:
        dn_path = c_helper.extract_hadoop_path(ng.storage_paths(),
                                               "/dfs/dn")
        nn_path = c_helper.extract_hadoop_path(ng.storage_paths(),
                                               "/dfs/nn")
        hdfs_dir_cmd = ("sudo mkdir -p %s %s;"
                        "sudo chown -R hdfs:hadoop %s %s;"
                        "sudo chmod 755 %s %s;") % (nn_path, dn_path,
                                                    nn_path, dn_path,
                                                    nn_path, dn_path)

    with remote.get_remote(instance) as r:
        r.execute_command("sudo chown -R $USER:$USER /etc/hadoop")
        r.execute_command("sudo chown -R $USER:$USER %s" % sp_home)
        r.write_files_to(files_hadoop)
        r.write_files_to(files_spark)
        r.write_files_to(files_init)
        r.execute_command("sudo chmod 0500 /tmp/sahara-hadoop-init.sh")
        r.execute_command("sudo /tmp/sahara-hadoop-init.sh "
                          ">> /tmp/sahara-hadoop-init.log 2>&1")
        r.execute_command(hdfs_dir_cmd)
        r.execute_command(key_cmd)

        if c_helper.is_data_locality_enabled(cluster):
            r.write_file_to(
                "/etc/hadoop/topology.sh",
                f.get_file_text("plugins/spark/resources/topology.sh"))
            r.execute_command("sudo chmod +x /etc/hadoop/topology.sh")
            self._write_topology_data(r, cluster, extra)

        self._push_master_configs(r, cluster, extra, instance)
Author: hao707822882 | Project: sahara | Lines: 58 | Source: plugin.py
Example 15: get_config_files
def get_config_files(self, cluster_context, configs, instance=None):
    hive_default = 'plugins/mapr/services/hive/resources/hive-default.xml'

    hive_site = bcf.HadoopXML("hive-site.xml")
    hive_site.remote_path = self.conf_dir(cluster_context)
    if instance:
        hive_site.fetch(instance)
    hive_site.parse(files.get_file_text(hive_default))
    hive_site.add_properties(self._get_hive_site_props(cluster_context))
    return [hive_site]
Author: jfrodriguez | Project: sahara | Lines: 9 | Source: hive.py
Example 16: oozie_create_db
def oozie_create_db(remote):
    LOG.debug("Creating Oozie DB Schema...")
    sql_script = files.get_file_text(
        'plugins/vanilla/v1_2_1/resources/create_oozie_db.sql')
    script_location = "create_oozie_db.sql"
    remote.write_file_to(script_location, sql_script)
    remote.execute_command('mysql -u root < %(script_location)s && '
                           'rm %(script_location)s' %
                           {"script_location": script_location})
Author: a9261 | Project: sahara | Lines: 9 | Source: run_scripts.py
Example 17: run_script
def run_script(instance, script, run_as=None, *args, **kwargs):
    with instance.remote() as r:
        path = '/tmp/%s.sh' % uuid.uuid4()
        script = files.get_file_text(script) % kwargs
        r.write_file_to(path, script, run_as_root=(run_as == 'root'))
        r.execute_command(_run_as(run_as, 'chmod +x %s' % path))
        r.execute_command(_run_as(run_as, '%s %s' % (path, ' '.join(args))))
        # FIXME(aosadchyi): reuse existing remote
        remove(instance, path, run_as=run_as)
Author: AllenFromMinneapolis | Project: sahara | Lines: 9 | Source: general.py
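run_script strings together the recurring steps in one helper: render the script with %-style keyword substitution, upload it under a unique /tmp name, mark it executable, run it with positional arguments, and clean it up. A local analogue of the same flow, assuming a POSIX shell (the file name and sample script are invented):

import os
import subprocess
import tempfile
import uuid

script_template = '#!/bin/sh\necho "user=%(user)s args=$@"\n'
script = script_template % {'user': 'hadoop'}

path = os.path.join(tempfile.gettempdir(), '%s.sh' % uuid.uuid4())
with open(path, 'w') as fp:
    fp.write(script)
os.chmod(path, 0o700)            # the remote version runs 'chmod +x'

subprocess.check_call([path, '10', '20'])
os.remove(path)                  # mirrors the cleanup noted in the FIXME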
Example 18: configure_topology
def configure_topology(self, topology_str, r):
    r.write_file_to(
        '/etc/hadoop/conf/topology.sh',
        f.get_file_text(
            'plugins/hdp/versions/version_1_3_2/resources/topology.sh'))
    r.execute_command(
        'chmod +x /etc/hadoop/conf/topology.sh', run_as_root=True
    )
    r.write_file_to('/etc/hadoop/conf/topology.data', topology_str)
Author: YongchaoTIAN | Project: sahara | Lines: 9 | Source: hadoopserver.py
Example 19: _edp_test
def _edp_test(self):
    path = 'tests/integration/tests/resources/'

    # check pig
    pig_job = f.get_file_text(path + 'edp-job.pig')
    pig_lib = f.get_file_text(path + 'edp-lib.jar')
    self.edp_testing('Pig', [{'pig': pig_job}], [{'jar': pig_lib}])

    # check mapreduce
    mapreduce_jar = f.get_file_text(path + 'edp-mapreduce.jar')
    mapreduce_configs = {
        'configs': {
            'mapred.mapper.class': 'org.apache.oozie.example.SampleMapper',
            'mapred.reducer.class':
                'org.apache.oozie.example.SampleReducer'
        }
    }
    self.edp_testing('MapReduce', [], [{'jar': mapreduce_jar}],
                     mapreduce_configs)

    # check mapreduce streaming
    mapreduce_streaming_configs = {
        'configs': {
            'edp.streaming.mapper': '/bin/cat',
            'edp.streaming.reducer': '/usr/bin/wc'
        }
    }
    self.edp_testing('MapReduce.Streaming', [], [],
                     mapreduce_streaming_configs)

    # check java
    java_jar = f.get_file_text(
        path + 'hadoop-mapreduce-examples-2.3.0.jar')
    java_configs = {
        'configs': {
            'edp.java.main_class':
                'org.apache.hadoop.examples.QuasiMonteCarlo'
        },
        'args': ['10', '10']
    }
    self.edp_testing('Java', [], lib_data_list=[{'jar': java_jar}],
                     configs=java_configs)
Author: qinweiwei | Project: sahara | Lines: 43 | Source: test_vanilla_two_gating.py
Example 20: hive_create_db
def hive_create_db(remote, hive_mysql_passwd):
    LOG.debug("Creating Hive metastore db...")
    sql_script = files.get_file_text(
        'plugins/vanilla/v1_2_1/resources/create_hive_db.sql')
    sql_script = sql_script.replace('pass', hive_mysql_passwd)
    script_location = "create_hive_db.sql"
    remote.write_file_to(script_location, sql_script)
    remote.execute_command('mysql -u root < %(script_location)s && '
                           'rm %(script_location)s' %
                           {"script_location": script_location})
Author: a9261 | Project: sahara | Lines: 10 | Source: run_scripts.py
Note: the sahara.utils.files.get_file_text examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs, and the snippets were selected from community-contributed open source projects. Copyright of the source code remains with the original authors; see each project's license before redistributing or using the code. Do not reproduce without permission.