This article collects typical usage examples of the sahara.plugins.utils.get_instances function in Python. If you have been wondering what exactly get_instances does, how to call it, or what real-world uses look like, the curated code samples below may help.
The following shows 20 code examples of the get_instances function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
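Before the numbered examples, here is a minimal usage sketch of the two calling patterns that recur throughout this page: fetching every instance in a cluster, and filtering by a node process name. The signature get_instances(cluster, node_process=None), the "datanode" process name, and the helper function below are assumptions made for illustration based on the samples that follow, not an excerpt from the sahara source tree.

# Minimal sketch, assuming get_instances(cluster, node_process=None); the
# "datanode" process name and this helper function are hypothetical.
from sahara.plugins import utils as plugin_utils

def summarize_cluster(cluster):
    # Without a filter, instances from every node group in the cluster are returned.
    all_instances = plugin_utils.get_instances(cluster)

    # With a node process name, only instances whose node group runs that
    # process are returned (an empty list when nothing matches).
    datanodes = plugin_utils.get_instances(cluster, "datanode")

    # Instances expose helpers such as hostname(), as several examples below use.
    return len(all_instances), [dn.hostname() for dn in datanodes]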
Example 1: test_get_instances
def test_get_instances(self):
    res = pu.get_instances(self.cluster)
    self.assertEqual([
        FakeInstance("1"), FakeInstance("2"), FakeInstance("3")], res)

    res = pu.get_instances(self.cluster, "node_process1")
    self.assertEqual([FakeInstance("1")], res)
Author: openstack | Project: sahara | Lines: 7 | Source: test_utils.py
Example 2: test_get_instances
def test_get_instances(self):
    self.assertEqual(5, len(u.get_instances(self.c1)))
    self.assertEqual([], u.get_instances(self.c1, "wrong-process"))
    self.assertEqual(self.ng1.instances, u.get_instances(self.c1, "nn"))
    instances = list(self.ng2.instances)
    instances += self.ng3.instances
    self.assertEqual(instances, u.get_instances(self.c1, "dn"))
Author: egafford | Project: sahara | Lines: 7 | Source: test_utils.py
Example 3: test_get_instances
def test_get_instances(self):
    self.assertEqual(len(u.get_instances(self.c1)), 5)
    self.assertEqual(u.get_instances(self.c1, 'wrong-process'), [])
    self.assertEqual(u.get_instances(self.c1, 'nn'),
                     self.ng1.instances)
    instances = list(self.ng2.instances)
    instances += self.ng3.instances
    self.assertEqual(u.get_instances(self.c1, 'dn'), instances)
Author: a9261 | Project: sahara | Lines: 8 | Source: test_utils.py
Example 4: restart_nns_and_rms
def restart_nns_and_rms(cluster):
    nns = plugin_utils.get_instances(cluster, p_common.NAMENODE)
    for nn in nns:
        restart_namenode(cluster, nn)

    rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER)
    for rm in rms:
        restart_resourcemanager(cluster, rm)
Author: Imperat | Project: sahara | Lines: 8 | Source: deploy.py
Example 5: deploy_kerberos
def deploy_kerberos(self, cluster):
    all_instances = plugin_utils.get_instances(cluster)
    namenodes = plugin_utils.get_instances(cluster, 'namenode')
    server = None
    if len(namenodes) > 0:
        server = namenodes[0]
    elif len(all_instances) > 0:
        server = all_instances[0]
    if server:
        krb.deploy_infrastructure(cluster, server)
Author: Imperat | Project: sahara | Lines: 10 | Source: plugin.py
Example 6: _extract_configs_to_extra
def _extract_configs_to_extra(self, cluster):
    st_master = utils.get_instance(cluster, "nimbus")
    zk_servers = utils.get_instances(cluster, "zookeeper")

    extra = dict()

    config_instances = ''
    if st_master is not None:
        if zk_servers is not None:
            zknames = []
            for zk in zk_servers:
                zknames.append(zk.hostname())

            config_instances = c_helper.generate_storm_config(
                st_master.hostname(),
                zknames)

    config = self._convert_dict_to_yaml(config_instances)
    supervisor_conf = c_helper.generate_slave_supervisor_conf()
    nimbus_ui_conf = c_helper.generate_master_supervisor_conf()
    zk_conf = c_helper.generate_zookeeper_conf()

    for ng in cluster.node_groups:
        extra[ng.id] = {
            'st_instances': config,
            'slave_sv_conf': supervisor_conf,
            'master_sv_conf': nimbus_ui_conf,
            'zk_conf': zk_conf
        }

    return extra
Author: frgaudet | Project: sahara | Lines: 31 | Source: plugin.py
Example 7: start_cluster
def start_cluster(self, cluster):
    self._set_cluster_info(cluster)
    deploy.start_cluster(cluster)
    cluster_instances = plugin_utils.get_instances(cluster)
    swift_helper.install_ssl_certs(cluster_instances)
    deploy.add_hadoop_swift_jar(cluster_instances)
    deploy.prepare_hive(cluster)
Author: Imperat | Project: sahara | Lines: 7 | Source: plugin.py
Example 8: _clear_exclude_files
def _clear_exclude_files(cluster):
    for instance in u.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo > %s/dn-exclude" hadoop' % HADOOP_CONF_DIR)
            r.execute_command(
                'sudo su - -c "echo > %s/nm-exclude" hadoop' % HADOOP_CONF_DIR)
Author: a9261 | Project: sahara | Lines: 7 | Source: scaling.py
Example 9: get_plain_instances
def get_plain_instances(self):
    fs = self.get_fs_instances()
    zk = self.get_zk_instances()
    cldb = self.get_cldb_instances()
    zk_fs_cldb = zk + fs + cldb
    instances = u.get_instances(self.get_cluster())
    return [i for i in instances if i not in zk_fs_cldb]
Author: a9261 | Project: sahara | Lines: 7 | Source: base_context.py
Example 10: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_instance(cluster, "namenode")
    sm_instance = utils.get_instance(cluster, "master")
    dn_instances = utils.get_instances(cluster, "datanode")

    # Start the name node
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    # start the data nodes
    self._start_slave_datanode_processes(dn_instances)

    LOG.info(_LI("Hadoop services in cluster %s have been started"),
             cluster.name)

    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                          "/user/$USER/")

    # start spark nodes
    if sm_instance:
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info(_LI("Spark service at '%s' has been started"),
                     sm_instance.hostname())

    LOG.info(_LI('Cluster %s has been started successfully'),
             cluster.name)
    self._set_cluster_info(cluster)
Author: degorenko | Project: sahara | Lines: 31 | Source: plugin.py
Example 11: start_cluster
def start_cluster(self, cluster):
    nn = vu.get_namenode(cluster)
    run.format_namenode(nn)
    run.start_hadoop_process(nn, 'namenode')

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_hadoop_process(snn, 'secondarynamenode')

    rm = vu.get_resourcemanager(cluster)
    if rm:
        run.start_yarn_process(rm, 'resourcemanager')

    run.start_dn_nm_processes(utils.get_instances(cluster))
    run.await_datanodes(cluster)

    hs = vu.get_historyserver(cluster)
    if hs:
        run.start_historyserver(hs)

    oo = vu.get_oozie(cluster)
    if oo:
        run.start_oozie_process(self.pctx, oo)

    hiveserver = vu.get_hiveserver(cluster)
    if hiveserver:
        run.start_hiveserver_process(self.pctx, hiveserver)

    self._set_cluster_info(cluster)
Author: AlexanderYAPPO | Project: sahara | Lines: 29 | Source: versionhandler.py
Example 12: _setup_instances
def _setup_instances(self, cluster, instances=None):
    extra = self._extract_configs_to_extra(cluster)
    if instances is None:
        instances = utils.get_instances(cluster)
    self._push_configs_to_nodes(cluster, extra, instances)
Author: crobby | Project: sahara | Lines: 7 | Source: plugin.py
Example 13: _validate_existing_ng_scaling
def _validate_existing_ng_scaling(self, cluster, existing):
    scalable_processes = self._get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            if ng.count > existing[ng.id] and ("datanode" in
                                               ng.node_processes):
                dn_to_delete += ng.count - existing[ng.id]
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, _("Spark plugin cannot scale nodegroup"
                               " with processes: %s") %
                    ' '.join(ng.node_processes))

    dn_amount = len(utils.get_instances(cluster, "datanode"))
    rep_factor = utils.get_config_value_or_default('HDFS',
                                                   "dfs.replication",
                                                   cluster)

    if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
        raise ex.ClusterCannotBeScaled(
            cluster.name, _("Spark plugin cannot shrink cluster because "
                            "there would be not enough nodes for HDFS "
                            "replicas (replication factor is %s)") %
            rep_factor)
Author: crobby | Project: sahara | Lines: 25 | Source: plugin.py
Example 14: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_instance(cluster, "namenode")
    dn_instances = utils.get_instances(cluster, "datanode")
    zep_instance = utils.get_instance(cluster, "zeppelin")

    # Start the name node
    self._start_namenode(nn_instance)

    # start the data nodes
    self._start_datanode_processes(dn_instances)

    LOG.info(_LI("Hadoop services have been started"))

    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                          "/user/$USER/")

    # start spark nodes
    self.start_spark(cluster)

    # start zeppelin, if necessary
    if zep_instance:
        self._start_zeppelin(zep_instance)

    LOG.info(_LI('Cluster has been started successfully'))
    self._set_cluster_info(cluster)
Author: crobby | Project: sahara | Lines: 27 | Source: plugin.py
Example 15: setup_agents
def setup_agents(cluster, instances=None):
    LOG.debug("Set up Ambari agents")
    manager_address = plugin_utils.get_instance(
        cluster, p_common.AMBARI_SERVER).fqdn()
    if not instances:
        instances = plugin_utils.get_instances(cluster)
    _setup_agents(instances, manager_address)
Author: Imperat | Project: sahara | Lines: 7 | Source: deploy.py
Example 16: configure_cluster_for_hdfs
def configure_cluster_for_hdfs(cluster, data_source_url):
    host = urlparse.urlparse(data_source_url).hostname

    etc_hosts_information = _get_cluster_hosts_information(host, cluster)
    if etc_hosts_information is None:
        # Ip address hasn't been resolved, the last chance is for VM itself
        return

    # If the cluster was already configured for this data source
    # there's no need to configure it again
    if _is_cluster_configured(cluster, etc_hosts_information.splitlines()):
        return

    etc_hosts_update = ('/tmp/etc-hosts-update'
                        '.%s' % six.text_type(uuidutils.generate_uuid()))
    tmp_etc_hosts = ('/tmp/etc-hosts'
                     '.%s' % six.text_type(uuidutils.generate_uuid()))
    update_etc_hosts_cmd = (
        'cat %(etc_hosts_update)s /etc/hosts | '
        'sort | uniq > %(tmp_etc_hosts)s && '
        'cat %(tmp_etc_hosts)s > /etc/hosts && '
        'rm -f %(tmp_etc_hosts)s %(etc_hosts_update)s' %
        {'etc_hosts_update': etc_hosts_update, 'tmp_etc_hosts': tmp_etc_hosts})

    for inst in u.get_instances(cluster):
        with inst.remote() as r:
            r.write_file_to(etc_hosts_update, etc_hosts_information)
            r.execute_command(update_etc_hosts_cmd, run_as_root=True)
Author: openstack | Project: sahara | Lines: 28 | Source: hdfs_helper.py
Example 17: start_cluster
def start_cluster(self, cluster):
    sm_instance = utils.get_instance(cluster, "nimbus")
    sl_instances = utils.get_instances(cluster, "supervisor")
    zk_instances = utils.get_instances(cluster, "zookeeper")

    # start zookeeper processes
    self._start_zookeeper_processes(zk_instances)

    # start storm master
    if sm_instance:
        self._start_storm_master(sm_instance)

    # start storm slaves
    self._start_slave_processes(sl_instances)

    LOG.info(_LI("Cluster {cluster} has been started successfully").format(
        cluster=cluster.name))
    self._set_cluster_info(cluster)
Author: egafford | Project: sahara | Lines: 17 | Source: plugin.py
Example 18: disable_repos
def disable_repos(cluster):
    if configs.use_base_repos_needed(cluster):
        LOG.debug("Using base repos")
        return
    instances = plugin_utils.get_instances(cluster)
    with context.ThreadGroup() as tg:
        for inst in instances:
            tg.spawn("disable-repos-%s" % inst.instance_name,
                     _disable_repos_on_inst, inst)
Author: Imperat | Project: sahara | Lines: 9 | Source: deploy.py
Example 19: _update_exclude_files
def _update_exclude_files(cluster, instances):
    datanodes = _get_instances_with_service(instances, "datanode")
    nodemanagers = _get_instances_with_service(instances, "nodemanager")
    dn_hosts = u.generate_fqdn_host_names(datanodes)
    nm_hosts = u.generate_fqdn_host_names(nodemanagers)
    for instance in u.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command(
                "sudo su - -c \"echo '%s' > %s/dn-exclude\" hadoop" % (
                    dn_hosts, HADOOP_CONF_DIR))
            r.execute_command(
                "sudo su - -c \"echo '%s' > %s/nm-exclude\" hadoop" % (
                    nm_hosts, HADOOP_CONF_DIR))
Author: jfrodriguez | Project: sahara | Lines: 9 | Source: scaling.py
Example 20: setup_agents
def setup_agents(cluster):
    LOG.debug("Set up Ambari agents")
    manager_address = plugin_utils.get_instance(
        cluster, p_common.AMBARI_SERVER).fqdn()
    with context.ThreadGroup() as tg:
        for inst in plugin_utils.get_instances(cluster):
            tg.spawn("hwx-agent-setup-%s" % inst.id,
                     _setup_agent, inst, manager_address)
    LOG.debug("Ambari agents has been installed")
Author: rogeryu27 | Project: sahara | Lines: 9 | Source: deploy.py
Note: The sahara.plugins.utils.get_instances examples in this article were collected by 纯净天空 from source-code and documentation hosts such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.