This article collects typical usage examples of the Python function sahara.plugins.utils.get_instance. If you are wondering how get_instance is used in Python, what it does, or what real calls to it look like, the hand-picked code examples below should help.
Twenty code examples of the get_instance function are shown below, sorted by popularity by default.
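Before the examples, here is a minimal usage sketch to orient readers; it is not taken from any single project below. The helper name show_hdfs_endpoint and the "namenode" process name are illustrative, and the behavior described in the comments (one matching instance, None when none exists, InvalidComponentCountException when several match) follows the test shown in Example 4.

from sahara.plugins import utils as plugin_utils

def show_hdfs_endpoint(cluster):
    # Look up the single instance running the "namenode" process.
    # get_instance returns None when no instance runs that process and
    # raises InvalidComponentCountException when more than one does.
    nn = plugin_utils.get_instance(cluster, "namenode")
    if nn is None:
        return None
    return "hdfs://%s:8020" % nn.hostname()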
Example 1: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_instance(cluster, "namenode")
    sm_instance = utils.get_instance(cluster, "master")
    dn_instances = utils.get_instances(cluster, "datanode")

    # Start the name node
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    # start the data nodes
    self._start_slave_datanode_processes(dn_instances)
    LOG.info(_LI("Hadoop services in cluster %s have been started"),
             cluster.name)

    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                          "/user/$USER/")

    # start spark nodes
    if sm_instance:
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info(_LI("Spark service at '%s' has been started"),
                     sm_instance.hostname())

    LOG.info(_LI('Cluster %s has been started successfully'),
             cluster.name)
    self._set_cluster_info(cluster)
Developer: degorenko, Project: sahara, Lines: 31, Source: plugin.py
Example 2: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_instance(cluster, "namenode")
    dn_instances = utils.get_instances(cluster, "datanode")
    zep_instance = utils.get_instance(cluster, "zeppelin")

    # Start the name node
    self._start_namenode(nn_instance)

    # start the data nodes
    self._start_datanode_processes(dn_instances)

    LOG.info(_LI("Hadoop services have been started"))

    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                          "/user/$USER/")

    # start spark nodes
    self.start_spark(cluster)

    # start zeppelin, if necessary
    if zep_instance:
        self._start_zeppelin(zep_instance)

    LOG.info(_LI('Cluster has been started successfully'))
    self._set_cluster_info(cluster)
Developer: crobby, Project: sahara, Lines: 27, Source: plugin.py
Example 3: _prepare_ranger
def _prepare_ranger(cluster):
    ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
    if not ranger:
        return
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    with ambari.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        sudo("yum install -y mysql-connector-java")
        sudo("ambari-server setup --jdbc-db=mysql "
             "--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
    init_db_template = (
        "create user 'root'@'%' identified by '{password}';\n"
        "set password for 'root'@'localhost' = password('{password}');")
    password = uuidutils.generate_uuid()
    extra = cluster.extra.to_dict() if cluster.extra else {}
    extra["ranger_db_password"] = password
    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"extra": extra})
    with ranger.remote() as r:
        sudo = functools.partial(r.execute_command, run_as_root=True)
        # TODO(sreshetnyak): add ubuntu support
        sudo("yum install -y mysql-server")
        sudo("service mysqld start")
        r.write_file_to("/tmp/init.sql",
                        init_db_template.format(password=password))
        sudo("mysql < /tmp/init.sql")
        sudo("rm /tmp/init.sql")
Developer: Imperat, Project: sahara, Lines: 27, Source: deploy.py
Example 4: test_get_instance
def test_get_instance(self):
    self.assertRaises(ex.InvalidComponentCountException,
                      pu.get_instance, self.cluster, None)

    res = pu.get_instance(self.cluster, "node_process")
    self.assertIsNone(res)

    res = pu.get_instance(self.cluster, "node_process1")
    self.assertEqual(FakeInstance("1"), res)
Developer: openstack, Project: sahara, Lines: 9, Source: test_utils.py
Example 5: _extract_configs_to_extra
def _extract_configs_to_extra(self, cluster):
    nn = utils.get_instance(cluster, "namenode")
    sp_master = utils.get_instance(cluster, "master")
    sp_slaves = utils.get_instances(cluster, "slave")

    extra = dict()

    config_master = config_slaves = ''
    if sp_master is not None:
        config_master = c_helper.generate_spark_env_configs(cluster)

    if sp_slaves is not None:
        slavenames = []
        for slave in sp_slaves:
            slavenames.append(slave.hostname())
        config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        config_slaves = "\n"

    # Any node that might be used to run spark-submit will need
    # these libs for swift integration
    config_defaults = c_helper.generate_spark_executor_classpath(cluster)

    extra['job_cleanup'] = c_helper.generate_job_cleanup_config(cluster)
    for ng in cluster.node_groups:
        extra[ng.id] = {
            'xml': c_helper.generate_xml_configs(
                ng.configuration(),
                ng.storage_paths(),
                nn.hostname(), None
            ),
            'setup_script': c_helper.generate_hadoop_setup_script(
                ng.storage_paths(),
                c_helper.extract_hadoop_environment_confs(
                    ng.configuration())
            ),
            'sp_master': config_master,
            'sp_slaves': config_slaves,
            'sp_defaults': config_defaults
        }
        if "zeppelin" in ng.node_processes:
            extra[ng.id].update({
                "zeppelin_setup_script":
                    c_helper.generate_zeppelin_setup_script(sp_master)})

    if c_helper.is_data_locality_enabled(cluster):
        topology_data = th.generate_topology_map(
            cluster, CONF.enable_hypervisor_awareness)
        extra['topology_data'] = "\n".join(
            [k + " " + v for k, v in topology_data.items()]) + "\n"

    return extra
Developer: crobby, Project: sahara, Lines: 53, Source: plugin.py
Example 6: scale_cluster
def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    self._start_slave_datanode_processes(instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(_LI("Spark master service at '%s' has been restarted"),
             master.hostname())
Developer: degorenko, Project: sahara, Lines: 14, Source: plugin.py
Example 7: scale_cluster
def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    dn_instances = [instance for instance in instances
                    if "datanode" in instance.node_group.node_processes]
    self._start_datanode_processes(dn_instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(_LI("Spark master service has been restarted"))
Developer: zhangjunli177, Project: sahara, Lines: 14, Source: plugin.py
Example 8: manage_host_components
def manage_host_components(cluster, instances):
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    requests_ids = []
    with ambari_client.AmbariClient(ambari, password=password) as client:
        clients = p_common.get_clients(cluster)
        for instance in instances:
            services = p_common.get_ambari_proc_list(instance.node_group)
            services.extend(clients)
            for service in services:
                client.add_service_to_host(instance, service)
                requests_ids.append(
                    client.start_service_on_host(
                        instance, service, 'INSTALLED'))
        client.wait_ambari_requests(requests_ids, cluster.name)

        # all services added and installed, let's start them
        requests_ids = []
        for instance in instances:
            services = p_common.get_ambari_proc_list(instance.node_group)
            services.extend(p_common.ALL_LIST)
            for service in services:
                requests_ids.append(
                    client.start_service_on_host(
                        instance, service, 'STARTED'))
        client.wait_ambari_requests(requests_ids, cluster.name)
Developer: gongwayne, Project: Openstack, Lines: 25, Source: deploy.py
Example 9: _extract_configs_to_extra
def _extract_configs_to_extra(self, cluster):
    st_master = utils.get_instance(cluster, "nimbus")
    zk_servers = utils.get_instances(cluster, "zookeeper")

    extra = dict()

    config_instances = ''
    if st_master is not None:
        if zk_servers is not None:
            zknames = []
            for zk in zk_servers:
                zknames.append(zk.hostname())

        config_instances = c_helper.generate_storm_config(
            st_master.hostname(),
            zknames)

    config = self._convert_dict_to_yaml(config_instances)
    supervisor_conf = c_helper.generate_slave_supervisor_conf()
    nimbus_ui_conf = c_helper.generate_master_supervisor_conf()
    zk_conf = c_helper.generate_zookeeper_conf()

    for ng in cluster.node_groups:
        extra[ng.id] = {
            'st_instances': config,
            'slave_sv_conf': supervisor_conf,
            'master_sv_conf': nimbus_ui_conf,
            'zk_conf': zk_conf
        }

    return extra
Developer: frgaudet, Project: sahara, Lines: 31, Source: plugin.py
Example 10: __init__
def __init__(self, cluster):
    super(EdpCdhSparkEngine, self).__init__(cluster)
    self.master = plugin_utils.get_instance(cluster, "CLOUDERA_MANAGER")
    self.plugin_params["spark-user"] = "sudo -u spark "
    self.plugin_params["spark-submit"] = "spark-submit"
    self.plugin_params["deploy-mode"] = "cluster"
    self.plugin_params["master"] = "yarn-cluster"
Developer: AlexanderYAPPO, Project: sahara, Lines: 7, Source: edp_engine_cdh.py
Example 11: start_cluster
def start_cluster(cluster):
    cl_tmpl = {
        "blueprint": cluster.name,
        "default_password": uuidutils.generate_uuid(),
        "host_groups": []
    }
    for ng in cluster.node_groups:
        for instance in ng.instances:
            cl_tmpl["host_groups"].append({
                "name": instance.instance_name,
                "hosts": [{"fqdn": instance.fqdn()}]
            })
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari, password=password) as client:
        req_id = client.create_cluster(cluster.name, cl_tmpl)["id"]
        while True:
            status = client.check_request_status(cluster.name, req_id)
            LOG.debug("Task %s in %s state. Completed %.1f%%" % (
                status["request_context"], status["request_status"],
                status["progress_percent"]))
            if status["request_status"] == "COMPLETED":
                return
            if status["request_status"] in ["IN_PROGRESS", "PENDING"]:
                context.sleep(5)
            else:
                raise p_exc.HadoopProvisionError(
                    _("Ambari request in %s state") % status["request_status"])
Developer: rogeryu27, Project: sahara, Lines: 28, Source: deploy.py
Example 12: setup_agents
def setup_agents(cluster, instances=None):
    LOG.debug("Set up Ambari agents")
    manager_address = plugin_utils.get_instance(
        cluster, p_common.AMBARI_SERVER).fqdn()
    if not instances:
        instances = plugin_utils.get_instances(cluster)
    _setup_agents(instances, manager_address)
Developer: Imperat, Project: sahara, Lines: 7, Source: deploy.py
Example 13: create_blueprint
def create_blueprint(cluster):
    _prepare_ranger(cluster)
    cluster = conductor.cluster_get(context.ctx(), cluster.id)
    host_groups = []
    for ng in cluster.node_groups:
        procs = p_common.get_ambari_proc_list(ng)
        procs.extend(p_common.get_clients(cluster))
        for instance in ng.instances:
            hg = {
                "name": instance.instance_name,
                "configurations": configs.get_instance_params(instance),
                "components": []
            }
            for proc in procs:
                hg["components"].append({"name": proc})
            host_groups.append(hg)
    bp = {
        "Blueprints": {
            "stack_name": "HDP",
            "stack_version": cluster.hadoop_version
        },
        "host_groups": host_groups,
        "configurations": configs.get_cluster_params(cluster)
    }
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari, password=password) as client:
        client.create_blueprint(cluster.name, bp)
Developer: rogeryu27, Project: sahara, Lines: 28, Source: deploy.py
Example 14: _get_job_status_from_remote
def _get_job_status_from_remote(self, job_execution, retries=3):
    topology_name, inst_id = self._get_instance_if_running(
        job_execution)
    if topology_name is None or inst_id is None:
        return edp.JOB_STATUSES_TERMINATED

    topology_name = self._get_topology_name(job_execution)
    master = plugin_utils.get_instance(self.cluster, "nimbus")

    cmd = (
        "%(storm)s -c nimbus.host=%(host)s "
        "list | grep %(topology_name)s | awk '{print $2}'") % (
        {
            "storm": "/usr/local/storm/bin/storm",
            "host": master.hostname(),
            "topology_name": topology_name
        })

    for i in range(retries):
        with remote.get_remote(master) as r:
            ret, stdout = r.execute_command("%s " % (cmd))

            # If the status is ACTIVE, the topology is still running
            if stdout.strip() == "ACTIVE":
                return {"status": edp.JOB_STATUS_RUNNING}
            else:
                if i == retries - 1:
                    return {"status": edp.JOB_STATUS_KILLED}
                context.sleep(10)
Developer: openstack, Project: sahara, Lines: 28, Source: engine.py
Example 15: get_alerts_data
def get_alerts_data(self, service=None):
    if self._data is not None:
        # return cached data
        return self._data.get(service, []) if service else self._data
    self._data = {}
    self._cluster_services = []
    try:
        ambari = plugin_utils.get_instance(
            self.cluster, p_common.AMBARI_SERVER)
        password = self.cluster.extra.get("ambari_password")
        with client.AmbariClient(ambari, password=password) as ambari:
            resp = ambari.get_alerts_data(self.cluster)
        for alert in resp:
            alert = alert.get('Alert', {})
            service = alert.get('service_name').lower()
            if service not in self._data:
                self._data[service] = []
                self._cluster_services.append(service)
            self._data[service].append(alert)
    except Exception as e:
        prefix = _("Can't get response from Ambari Monitor")
        msg = _("%(problem)s: %(description)s") % {
            'problem': prefix, 'description': six.text_type(e)}
        # don't put in exception to logs, it will be done by log.exception
        LOG.exception(prefix)
        self._exception_store = msg
Developer: frgaudet, Project: sahara, Lines: 26, Source: health.py
Example 16: generate_spark_env_configs
def generate_spark_env_configs(cluster):
    configs = []

    # master configuration
    sp_master = utils.get_instance(cluster, "master")
    configs.append('SPARK_MASTER_IP=' + sp_master.hostname())

    # point to the hadoop conf dir so that Spark can read things
    # like the swift configuration without having to copy core-site
    # to /opt/spark/conf
    configs.append('HADOOP_CONF_DIR=' + HADOOP_CONF_DIR)

    masterport = utils.get_config_value_or_default("Spark",
                                                   "Master port",
                                                   cluster)
    if masterport and masterport != _get_spark_opt_default("Master port"):
        configs.append('SPARK_MASTER_PORT=' + str(masterport))

    masterwebport = utils.get_config_value_or_default("Spark",
                                                      "Master webui port",
                                                      cluster)
    if (masterwebport and
            masterwebport != _get_spark_opt_default("Master webui port")):
        configs.append('SPARK_MASTER_WEBUI_PORT=' + str(masterwebport))

    # configuration for workers
    workercores = utils.get_config_value_or_default("Spark",
                                                    "Worker cores",
                                                    cluster)
    if workercores and workercores != _get_spark_opt_default("Worker cores"):
        configs.append('SPARK_WORKER_CORES=' + str(workercores))

    workermemory = utils.get_config_value_or_default("Spark",
                                                     "Worker memory",
                                                     cluster)
    if (workermemory and
            workermemory != _get_spark_opt_default("Worker memory")):
        configs.append('SPARK_WORKER_MEMORY=' + str(workermemory))

    workerport = utils.get_config_value_or_default("Spark",
                                                   "Worker port",
                                                   cluster)
    if workerport and workerport != _get_spark_opt_default("Worker port"):
        configs.append('SPARK_WORKER_PORT=' + str(workerport))

    workerwebport = utils.get_config_value_or_default("Spark",
                                                      "Worker webui port",
                                                      cluster)
    if (workerwebport and
            workerwebport != _get_spark_opt_default("Worker webui port")):
        configs.append('SPARK_WORKER_WEBUI_PORT=' + str(workerwebport))

    workerinstances = utils.get_config_value_or_default("Spark",
                                                        "Worker instances",
                                                        cluster)
    if (workerinstances and
            workerinstances != _get_spark_opt_default("Worker instances")):
        configs.append('SPARK_WORKER_INSTANCES=' + str(workerinstances))
    return '\n'.join(configs)
Developer: gongwayne, Project: Openstack, Lines: 59, Source: config_helper.py
Example 17: manage_config_groups
def manage_config_groups(cluster, instances):
    groups = []
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    for instance in instances:
        groups.extend(configs.get_config_group(instance))
    with ambari_client.AmbariClient(ambari, password=password) as client:
        client.create_config_group(cluster, groups)
Developer: gongwayne, Project: Openstack, Lines: 8, Source: deploy.py
Example 18: start_cluster
def start_cluster(cluster):
    ambari_template = _build_ambari_cluster_template(cluster)

    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari, password=password) as client:
        req_id = client.create_cluster(cluster.name, ambari_template)["id"]
        client.wait_ambari_request(req_id, cluster.name)
Developer: gongwayne, Project: Openstack, Lines: 8, Source: deploy.py
Example 19: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = utils.get_instance(cluster, "namenode")
    sp_master = utils.get_instance(cluster, "master")
    info = {}

    if nn:
        address = utils.get_config_value_or_default(
            "HDFS", "dfs.http.address", cluster)
        port = address[address.rfind(":") + 1:]
        info["HDFS"] = {"Web UI": "http://%s:%s" % (nn.management_ip, port)}
        info["HDFS"]["NameNode"] = "hdfs://%s:8020" % nn.hostname()

    if sp_master:
        port = utils.get_config_value_or_default(
            "Spark", "Master webui port", cluster)
        if port is not None:
            info["Spark"] = {
                "Web UI": "http://%s:%s" % (sp_master.management_ip, port)}

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
Developer: zhangjunli177, Project: sahara, Lines: 17, Source: plugin.py
Example 20: __init__
def __init__(self, cluster):
    super(EDPSparkEngine, self).__init__(cluster)
    # searching for spark instance
    self.master = plugin_utils.get_instance(
        cluster, p_common.SPARK_JOBHISTORYSERVER)
    self.plugin_params["spark-user"] = "sudo -u spark "
    self.plugin_params["spark-submit"] = "spark-submit"
    self.plugin_params["deploy-mode"] = "cluster"
    self.plugin_params["master"] = "yarn-cluster"
Developer: egafford, Project: sahara, Lines: 9, Source: edp_engine.py
Note: the sahara.plugins.utils.get_instance examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before distributing or reusing the code; do not reproduce without permission.