
Python utils.get_instances Function Code Examples


This article collects and summarizes typical usage examples of the Python function sahara.plugins.general.utils.get_instances. If you have been struggling with questions such as what get_instances does, how to call it, or where to find working examples, the curated samples below should help.



The following presents 20 code examples of the get_instances function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python samples.
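Before the examples, here is a minimal sketch of what get_instances is assumed to do, inferred from the assertions in Example 1 below: called with only a cluster it returns every instance, and called with a process name it returns only the instances of node groups that run that process. The real implementation lives in sahara.plugins.general.utils and may differ in detail.

def get_instances(cluster, node_process=None):
    """Return all instances of the cluster, or only the instances of
    node groups that run the given process (e.g. 'namenode', 'dn').

    A sketch inferred from the tests below, not the actual sahara code.
    """
    instances = []
    for node_group in cluster.node_groups:
        if node_process is None or node_process in node_group.node_processes:
            instances.extend(node_group.instances)
    return instances

With this reading, u.get_instances(self.c1) returns every instance in the cluster (five in the test fixture), while u.get_instances(self.c1, 'dn') returns only the instances of node groups that include the 'dn' process — exactly what Example 1 asserts.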

Example 1: test_get_instances

 def test_get_instances(self):
     self.assertEqual(len(u.get_instances(self.c1)), 5)
     self.assertEqual(u.get_instances(self.c1, 'wrong-process'), [])
     self.assertEqual(u.get_instances(self.c1, 'nn'),
                      self.ng1.instances)
     instances = list(self.ng2.instances)
     instances += self.ng3.instances
     self.assertEqual(u.get_instances(self.c1, 'dn'), instances)
Developer: B-Rich | Project: sahara | Lines: 8 | Source: test_utils.py


Example 2: _validate_existing_ng_scaling

    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and ("datanode" in ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name,
                        _("Spark plugin cannot scale nodegroup" " with processes: %s") % " ".join(ng.node_processes),
                    )

        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = c_helper.get_config_value("HDFS", "dfs.replication", cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name,
                _(
                    "Spark plugin cannot shrink cluster because "
                    "there would be not enough nodes for HDFS "
                    "replicas (replication factor is %s)"
                )
                % rep_factor,
            )
Developer: hao707822882 | Project: sahara | Lines: 26 | Source: plugin.py


Example 3: _clear_exclude_files

def _clear_exclude_files(cluster):
    for instance in u.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo > %s/dn-exclude" hadoop' % HADOOP_CONF_DIR)
            r.execute_command(
                'sudo su - -c "echo > %s/nm-exclude" hadoop' % HADOOP_CONF_DIR)
Developer: JohannaMW | Project: sahara | Lines: 7 | Source: scaling.py


Example 4: _setup_instances

    def _setup_instances(self, cluster, instances=None):
        extra = self._extract_configs_to_extra(cluster)

        if instances is None:
            instances = utils.get_instances(cluster)

        self._push_configs_to_nodes(cluster, extra, instances)
Developer: JohannaMW | Project: sahara | Lines: 7 | Source: plugin.py


Example 5: start_cluster

    def start_cluster(self, cluster):
        nn = vu.get_namenode(cluster)
        run.format_namenode(nn)
        run.start_hadoop_process(nn, 'namenode')

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_hadoop_process(snn, 'secondarynamenode')

        rm = vu.get_resourcemanager(cluster)
        if rm:
            run.start_yarn_process(rm, 'resourcemanager')

        run.start_all_processes(utils.get_instances(cluster),
                                ['datanode', 'nodemanager'])

        run.await_datanodes(cluster)

        hs = vu.get_historyserver(cluster)
        if hs:
            run.start_historyserver(hs)

        oo = vu.get_oozie(cluster)
        if oo:
            run.start_oozie_process(self.pctx, oo)

        self._set_cluster_info(cluster)
Developer: stannie42 | Project: sahara | Lines: 26 | Source: versionhandler.py


Example 6: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        sm_instance = utils.get_instance(cluster, "master")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        # start the data nodes
        self._start_slave_datanode_processes(dn_instances)

        LOG.info("Hadoop services in cluster %s have been started" %
                 cluster.name)

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command(("sudo -u hdfs hdfs dfs -chown $USER "
                               "/user/$USER/"))

        # start spark nodes
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_spark_master(r, self._spark_home(cluster))
                LOG.info("Spark service at '%s' has been started",
                         sm_instance.hostname())

        LOG.info('Cluster %s has been started successfully' % cluster.name)
        self._set_cluster_info(cluster)
Developer: JohannaMW | Project: sahara | Lines: 30 | Source: plugin.py


Example 7: _set_cluster_info

    def _set_cluster_info(self, cluster):
        mng = u.get_instances(cluster, 'manager')[0]
        nn = u.get_namenode(cluster)
        jt = u.get_jobtracker(cluster)
        oozie = u.get_oozie(cluster)

        #TODO(alazarev) make port configurable (bug #1262895)
        info = {'IDH Manager': {
            'Web UI': 'https://%s:9443' % mng.management_ip
        }}

        if jt:
            #TODO(alazarev) make port configurable (bug #1262895)
            info['MapReduce'] = {
                'Web UI': 'http://%s:50030' % jt.management_ip
            }
            #TODO(alazarev) make port configurable (bug #1262895)
            info['MapReduce']['JobTracker'] = '%s:54311' % jt.hostname()
        if nn:
            #TODO(alazarev) make port configurable (bug #1262895)
            info['HDFS'] = {
                'Web UI': 'http://%s:50070' % nn.management_ip
            }
            #TODO(alazarev) make port configurable (bug #1262895)
            info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()

        if oozie:
            #TODO(alazarev) make port configurable (bug #1262895)
            info['JobFlow'] = {
                'Oozie': 'http://%s:11000' % oozie.management_ip
            }

        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
Developer: qinweiwei | Project: sahara | Lines: 34 | Source: versionhandler.py


Example 8: _extract_configs_to_extra

    def _extract_configs_to_extra(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        sp_slaves = utils.get_instances(cluster, "slave")

        extra = dict()

        config_master = config_slaves = ""
        if sp_master is not None:
            config_master = c_helper.generate_spark_env_configs(cluster)

        if sp_slaves is not None:
            slavenames = []
            for slave in sp_slaves:
                slavenames.append(slave.hostname())
            config_slaves = c_helper.generate_spark_slaves_configs(slavenames)
        else:
            config_slaves = "\n"

        for ng in cluster.node_groups:
            extra[ng.id] = {
                "xml": c_helper.generate_xml_configs(ng.configuration(), ng.storage_paths(), nn.hostname(), None),
                "setup_script": c_helper.generate_hadoop_setup_script(
                    ng.storage_paths(), c_helper.extract_hadoop_environment_confs(ng.configuration())
                ),
                "sp_master": config_master,
                "sp_slaves": config_slaves,
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(cluster, CONF.enable_hypervisor_awareness)
            extra["topology_data"] = "\n".join([k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
Developer: hao707822882 | Project: sahara | Lines: 34 | Source: plugin.py


Example 9: install_cluster

def install_cluster(cluster):
    mng_instance = u.get_instance(cluster, 'manager')

    all_hosts = list(set([i.fqdn() for i in u.get_instances(cluster)]))

    client = c.IntelClient(mng_instance, cluster.name)

    LOG.info("Create cluster")
    client.cluster.create()

    LOG.info("Add nodes to cluster")
    rack = '/Default'
    client.nodes.add(all_hosts, rack, 'hadoop',
                     '/home/hadoop/.ssh/id_rsa')

    LOG.info("Install software")
    client.cluster.install_software(all_hosts)

    LOG.info("Configure services")
    _configure_services(client, cluster)

    LOG.info("Deploy cluster")
    client.nodes.config(force=True)

    LOG.info("Provisioning configs")
    # cinder and ephemeral drive support
    _configure_storage(client, cluster)
    # swift support
    _configure_swift(client, cluster)
    # user configs
    _add_user_params(client, cluster)

    LOG.info("Format HDFS")
    client.services.hdfs.format()
Developer: qinweiwei | Project: sahara | Lines: 34 | Source: installer.py


Example 10: _push_configs_to_nodes

 def _push_configs_to_nodes(self, cluster, extra, new_instances):
     all_instances = utils.get_instances(cluster)
     with context.ThreadGroup() as tg:
         for instance in all_instances:
             if instance in new_instances:
                 tg.spawn('spark-configure-%s' % instance.instance_name,
                          self._push_configs_to_new_node, cluster,
                          extra, instance)
Developer: phamtruong91 | Project: sahara | Lines: 8 | Source: plugin.py


Example 11: _get_cluster_hosts_information

def _get_cluster_hosts_information(host, cluster):
    for clust in conductor.cluster_get_all(context.ctx()):
        if clust.id == cluster.id:
            continue

        for i in u.get_instances(clust):
            if i.instance_name == host:
                return g.generate_etc_hosts(clust)

    return None
Developer: stannie42 | Project: sahara | Lines: 10 | Source: hdfs_helper.py


Example 12: decommission_nodes

    def decommission_nodes(self, cluster, instances):
        sls = utils.get_instances(cluster, "slave")
        dns = utils.get_instances(cluster, "datanode")
        decommission_dns = False
        decommission_sls = False

        for i in instances:
            if 'datanode' in i.node_group.node_processes:
                dns.remove(i)
                decommission_dns = True
            if 'slave' in i.node_group.node_processes:
                sls.remove(i)
                decommission_sls = True

        nn = utils.get_instance(cluster, "namenode")
        spark_master = utils.get_instance(cluster, "master")

        if decommission_sls:
            sc.decommission_sl(spark_master, instances, sls)
        if decommission_dns:
            sc.decommission_dn(nn, instances, dns)
Developer: JohannaMW | Project: sahara | Lines: 21 | Source: plugin.py


Example 13: _update_exclude_files

def _update_exclude_files(cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    dn_hosts = u.generate_fqdn_host_names(datanodes)
    nm_hosts = u.generate_fqdn_host_names(nodemanagers)
    for instance in u.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/dn-exclude" hadoop' % (
                    dn_hosts, HADOOP_CONF_DIR))
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/nm-exclude" hadoop' % (
                    nm_hosts, HADOOP_CONF_DIR))
Developer: JohannaMW | Project: sahara | Lines: 13 | Source: scaling.py


Example 14: _push_configs_to_nodes

 def _push_configs_to_nodes(self, cluster, extra, new_instances):
     all_instances = utils.get_instances(cluster)
     new_ids = set([instance.id for instance in new_instances])
     with context.ThreadGroup() as tg:
         for instance in all_instances:
             if instance.id in new_ids:
                 tg.spawn('vanilla-configure-%s' % instance.instance_name,
                          self._push_configs_to_new_node, cluster,
                          extra, instance)
             else:
                 tg.spawn('vanilla-reconfigure-%s' % instance.instance_name,
                          self._push_configs_to_existing_node, cluster,
                          extra, instance)
Developer: viplav | Project: sahara | Lines: 13 | Source: versionhandler.py


Example 15: _extract_configs_to_extra

    def _extract_configs_to_extra(self, cluster):
        st_master = utils.get_instance(cluster, "master")
        st_slaves = utils.get_instances(cluster, "slave")
        zk_servers = utils.get_instances(cluster, "zookeeper")

        extra = dict()

        # Initialize both values so the extra dict below never references
        # an undefined name, even when there is no master node group.
        config_master = config_slaves = ''
        if st_master is not None:
            zknames = []
            if zk_servers is not None:
                for zk in zk_servers:
                    zknames.append(zk.hostname())

            config_master = c_helper.generate_storm_config(cluster,
                                st_master.hostname(),
                                zknames)

        # FIGURE OUT HOW TO GET IPS
        for ng in cluster.node_groups:
            extra[ng.id] = {
                'setup_script': c_helper.generate_hosts_setup_script(
                    ng.storage_paths(),
                    c_helper.extract_hadoop_environment_confs(
                        ng.configuration())
                ),
                'sp_master': config_master,
                'sp_slaves': config_slaves
            }

        if c_helper.is_data_locality_enabled(cluster):
            topology_data = th.generate_topology_map(
                cluster, CONF.enable_hypervisor_awareness)
            extra['topology_data'] = "\n".join(
                [k + " " + v for k, v in topology_data.items()]) + "\n"

        return extra
Developer: tellesnobrega | Project: storm_plugin | Lines: 37 | Source: plugin.py


Example 16: _update_include_files

def _update_include_files(cluster):
    instances = u.get_instances(cluster)

    datanodes = vu.get_datanodes(cluster)
    nodemanagers = vu.get_nodemanagers(cluster)
    dn_hosts = u.generate_fqdn_host_names(datanodes)
    nm_hosts = u.generate_fqdn_host_names(nodemanagers)
    for instance in instances:
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/dn-include" hadoop' % (
                    dn_hosts, HADOOP_CONF_DIR))
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/nm-include" hadoop' % (
                    nm_hosts, HADOOP_CONF_DIR))
Developer: JohannaMW | Project: sahara | Lines: 15 | Source: scaling.py


Example 17: start_cluster

    def start_cluster(self, cluster):
        sm_instance = utils.get_instance(cluster, "master")
        sl_instances = utils.get_instances(cluster, "slave")

        # start storm master
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_storm_master(r)
                LOG.info("Storm master at '%s' has been started",
                         sm_instance.hostname())

        # start storm slaves
        self._start_slave_processes(sl_instances)

        LOG.info('Cluster %s has been started successfully' % cluster.name)
        self._set_cluster_info(cluster)
Developer: tellesnobrega | Project: storm_plugin | Lines: 16 | Source: plugin.py


Example 18: configure_cluster

def configure_cluster(cluster):
    instances = gu.get_instances(cluster)

    if not cmd.is_pre_installed_cdh(pu.get_manager(cluster).remote()):
        _configure_os(instances)
        _install_packages(instances, PACKAGES)

    _start_cloudera_agents(instances)
    _start_cloudera_manager(cluster)
    _await_agents(instances)
    _configure_manager(cluster)
    _create_services(cluster)
    _configure_services(cluster)
    _configure_instances(instances)
    cu.deploy_configs(cluster)
    if c_helper.is_swift_enabled(cluster):
        _configure_swift(instances)
Developer: viplav | Project: sahara | Lines: 17 | Source: deploy.py


Example 19: configure_cluster_for_hdfs

def configure_cluster_for_hdfs(cluster, data_source):
    host = urlparse.urlparse(data_source.url).hostname

    etc_hosts_information = _get_cluster_hosts_information(host, cluster)
    if etc_hosts_information is None:
        # Ip address hasn't been resolved, the last chance is for VM itself
        return

    create_etc_host = 'sudo "cat /tmp/etc-hosts-update '
    create_etc_host += '/etc/hosts > /tmp/etc-hosts"'
    copy_etc_host = 'sudo "cat /tmp/etc-hosts > /etc/hosts"'

    for inst in u.get_instances(cluster):
        with inst.remote() as r:
            r.write_file_to("/tmp/etc-hosts-update", etc_hosts_information)
            r.execute_command(create_etc_host)
            r.execute_command(copy_etc_host)
Developer: hongbin | Project: sahara | Lines: 17 | Source: hdfs_helper.py


Example 20: configure_cluster_for_hdfs

def configure_cluster_for_hdfs(cluster, data_source):
    host = urlparse.urlparse(data_source.url).hostname

    etc_hosts_information = _get_cluster_hosts_information(host, cluster)
    if etc_hosts_information is None:
        # Ip address hasn't been resolved, the last chance is for VM itself
        return

    update_etc_hosts_cmd = (
        'cat /tmp/etc-hosts-update /etc/hosts | '
        'sort | uniq > /tmp/etc-hosts && '
        'cat /tmp/etc-hosts > /etc/hosts && '
        'rm -f /tmp/etc-hosts /tmp/etc-hosts-update')

    for inst in u.get_instances(cluster):
        with inst.remote() as r:
            r.write_file_to('/tmp/etc-hosts-update', etc_hosts_information)
            r.execute_command(update_etc_hosts_cmd, run_as_root=True)
Developer: JohannaMW | Project: sahara | Lines: 18 | Source: hdfs_helper.py



Note: The sahara.plugins.general.utils.get_instances examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please consult the corresponding project's license; do not reproduce without permission.

