
Python utils.get_config_value_or_default Function Code Examples


This article collects typical usage examples of the Python function sahara.plugins.utils.get_config_value_or_default. If you are wondering what get_config_value_or_default does, how to call it, or what real-world uses look like, the curated examples below should help.



The following shows 20 code examples of get_config_value_or_default, sorted by popularity by default.
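Before looking at the individual examples, here is a minimal sketch of the call pattern they all share: the first two arguments name the plugin/service section (e.g. "Spark") and the config key (e.g. "Master port"), the third is the cluster object, and the function returns the value configured on the cluster or the plugin's default if none is set. The helper below and its names are illustrative only, distilled from the examples that follow, not a definitive reference for the sahara API.

from sahara.plugins import utils


def get_spark_master_port(cluster):
    # Hypothetical helper: read the "Master port" value configured for the
    # "Spark" section of this cluster, falling back to the plugin default
    # when the cluster does not override it.
    port = utils.get_config_value_or_default("Spark", "Master port", cluster)
    return int(port)

Examples 13-19 also show a keyword form, get_config_value_or_default(cluster=cluster, config=some_config), where a config object is passed directly instead of a service/name pair.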

Example 1: get_open_ports

    def get_open_ports(self, node_group):
        cluster = node_group.cluster
        ports_map = {
            'namenode': [8020, 50070, 50470],
            'datanode': [50010, 1004, 50075, 1006, 50020],
            'master': [
                int(utils.get_config_value_or_default("Spark", "Master port",
                                                      cluster)),
                int(utils.get_config_value_or_default("Spark",
                                                      "Master webui port",
                                                      cluster)),
            ],
            'slave': [
                int(utils.get_config_value_or_default("Spark",
                                                      "Worker webui port",
                                                      cluster))
            ],
            'zeppelin': [int(utils.get_config_value_or_default("Zeppelin",
                                                      "Web UI port",
                                                      cluster))]
        }

        ports = []
        for process in node_group.node_processes:
            if process in ports_map:
                ports.extend(ports_map[process])

        return ports
Developer: crobby | Project: sahara | Lines: 28 | Source: plugin.py


Example 2: _validate_existing_ng_scaling

    def _validate_existing_ng_scaling(self, cluster, existing):
        scalable_processes = self._get_scalable_processes()
        dn_to_delete = 0
        for ng in cluster.node_groups:
            if ng.id in existing:
                if ng.count > existing[ng.id] and ("datanode" in
                                                   ng.node_processes):
                    dn_to_delete += ng.count - existing[ng.id]
                if not set(ng.node_processes).issubset(scalable_processes):
                    raise ex.NodeGroupCannotBeScaled(
                        ng.name, _("Spark plugin cannot scale nodegroup"
                                   " with processes: %s") %
                        ' '.join(ng.node_processes))

        dn_amount = len(utils.get_instances(cluster, "datanode"))
        rep_factor = utils.get_config_value_or_default('HDFS',
                                                       "dfs.replication",
                                                       cluster)

        if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
            raise ex.ClusterCannotBeScaled(
                cluster.name, _("Spark plugin cannot shrink cluster because "
                                "there would be not enough nodes for HDFS "
                                "replicas (replication factor is %s)") %
                rep_factor)
Developer: crobby | Project: sahara | Lines: 25 | Source: plugin.py


Example 3: generate_spark_executor_classpath

def generate_spark_executor_classpath(cluster):
    cp = utils.get_config_value_or_default("Spark",
                                           "Executor extra classpath",
                                           cluster)
    if cp:
        return "spark.executor.extraClassPath " + cp
    return "\n"
Developer: gongwayne | Project: Openstack | Lines: 7 | Source: config_helper.py


Example 4: validate

    def validate(self, cluster):
        if cluster.hadoop_version == "1.0.0":
            raise exceptions.DeprecatedException(
                _("Support for Spark version 1.0.0 is now deprecated and will" " be removed in the 2016.1 release.")
            )

        nn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "namenode")])
        if nn_count != 1:
            raise ex.InvalidComponentCountException("namenode", 1, nn_count)

        dn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "datanode")])
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"), nn_count)

        rep_factor = utils.get_config_value_or_default("HDFS", "dfs.replication", cluster)
        if dn_count < rep_factor:
            raise ex.InvalidComponentCountException(
                "datanode",
                _("%s or more") % rep_factor,
                dn_count,
                _("Number of %(dn)s instances should not be less " "than %(replication)s")
                % {"dn": "datanode", "replication": "dfs.replication"},
            )

        # validate Spark Master Node and Spark Slaves
        sm_count = sum([ng.count for ng in utils.get_node_groups(cluster, "master")])

        if sm_count != 1:
            raise ex.RequiredServiceMissingException("Spark master")

        sl_count = sum([ng.count for ng in utils.get_node_groups(cluster, "slave")])

        if sl_count < 1:
            raise ex.InvalidComponentCountException("Spark slave", _("1 or more"), sl_count)
Developer: zhangjunli177 | Project: sahara | Lines: 34 | Source: plugin.py


Example 5: _set_cluster_info

    def _set_cluster_info(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        info = {}

        if nn:
            address = utils.get_config_value_or_default("HDFS", "dfs.http.address", cluster)
            port = address[address.rfind(":") + 1 :]
            info["HDFS"] = {"Web UI": "http://%s:%s" % (nn.management_ip, port)}
            info["HDFS"]["NameNode"] = "hdfs://%s:8020" % nn.hostname()

        if sp_master:
            port = utils.get_config_value_or_default("Spark", "Master webui port", cluster)
            if port is not None:
                info["Spark"] = {"Web UI": "http://%s:%s" % (sp_master.management_ip, port)}
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {"info": info})
Developer: zhangjunli177 | Project: sahara | Lines: 17 | Source: plugin.py


Example 6: __init__

    def __init__(self, cluster):
        super(EdpSparkEngine, self).__init__(cluster)
        self.master = u.get_instance(cluster, "SPARK_YARN_HISTORY_SERVER")
        self.plugin_params["spark-user"] = "sudo -u spark "
        self.plugin_params["spark-submit"] = "spark-submit"
        self.plugin_params["deploy-mode"] = "cluster"
        self.plugin_params["master"] = "yarn-cluster"
        driver_cp = u.get_config_value_or_default("Spark", "Executor extra classpath", self.cluster)
        self.plugin_params["driver-class-path"] = driver_cp
Developer: uladz | Project: sahara | Lines: 9 | Source: edp_engine.py


Example 7: generate_spark_env_configs

def generate_spark_env_configs(cluster):
    configs = []

    # master configuration
    sp_master = utils.get_instance(cluster, "master")
    configs.append('SPARK_MASTER_IP=' + sp_master.hostname())

    # point to the hadoop conf dir so that Spark can read things
    # like the swift configuration without having to copy core-site
    # to /opt/spark/conf
    configs.append('HADOOP_CONF_DIR=' + HADOOP_CONF_DIR)

    masterport = utils.get_config_value_or_default("Spark",
                                                   "Master port",
                                                   cluster)
    if masterport and masterport != _get_spark_opt_default("Master port"):
        configs.append('SPARK_MASTER_PORT=' + str(masterport))

    masterwebport = utils.get_config_value_or_default("Spark",
                                                      "Master webui port",
                                                      cluster)
    if (masterwebport and
            masterwebport != _get_spark_opt_default("Master webui port")):
        configs.append('SPARK_MASTER_WEBUI_PORT=' + str(masterwebport))

    # configuration for workers
    workercores = utils.get_config_value_or_default("Spark",
                                                    "Worker cores",
                                                    cluster)
    if workercores and workercores != _get_spark_opt_default("Worker cores"):
        configs.append('SPARK_WORKER_CORES=' + str(workercores))

    workermemory = utils.get_config_value_or_default("Spark",
                                                     "Worker memory",
                                                     cluster)
    if (workermemory and
            workermemory != _get_spark_opt_default("Worker memory")):
        configs.append('SPARK_WORKER_MEMORY=' + str(workermemory))

    workerport = utils.get_config_value_or_default("Spark",
                                                   "Worker port",
                                                   cluster)
    if workerport and workerport != _get_spark_opt_default("Worker port"):
        configs.append('SPARK_WORKER_PORT=' + str(workerport))

    workerwebport = utils.get_config_value_or_default("Spark",
                                                      "Worker webui port",
                                                      cluster)
    if (workerwebport and
            workerwebport != _get_spark_opt_default("Worker webui port")):
        configs.append('SPARK_WORKER_WEBUI_PORT=' + str(workerwebport))

    workerinstances = utils.get_config_value_or_default("Spark",
                                                        "Worker instances",
                                                        cluster)
    if (workerinstances and
            workerinstances != _get_spark_opt_default("Worker instances")):
        configs.append('SPARK_WORKER_INSTANCES=' + str(workerinstances))
    return '\n'.join(configs)
Developer: gongwayne | Project: Openstack | Lines: 59 | Source: config_helper.py


Example 8: generate_job_cleanup_config

def generate_job_cleanup_config(cluster):
    args = {
        'minimum_cleanup_megabytes': utils.get_config_value_or_default(
            "Spark", "Minimum cleanup megabytes", cluster),
        'minimum_cleanup_seconds': utils.get_config_value_or_default(
            "Spark", "Minimum cleanup seconds", cluster),
        'maximum_cleanup_seconds': utils.get_config_value_or_default(
            "Spark", "Maximum cleanup seconds", cluster)
    }
    job_conf = {'valid': (args['maximum_cleanup_seconds'] > 0 and
                          (args['minimum_cleanup_megabytes'] > 0
                           and args['minimum_cleanup_seconds'] > 0))}
    if job_conf['valid']:
        job_conf['cron'] = f.get_file_text(
            'plugins/spark/resources/spark-cleanup.cron')
        job_cleanup_script = f.get_file_text(
            'plugins/spark/resources/tmp-cleanup.sh.template')
        job_conf['script'] = job_cleanup_script.format(**args)
    return job_conf
Developer: gongwayne | Project: Openstack | Lines: 19 | Source: config_helper.py


Example 9: generate_zeppelin_setup_script

def generate_zeppelin_setup_script(sp_master):
    script_lines = ["#!/bin/bash -x"]
    script_lines.append(
        "echo 'export MASTER=spark://{0}:{1}' "
        ">> /opt/incubator-zeppelin/conf/zeppelin-env.sh".format(
            sp_master['instance_name'],
            utils.get_config_value_or_default(
                "Spark", "Master port", sp_master.node_group.cluster)))
    script_lines.append("echo 'export SPARK_HOME=/opt/spark' >> "
                        "/opt/incubator-zeppelin/conf/zeppelin-env.sh")
    script_lines.append("echo 'export PYTHONPATH=$SPARK_HOME/python:"
                        "$SPARK_HOME/python/lib/py4j-0.8.2.1-src.zip:"
                        "$PYTHONPATH' >> "
                        "/opt/incubator-zeppelin/conf/zeppelin-env.sh")
    script_lines.append("sed -i 's|<value>8080</value>|<value>{0}</value>|g'"
                        " /opt/incubator-zeppelin/conf/zeppelin-site.xml"
                        .format(utils.get_config_value_or_default(
                            "Zeppelin",
                            "Web UI port",
                            sp_master.node_group.cluster)))
    return "\n".join(script_lines)
Developer: crobby | Project: sahara | Lines: 21 | Source: config_helper.py


Example 10: __init__

    def __init__(self, cluster):
        super(EdpSparkEngine, self).__init__(cluster)
        self.master = u.get_instance(cluster, "CLOUDERA_MANAGER")
        self.plugin_params["spark-user"] = "sudo -u spark "
        self.plugin_params["spark-submit"] = "spark-submit"
        self.plugin_params["deploy-mode"] = "cluster"
        self.plugin_params["master"] = "yarn-cluster"
        driver_cp = u.get_config_value_or_default(
            "Spark", "Executor extra classpath", self.cluster)
        if driver_cp:
            driver_cp = " --driver-class-path " + driver_cp
        self.plugin_params["driver-class-path"] = driver_cp
Developer: rsaha | Project: sahara | Lines: 12 | Source: edp_engine.py


Example 11: _set_cluster_info

    def _set_cluster_info(self, cluster):
        nn = utils.get_instance(cluster, "namenode")
        sp_master = utils.get_instance(cluster, "master")
        info = {}

        if nn:
            address = utils.get_config_value_or_default(
                'HDFS', 'dfs.http.address', cluster)
            port = address[address.rfind(':') + 1:]
            info['HDFS'] = {
                'Web UI': 'http://%s:%s' % (nn.management_ip, port)
            }
            info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()

        if sp_master:
            port = utils.get_config_value_or_default(
                'Spark', 'Master webui port', cluster)
            if port is not None:
                info['Spark'] = {
                    'Web UI': 'http://%s:%s' % (sp_master.management_ip, port)
                }
        ctx = context.ctx()
        conductor.cluster_update(ctx, cluster, {'info': info})
Developer: crobby | Project: sahara | Lines: 23 | Source: plugin.py


Example 12: decommission_sl

def decommission_sl(master, inst_to_be_deleted, survived_inst):
    if survived_inst is not None:
        slavenames = []
        for slave in survived_inst:
            slavenames.append(slave.hostname())
        slaves_content = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        slaves_content = "\n"

    cluster = master.cluster
    sp_home = utils.get_config_value_or_default("Spark", "Spark home", cluster)
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, sp_home)

    # write new slave file to master
    files = {os.path.join(sp_home, 'conf/slaves'): slaves_content}
    r_master.write_files_to(files)

    # write new slaves file to each survived slave as well
    for i in survived_inst:
        with remote.get_remote(i) as r:
            r.write_files_to(files)

    run.start_spark_master(r_master, sp_home)
Developer: Imperat | Project: sahara | Lines: 24 | Source: scaling.py


Example 13: is_kerberos_security_enabled

def is_kerberos_security_enabled(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=enable_kerberos)
Developer: openstack | Project: sahara | Lines: 3 | Source: kerberos.py


Example 14: get_admin_password

def get_admin_password(cluster):
    # TODO(vgridnev): support in follow-up improved secret storage for
    # configs
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=admin_password)
Developer: openstack | Project: sahara | Lines: 5 | Source: kerberos.py


Example 15: get_policy_url

def get_policy_url(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=policy_url)
Developer: openstack | Project: sahara | Lines: 3 | Source: kerberos.py


Example 16: get_realm_name

def get_realm_name(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=realm_name)
Developer: openstack | Project: sahara | Lines: 3 | Source: kerberos.py


Example 17: get_admin_principal

def get_admin_principal(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=admin_principal)
Developer: openstack | Project: sahara | Lines: 3 | Source: kerberos.py


Example 18: get_kdc_server_ip

def get_kdc_server_ip(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=kdc_server_ip)
Developer: openstack | Project: sahara | Lines: 3 | Source: kerberos.py


Example 19: using_existing_kdc

def using_existing_kdc(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=use_existing_kdc)
Developer: openstack | Project: sahara | Lines: 3 | Source: kerberos.py


Example 20: get_port_from_config

def get_port_from_config(service, name, cluster=None):
    address = utils.get_config_value_or_default(service, name, cluster)
    return utils.get_port_from_address(address)
Developer: gongwayne | Project: Openstack | Lines: 3 | Source: config_helper.py



Note: the sahara.plugins.utils.get_config_value_or_default examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.

