
Python i18n._LI Function Code Examples


This article collects typical usage examples of the _LI function from the Python module sahara.i18n. If you have been wondering what exactly _LI does, how to use it, or where to find real-world examples of it, the curated code examples below should help.



Below are 20 code examples of the _LI function, sorted by popularity by default.
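For context, it helps to know where _LI comes from. In OpenStack projects of this era, log-translation markers such as _LI were conventionally defined in the project's i18n module via oslo_i18n.TranslatorFactory. The sketch below follows that common convention; it is an assumption about what sahara/i18n.py looks like, not a verbatim copy, and the exact file varies by Sahara release:

# sahara/i18n.py -- a minimal sketch following the common oslo_i18n pattern
# (assumed layout; the real file may differ between Sahara releases).
import oslo_i18n

# One factory per project; the domain names the message catalog.
_translators = oslo_i18n.TranslatorFactory(domain='sahara')

# Primary translation function, for user-facing messages (used as _()).
_ = _translators.primary

# Log-level-specific markers, one per log level.
_LI = _translators.log_info      # messages passed to LOG.info
_LW = _translators.log_warning   # messages passed to LOG.warning
_LE = _translators.log_error     # messages passed to LOG.error
_LC = _translators.log_critical  # messages passed to LOG.critical

Note how most of the examples below defer interpolation to the logger, e.g. LOG.info(_LI("Cluster '%s': ..."), cluster.id): _LI marks and lazily translates the template string, and passing the parameters separately keeps the untranslated template as the message-catalog key.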

Example 1: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        sm_instance = utils.get_instance(cluster, "master")
        dn_instances = utils.get_instances(cluster, "datanode")

        # Start the name node
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        # start the data nodes
        self._start_slave_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services in cluster %s have been started"), cluster.name)

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command(("sudo -u hdfs hdfs dfs -chown $USER " "/user/$USER/"))

        # start spark nodes
        if sm_instance:
            with remote.get_remote(sm_instance) as r:
                run.start_spark_master(r, self._spark_home(cluster))
                LOG.info(_LI("Spark service at '%s' has been started"), sm_instance.hostname())

        LOG.info(_LI("Cluster %s has been started successfully"), cluster.name)
        self._set_cluster_info(cluster)
Developer: hao707822882, Project: sahara, Lines: 27, Source: plugin.py


Example 2: wait_for_host_registrations

    def wait_for_host_registrations(self, num_hosts, ambari_info):
        LOG.info(
            _LI('Waiting for all Ambari agents to register with server ...'))

        url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
        result = None
        json_result = None

        # TODO(jspeidel): timeout
        while result is None or len(json_result['items']) < num_hosts:
            context.sleep(5)
            try:
                result = self._get(url, ambari_info)
                json_result = json.loads(result.text)

                LOG.info(_LI('Registered Hosts: %(current_number)s of '
                             '%(final_number)s'),
                         {'current_number': len(json_result['items']),
                          'final_number': num_hosts})
                for hosts in json_result['items']:
                    LOG.debug('Registered Host: {0}'.format(
                        hosts['Hosts']['host_name']))
            except Exception:
                # TODO(jspeidel): max wait time
                LOG.info(_LI('Waiting to connect to ambari server ...'))
Developer: degorenko, Project: sahara, Lines: 25, Source: versionhandler.py


Example 3: start_cluster

    def start_cluster(self, cluster):
        nn_instance = utils.get_instance(cluster, "namenode")
        dn_instances = utils.get_instances(cluster, "datanode")
        zep_instance = utils.get_instance(cluster, "zeppelin")

        # Start the name node
        self._start_namenode(nn_instance)

        # start the data nodes
        self._start_datanode_processes(dn_instances)

        LOG.info(_LI("Hadoop services have been started"))

        with remote.get_remote(nn_instance) as r:
            r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
            r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                              "/user/$USER/")

        # start spark nodes
        self.start_spark(cluster)

        # start zeppelin, if necessary
        if zep_instance:
            self._start_zeppelin(zep_instance)

        LOG.info(_LI('Cluster has been started successfully'))
        self._set_cluster_info(cluster)
Developer: crobby, Project: sahara, Lines: 27, Source: plugin.py


Example 4: start_services

    def start_services(self, cluster_name, cluster_spec, ambari_info):
        start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
                     'state=INSTALLED'.format(
                         ambari_info.get_address(), cluster_name))
        body = ('{"RequestInfo" : { "context" : "Start all services" },'
                '"Body" : {"ServiceInfo": {"state" : "STARTED"}}}')

        self._fire_service_start_notifications(
            cluster_name, cluster_spec, ambari_info)
        result = self._put(start_url, ambari_info, data=body)
        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(
                self._get_async_request_uri(ambari_info, cluster_name,
                                            request_id), ambari_info)
            if success:
                LOG.info(
                    _LI("Successfully started Hadoop cluster."))
                LOG.info(_LI('Ambari server address: {server_address}')
                         .format(server_address=ambari_info.get_address()))

            else:
                LOG.error(_LE('Failed to start Hadoop cluster.'))
                raise ex.HadoopProvisionError(
                    _('Start of Hadoop services failed.'))

        elif result.status_code != 200:
            LOG.error(
                _LE('Start command failed. Status: {status}, '
                    'response: {response}').format(status=result.status_code,
                                                   response=result.text))
            raise ex.HadoopProvisionError(
                _('Start of Hadoop services failed.'))
Developer: AlexanderYAPPO, Project: sahara, Lines: 34, Source: versionhandler.py


Example 5: main

def main():
    # TODO(tmckay): Work on restricting the options
    # pulled in by imports which show up in the help.
    # If we find a nice way to do this the calls to
    # unregister_extra_cli_opt() can be removed
    CONF(project="sahara")

    # For some reason, this is necessary to clear cached values
    # and re-read configs.  For instance, if this is not done
    # here the 'plugins' value will not reflect the value from
    # the config file on the command line
    CONF.reload_config_files()
    log.setup(CONF, "sahara")

    # If we have to enforce extra option checks, like one option
    # requires another, do it here
    extra_option_checks()

    # Since this may be scripted, record the command in the log
    # so a user can know exactly what was done
    LOG.info(_LI("Command: {command}").format(command=" ".join(sys.argv)))

    api.set_logger(LOG)
    api.set_conf(CONF)

    CONF.command.func()

    LOG.info(_LI("Finished {command}").format(command=CONF.command.name))
Developer: thefuyang, Project: sahara, Lines: 28, Source: cli.py


Example 6: _await_networks

    def _await_networks(self, cluster, instances):
        if not instances:
            return

        ips_assigned = set()
        while len(ips_assigned) != len(instances):
            if not g.check_cluster_exists(cluster):
                return
            for instance in instances:
                if instance.id not in ips_assigned:
                    if networks.init_instances_ips(instance):
                        ips_assigned.add(instance.id)

            context.sleep(1)

        LOG.info(
            _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)

        cluster = conductor.cluster_get(context.ctx(), cluster)
        instances = g.get_instances(cluster, ips_assigned)

        with context.ThreadGroup() as tg:
            for instance in instances:
                tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                         self._wait_until_accessible, instance)

        LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)
Developer: degorenko, Project: sahara, Lines: 27, Source: engine.py


Example 7: _install_services

    def _install_services(self, cluster_name, ambari_info):
        LOG.info(_LI('Installing required Hadoop services ...'))

        ambari_address = ambari_info.get_address()
        install_url = ('http://{0}/api/v1/clusters/{'
                       '1}/services?ServiceInfo/state=INIT'.format(
                           ambari_address, cluster_name))
        body = ('{"RequestInfo" : { "context" : "Install all services" },'
                '"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')

        result = self._put(install_url, ambari_info, data=body)

        if result.status_code == 202:
            json_result = json.loads(result.text)
            request_id = json_result['Requests']['id']
            success = self._wait_for_async_request(self._get_async_request_uri(
                ambari_info, cluster_name, request_id),
                ambari_info)
            if success:
                LOG.info(_LI("Install of Hadoop stack successful."))
                self._finalize_ambari_state(ambari_info)
            else:
                LOG.critical(_LC('Install command failed.'))
                raise ex.HadoopProvisionError(
                    _('Installation of Hadoop stack failed.'))
        elif result.status_code != 200:
            LOG.error(
                _LE('Install command failed. {0}').format(result.text))
            raise ex.HadoopProvisionError(
                _('Installation of Hadoop stack failed.'))
Developer: degorenko, Project: sahara, Lines: 30, Source: versionhandler.py


Example 8: _await_networks

    def _await_networks(self, cluster, instances):
        if not instances:
            return

        cpo.add_provisioning_step(cluster.id, _("Assign IPs"), len(instances))

        ips_assigned = set()
        self._ips_assign(ips_assigned, cluster, instances)

        LOG.info(
            _LI("Cluster {cluster_id}: all instances have IPs assigned")
            .format(cluster_id=cluster.id))

        cluster = conductor.cluster_get(context.ctx(), cluster)
        instances = g.get_instances(cluster, ips_assigned)

        cpo.add_provisioning_step(
            cluster.id, _("Wait for instance accessibility"), len(instances))

        with context.ThreadGroup() as tg:
            for instance in instances:
                tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                         self._wait_until_accessible, instance)

        LOG.info(_LI("Cluster {cluster_id}: all instances are accessible")
                 .format(cluster_id=cluster.id))
Developer: YongchaoTIAN, Project: sahara, Lines: 26, Source: engine.py


Example 9: stop_services

def stop_services(cluster, instances):
    LOG.info(_LI("Stop warden and zookeeper"))
    for instance in instances:
        with instance.remote() as r:
            r.execute_command(STOP_WARDEN_CMD, run_as_root=True)
            if check_if_is_zookeeper_node(instance):
                r.execute_command(STOP_ZOOKEEPER_CMD, run_as_root=True)
    LOG.info(_LI("Warden and zookeeper stoped"))
Developer: a9261, Project: sahara, Lines: 8, Source: scaling.py


Example 10: remove_services

def remove_services(cluster, instances):
    LOG.info(_LI("Start remove all mapr services"))
    for instance in instances:
        with instance.remote() as r:
            r.execute_command(REMOVE_MAPR_PACKAGES_CMD, run_as_root=True)
            r.execute_command(REMOVE_MAPR_HOME_CMD, run_as_root=True)
            r.execute_command(REMOVE_MAPR_CORES_CMD, run_as_root=True)
    LOG.info(_LI("All mapr services removed"))
Developer: a9261, Project: sahara, Lines: 8, Source: scaling.py


Example 11: format_cluster_deleted_message

def format_cluster_deleted_message(cluster):
    msg = _LI("Cluster %(name)s (id=%(id)s) was deleted. "
              "Canceling current operation.")

    if cluster:
        return (msg, {'name': cluster.name,
                      'id': cluster.id})
    return (msg, {'name': _LI("Unknown"),
                  'id': _LI("Unknown")})
Developer: turu, Project: sahara, Lines: 9, Source: general.py


Example 12: move_node

def move_node(cluster, instances):
    LOG.info(_LI("Start moving the node to the /decommissioned"))
    for instance in instances:
        with instance.remote() as r:
            command = GET_SERVER_ID_CMD % instance.management_ip
            ec, out = r.execute_command(command, run_as_root=True)
            command = MOVE_NODE_CMD % out.strip()
            r.execute_command(command, run_as_root=True)
    LOG.info(_LI("Nodes moved to the /decommissioned"))
Developer: a9261, Project: sahara, Lines: 9, Source: scaling.py


Example 13: scale_cluster

def scale_cluster(cluster, instances, disk_setup_script_path, waiting_script,
                  context, configure_sh_string, is_node_awareness):
    LOG.info(_LI('START: Cluster scaling. Cluster = %s'), cluster.name)
    for inst in instances:
        start_helper.install_role_on_instance(inst, context)
    config.configure_instances(cluster, instances)
    start_services(cluster, instances, disk_setup_script_path,
                   waiting_script, configure_sh_string)
    LOG.info(_LI('END: Cluster scaling. Cluster = %s'), cluster.name)
Developer: a9261, Project: sahara, Lines: 9, Source: scaling.py


Example 14: _install_components

    def _install_components(self, ambari_info, auth, cluster_name, servers):
        # query for the host components on the given hosts that are in the
        # INIT state
        # TODO(jspeidel): provide request context
        body = '{"HostRoles": {"state" : "INSTALLED"}}'
        install_uri = ('http://{0}/api/v1/clusters/{'
                       '1}/host_components?HostRoles/state=INIT&'
                       'HostRoles/host_name.in({2})'.format(
                           ambari_info.get_address(), cluster_name,
                           self._get_host_list(servers)))
        self._exec_ambari_command(ambari_info, body, install_uri)
        LOG.info(_LI('Started Hadoop components while scaling up'))
        LOG.info(_LI('Ambari server ip {ip}')
                 .format(ip=ambari_info.get_address()))
Developer: AlexanderYAPPO, Project: sahara, Lines: 14, Source: versionhandler.py


Example 15: start_cluster

    def start_cluster(self, cluster):
        nn_instance = vu.get_namenode(cluster)
        with remote.get_remote(nn_instance) as r:
            run.format_namenode(r)
            run.start_processes(r, "namenode")

        for snn in vu.get_secondarynamenodes(cluster):
            run.start_processes(remote.get_remote(snn), "secondarynamenode")

        jt_instance = vu.get_jobtracker(cluster)
        if jt_instance:
            run.start_processes(remote.get_remote(jt_instance), "jobtracker")

        self._start_tt_dn_processes(utils.get_instances(cluster))

        self._await_datanodes(cluster)

        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                 cluster.name)

        oozie = vu.get_oozie(cluster)
        if oozie:
            with remote.get_remote(oozie) as r:
                if c_helper.is_mysql_enable(cluster):
                    run.mysql_start(r, oozie)
                    run.oozie_create_db(r)
                run.oozie_share_lib(r, nn_instance.hostname())
                run.start_oozie(r)
                LOG.info(_LI("Oozie service at '%s' has been started"),
                         nn_instance.hostname())

        hive_server = vu.get_hiveserver(cluster)
        if hive_server:
            with remote.get_remote(hive_server) as r:
                run.hive_create_warehouse_dir(r)
                run.hive_copy_shared_conf(
                    r, edp.get_hive_shared_conf_path('hadoop'))

                if c_helper.is_mysql_enable(cluster):
                    if not oozie or hive_server.hostname() != oozie.hostname():
                        run.mysql_start(r, hive_server)
                    run.hive_create_db(r)
                    run.hive_metastore_start(r)
                    LOG.info(_LI("Hive Metastore server at %s has been "
                                 "started"),
                             hive_server.hostname())

        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
        self._set_cluster_info(cluster)
Developer: stannie42, Project: sahara, Lines: 49, Source: versionhandler.py


Example 16: decommission_nodes

def decommission_nodes(cluster, instances, configure_sh_string):
    LOG.info(_LI('Start decommission. Cluster = %s'), cluster.name)
    move_node(cluster, instances)
    stop_services(cluster, instances)
    context.sleep(names.WAIT_NODE_ALARM_NO_HEARTBEAT)
    remove_node(cluster, instances)
    remove_services(cluster, instances)
    if check_for_cldb_or_zookeeper_service(instances):
        all_instances = gen.get_instances(cluster)
        current_cluster_instances = [
            x for x in all_instances if x not in instances]
        for inst in current_cluster_instances:
            start_helper.exec_configure_sh_on_instance(
                cluster, inst, configure_sh_string)
    LOG.info(_LI('End decommission. Cluster = %s'), cluster.name)
Developer: a9261, Project: sahara, Lines: 15, Source: scaling.py


Example 17: setup

def setup():
    """Initialise the oslo_messaging layer."""
    global TRANSPORT, NOTIFIER

    messaging.set_transport_defaults('sahara')

    TRANSPORT = messaging.get_transport(cfg.CONF, aliases=_ALIASES)

    if not cfg.CONF.enable_notifications:
        LOG.info(_LI("Notifications disabled"))
        return
    LOG.info(_LI("Notifications enabled"))

    serializer = ContextSerializer(JsonPayloadSerializer())
    NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
Developer: egafford, Project: sahara, Lines: 15, Source: rpc.py


Example 18: cancel_job

def cancel_job(job_execution_id):
    ctx = context.ctx()
    job_execution = conductor.job_execution_get(ctx, job_execution_id)
    if job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED:
        return job_execution
    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    if cluster is None:
        return job_execution
    engine = _get_job_engine(cluster, job_execution)
    if engine is not None:
        job_execution = conductor.job_execution_update(
            ctx, job_execution_id,
            {'info': {'status': edp.JOB_STATUS_TOBEKILLED}})

        timeout = CONF.job_canceling_timeout
        s_time = timeutils.utcnow()
        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            if job_execution.info['status'] not in edp.JOB_STATUSES_TERMINATED:
                try:
                    job_info = engine.cancel_job(job_execution)
                except Exception as ex:
                    job_info = None
                    LOG.warning(
                        _LW("Error during cancel of job execution {job}: "
                            "{error}").format(job=job_execution.id,
                                              error=ex))
                if job_info is not None:
                    job_execution = _write_job_status(job_execution, job_info)
                    LOG.info(_LI("Job execution {job_id} was canceled "
                                 "successfully").format(
                                     job_id=job_execution.id))
                    return job_execution
                context.sleep(3)
                job_execution = conductor.job_execution_get(
                    ctx, job_execution_id)
                if not job_execution:
                    LOG.info(_LI("Job execution {job_exec_id} was deleted. "
                                 "Canceling current operation.").format(
                             job_exec_id=job_execution_id))
                    return job_execution
            else:
                LOG.info(_LI("Job execution status {job}: {status}").format(
                         job=job_execution.id,
                         status=job_execution.info['status']))
                return job_execution
        else:
            raise e.CancelingFailed(_('Job execution %s was not canceled')
                                    % job_execution.id)
Developer: YongchaoTIAN, Project: sahara, Lines: 48, Source: job_manager.py


Example 19: create_cluster

    def create_cluster(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                version, cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        self._provision_cluster(
            cluster.name, cluster_spec, ambari_info, servers,
            cluster.hadoop_version)

        # add the topology data file and script if rack awareness is
        # enabled
        self._configure_topology_for_cluster(cluster, servers)

        LOG.info(_LI("Install of Hadoop stack successful."))
        # add service urls
        self._set_cluster_info(cluster, cluster_spec)
Developer: viplav, Project: sahara, Lines: 30, Source: ambariplugin.py


Example 20: _provision_cluster

    def _provision_cluster(self, name, cluster_spec, ambari_info,
                           servers, version):
        # TODO(jspeidel): encapsulate in another class

        if servers:
            cpo.add_provisioning_step(
                servers[0].cluster_id,
                _("Provision cluster via Ambari"), len(servers))

        with context.ThreadGroup() as tg:
            for server in servers:
                with context.set_current_instance_id(
                        server.instance['instance_id']):
                    tg.spawn(
                        "hdp-provision-instance-%s" %
                        server.instance.hostname(),
                        server.provision_ambari, ambari_info, cluster_spec)

        handler = self.version_factory.get_version_handler(version)
        ambari_client = handler.get_ambari_client()

        ambari_client.wait_for_host_registrations(len(servers), ambari_info)
        self._set_ambari_credentials(cluster_spec, ambari_info, version)

        ambari_client.provision_cluster(
            cluster_spec, servers, ambari_info, name)

        LOG.info(_LI('Cluster provisioned via Ambari Server: {server_ip}')
                 .format(server_ip=ambari_info.get_address()))
Developer: al-indigo, Project: sahara, Lines: 29, Source: ambariplugin.py



Note: The sahara.i18n._LI examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce without permission.

