Python general.change_cluster_status function code examples


This article collects typical usage examples of the Python function sahara.utils.general.change_cluster_status. If you are wondering what change_cluster_status does, how to call it, or simply want to see it used in real code, the curated examples below should help.

The 20 code examples of change_cluster_status shown below are sorted by popularity by default; upvoting the examples you find useful helps better ones surface first.
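
Judging from the examples below, change_cluster_status takes a cluster object (or, as in example 10, a cluster id), the new status string, and an optional status description; it persists the change and returns the refreshed cluster, which callers rebind before continuing. The following is only a minimal sketch of that call pattern, assuming the module is imported as g (as most examples do) and that excutils comes from oslo_utils; the provision_with_plugin helper and the plugin object are hypothetical and merely illustrate the flow:

import six
from oslo_utils import excutils  # import path assumed; some Sahara versions vendor excutils

from sahara.utils import general as g


def provision_with_plugin(cluster, plugin):
    # change_cluster_status persists the new status and returns the
    # refreshed cluster, so the return value is rebound at every step
    try:
        cluster = g.change_cluster_status(cluster, "Validating")
        plugin.validate(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # on failure, record the error text in the optional status
            # description and re-raise the original exception
            g.change_cluster_status(cluster, "Error",
                                    status_description=six.text_type(e))

    cluster = g.change_cluster_status(cluster, "Configuring")
    plugin.configure_cluster(cluster)

    cluster = g.change_cluster_status(cluster, "Starting")
    plugin.start_cluster(cluster)

    return g.change_cluster_status(cluster, "Active")

Note also example 4: once a cluster has been put into the "Deleting" state, later change_cluster_status calls are ignored and the status stays "Deleting".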

Example 1: launch_instances

    def launch_instances(self, cluster, target_count):
        # create all instances
        cluster = g.change_cluster_status(cluster, self.STAGES[0])

        tmpl = heat.ClusterTemplate(cluster)

        self._configure_template(tmpl, cluster, target_count)
        stack = tmpl.instantiate(update_existing=self.UPDATE_STACK,
                                 disable_rollback=self.DISABLE_ROLLBACK)
        heat.wait_stack_completion(stack.heat_stack)

        self.inst_ids = self._populate_cluster(cluster, stack)

        # wait until all instances are up and the networks are ready
        cluster = g.change_cluster_status(cluster, self.STAGES[1])

        instances = g.get_instances(cluster, self.inst_ids)

        self._await_networks(cluster, instances)

        # prepare all instances
        cluster = g.change_cluster_status(cluster, self.STAGES[2])

        instances = g.get_instances(cluster, self.inst_ids)
        volumes.mount_to_instances(instances)

        self._configure_instances(cluster)
Developer: degorenko, Project: sahara, Lines: 27, Source: heat_engine.py


Example 2: _provision_scaled_cluster

def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    # Decommissioning surplus nodes with the plugin
    cluster = g.change_cluster_status(cluster, "Decommissioning")

    instances_to_delete = []

    for node_group in cluster.node_groups:
        new_count = node_group_id_map[node_group.id]
        if new_count < node_group.count:
            instances_to_delete += node_group.instances[new_count:
                                                        node_group.count]

    if instances_to_delete:
        plugin.decommission_nodes(cluster, instances_to_delete)

    # Scaling infrastructure
    cluster = g.change_cluster_status(cluster, "Scaling")

    instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

    # Setting up new nodes with the plugin
    if instance_ids:
        cluster = g.change_cluster_status(cluster, "Configuring")
        instances = g.get_instances(cluster, instance_ids)
        plugin.scale_cluster(cluster, instances)

    g.change_cluster_status(cluster, "Active")
Developer: lborensky, Project: sahara, Lines: 29, Source: ops.py


Example 3: launch_instances

    def launch_instances(self, ctx, cluster, target_count):
        # create all instances
        cluster = g.change_cluster_status(cluster, self.STAGES[0])

        tmpl = heat.ClusterTemplate(cluster)

        self._configure_template(ctx, tmpl, cluster, target_count)
        stack = tmpl.instantiate(update_existing=self.UPDATE_STACK)
        heat.wait_stack_completion(stack.heat_stack)

        self.inst_ids = self._populate_cluster(ctx, cluster, stack)

        # wait until all instances are up and the networks are ready
        cluster = g.change_cluster_status(cluster, self.STAGES[1])

        instances = g.get_instances(cluster, self.inst_ids)

        self._await_networks(cluster, instances)

        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return

        # prepare all instances
        cluster = g.change_cluster_status(cluster, self.STAGES[2])

        instances = g.get_instances(cluster, self.inst_ids)
        volumes.mount_to_instances(instances)

        self._configure_instances(cluster)
Developer: turu, Project: sahara, Lines: 30, Source: heat_engine.py


Example 4: test_change_cluster_status

    def test_change_cluster_status(self):
        cluster = self._make_sample()
        cluster = general.change_cluster_status(cluster, "Deleting", "desc")
        self.assertEqual("Deleting", cluster.status)
        self.assertEqual("desc", cluster.status_description)
        general.change_cluster_status(cluster, "Spawning")
        self.assertEqual("Deleting", cluster.status)
Developer: AlexanderYAPPO, Project: sahara, Lines: 7, Source: test_general.py


Example 5: create_cluster

    def create_cluster(self, cluster):
        ctx = context.ctx()
        self._update_rollback_strategy(cluster, shutdown=True)

        # create all instances
        cluster = g.change_cluster_status(cluster, "Spawning")
        self._create_instances(cluster)

        # wait until all instances are up and the networks are ready
        cluster = g.change_cluster_status(cluster, "Waiting")
        instances = g.get_instances(cluster)

        self._await_active(cluster, instances)

        self._assign_floating_ips(instances)

        self._await_networks(cluster, instances)

        cluster = conductor.cluster_get(ctx, cluster)

        # attach volumes
        volumes.attach_to_instances(g.get_instances(cluster))

        # prepare all instances
        cluster = g.change_cluster_status(cluster, "Preparing")

        self._configure_instances(cluster)

        self._update_rollback_strategy(cluster)
Developer: stannie42, Project: sahara, Lines: 29, Source: direct_engine.py


Example 6: _provision_cluster

def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    cluster = _update_sahara_info(ctx, cluster)

    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust_for_cluster(cluster)

    # updating cluster infra
    cluster = g.change_cluster_status(cluster, "InfraUpdating")
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    INFRA.create_cluster(cluster)

    # configure cluster
    cluster = g.change_cluster_status(cluster, "Configuring")
    plugin.configure_cluster(cluster)

    # starting prepared and configured cluster
    cluster = g.change_cluster_status(cluster, "Starting")
    plugin.start_cluster(cluster)

    # cluster is now up and ready
    cluster = g.change_cluster_status(cluster, "Active")

    # schedule pending job executions for the cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)
Developer: lborensky, Project: sahara, Lines: 30, Source: ops.py


Example 7: _scale_cluster_instances

    def _scale_cluster_instances(self, cluster, node_group_id_map):
        ctx = context.ctx()
        aa_groups = self._generate_anti_affinity_groups(cluster)
        instances_to_delete = []
        node_groups_to_enlarge = []

        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]

            if new_count < node_group.count:
                instances_to_delete += node_group.instances[new_count:
                                                            node_group.count]
            elif new_count > node_group.count:
                node_groups_to_enlarge.append(node_group)

        if instances_to_delete:
            cluster = g.change_cluster_status(cluster, "Deleting Instances")

            for instance in instances_to_delete:
                self._shutdown_instance(instance)

        cluster = conductor.cluster_get(ctx, cluster)

        instances_to_add = []
        if node_groups_to_enlarge:
            cluster = g.change_cluster_status(cluster, "Adding Instances")
            for node_group in node_groups_to_enlarge:
                count = node_group_id_map[node_group.id]
                for idx in six.moves.xrange(node_group.count + 1, count + 1):
                    instance_id = self._run_instance(cluster, node_group, idx,
                                                     aa_groups)
                    instances_to_add.append(instance_id)

        return instances_to_add
Developer: COSHPC, Project: sahara, Lines: 34, Source: direct_engine.py


Example 8: _scale_cluster_instances

    def _scale_cluster_instances(self, cluster, node_group_id_map):
        ctx = context.ctx()

        aa_group = None
        old_aa_groups = None
        if cluster.anti_affinity:
            aa_group = self._find_aa_server_group(cluster)
            if not aa_group:
                old_aa_groups = self._generate_anti_affinity_groups(cluster)

        instances_to_delete = []
        node_groups_to_enlarge = set()
        node_groups_to_delete = set()

        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]

            if new_count < node_group.count:
                instances_to_delete += node_group.instances[new_count:
                                                            node_group.count]
                if new_count == 0:
                    node_groups_to_delete.add(node_group.id)
            elif new_count > node_group.count:
                node_groups_to_enlarge.add(node_group.id)
                if node_group.count == 0 and node_group.auto_security_group:
                    self._create_auto_security_group(node_group)

        if instances_to_delete:
            cluster = g.change_cluster_status(cluster, "Deleting Instances")

            for instance in instances_to_delete:
                with context.set_current_instance_id(instance.instance_id):
                    self._shutdown_instance(instance)

        self._await_deleted(cluster, instances_to_delete)
        for ng in cluster.node_groups:
            if ng.id in node_groups_to_delete:
                self._delete_auto_security_group(ng)

        cluster = conductor.cluster_get(ctx, cluster)
        instances_to_add = []
        if node_groups_to_enlarge:

            cpo.add_provisioning_step(
                cluster.id, _("Add instances"),
                self._count_instances_to_scale(
                    node_groups_to_enlarge, node_group_id_map, cluster))

            cluster = g.change_cluster_status(cluster, "Adding Instances")
            for ng in cluster.node_groups:
                if ng.id in node_groups_to_enlarge:
                    count = node_group_id_map[ng.id]
                    for idx in six.moves.xrange(ng.count + 1, count + 1):
                        instance_id = self._start_instance(
                            cluster, ng, idx, aa_group, old_aa_groups)
                        instances_to_add.append(instance_id)

        return instances_to_add
Developer: AllenFromMinneapolis, Project: sahara, Lines: 58, Source: direct_engine.py


Example 9: _provision_cluster

def _provision_cluster(cluster_id):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    if CONF.use_identity_api_v3 and cluster.is_transient:
        trusts.create_trust_for_cluster(cluster)

    # updating cluster infra
    cluster = g.change_cluster_status(cluster, "InfraUpdating")
    plugin.update_infra(cluster)

    # creating instances and configuring them
    cluster = conductor.cluster_get(ctx, cluster_id)
    INFRA.create_cluster(cluster)

    if not g.check_cluster_exists(cluster):
        LOG.info(g.format_cluster_deleted_message(cluster))
        return

    # configure cluster
    cluster = g.change_cluster_status(cluster, "Configuring")
    try:
        plugin.configure_cluster(cluster)
    except Exception as ex:
        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return
        LOG.exception(
            _LE("Can't configure cluster '%(name)s' (reason: %(reason)s)"),
            {'name': cluster.name, 'reason': ex})
        g.change_cluster_status(cluster, "Error")
        return

    if not g.check_cluster_exists(cluster):
        LOG.info(g.format_cluster_deleted_message(cluster))
        return

    # starting prepared and configured cluster
    cluster = g.change_cluster_status(cluster, "Starting")
    try:
        plugin.start_cluster(cluster)
    except Exception as ex:
        if not g.check_cluster_exists(cluster):
            LOG.info(g.format_cluster_deleted_message(cluster))
            return
        LOG.exception(
            _LE("Can't start services for cluster '%(name)s' (reason: "
                "%(reason)s)"), {'name': cluster.name, 'reason': ex})
        g.change_cluster_status(cluster, "Error")
        return

    if not g.check_cluster_exists(cluster):
        LOG.info(g.format_cluster_deleted_message(cluster))
        return

    # cluster is now up and ready
    cluster = g.change_cluster_status(cluster, "Active")

    # schedule pending job executions for the cluster
    for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id):
        job_manager.run_job(je.id)
Developer: turu, Project: sahara, Lines: 60, Source: ops.py


Example 10: terminate_cluster

def terminate_cluster(id):
    context.set_current_cluster_id(id)
    cluster = g.change_cluster_status(id, "Deleting")

    OPS.terminate_cluster(id)
    sender.notify(context.ctx(), cluster.id, cluster.name, cluster.status,
                  "delete")
Developer: AlexanderYAPPO, Project: sahara, Lines: 7, Source: api.py


Example 11: create_cluster

    def create_cluster(self, cluster):
        version = cluster.hadoop_version
        handler = self.version_factory.get_version_handler(version)

        cluster_spec = handler.get_cluster_spec(
            cluster, self._map_to_user_inputs(
                version, cluster.cluster_configs))
        hosts = self._get_servers(cluster)
        ambari_info = self.get_ambari_info(cluster_spec)
        self.cluster_ambari_mapping[cluster.name] = ambari_info
        rpm = self._get_rpm_uri(cluster_spec)

        servers = []
        for host in hosts:
            host_role = utils.get_host_role(host)
            servers.append(
                h.HadoopServer(host, cluster_spec.node_groups[host_role],
                               ambari_rpm=rpm))

        self._provision_cluster(
            cluster.name, cluster_spec, ambari_info, servers,
            cluster.hadoop_version)

        # add the topology data file and script if rack awareness is
        # enabled
        self._configure_topology_for_cluster(cluster, servers)

        LOG.info(_LI("Install of Hadoop stack successful."))
        # add service urls
        self._set_cluster_info(cluster, cluster_spec)

        # check if HDFS HA is enabled; set it up if so
        if cluster_spec.is_hdfs_ha_enabled(cluster):
            cluster = g.change_cluster_status(cluster, "Configuring HA")
            self.configure_hdfs_ha(cluster)
Developer: al-indigo, Project: sahara, Lines: 35, Source: ambariplugin.py


Example 12: create_cluster

    def create_cluster(self, cluster):
        ctx = context.ctx()

        launcher = _CreateLauncher()

        try:
            target_count = self._get_ng_counts(cluster)
            self._nullify_ng_counts(cluster)

            cluster = conductor.cluster_get(ctx, cluster)
            launcher.launch_instances(ctx, cluster, target_count)

            cluster = conductor.cluster_get(ctx, cluster)
            self._add_volumes(ctx, cluster)

        except Exception as ex:
            with excutils.save_and_reraise_exception():
                if not g.check_cluster_exists(cluster):
                    LOG.info(g.format_cluster_deleted_message(cluster))
                    return
                self._log_operation_exception(
                    _LW("Can't start cluster '%(cluster)s' "
                        "(reason: %(reason)s)"), cluster, ex)

                cluster = g.change_cluster_status(
                    cluster, "Error", status_description=six.text_type(ex))
                self._rollback_cluster_creation(cluster)
Developer: turu, Project: sahara, Lines: 27, Source: heat_engine.py


Example 13: scale_cluster

    def scale_cluster(self, cluster, node_group_id_map):
        ctx = context.ctx()
        cluster = g.change_cluster_status(cluster, "Scaling")

        instance_ids = self._scale_cluster_instances(cluster,
                                                     node_group_id_map)

        self._update_rollback_strategy(cluster, instance_ids=instance_ids)

        cluster = conductor.cluster_get(ctx, cluster)
        g.clean_cluster_from_empty_ng(cluster)

        cluster = conductor.cluster_get(ctx, cluster)
        instances = g.get_instances(cluster, instance_ids)

        self._await_active(cluster, instances)

        self._assign_floating_ips(instances)

        self._await_networks(cluster, instances)

        cluster = conductor.cluster_get(ctx, cluster)

        volumes.attach_to_instances(
            g.get_instances(cluster, instance_ids))

        # at this point the cluster should be valid: if instance creation
        # was not successful, all extra instances were removed above
        if instance_ids:
            self._configure_instances(cluster)

        self._update_rollback_strategy(cluster)

        return instance_ids
Developer: stannie42, Project: sahara, Lines: 34, Source: direct_engine.py


Example 14: create_cluster

def create_cluster(values):
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)

    # validating cluster
    try:
        cluster = g.change_cluster_status(cluster, "Validating")
        plugin.validate(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            g.change_cluster_status(cluster, "Error",
                                    status_description=six.text_type(e))

    OPS.provision_cluster(cluster.id)

    return cluster
Developer: COSHPC, Project: sahara, Lines: 17, Source: api.py


Example 15: scale_cluster

    def scale_cluster(self, cluster, target_count):
        ctx = context.ctx()

        rollback_count = self._get_ng_counts(cluster)

        launcher = _ScaleLauncher()

        try:
            launcher.launch_instances(ctx, cluster, target_count)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                if not g.check_cluster_exists(cluster):
                    LOG.info(g.format_cluster_deleted_message(cluster))
                    return
                self._log_operation_exception(
                    _LW("Can't scale cluster '%(cluster)s' "
                        "(reason: %(reason)s)"), cluster, ex)

                cluster = conductor.cluster_get(ctx, cluster)

                try:
                    self._rollback_cluster_scaling(
                        ctx, cluster, rollback_count, target_count)
                except Exception:
                    if not g.check_cluster_exists(cluster):
                        LOG.info(g.format_cluster_deleted_message(cluster))
                        return
                    # if something fails during the rollback, we stop
                    # doing anything further
                    cluster = g.change_cluster_status(cluster, "Error")
                    LOG.error(_LE("Unable to complete rollback, aborting"))
                    raise

                cluster = g.change_cluster_status(cluster, "Active")
                LOG.warn(
                    _LW("Rollback successful. "
                        "Throwing off an initial exception."))
        finally:
            cluster = conductor.cluster_get(ctx, cluster)
            g.clean_cluster_from_empty_ng(cluster)

        return launcher.inst_ids
Developer: turu, Project: sahara, Lines: 42, Source: heat_engine.py


Example 16: create_cluster

def create_cluster(values):
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    sender.notify(ctx, cluster.id, cluster.name, "New",
                  "create")
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    # validating cluster
    try:
        cluster = g.change_cluster_status(cluster, "Validating")
        quotas.check_cluster(cluster)
        plugin.validate(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            g.change_cluster_status(cluster, "Error",
                                    six.text_type(e))

    OPS.provision_cluster(cluster.id)

    return cluster
Developer: YongchaoTIAN, Project: sahara, Lines: 21, Source: api.py


Example 17: scale_cluster

def scale_cluster(id, data):
    context.set_current_cluster_id(id)
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, id)
    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    existing_node_groups = data.get('resize_node_groups', [])
    additional_node_groups = data.get('add_node_groups', [])

    # the next map is the main object we will work with
    # to_be_enlarged : {node_group_id: desired_amount_of_instances}
    to_be_enlarged = {}
    for ng in existing_node_groups:
        ng_id = g.find(cluster.node_groups, name=ng['name'])['id']
        to_be_enlarged.update({ng_id: ng['count']})

    additional = construct_ngs_for_scaling(cluster, additional_node_groups)
    cluster = conductor.cluster_get(ctx, cluster)
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    try:
        cluster = g.change_cluster_status(cluster, "Validating")
        quotas.check_scaling(cluster, to_be_enlarged, additional)
        plugin.recommend_configs(cluster)
        plugin.validate_scaling(cluster, to_be_enlarged, additional)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            g.clean_cluster_from_empty_ng(cluster)
            g.change_cluster_status(cluster, "Active", six.text_type(e))

    # If we are here validation is successful.
    # So let's update to_be_enlarged map:
    to_be_enlarged.update(additional)

    for node_group in cluster.node_groups:
        if node_group.id not in to_be_enlarged:
            to_be_enlarged[node_group.id] = node_group.count

    OPS.provision_scaled_cluster(id, to_be_enlarged)
    return cluster
Developer: modin, Project: sahara, Lines: 40, Source: api.py


Example 18: _cluster_create

def _cluster_create(values, plugin):
    ctx = context.ctx()
    cluster = conductor.cluster_create(ctx, values)
    context.set_current_cluster_id(cluster.id)
    sender.notify(ctx, cluster.id, cluster.name, "New",
                  "create")
    _add_ports_for_auto_sg(ctx, cluster, plugin)

    # validating cluster
    try:
        plugin.recommend_configs(cluster)
        cluster = g.change_cluster_status(cluster, "Validating")
        quotas.check_cluster(cluster)
        plugin.validate(cluster)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            g.change_cluster_status(cluster, "Error",
                                    six.text_type(e))

    OPS.provision_cluster(cluster.id)

    return cluster
Developer: modin, Project: sahara, Lines: 22, Source: api.py


Example 19: _provision_scaled_cluster

def _provision_scaled_cluster(cluster_id, node_group_id_map):
    ctx, cluster, plugin = _prepare_provisioning(cluster_id)

    try:
        # Decommissioning surplus nodes with the plugin
        cluster = g.change_cluster_status(cluster, "Decommissioning")

        instances_to_delete = []

        for node_group in cluster.node_groups:
            new_count = node_group_id_map[node_group.id]
            if new_count < node_group.count:
                instances_to_delete += node_group.instances[new_count:
                                                            node_group.count]

        if instances_to_delete:
            context.set_step_type(_("Plugin: decommission cluster"))
            plugin.decommission_nodes(cluster, instances_to_delete)

        # Scaling infrastructure
        cluster = g.change_cluster_status(cluster, "Scaling")
        context.set_step_type(_("Engine: scale cluster"))
        instance_ids = INFRA.scale_cluster(cluster, node_group_id_map)

        # Setting up new nodes with the plugin
        if instance_ids:
            ntp_service.configure_ntp(cluster_id)
            cluster = g.change_cluster_status(cluster, "Configuring")
            instances = g.get_instances(cluster, instance_ids)
            context.set_step_type(_("Plugin: scale cluster"))
            plugin.scale_cluster(cluster, instances)

        g.change_cluster_status(cluster, "Active")

    finally:
        if CONF.use_identity_api_v3 and not cluster.is_transient:
            trusts.delete_trust_from_cluster(cluster)
Developer: openstacking, Project: sahara, Lines: 37, Source: ops.py


Example 20: _launch_instances

    def _launch_instances(self, cluster, target_count, stages,
                          update_stack=False, disable_rollback=True):
        # create all instances
        cluster = g.change_cluster_status(cluster, stages[0])

        inst_ids = self._create_instances(
            cluster, target_count, update_stack, disable_rollback)

        # wait until all instances are up and the networks are ready
        cluster = g.change_cluster_status(cluster, stages[1])

        instances = g.get_instances(cluster, inst_ids)

        self._await_networks(cluster, instances)

        # prepare all instances
        cluster = g.change_cluster_status(cluster, stages[2])

        instances = g.get_instances(cluster, inst_ids)
        volumes.mount_to_instances(instances)

        self._configure_instances(cluster)

        return inst_ids
Developer: al-indigo, Project: sahara, Lines: 24, Source: heat_engine.py



Note: The sahara.utils.general.change_cluster_status examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce this article without permission.

