Python edp.compare_job_type Function Code Examples


This article collects typical usage examples of the Python function sahara.utils.edp.compare_job_type. If you have been wondering exactly what compare_job_type does, how to call it, or what real-world usage looks like, the hand-picked code examples below may help.



A total of 20 code examples of the compare_job_type function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
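
Before the examples, here is a minimal usage sketch. It assumes a Sahara source tree is importable and that compare_job_type(job_type, *candidate_types, strict=False) returns True when job_type matches one of the candidate types; this signature and behaviour are inferred from the examples below (Example 11 in particular) and may differ between Sahara releases. The helper pick_workflow_kind is hypothetical and exists only for illustration.

# Minimal usage sketch; assumes sahara is importable and that
# compare_job_type behaves as the examples below suggest.
from sahara.utils import edp


def pick_workflow_kind(job_type):
    # Hypothetical dispatcher on the EDP job type. With the default
    # non-strict comparison, a streaming MapReduce job also counts
    # as MapReduce.
    if edp.compare_job_type(job_type, edp.JOB_TYPE_JAVA):
        return 'java'
    if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_PIG):
        return 'mapreduce-or-pig'
    return 'other'


print(pick_workflow_kind(edp.JOB_TYPE_MAPREDUCE_STREAMING))  # expected: mapreduce-or-pig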

Example 1: _create_job_binary

def _create_job_binary(id, type):
    binary = mock.Mock()
    binary.id = id
    binary.url = "internal-db://42"
    if edp.compare_job_type(type, edp.JOB_TYPE_PIG):
        binary.name = "script.pig"
    elif edp.compare_job_type(type, edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA):
        binary.name = "main.jar"
    else:
        binary.name = "script.q"
    return binary
Author: B-Rich, Project: sahara, Lines: 11, Source: test_job_manager.py


Example 2: _create_job_binary

def _create_job_binary(id, type):
    binary = mock.Mock()
    binary.id = id
    binary.url = "internal-db://42"
    if edp.compare_job_type(type, 'Pig'):
        binary.name = "script.pig"
    elif edp.compare_job_type(type, 'MapReduce', 'Java'):
        binary.name = "main.jar"
    else:
        binary.name = "script.q"
    return binary
Author: qinweiwei, Project: sahara, Lines: 11, Source: test_job_manager.py


Example 3: get_possible_job_config

def get_possible_job_config(job_type):
    if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/vanilla/v2_6_0/resources/hive-default.xml')}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_MAPREDUCE_STREAMING):
        return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/vanilla/v2_6_0/resources/mapred-default.xml')}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/vanilla/v2_6_0/resources/mapred-default.xml')}
    return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
Author: AlexanderYAPPO, Project: sahara, Lines: 13, Source: edp_engine.py


Example 4: get_possible_job_config

def get_possible_job_config(job_type):
    if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/cdh/v5_4_0/resources/hive-site.xml')}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_MAPREDUCE_STREAMING):
        return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/cdh/v5_4_0/resources/mapred-site.xml')}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/cdh/v5_4_0/resources/mapred-site.xml')}
    return edp_engine.OozieJobEngine.get_possible_job_config(job_type)
Author: metasensus, Project: sahara, Lines: 13, Source: edp_engine.py


Example 5: get_possible_job_config

def get_possible_job_config(job_type):
    if not edp.compare_job_type(job_type, *edp.JOB_TYPES_ALL):
        return None

    if edp.compare_job_type(job_type, edp.JOB_TYPE_JAVA):
        return {'job_config': {'configs': [], 'args': []}}

    if edp.compare_job_type(job_type, edp.JOB_TYPE_SHELL):
        return {'job_config': {'configs': [], 'params': {}, 'args': []}}

    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
            cfg += get_possible_mapreduce_configs()
    elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')

    config = {'configs': cfg}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
        config.update({'params': {}})
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        config.update({'args': []})
    return {'job_config': config}
Author: AllenFromMinneapolis, Project: sahara, Lines: 28, Source: workflow_factory.py


Example 6: get_possible_job_config

def get_possible_job_config(job_type):
    if edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        return {'job_config': ch_helper.get_possible_hive_config_from(
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')}
    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE,
                            edp.JOB_TYPE_MAPREDUCE_STREAMING):
        return {'job_config': ch_helper.get_possible_mapreduce_config_from(
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        return {'job_config': ch_helper.get_possible_pig_config_from(
                'plugins/hdp/versions/version_1_3_2/resources/'
                'ambari-config-resource.json')}
    return edp_engine.EdpOozieEngine.get_possible_job_config(job_type)
Author: AlexanderYAPPO, Project: sahara, Lines: 16, Source: edp_engine.py


Example 7: get_data_sources

def get_data_sources(job_execution, job):
    if edp.compare_job_type(job.type, edp.JOB_TYPE_JAVA):
        return None, None

    ctx = context.ctx()
    input_source = conductor.data_source_get(ctx, job_execution.input_id)
    output_source = conductor.data_source_get(ctx, job_execution.output_id)
    return input_source, output_source
Author: JohannaMW, Project: sahara, Lines: 8, Source: job_utils.py


Example 8: _create_job_exec

def _create_job_exec(job_id, type, configs=None):
    j_exec = mock.Mock()
    j_exec.job_id = job_id
    j_exec.job_configs = configs
    if edp.compare_job_type(type, edp.JOB_TYPE_JAVA):
        j_exec.job_configs['configs']['edp.java.main_class'] = _java_main_class
        j_exec.job_configs['configs']['edp.java.java_opts'] = _java_opts
    return j_exec
Author: B-Rich, Project: sahara, Lines: 8, Source: test_job_manager.py


Example 9: get_possible_job_config

def get_possible_job_config(job_type):
    if not edp.compare_job_type(job_type, *edp.JOB_TYPES_ALL):
        return None

    if edp.compare_job_type(job_type, edp.JOB_TYPE_JAVA):
        return {'job_config': {'configs': [], 'args': []}}

    if edp.compare_job_type(job_type, edp.JOB_TYPE_SHELL):
        return {'job_config': {'configs': [], 'params': [], 'args': []}}

    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
            cfg += xmlutils.load_hadoop_xml_defaults(
                'service/edp/resources/mapred-job-config.xml')
    elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        # TODO(nmakhotkin): Here we need return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')

    # TODO(tmckay): args should be a list when bug #269968
    # is fixed on the UI side
    config = {'configs': cfg, "args": {}}
    if not edp.compare_job_type(edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_JAVA):
        config.update({'params': {}})
    return {'job_config': config}
Author: alavender, Project: sahara, Lines: 29, Source: workflow_factory.py


Example 10: run_job

def run_job(job_execution):
    ctx = context.ctx()

    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    if cluster.status != 'Active':
        return job_execution

    job = conductor.job_get(ctx, job_execution.job_id)
    if not edp.compare_job_type(job.type, edp.JOB_TYPE_JAVA):
        input_source = conductor.data_source_get(ctx,  job_execution.input_id)
        output_source = conductor.data_source_get(ctx, job_execution.output_id)
    else:
        input_source = None
        output_source = None
    #TODO(nprivalova): should be removed after all features implemented
    validate(input_source, output_source, job)

    for data_source in [input_source, output_source]:
        if data_source and data_source.type == 'hdfs':
            h.configure_cluster_for_hdfs(cluster, data_source)

    hdfs_user = _get_hdfs_user(cluster)
    oozie_server = _get_oozie_server(cluster)
    wf_dir = create_workflow_dir(oozie_server, job, hdfs_user)
    upload_job_files(oozie_server, wf_dir, job, hdfs_user)

    creator = workflow_factory.get_creator(job)

    wf_xml = creator.get_workflow_xml(cluster, job_execution,
                                      input_source, output_source)

    path_to_workflow = upload_workflow_file(oozie_server,
                                            wf_dir, wf_xml, hdfs_user)

    rm_path = _get_resource_manager_path(cluster)
    nn_path = cluster['info']['HDFS']['NameNode']

    client = o.OozieClient(cluster['info']['JobFlow']['Oozie'] + "/oozie/",
                           _get_oozie_server(cluster))
    job_parameters = {"jobTracker": rm_path,
                      "nameNode": nn_path,
                      "user.name": hdfs_user,
                      "oozie.wf.application.path":
                      "%s%s" % (nn_path, path_to_workflow),
                      "oozie.use.system.libpath": "true"}

    oozie_job_id = client.add_job(x.create_hadoop_xml(job_parameters),
                                  job_execution)
    job_execution = conductor.job_execution_update(ctx, job_execution,
                                                   {'oozie_job_id':
                                                    oozie_job_id,
                                                    'start_time':
                                                    datetime.datetime.now()})
    client.run_job(job_execution, oozie_job_id)

    return job_execution
Author: esala116, Project: sahara, Lines: 56, Source: job_manager.py


Example 11: test_compare_job_type

def test_compare_job_type(self):
    self.assertTrue(edp.compare_job_type(
        edp.JOB_TYPE_JAVA,
        edp.JOB_TYPE_JAVA,
        edp.JOB_TYPE_MAPREDUCE,
        strict=True))
    self.assertFalse(edp.compare_job_type(
        edp.JOB_TYPE_MAPREDUCE_STREAMING,
        edp.JOB_TYPE_JAVA,
        edp.JOB_TYPE_MAPREDUCE,
        strict=True))
    self.assertTrue(edp.compare_job_type(
        edp.JOB_TYPE_MAPREDUCE_STREAMING,
        edp.JOB_TYPE_JAVA,
        edp.JOB_TYPE_MAPREDUCE))
    self.assertFalse(edp.compare_job_type(
        edp.JOB_TYPE_MAPREDUCE,
        edp.JOB_TYPE_JAVA,
        edp.JOB_TYPE_MAPREDUCE_STREAMING))
Author: AllenFromMinneapolis, Project: sahara, Lines: 19, Source: test_edp.py
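
Read together, the four assertions above suggest that strict=True requires an exact type match, while the default non-strict comparison lets the more specific JOB_TYPE_MAPREDUCE_STREAMING match its parent JOB_TYPE_MAPREDUCE but not the other way round. The short sketch below restates that inferred behaviour; it assumes the same semantics as the test and is not authoritative.

from sahara.utils import edp

# Semantics inferred from the test above; a sketch, not API documentation.
# Non-strict (default): the streaming sub-type matches plain MapReduce...
assert edp.compare_job_type(edp.JOB_TYPE_MAPREDUCE_STREAMING,
                            edp.JOB_TYPE_MAPREDUCE)
# ...but a parent type does not match the more specific sub-type.
assert not edp.compare_job_type(edp.JOB_TYPE_MAPREDUCE,
                                edp.JOB_TYPE_MAPREDUCE_STREAMING)
# strict=True requires an exact match.
assert not edp.compare_job_type(edp.JOB_TYPE_MAPREDUCE_STREAMING,
                                edp.JOB_TYPE_MAPREDUCE, strict=True)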


Example 12: _create_job

def _create_job(id, job_binary, type):
    job = mock.Mock()
    job.id = id
    job.type = type
    job.name = 'special_name'
    if edp.compare_job_type(type, 'Pig', 'Hive'):
        job.mains = [job_binary]
        job.libs = None
    else:
        job.libs = [job_binary]
        job.mains = None
    return job
Author: qinweiwei, Project: sahara, Lines: 12, Source: test_job_manager.py


Example 13: _create_job

def _create_job(id, job_binary, type):
    job = mock.Mock()
    job.id = id
    job.type = type
    job.name = 'special_name'
    if edp.compare_job_type(type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
        job.mains = [job_binary]
        job.libs = None
    else:
        job.libs = [job_binary]
        job.mains = None
    return job
Author: B-Rich, Project: sahara, Lines: 12, Source: test_job_manager.py


Example 14: _create_job_exec

def _create_job_exec(job_id, type, configs=None, info=None):
    j_exec = mock.Mock()
    j_exec.id = six.text_type(uuid.uuid4())
    j_exec.job_id = job_id
    j_exec.job_configs = configs
    j_exec.info = info
    if not j_exec.job_configs:
        j_exec.job_configs = {}
    if edp.compare_job_type(type, edp.JOB_TYPE_JAVA):
        j_exec.job_configs['configs']['edp.java.main_class'] = _java_main_class
        j_exec.job_configs['configs']['edp.java.java_opts'] = _java_opts
    return j_exec
Author: gongwayne, Project: Openstack, Lines: 12, Source: edp_test_utils.py


Example 15: get_possible_job_config

def get_possible_job_config(job_type):
    if not edp.compare_job_type(job_type, *get_possible_job_types()):
        return None

    if edp.compare_job_type(job_type, 'Java'):
        return {'job_config': {'configs': [], 'args': []}}

    if edp.compare_job_type(job_type, 'MapReduce', 'Pig'):
        #TODO(nmakhotkin) Here we should return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/mapred-default.xml')
        if edp.compare_job_type(job_type, 'MapReduce'):
            cfg += xmlutils.load_hadoop_xml_defaults(
                'service/edp/resources/mapred-job-config.xml')
    elif edp.compare_job_type(job_type, 'Hive'):
        #TODO(nmakhotkin) Here we should return config based on specific plugin
        cfg = xmlutils.load_hadoop_xml_defaults(
            'plugins/vanilla/v1_2_1/resources/hive-default.xml')

    # TODO(tmckay): args should be a list when bug #269968
    # is fixed on the UI side
    config = {'configs': cfg, "args": {}}
    if not edp.compare_job_type('MapReduce', 'Java'):
        config.update({'params': {}})
    return {'job_config': config}
Author: qinweiwei, Project: sahara, Lines: 25, Source: workflow_factory.py


Example 16: _create_job_exec

def _create_job_exec(job_id, type, configs=None, info=None):
    j_exec = mock.Mock()
    j_exec.id = uuidutils.generate_uuid()
    j_exec.job_id = job_id
    j_exec.job_configs = configs
    j_exec.info = info
    j_exec.input_id = 4
    j_exec.output_id = 5
    j_exec.engine_job_id = None
    j_exec.data_source_urls = {}
    if not j_exec.job_configs:
        j_exec.job_configs = {}
    if edp.compare_job_type(type, edp.JOB_TYPE_JAVA):
        j_exec.job_configs['configs']['edp.java.main_class'] = _java_main_class
        j_exec.job_configs['configs']['edp.java.java_opts'] = _java_opts
    return j_exec
Author: openstack, Project: sahara, Lines: 16, Source: edp_test_utils.py


Example 17: get_data_sources

def get_data_sources(job_execution, job, data_source_urls):
    if edp.compare_job_type(job.type, edp.JOB_TYPE_JAVA, edp.JOB_TYPE_SPARK):
        return None, None

    ctx = context.ctx()

    input_source = conductor.data_source_get(ctx, job_execution.input_id)
    if input_source and input_source.id not in data_source_urls:
        data_source_urls[input_source.id] = _construct_data_source_url(
            input_source.url, job_execution.id)

    output_source = conductor.data_source_get(ctx, job_execution.output_id)
    if output_source and output_source.id not in data_source_urls:
        data_source_urls[output_source.id] = _construct_data_source_url(
            output_source.url, job_execution.id)

    return input_source, output_source
Author: AlexanderYAPPO, Project: sahara, Lines: 17, Source: job_utils.py


Example 18: validate

def validate(input_data, output_data, job):
    if not edp.compare_job_type(job.type, 'Pig', 'MapReduce',
                                'Hive', 'Java'):
        raise RuntimeError
Author: qinweiwei, Project: sahara, Lines: 4, Source: job_manager.py


Example 19: run_job

def run_job(job_execution_id):
    ctx = context.ctx()

    job_execution = conductor.job_execution_get(ctx,
                                                job_execution_id)

    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    if cluster.status != 'Active':
        return

    if CONF.use_namespaces and not CONF.use_floating_ips:
        plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
        oozie = plugin.get_oozie_server(cluster)

        info = oozie.remote().get_neutron_info()
        extra = job_execution.extra.copy()
        extra['neutron'] = info

        job_execution = conductor.job_execution_update(ctx,
                                                       job_execution_id,
                                                       {'extra': extra})

    job = conductor.job_get(ctx, job_execution.job_id)
    if not edp.compare_job_type(job.type, edp.JOB_TYPE_JAVA):
        input_source = conductor.data_source_get(ctx,  job_execution.input_id)
        output_source = conductor.data_source_get(ctx, job_execution.output_id)
    else:
        input_source = None
        output_source = None

    for data_source in [input_source, output_source]:
        if data_source and data_source.type == 'hdfs':
            h.configure_cluster_for_hdfs(cluster, data_source)

    hdfs_user = _get_hdfs_user(cluster)
    oozie_server = _get_oozie_server(cluster)
    wf_dir = create_workflow_dir(oozie_server, job, hdfs_user)
    upload_job_files(oozie_server, wf_dir, job, hdfs_user)

    creator = workflow_factory.get_creator(job)

    wf_xml = creator.get_workflow_xml(cluster, job_execution,
                                      input_source, output_source)

    path_to_workflow = upload_workflow_file(oozie_server,
                                            wf_dir, wf_xml, hdfs_user)

    plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name)
    rm_path = plugin.get_resource_manager_uri(cluster)
    nn_path = plugin.get_name_node_uri(cluster)

    client = _create_oozie_client(cluster)
    job_parameters = {"jobTracker": rm_path,
                      "nameNode": nn_path,
                      "user.name": hdfs_user,
                      "oozie.wf.application.path":
                      "%s%s" % (nn_path, path_to_workflow),
                      "oozie.use.system.libpath": "true"}

    oozie_job_id = client.add_job(x.create_hadoop_xml(job_parameters),
                                  job_execution)
    job_execution = conductor.job_execution_update(ctx, job_execution,
                                                   {'oozie_job_id':
                                                    oozie_job_id,
                                                    'start_time':
                                                    datetime.datetime.now()})
    client.run_job(job_execution, oozie_job_id)
Author: hongbin, Project: sahara, Lines: 67, Source: job_manager.py


Example 20: validate

def validate(input_data, output_data, job):
    if not edp.compare_job_type(job.type, edp.JOB_TYPE_PIG,
                                edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_HIVE,
                                edp.JOB_TYPE_JAVA):
        raise RuntimeError
Author: esala116, Project: sahara, Lines: 5, Source: job_manager.py



Note: the sahara.utils.edp.compare_job_type examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.

