Python client.Helper class code examples


This article collects typical usage examples of the Python class soma_workflow.client.Helper. If you have been wondering what the Helper class is for, how it is used, or what real code that uses it looks like, the selected examples below should help.



A total of 20 code examples of the Helper class are presented below, sorted by popularity by default.
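Taken together, the examples follow one recurring pattern: build a Workflow out of Job objects, then use Helper either to serialize it for the soma_workflow GUI or to drive a submitted workflow (transfer input files, wait for completion, transfer output files back, list failed jobs). The sketch below condenses that pattern. It is a minimal illustration assembled from the calls seen in the examples; the echo command, job and workflow names, the /tmp/example.wf path, and the no-argument WorkflowController (local mode) are placeholder assumptions, not code from any of the quoted projects.

from soma_workflow.client import Job, Workflow, WorkflowController, Helper

# Build a trivial workflow (placeholder command and names).
jobs = [Job(command=['echo', 'hello'], name='job_0')]
workflow = Workflow(jobs=jobs, dependencies=[])

# Option 1: save the workflow to a file that soma_workflow_gui can open later.
Helper.serialize('/tmp/example.wf', workflow)
restored = Helper.unserialize('/tmp/example.wf')

# Option 2: submit it and let Helper handle transfers and waiting.
controller = WorkflowController()  # local resource; pass a resource id/login for a remote one
wf_id = controller.submit_workflow(workflow=workflow, name='example')
Helper.transfer_input_files(wf_id, controller)   # only meaningful when FileTransfer paths are used
Helper.wait_workflow(wf_id, controller)
Helper.transfer_output_files(wf_id, controller)
failed = Helper.list_failed_jobs(wf_id, controller, include_aborted_jobs=True)
print('failed jobs:', failed)

Examples 1 and 5 through 8 use the serialization side of this pattern; examples 2 through 4 and 9 through 17 use the transfer/wait/list_failed_jobs side against a WorkflowController.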

Example 1: create_workflow

def create_workflow(inp, out, names = None, verbose=False):
    if not osp.isfile(inp):
        raise Exception('File not found %s'%inp)

    commands = [e.rstrip('\n').split(' ') for e in open(inp).readlines()]
    if verbose:
        print commands
    names = ['job_%s'%i for i in xrange(len(commands))] if names is None else names
    jobs = [Job(command=cmd, name=name) for cmd, name in zip(commands, names)]
    workflow = Workflow(jobs=jobs, dependencies=[])
    Helper.serialize(out, workflow)
Developer: xgrg, Project: alfa, Lines: 11, Source: create_workflow.py


Example 2: _workflow_stop

    def _workflow_stop(self):
        self._workflow_controller.stop_workflow(self._workflow_id)

        # transfer back files, if any
        Helper.transfer_output_files(self._workflow_id,
                                     self._workflow_controller)

        interrupted_step_ids = self._get_interrupted_step_ids()
        for subject_id, step_ids in six.iteritems(interrupted_step_ids):
            if step_ids:
                analysis = self._study.analyses[subject_id]
                analysis.clear_results(step_ids)
Developer: denisri, Project: morphologist, Lines: 12, Source: runner.py


Example 3: run

    def run(self, subject_ids=ALL_SUBJECTS):
        self._setup_soma_workflow_controller()
        self._init_internal_parameters()
        if self._workflow_controller.scheduler_config:
            # in local mode only
            cpus_number = self._cpus_number()
            self._workflow_controller.scheduler_config.set_proc_nb(cpus_number)
        if subject_ids == ALL_SUBJECTS:
            subject_ids = self._study.subjects
        # setup shared path in study_config
        study_config = self._study
        swf_resource = study_config.somaworkflow_computing_resource
        if not self._workflow_controller.scheduler_config:
            # remote config only
            # FIXME: must check if brainvisa shared dir is known in translation
            # config in soma-workflow
            if not study_config.somaworkflow_computing_resources_config.trait(
                    swf_resource):
                setattr(study_config.somaworkflow_computing_resources_config,
                        swf_resource, {})
            resource_conf = getattr(
                study_config.somaworkflow_computing_resources_config,
                swf_resource)
            path_translations = resource_conf.path_translations
            setattr(path_translations, study_config.shared_directory,
                    ['brainvisa', 'de25977f-abf5-9f1c-4384-2585338cd7af'])

        #self._check_input_files(subject_ids)
        workflow = self._create_workflow(subject_ids)
        jobs = [j for j in workflow.jobs if isinstance(j, Job)]
        if self._workflow_id is not None:
            self._workflow_controller.delete_workflow(self._workflow_id)
        if len(jobs) == 0:
            # empty workflow: nothing to do
            self._workflow_id = None
            return
        self._workflow_id = self._workflow_controller.submit_workflow(
            workflow, name=workflow.name)
        self._build_jobid_to_step()

        # run transfers, if any
        Helper.transfer_input_files(self._workflow_id,
                                    self._workflow_controller)
        # the status does not change immediately after run,
        # so we wait for the status WORKFLOW_IN_PROGRESS or timeout
        status = self._workflow_controller.workflow_status(self._workflow_id)
        try_count = 8
        while ((status != sw.constants.WORKFLOW_IN_PROGRESS) and \
                                                (try_count > 0)):
            time.sleep(0.25)
            status = self._workflow_controller.workflow_status(
                self._workflow_id)
            try_count -= 1
Developer: denisri, Project: morphologist, Lines: 53, Source: runner.py


Example 4: wait

 def wait(self, subject_id=None, step_id=None):
     if subject_id is None and step_id is None:
         Helper.wait_workflow(
             self._workflow_id, self._workflow_controller)
     elif subject_id is not None:
         if step_id is None:
             raise NotImplementedError
         else:
             self._step_wait(subject_id, step_id)
     else:
         raise NotImplementedError
     # transfer back files, if any
     Helper.transfer_output_files(self._workflow_id,
                                  self._workflow_controller)
Developer: denisri, Project: morphologist, Lines: 14, Source: runner.py


Example 5: export_to_gui

 def export_to_gui(self, soma_workflow_dirpath, **Xy):
     """
     Example
     -------
     see the directory of "examples/run_somaworkflow_gui.py" in epac
     """
     try:
         from soma_workflow.client import Job, Workflow
         from soma_workflow.client import Helper, FileTransfer
     except ImportError:
         errmsg = (
             "No soma-workflow is found. " "Please verify your soma-worklow" "on your computer (e.g. PYTHONPATH) \n"
         )
         sys.stderr.write(errmsg)
         sys.stdout.write(errmsg)
         raise NoSomaWFError
     if not os.path.exists(soma_workflow_dirpath):
         os.makedirs(soma_workflow_dirpath)
     tmp_work_dir_path = soma_workflow_dirpath
     cur_work_dir = os.getcwd()
     os.chdir(tmp_work_dir_path)
     ft_working_directory = FileTransfer(is_input=True, client_path=tmp_work_dir_path, name="working directory")
     ## Save the database and tree to working directory
     ## ===============================================
     #        np.savez(os.path.join(tmp_work_dir_path,
     #                 SomaWorkflowEngine.dataset_relative_path), **Xy)
     db_size = estimate_dataset_size(**Xy)
     db_size = int(db_size / (1024 * 1024))  # convert it into mega byte
     save_dataset(SomaWorkflowEngine.dataset_relative_path, **Xy)
     store = StoreFs(dirpath=os.path.join(tmp_work_dir_path, SomaWorkflowEngine.tree_root_relative_path))
     self.tree_root.save_tree(store=store)
     ## Subtree job allocation on disk
     ## ==============================
     node_input = NodesInput(self.tree_root.get_key())
     split_node_input = SplitNodesInput(self.tree_root, num_processes=self.num_processes)
     nodesinput_list = split_node_input.split(node_input)
     keysfile_list = self._save_job_list(tmp_work_dir_path, nodesinput_list)
     ## Build soma-workflow
     ## ===================
     jobs = self._create_jobs(keysfile_list, is_run_local=False, ft_working_directory=ft_working_directory)
     soma_workflow = Workflow(jobs=jobs)
     if soma_workflow_dirpath and soma_workflow_dirpath != "":
         out_soma_workflow_file = os.path.join(
             soma_workflow_dirpath, SomaWorkflowEngine.open_me_by_soma_workflow_gui
         )
         Helper.serialize(out_soma_workflow_file, soma_workflow)
     os.chdir(cur_work_dir)
Developer: neurospin, Project: pylearn-epac, Lines: 47, Source: engine.py


Example 6: test_serialization

    def test_serialization(self):
        simple_wf_examples = workflow_local.WorkflowExamplesLocal()
        tr_wf_examples = workflow_transfer.WorkflowExamplesTransfer()
        srp_wf_examples = workflow_shared.WorkflowExamplesShared()
        self.temporaries += [simple_wf_examples.output_dir,
                             tr_wf_examples.output_dir,
                             srp_wf_examples.output_dir]
        workflows = []
        workflows.append(("multiple", simple_wf_examples.example_multiple()))
        workflows.append(("special_command",
                          simple_wf_examples.example_special_command()))

        workflows.append(("mutiple_transfer",
                          tr_wf_examples.example_multiple()))
        workflows.append(("special_command_transfer",
                          tr_wf_examples.example_special_command()))
        workflows.append(("special_transfer",
                          tr_wf_examples.example_special_transfer()))

        workflows.append(("mutiple_srp", srp_wf_examples.example_multiple()))
        workflows.append(("special_command_srp",
                          srp_wf_examples.example_special_command()))

        for workflow_name, workflow in workflows:
            print("Testing", workflow_name)

            file_path = tempfile.mkstemp(prefix="json_",
                                         suffix=workflow_name + ".wf")
            os.close(file_path[0])
            file_path = file_path[1]
            Helper.serialize(file_path, workflow)

            new_workflow = Helper.unserialize(file_path)

            self.assertTrue(new_workflow.attributs_equal(workflow),
                            "Serialization failed for workflow %s" %
                            workflow_name)

            try:
                os.remove(file_path)
            except IOError:
                pass
Developer: neurospin, Project: soma-workflow, Lines: 42, Source: test_serialization.py


Example 7: test_serialization

    def test_serialization(self):
        directory = "/tmp/"

        simple_wf_examples = workflow_local.WorkflowExamplesLocal()
        tr_wf_examples = workflow_transfer.WorkflowExamplesTransfer()
        srp_wf_examples = workflow_shared.WorkflowExamplesShared()
        workflows = []
        workflows.append(("multiple", simple_wf_examples.example_multiple()))
        workflows.append(("special_command",
                          simple_wf_examples.example_special_command()))

        workflows.append(("mutiple_transfer",
                          tr_wf_examples.example_multiple()))
        workflows.append(("special_command_transfer",
                          tr_wf_examples.example_special_command()))
        workflows.append(("special_transfer",
                          tr_wf_examples.example_special_transfer()))

        workflows.append(("mutiple_srp", srp_wf_examples.example_multiple()))
        workflows.append(("special_command_srp",
                          srp_wf_examples.example_special_command()))

        for workflow_name, workflow in workflows:
            print "Testing", workflow_name

            file_path = os.path.join(directory,
                                     "json_" + workflow_name + ".wf")
            Helper.serialize(file_path, workflow)

            new_workflow = Helper.unserialize(file_path)

            self.assertTrue(new_workflow.attributs_equal(workflow),
                            "Serialization failed for workflow %s" %
                            workflow_name)

            try:
                os.remove(file_path)
            except IOError:
                pass
Developer: imclab, Project: soma-workflow, Lines: 39, Source: test_serialization.py


Example 8: save_wf

def save_wf(wf, output_file, mode="soma-workflow"):
    """Save the workflow in a file.

    Support simple JSON commands list (cmd-list) or soma-workflow.

    Parameters:
    ----------
    wf : tuple (cmd-dict, dependancies),
        Workflow to save.
    output_file : str,
        filename for the workflow.
    mode : str in ["soma-workflow", "cmd_list"],
           optional (default="soma-workflow")
        format to save the workflow.
    """
    cmd = wf[0]
    dep_orig = wf[1]
    if mode == "soma-workflow":
        from soma_workflow.client import Job, Workflow, Helper
        for k, v in cmd.iteritems():
            cmd[k] = Job(command=v, name=k)
        dep = [((cmd[a], cmd[b])) for a, b in dep_orig]
        jobs = np.asarray(cmd.values())[np.argsort(cmd.keys())]
        workflow = Workflow(jobs=jobs.tolist(),
                            dependencies=dep)
        Helper.serialize(output_file, workflow)
        return workflow
    elif mode == "cmd-list":
        import json
        for k, v in cmd.iteritems():
            cmd[k] = " ".join(v)
        with open(output_file, 'w') as fd:
            json.dump(dict(cmd=cmd, dep=dep_orig), fd, indent=True)
        return cmd
    else:
        raise TypeError("Invalid workflow mode \'{}\'".format(mode))
Developer: BenoitDamota, Project: mempamal, Lines: 36, Source: workflow.py


Example 9: _get_workflow_status

 def _get_workflow_status(self):
     sw_status \
         = self._workflow_controller.workflow_status(self._workflow_id)
     if (sw_status in [sw.constants.WORKFLOW_IN_PROGRESS,
                       sw.constants.WORKFLOW_NOT_STARTED]):
         status = Runner.RUNNING
     else:
         has_failed = (len(Helper.list_failed_jobs(
             self._workflow_id, self._workflow_controller,
             include_aborted_jobs=True,
             include_user_killed_jobs=True)) != 0)
         if has_failed:
             status = Runner.FAILED
         else:
             status = Runner.SUCCESS
     return status
Developer: denisri, Project: morphologist, Lines: 16, Source: runner.py


Example 10: test_result

    def test_result(self):
        workflow = self.wf_examples.example_wrong_native_spec_pbs()
        self.wf_id = self.wf_ctrl.submit_workflow(
            workflow=workflow,
            name=self.__class__.__name__)
        # Transfer input files if file transfer
        if self.path_management == self.FILE_TRANSFER or \
                self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_input_files(self.wf_id,
                                        self.wf_ctrl)
        # Wait for the workflow to finish
        Helper.wait_workflow(self.wf_id, self.wf_ctrl)
        # Transfer output files if file transfer
        if self.path_management == self.FILE_TRANSFER or \
                self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_output_files(self.wf_id,
                                         self.wf_ctrl)

        status = self.wf_ctrl.workflow_status(self.wf_id)
        self.assertTrue(status == constants.WORKFLOW_DONE,
                        "workflow status : %s. Expected : %s" %
                        (status, constants.WORKFLOW_DONE))
        nb_failed_jobs = len(Helper.list_failed_jobs(
            self.wf_id,
            self.wf_ctrl))
        self.assertTrue(nb_failed_jobs == 0,
                        "nb failed jobs : %i. Expected : %i" %
                        (nb_failed_jobs, 0))
        nb_failed_aborted_jobs = len(Helper.list_failed_jobs(
            self.wf_id,
            self.wf_ctrl,
            include_aborted_jobs=True))
        if self.path_management == self.LOCAL_PATH:
            self.assertTrue(nb_failed_aborted_jobs == 0,
                            "nb failed jobs including aborted : %i. "
                            "Expected : %i" % (nb_failed_aborted_jobs, 0))
        else:
            self.assertTrue(nb_failed_aborted_jobs == 1,
                            "nb failed jobs including aborted : %i. "
                            "Expected : %i" % (nb_failed_aborted_jobs, 1))
Developer: denisri, Project: soma-workflow, Lines: 40, Source: test_wrong_native_spec.py


Example 11: run

    def run(self, **Xy):
        """Run soma-workflow without gui

        Example
        -------

        >>> from sklearn import datasets
        >>> from epac.map_reduce.engine import SomaWorkflowEngine
        >>> from epac.tests.wfexamples2test import WFExample2

        >>> ## Build dataset
        >>> ## =============
        >>> X, y = datasets.make_classification(n_samples=10,
        ...                                     n_features=20,
        ...                                     n_informative=5,
        ...                                     random_state=1)
        >>> Xy = {'X':X, 'y':y}

        >>> ## Build epac tree
        >>> ## ===============
        >>> tree_root_node = WFExample2().get_workflow()

        >>> ## Build SomaWorkflowEngine and run function for each node
        >>> ## =======================================================
        >>> sfw_engine = SomaWorkflowEngine(tree_root=tree_root_node,
        ...                                 function_name="transform",
        ...                                 num_processes=3,
                                            remove_finished_wf=False)
        >>> tree_root_node = sfw_engine.run(**Xy)
        light mode
        >>> ## Run reduce process
        >>> ## ==================
        >>> tree_root_node.reduce()
        ResultSet(
        [{'key': SelectKBest/SVC(C=1), 'y/test/score_f1': [ 0.6  0.6], 'y/test/score_recall_mean/pval': [ 0.5], 'y/test/score_recall/pval': [ 0.   0.5], 'y/test/score_accuracy/pval': [ 0.], 'y/test/score_f1/pval': [ 0.   0.5], 'y/test/score_precision/pval': [ 0.5  0. ], 'y/test/score_precision': [ 0.6  0.6], 'y/test/score_recall': [ 0.6  0.6], 'y/test/score_accuracy': 0.6, 'y/test/score_recall_mean': 0.6},
         {'key': SelectKBest/SVC(C=3), 'y/test/score_f1': [ 0.6  0.6], 'y/test/score_recall_mean/pval': [ 0.5], 'y/test/score_recall/pval': [ 0.   0.5], 'y/test/score_accuracy/pval': [ 0.], 'y/test/score_f1/pval': [ 0.   0.5], 'y/test/score_precision/pval': [ 0.5  0. ], 'y/test/score_precision': [ 0.6  0.6], 'y/test/score_recall': [ 0.6  0.6], 'y/test/score_accuracy': 0.6, 'y/test/score_recall_mean': 0.6}])

        """
        try:
            from soma_workflow.client import Job, Workflow
            from soma_workflow.client import Helper, FileTransfer
            from soma_workflow.client import WorkflowController
        except ImportError:
            errmsg = (
                "No soma-workflow is found. " "Please verify your soma-worklow" "on your computer (e.g. PYTHONPATH) \n"
            )
            sys.stderr.write(errmsg)
            sys.stdout.write(errmsg)
            raise NoSomaWFError
        tmp_work_dir_path = tempfile.mkdtemp()
        cur_work_dir = os.getcwd()
        os.chdir(tmp_work_dir_path)
        is_run_local = False
        if not self.resource_id or self.resource_id == "":
            self.resource_id = socket.gethostname()
            is_run_local = True
        # print "is_run_local=", is_run_local
        if not is_run_local:
            ft_working_directory = FileTransfer(is_input=True, client_path=tmp_work_dir_path, name="working directory")
        else:
            ft_working_directory = tmp_work_dir_path

        ## Save the database and tree to working directory
        ## ===============================================
        # np.savez(os.path.join(tmp_work_dir_path,
        # SomaWorkflowEngine.dataset_relative_path), **Xy)
        save_dataset(SomaWorkflowEngine.dataset_relative_path, **Xy)
        store = StoreFs(dirpath=os.path.join(tmp_work_dir_path, SomaWorkflowEngine.tree_root_relative_path))
        self.tree_root.save_tree(store=store)

        ## Subtree job allocation on disk
        ## ==============================
        node_input = NodesInput(self.tree_root.get_key())
        split_node_input = SplitNodesInput(self.tree_root, num_processes=self.num_processes)
        nodesinput_list = split_node_input.split(node_input)
        keysfile_list = save_job_list(tmp_work_dir_path, nodesinput_list)

        ## Build soma-workflow
        ## ===================
        jobs = self._create_jobs(keysfile_list, is_run_local, ft_working_directory)
        soma_workflow = Workflow(jobs=jobs)

        controller = WorkflowController(self.resource_id, self.login, self.pw)
        ## run soma-workflow
        ## =================
        wf_id = controller.submit_workflow(workflow=soma_workflow, name="epac workflow", queue=self.queue)
        Helper.transfer_input_files(wf_id, controller)
        Helper.wait_workflow(wf_id, controller)
        Helper.transfer_output_files(wf_id, controller)

        self.engine_info = self.get_engine_info(controller, wf_id)

        if self.remove_finished_wf:
            controller.delete_workflow(wf_id)
        ## read result tree
        ## ================
        self.tree_root = store.load()
        os.chdir(cur_work_dir)
        if os.path.isdir(tmp_work_dir_path) and self.remove_local_tree:
            shutil.rmtree(tmp_work_dir_path)
#......... part of the code omitted here .........
Developer: neurospin, Project: pylearn-epac, Lines: 101, Source: engine.py


Example 12: test_result

    def test_result(self):
        workflow = self.wf_examples.example_special_transfer()
        self.wf_id = self.wf_ctrl.submit_workflow(workflow=workflow, name=self.__class__.__name__)

        # Transfer input files
        Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
        # Wait for the worklow to finish
        Helper.wait_workflow(self.wf_id, self.wf_ctrl)
        status = self.wf_ctrl.workflow_status(self.wf_id)
        # Transfer output files
        Helper.transfer_output_files(self.wf_id, self.wf_ctrl)

        status = self.wf_ctrl.workflow_status(self.wf_id)
        self.assertTrue(
            status == constants.WORKFLOW_DONE, "workflow status : %s. Expected : %s" % (status, constants.WORKFLOW_DONE)
        )
        nb_failed_jobs = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl))
        self.assertTrue(nb_failed_jobs == 0, "nb failed jobs : %i. Expected : %i" % (nb_failed_jobs, 0))
        nb_failed_aborted_jobs = len(Helper.list_failed_jobs(self.wf_id, self.wf_ctrl, include_aborted_jobs=True))
        self.assertTrue(
            nb_failed_aborted_jobs == 0,
            "nb failed jobs including aborted : %i. Expected : %i" % (nb_failed_aborted_jobs, 0),
        )

        (jobs_info, transfers_info, workflow_status, workflow_queue, tmp_files) = self.wf_ctrl.workflow_elements_status(
            self.wf_id
        )

        for (job_id, tmp_status, queue, exit_info, dates) in jobs_info:
            job_list = self.wf_ctrl.jobs([job_id])
            job_name, job_command, job_submission_date = job_list[job_id]

            self.tested_job = job_id

            if exit_info[0] == constants.FINISHED_REGULARLY:
                # To check job standard out and standard err
                job_stdout_file = tempfile.NamedTemporaryFile(
                    prefix="job_soma_out_log_", suffix=repr(job_id), delete=False
                )
                job_stdout_file = job_stdout_file.name
                job_stderr_file = tempfile.NamedTemporaryFile(
                    prefix="job_soma_outerr_log_", suffix=repr(job_id), delete=False
                )
                job_stderr_file = job_stderr_file.name

                try:
                    self.wf_ctrl.retrieve_job_stdouterr(job_id, job_stdout_file, job_stderr_file)
                    if job_name == "dir_contents":
                        # Test job standard out
                        with open(job_stdout_file, "r+") as f:
                            dir_contents = f.readlines()
                        dir_path_in = self.wf_examples.lo_in_dir
                        full_path_list = []
                        for element in os.listdir(dir_path_in):
                            full_path_list.append(os.path.join(dir_path_in, element))
                        dir_contents_model = list_contents(full_path_list, [])
                        self.assertTrue(sorted(dir_contents) == sorted(dir_contents_model))
                        # Test no stderr
                        self.assertTrue(
                            os.stat(job_stderr_file).st_size == 0, "job stderr not empty : cf %s" % job_stderr_file
                        )

                    if job_name == "multi file format test":
                        # Test job standard out
                        isSame, msg = identical_files(job_stdout_file, self.wf_examples.lo_mff_stdout)
                        self.assertTrue(isSame, msg)
                        # Test no stderr
                        self.assertTrue(
                            os.stat(job_stderr_file).st_size == 0, "job stderr not empty : cf %s" % job_stderr_file
                        )
                finally:
                    os.unlink(job_stdout_file)
                    os.unlink(job_stderr_file)

        del self.tested_job
Developer: denisri, Project: soma-workflow, Lines: 75, Source: test_special_transfer.py


Example 13: test_result

    def test_result(self):
        workflow = self.wf_examples.example_simple()
        self.wf_id = self.wf_ctrl.submit_workflow(
            workflow=workflow,
            name=self.__class__.__name__)
        # Transfer input files if file transfer
        if self.path_management == self.FILE_TRANSFER or \
                self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
        # Wait for the workflow to finish
        Helper.wait_workflow(self.wf_id, self.wf_ctrl)
        # Transfer output files if file transfer
        if self.path_management == self.FILE_TRANSFER or \
                self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_output_files(self.wf_id, self.wf_ctrl)

        status = self.wf_ctrl.workflow_status(self.wf_id)
        self.assertTrue(status == constants.WORKFLOW_DONE,
                        "workflow status : %s. Expected : %s" %
                        (status, constants.WORKFLOW_DONE))

        nb_failed_jobs = len(Helper.list_failed_jobs(self.wf_id,
                                                     self.wf_ctrl))
        self.assertTrue(nb_failed_jobs == 0,
                        "nb failed jobs : %i. Expected : %i" %
                        (nb_failed_jobs, 0))
        nb_failed_aborted_jobs = len(Helper.list_failed_jobs(
            self.wf_id,
            self.wf_ctrl,
            include_aborted_jobs=True))
        self.assertTrue(nb_failed_aborted_jobs == 0,
                        "nb failed jobs including aborted : %i. Expected : %i"
                        % (nb_failed_aborted_jobs, 0))

        (jobs_info, transfers_info, workflow_status, workflow_queue,
            tmp_files) = self.wf_ctrl.workflow_elements_status(self.wf_id)

        for (job_id, tmp_status, queue, exit_info, dates) in jobs_info:
            job_list = self.wf_ctrl.jobs([job_id])
            job_name, job_command, job_submission_date = job_list[job_id]

            self.tested_job = job_id

            if exit_info[0] == constants.FINISHED_REGULARLY:
                # To check job standard out and standard err
                job_stdout_file = tempfile.NamedTemporaryFile(
                    prefix="job_soma_out_log_",
                    suffix=repr(job_id),
                    delete=False)
                job_stdout_file = job_stdout_file.name
                job_stderr_file = tempfile.NamedTemporaryFile(
                    prefix="job_soma_outerr_log_",
                    suffix=repr(job_id),
                    delete=False)
                job_stderr_file = job_stderr_file.name

                try:
                    self.wf_ctrl.retrieve_job_stdouterr(job_id,
                                                        job_stdout_file,
                                                        job_stderr_file)
                    if job_name == 'job1':
                        # Test stdout
                        isSame, msg = identical_files(
                            job_stdout_file,
                            self.wf_examples.lo_stdout[1])
                        self.assertTrue(isSame, msg)
                        # Test no stderr
                        msg = "job stderr not empty : cf %s\n" \
                            "stderr:\n---\n%s\n---" \
                            % (job_stderr_file, open(job_stderr_file).read())
                        self.assertTrue(os.stat(job_stderr_file).st_size == 0,
                                        msg)
                        # Test output files
                        if self.path_management == self.LOCAL_PATH:
                            isSame, msg = identical_files(
                                self.wf_examples.lo_out_model_file[11],
                                self.wf_examples.lo_file[11])
                            self.assertTrue(isSame, msg)
                            isSame, msg = identical_files(
                                self.wf_examples.lo_out_model_file[12],
                                self.wf_examples.lo_file[12])
                            self.assertTrue(isSame, msg)
                        if self.path_management == self.FILE_TRANSFER or \
                                self.path_management == self.SHARED_TRANSFER:
                            isSame, msg = identical_files(
                                self.wf_examples.lo_out_model_file[11],
                                self.wf_examples.tr_file[11].client_path)
                            self.assertTrue(isSame, msg)
                            isSame, msg = identical_files(
                                self.wf_examples.lo_out_model_file[12],
                                self.wf_examples.tr_file[12].client_path)
                            self.assertTrue(isSame, msg)
                            # For unknown reason, it raises some errors
                            # http://stackoverflow.com/questions/10496758/unexpected-end-of-file-and-error-importing-function-definition-error-running
                            # isSame,	msg	= identical_files(job_stderr_file,self.wf_examples.lo_stderr[1])
                            # self.failUnless(isSame == True)

                    if job_name in ['job2', 'job3', 'job4']:
                        job_nb = int(job_name[3])
                        # Test stdout
#......... part of the code omitted here .........
Developer: neurospin, Project: soma-workflow, Lines: 101, Source: test_simple.py


Example 14: test_result

    def test_result(self):
        nb = 20
        time_sleep = 1

        workflow = self.wf_examples.example_n_jobs(nb=nb, time=time_sleep)
        self.wf_id = self.wf_ctrl.submit_workflow(
            workflow=workflow,
            name=self.__class__.__name__)
        # Transfer input files if file transfer
        if self.path_management == self.FILE_TRANSFER or \
                self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
        # Wait for the workflow to finish
        Helper.wait_workflow(self.wf_id, self.wf_ctrl)
        # Transfer output files if file transfer
        if self.path_management == self.FILE_TRANSFER or \
                self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_output_files(self.wf_id, self.wf_ctrl)

        status = self.wf_ctrl.workflow_status(self.wf_id)
        self.assertTrue(status == constants.WORKFLOW_DONE,
                        "workflow status : %s. Expected : %s" %
                        (status, constants.WORKFLOW_DONE))
        nb_failed_jobs = len(Helper.list_failed_jobs(
            self.wf_id,
            self.wf_ctrl))
        self.assertTrue(nb_failed_jobs == 0,
                        "nb failed jobs : %i. Expected : %i" %
                        (nb_failed_jobs, 0))
        nb_failed_aborted_jobs = len(Helper.list_failed_jobs(
            self.wf_id,
            self.wf_ctrl,
            include_aborted_jobs=True))
        self.assertTrue(nb_failed_aborted_jobs == 0,
                        "nb failed jobs including aborted : %i. Expected : %i"
                        % (nb_failed_aborted_jobs, 0))

        (jobs_info, transfers_info, workflow_status, workflow_queue,
            tmp_files) = self.wf_ctrl.workflow_elements_status(self.wf_id)

        for (job_id, tmp_status, queue, exit_info, dates) in jobs_info:
            job_list = self.wf_ctrl.jobs([job_id])
            job_name, job_command, job_submission_date = job_list[job_id]

            self.tested_job = job_id

            if exit_info[0] == constants.FINISHED_REGULARLY:
                # To check job standard out and standard err
                job_stdout_file = tempfile.NamedTemporaryFile(
                    prefix="job_soma_out_log_",
                    suffix=repr(job_id),
                    delete=False)
                job_stdout_file = job_stdout_file.name
                job_stderr_file = tempfile.NamedTemporaryFile(
                    prefix="job_soma_outerr_log_",
                    suffix=repr(job_id),
                    delete=False)
                job_stderr_file = job_stderr_file.name

                try:
                    self.wf_ctrl.retrieve_job_stdouterr(job_id,
                                                        job_stdout_file,
                                                        job_stderr_file)
                    # Test stdout
                    self.assertTrue(os.stat(job_stdout_file).st_size == 0,
                                    "job stdout not empty : file: %s, "
                                    "contents:\n%s" %
                                    (job_stdout_file,
                                     open(job_stdout_file).read()))
                    # Test no stderr
                    self.assertTrue(os.stat(job_stderr_file).st_size == 0,
                                    "job stderr not empty : file %s, "
                                    "contents:\n%s" %
                                    (job_stderr_file,
                                     open(job_stderr_file).read()))
                finally:
                    os.unlink(job_stdout_file)
                    os.unlink(job_stderr_file)

        del self.tested_job
Developer: denisri, Project: soma-workflow, Lines: 80, Source: test_njobs.py


Example 15: remote_map_marshal

def remote_map_marshal(func, largs=None, lkwargs=None, mode='local'):

    if largs is None:
        if lkwargs is not None:
            largs = [[]] * len(lkwargs)
        else:
            largs = []

    if lkwargs is None:
        lkwargs = [{}] * len(largs)

    lkwargs = [merge_default_kwargs(func, kw) for kw in lkwargs]

    assert len(lkwargs) == len(largs)

    all_args = zip(largs, lkwargs)

    if mode == 'local':
        return [func(*args, **kwargs) for args, kwargs in all_args]
    elif mode == 'local_with_dumps':

        func_fn = './func.marshal'
        dump_func(func, func_fn)
        results = []
        for i, params in enumerate(all_args):
            print 'params:', params
            params_fn = 'params_%d.pck' % i
            fparams = open(params_fn, 'wb')
            cPickle.dump(params, fparams)
            fparams.close()
            output_fn = 'output_%d.pck' % i
            print 'call subprocess ...'
            subprocess.call(['python', '-c', cfunc_marshal, params_fn,
                             func_fn, output_fn])
            print 'Read outputs ...'
            fout = open(output_fn)
            results.append(cPickle.load(fout))
        return results
    elif mode == 'remote_cluster':
        # FileTransfer creation for input files
        #data_dir = './rmap_data'
        data_dir = mkdtemp(prefix="sw_rmap")
        func_fn = op.join(data_dir, 'func.marshal')
        dump_func(func, func_fn)
        func_file = FileTransfer(is_input=True,
                                 client_path=func_fn,
                                 name="func_file")

        all_jobs = []
        param_files = []
        for i, params in enumerate(all_args):
            params_fn = op.join(data_dir, 'params_%d.pck' % i)
            fparams = open(params_fn, 'wb')
            cPickle.dump(params, fparams)
            fparams.close()
            param_file = FileTransfer(is_input=True,
                                      client_path=params_fn,
                                      name='params_file_%d' % i)
            param_files.append(param_file)
            output_fn = op.join(data_dir, 'output_%d.pck' % i)
            output_file = FileTransfer(is_input=False,
                                       client_path=output_fn,
                                       name='output_file_%d' % i)
            job = Job(command=['python', '-c', cfunc, param_file, func_file,
                               output_file],
                      name="rmap, item %d" % i,
                      referenced_input_files=[func_file, param_file],
                      referenced_output_files=[output_file])
            all_jobs.append(job)

        workflow = Workflow(jobs=all_jobs, dependencies=[])
        # submit the workflow
        cfg = pyhrf.cfg['parallel-cluster']
        controller = WorkflowController(cfg['server_id'], cfg['user'])

        # controller.transfer_files(fids_to_transfer)
        wf_id = controller.submit_workflow(
            workflow=workflow, name="remote_map")

        Helper.transfer_input_files(wf_id, controller)

        Helper.wait_workflow(wf_id, controller)

        Helper.transfer_output_files(wf_id, controller)

        results = []
        for i in xrange(len(all_args)):
            fout = open(op.join(data_dir, 'output_%d.pck' % i))
            results.append(cPickle.load(fout))
            fout.close()
        return results
Developer: pyhrf, Project: pyhrf, Lines: 91, Source: parallel.py


Example 16: remote_map


#......... part of the code omitted here .........

    all_args = zip(largs, lkwargs)
    # print 'all_args:', all_args

    fmodule = func.__module__
    fname = '.'.join([fmodule, func.__name__])

    if mode == 'serial':
        return [func(*args, **kwargs) for args, kwargs in all_args]
    elif mode == 'local':
        try:
            from joblib import Parallel, delayed
        except ImportError:
            raise ImportError('Can not import joblib. It is '
                              'required to enable parallel '
                              'processing on a local machine.')

        if logger.getEffectiveLevel() == logging.DEBUG:
            parallel_verb = 10
        else:
            parallel_verb = 0
        if pyhrf.cfg['parallel-local']['nb_procs']:
            n_jobs = pyhrf.cfg['parallel-local']['nb_procs']
        else:
            n_jobs = available_cpu_count()
        p = Parallel(n_jobs=n_jobs, verbose=parallel_verb)
        return p(delayed(func)(*args, **kwargs)
                 for args, kwargs in all_args)

    elif mode == 'local_with_dumps':
        results = []
        for i, params in enumerate(all_args):
            # print 'params:', params
            params_fn = 'params_%d.pck' % i
            fparams = open(params_fn, 'wb')
            cPickle.dump(params, fparams)
            fparams.close()
            output_fn = 'output_%d.pck' % i
            # print 'call subprocess ...'
            subprocess.call(['python', '-c', cfunc % (fmodule, fname),
                             params_fn, output_fn])
            # print 'Read outputs ...'
            fout = open(output_fn)
            results.append(cPickle.load(fout))
        return results
    elif mode == 'remote_cluster':
        # FileTransfer creation for input files
        #data_dir = './rmap_data'
        data_dir = mkdtemp(prefix="sw_rmap")

        all_jobs = []
        param_files = []
        for i, params in enumerate(all_args):
            params_fn = op.join(data_dir, 'params_%d.pck' % i)
            fparams = open(params_fn, 'wb')
            cPickle.dump(params, fparams)
            fparams.close()
            param_file = FileTransfer(is_input=True,
                                      client_path=params_fn,
                                      name='params_file_%d' % i)
            param_files.append(param_file)
            output_fn = op.join(data_dir, 'output_%d.pck' % i)
            output_file = FileTransfer(is_input=False,
                                       client_path=output_fn,
                                       name='output_file_%d' % i)
            job = Job(command=['pyhrf_exec_pyfunc', fmodule, fname,
                               param_file, output_file],
                      name="rmap, item %d" % i,
                      referenced_input_files=[param_file],
                      referenced_output_files=[output_file])
            all_jobs.append(job)

        workflow = Workflow(jobs=all_jobs, dependencies=[])
        # submit the workflow
        cfg = pyhrf.cfg['parallel-cluster']
        controller = WorkflowController(cfg['server_id'], cfg['user'])
        # controller.transfer_files(fids_to_transfer)
        wf_id = controller.submit_workflow(
            workflow=workflow, name="remote_map")

        Helper.transfer_input_files(wf_id, controller)

        Helper.wait_workflow(wf_id, controller)

        Helper.transfer_output_files(wf_id, controller)

        results = []
        for i in xrange(len(all_args)):
            fnout = op.join(data_dir, 'output_%d.pck' % i)
            fout = open(fnout)
            o = cPickle.load(fout)
            print 'file cPickle loaded:', o
            fout.close()
            os.remove(fnout)
            if isinstance(o, Exception):
                raise RemoteException('Task %d failed' % i, o)
                if o.errno != 17:
                    raise RemoteException('Task %d failed' % i, o)
            results.append(o)
        return results
Developer: pyhrf, Project: pyhrf, Lines: 101, Source: parallel.py


Example 17: test_result

    def test_result(self):
        workflow = self.wf_examples.example_native_spec_pbs()
        self.wf_id = self.wf_ctrl.submit_workflow(workflow=workflow, name=self.__class__.__name__)
        # Transfer input files if file transfer
        if self.path_management == self.FILE_TRANSFER or self.path_management == self.SHARED_TRANSFER:
            Helper.transfer_input_files(self.wf_id, self.wf_ctrl)
#......... the remainder of this example is omitted .........
