Python config.update_config Function Code Examples


This article collects typical code examples of the Python function nipype.config.update_config. If you are unsure what update_config does, how to call it, or where to find usage examples, the curated samples below should help.



In total, 20 code examples of the update_config function are shown below, sorted by popularity by default.
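Before the examples, a minimal self-contained sketch of the pattern they all share may help. It assumes only that nipype is installed; the DEBUG level, the option values, and the crash directory are placeholder choices rather than values taken from any one example below.

import os
from nipype import config, logging

# Build a plain dict with the sections nipype recognizes ('logging', 'execution'),
# then merge it into nipype's global configuration.
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                  execution={'stop_on_first_crash': True,
                             'crashdump_dir': os.path.abspath('crash')})
config.update_config(nipype_cfg)

# Propagate the updated logging settings to nipype's loggers.
logging.update_logging(config)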

Example 1: ants_ct_wf

def ants_ct_wf(subjects_id,
            preprocessed_data_dir,
            working_dir,
            ds_dir,
            template_dir,
            plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo



    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='ants_ct')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')



    #####################################
    # GET DATA
    #####################################
    # GET SUBJECT SPECIFIC STRUCTURAL DATA
    in_data_templates = {
        't1w': '{subject_id}/raw_niftis/sMRI/t1w_reoriented.nii.gz',
    }

    in_data = Node(nio.SelectFiles(in_data_templates,
                                       base_directory=preprocessed_data_dir),
                       name="in_data")
    in_data.inputs.subject_id = subjects_id


    # GET NKI ANTs templates
    ants_templates_templates = {
        'brain_template': 'NKI/T_template.nii.gz',
        'brain_probability_mask': 'NKI/T_templateProbabilityMask.nii.gz',
        'segmentation_priors': 'NKI/Priors/*.nii.gz',
        't1_registration_template': 'NKI/T_template_BrainCerebellum.nii.gz'

    }

    ants_templates = Node(nio.SelectFiles(ants_templates_templates,
                                       base_directory=template_dir),
                       name="ants_templates")
Author: fliem | Project: LeiCA | Code lines: 57 | Source: ants_ct.py


Example 2: _create_singleSession

def _create_singleSession(dataDict, master_config, interpMode, pipeline_name):
    """
    create singleSession workflow on a single session

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """
    assert 'tissue_classify' in master_config['components'] or \
           'auxlmk' in master_config['components'] or \
           'denoise' in master_config['components'] or \
           'landmark' in master_config['components'] or \
           'segmentation' in master_config['components'] or \
           'malf_2012_neuro' in master_config['components']

    import os  # needed for os.path.isfile below (imported at module level in the original file)
    from nipype import config, logging

    config.update_config(master_config)  # Set universal pipeline options
    logging.update_logging(config)

    from workflows.baseline import generate_single_session_template_WF

    project = dataDict['project']
    subject = dataDict['subject']
    session = dataDict['session']

    blackListFileName = dataDict['T1s'][0] + '_noDenoise'
    isBlackList = os.path.isfile(blackListFileName)

    pname = "{0}_{1}_{2}".format(master_config['workflow_phase'], subject, session)
    onlyT1 = not (len(dataDict['T2s']) > 0)
    if onlyT1:
        print("T1 Only processing starts ...")
    else:
        print("Multimodal processing starts ...")
    sessionWorkflow = generate_single_session_template_WF(project, subject, session, onlyT1, master_config,
                                                          phase=master_config['workflow_phase'],
                                                          interpMode=interpMode,
                                                          pipeline_name=pipeline_name,
                                                          doDenoise=(not isBlackList))
    sessionWorkflow.base_dir = master_config['cachedir']

    sessionWorkflow_inputsspec = sessionWorkflow.get_node('inputspec')
    sessionWorkflow_inputsspec.inputs.T1s = dataDict['T1s']
    sessionWorkflow_inputsspec.inputs.T2s = dataDict['T2s']
    sessionWorkflow_inputsspec.inputs.PDs = dataDict['PDs']
    sessionWorkflow_inputsspec.inputs.FLs = dataDict['FLs']
    sessionWorkflow_inputsspec.inputs.OTHERs = dataDict['OTs']
    return sessionWorkflow
Author: mharms | Project: BRAINSTools | Code lines: 50 | Source: singleSession.py


Example 3: execute_task

def execute_task(pckld_task, node_config, updatehash):
    from socket import gethostname
    from traceback import format_exc
    from nipype import config, logging
    traceback=None
    result=None
    try:
        config.update_config(node_config)
        logging.update_logging(config)
        from cPickle import loads
        task = loads(pckld_task)
        result = task.run(updatehash=updatehash)
    except:
        traceback = format_exc()
        result = task.result
    return result, traceback, gethostname()
Author: Alunisiira | Project: nipype | Code lines: 16 | Source: ipython.py


Example 4: learning_prepare_data_wf

def learning_prepare_data_wf(working_dir,
                             ds_dir,
                             template_dir,
                             df_file,
                             in_data_name_list,
                             data_lookup_dict,
                             use_n_procs,
                             plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode, JoinNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from nipype.interfaces.freesurfer.utils import ImageInfo
    from utils import aggregate_data, vectorize_data
    from itertools import chain




    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))

    #####################################
    # GENERAL SETTINGS
    #####################################
    wf = Workflow(name='learning_prepare_data_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': False,
                                                                       'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False

    # get atlas data
    templates_atlases = {'GM_mask_MNI_2mm': 'SPM_GM/SPM_GM_mask_2mm.nii.gz',
                         'GM_mask_MNI_3mm': 'SPM_GM/SPM_GM_mask_3mm.nii.gz',
                         'brain_mask_MNI_3mm': 'cpac_image_resources/MNI_3mm/MNI152_T1_3mm_brain_mask.nii.gz',
                         'brain_template_MNI_3mm': 'cpac_image_resources/MNI_3mm/MNI152_T1_3mm.nii.gz'
                         }

    selectfiles_anat_templates = Node(nio.SelectFiles(templates_atlases,
                                                      base_directory=template_dir),
                                      name="selectfiles_anat_templates")

    #####################################
    # SET ITERATORS
    #####################################
    # SUBJECTS ITERATOR
    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    mulitmodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='mulitmodal_in_data_name_infosource')
    mulitmodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subjects_selection_crit_dict = {}
    subjects_selection_crit_dict['adult_healthy_F'] = ["df[df.sex == \'F\']", 'df[df.no_axis_1]', 'df[df.age >= 18]']
    subjects_selection_crit_dict['adult_F'] = ["df[df.sex == \'F\']", 'df[df.age >= 18]']
    subjects_selection_crit_dict['F'] = ["df[df.sex == \'F\']"]

    subjects_selection_crit_dict['adult_healthy_M'] = ["df[df.sex == \'M\']", 'df[df.no_axis_1]', 'df[df.age >= 18]']
    subjects_selection_crit_dict['adult_M'] = ["df[df.sex == \'M\']", 'df[df.age >= 18]']
    subjects_selection_crit_dict['adult'] = ['df[df.age >= 18]']
    # subjects_selection_crit_names_list = subjects_selection_crit_dict.keys()
    subjects_selection_crit_names_list = ['adult_F']

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    def out_name_str_fct(selection_criterium, in_data_name):
        return selection_criterium + '_' + in_data_name

    out_name_str = Node(util.Function(input_names=['selection_criterium', 'in_data_name'],
                                      output_names=['out_name_str'],
                                      function=out_name_str_fct),
                        name='out_name_str')
    wf.connect(in_data_name_infosource, 'in_data_name', out_name_str, 'in_data_name')
    wf.connect(subject_selection_infosource, 'selection_criterium', out_name_str, 'selection_criterium')

    def get_subjects_info_fct(df_file, subjects_selection_crit_dict, selection_criterium):
        import pandas as pd
        import os
#......... rest of the code omitted .........
Author: fliem | Project: LeiCA | Code lines: 101 | Source: learning_prepare_data_wf.py


Example 5: segmentation

def segmentation(projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=''):
    import os.path
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces import ants
    from nipype.interfaces.utility import IdentityInterface, Function, Merge
    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']

    from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
    from WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
    from utilities.distributed import modify_qsub_args
    from SEMTools import BRAINSSnapShotWriter

    baw200 = pe.Workflow(name=pipeline_name)

    # HACK: print for debugging
    for key, item in master_config.items():
        print("-" * 30)
        print(key, ":", item)
    print("-" * 30)
    #END HACK

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average',
                                                             't2_average',
                                                             'template_t1',
                                                             'hncma-atlas',
                                                             'LMIatlasToSubject_tx',
                                                             'inputLabels',
                                                             'inputHeadLabels',
                                                             'posteriorImages',
                                                             'TissueClassifyatlasToSubjectInverseTransform',
                                                             'UpdatedPosteriorsList']),
                         run_without_submitting=True, name='inputspec')

    # outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
    #                       run_without_submitting=True, name='outputspec')

    currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(subjectid) + "_" + str(sessionid)
    ClipT1ImageWithBrainMaskNode = pe.Node(interface=Function(function=ClipT1ImageWithBrainMask,
                                                              input_names=['t1_image', 'brain_labels',
                                                                           'clipped_file_name'],
                                                              output_names=['clipped_file']),
                                            name=currentClipT1ImageWithBrainMaskName)
    ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'

    baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode, [('t1_average', 't1_image'),
                                                                ('inputLabels', 'brain_labels')])])

    currentAtlasToSubjectantsRegistration = 'AtlasToSubjectANTsRegistration_' + str(subjectid) + "_" + str(sessionid)
    AtlasToSubjectantsRegistration = pe.Node(interface=ants.Registration(), name=currentAtlasToSubjectantsRegistration)

    AtlasToSubjectantsRegistration.inputs.dimension = 3
    AtlasToSubjectantsRegistration.inputs.transforms = ["Affine", "SyN"]
    AtlasToSubjectantsRegistration.inputs.transform_parameters = [[0.1], [0.15, 3.0, 0.0]]
    AtlasToSubjectantsRegistration.inputs.metric = ['Mattes', 'CC']
    AtlasToSubjectantsRegistration.inputs.sampling_strategy = ['Regular', None]
    AtlasToSubjectantsRegistration.inputs.sampling_percentage = [1.0, 1.0]
    AtlasToSubjectantsRegistration.inputs.metric_weight = [1.0, 1.0]
    AtlasToSubjectantsRegistration.inputs.radius_or_number_of_bins = [32, 4]
    AtlasToSubjectantsRegistration.inputs.number_of_iterations = [[1000, 1000, 1000], [10000, 500, 500, 200]]
    AtlasToSubjectantsRegistration.inputs.convergence_threshold = [5e-7, 5e-7]
    AtlasToSubjectantsRegistration.inputs.convergence_window_size = [25, 25]
    AtlasToSubjectantsRegistration.inputs.use_histogram_matching = [True, True]
    AtlasToSubjectantsRegistration.inputs.shrink_factors = [[4, 2, 1], [5, 4, 2, 1]]
    AtlasToSubjectantsRegistration.inputs.smoothing_sigmas = [[4, 2, 0], [5, 4, 2, 0]]
    AtlasToSubjectantsRegistration.inputs.sigma_units = ["vox","vox"]
    AtlasToSubjectantsRegistration.inputs.use_estimate_learning_rate_once = [False, False]
    AtlasToSubjectantsRegistration.inputs.write_composite_transform = True
    AtlasToSubjectantsRegistration.inputs.collapse_output_transforms = True
    AtlasToSubjectantsRegistration.inputs.output_transform_prefix = 'AtlasToSubject_'
    AtlasToSubjectantsRegistration.inputs.winsorize_lower_quantile = 0.025
    AtlasToSubjectantsRegistration.inputs.winsorize_upper_quantile = 0.975
    AtlasToSubjectantsRegistration.inputs.collapse_linear_transforms_to_fixed_image_header = False
    AtlasToSubjectantsRegistration.inputs.output_warped_image = 'atlas2subject.nii.gz'
    AtlasToSubjectantsRegistration.inputs.output_inverse_warped_image = 'subject2atlas.nii.gz'

    baw200.connect([(inputsSpec, AtlasToSubjectantsRegistration, [('LMIatlasToSubject_tx', 'initial_moving_transform'),
                                                                  ('t1_average', 'fixed_image'),
                                                                  ('template_t1', 'moving_image')])
                   ])

    myLocalSegWF = CreateBRAINSCutWorkflow(projectid,
                                           subjectid,
                                           sessionid,
                                           master_config['queue'],
                                           master_config['long_q'],
                                           t1Only=onlyT1)
    MergeStage2AverageImagesName = "99_mergeAvergeStage2Images_" + str(sessionid)
    MergeStage2AverageImages = pe.Node(interface=Merge(2), run_without_submitting=True,
                                       name=MergeStage2AverageImagesName)

    baw200.connect([(inputsSpec, myLocalSegWF, [('t1_average', 'inputspec.T1Volume'),
                                                ('posteriorImages', "inputspec.posteriorDictionary"),
                                                ('inputLabels', 'inputspec.RegistrationROI'),]),
                    (inputsSpec, MergeStage2AverageImages, [('t1_average', 'in1')]),
                    (AtlasToSubjectantsRegistration, myLocalSegWF, [('composite_transform',
                                                                     'inputspec.atlasToSubjectTransform')])
#......... rest of the code omitted .........
Author: gang-liu | Project: BRAINSTools | Code lines: 101 | Source: segmentation.py


Example 6: segmentation

def segmentation(projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=''):
    import os.path
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces import ants
    from nipype.interfaces.utility import IdentityInterface, Function, Merge
    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)

    from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
    from .WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
    from utilities.distributed import modify_qsub_args
    from nipype.interfaces.semtools import BRAINSSnapShotWriter

    # CLUSTER_QUEUE=master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']
    baw200 = pe.Workflow(name=pipeline_name)

    # HACK: print for debugging
    for key, item in list(master_config.items()):
        print("-" * 30)
        print(key, ":", item)
    print("-" * 30)
    # END HACK

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average',
                                                             't2_average',
                                                             'template_t1',
                                                             'hncma_atlas',
                                                             'LMIatlasToSubject_tx',
                                                             'inputLabels',
                                                             'inputHeadLabels',
                                                             'posteriorImages',
                                                             'UpdatedPosteriorsList',
                                                             'atlasToSubjectRegistrationState',
                                                             'rho',
                                                             'phi',
                                                             'theta',
                                                             'l_caudate_ProbabilityMap',
                                                             'r_caudate_ProbabilityMap',
                                                             'l_hippocampus_ProbabilityMap',
                                                             'r_hippocampus_ProbabilityMap',
                                                             'l_putamen_ProbabilityMap',
                                                             'r_putamen_ProbabilityMap',
                                                             'l_thalamus_ProbabilityMap',
                                                             'r_thalamus_ProbabilityMap',
                                                             'l_accumben_ProbabilityMap',
                                                             'r_accumben_ProbabilityMap',
                                                             'l_globus_ProbabilityMap',
                                                             'r_globus_ProbabilityMap',
                                                             'trainModelFile_txtD0060NT0060_gz',
                                                             ]),
                         run_without_submitting=True, name='inputspec')

    # outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
    #                       run_without_submitting=True, name='outputspec')

    currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(subjectid) + "_" + str(sessionid)
    ClipT1ImageWithBrainMaskNode = pe.Node(interface=Function(function=ClipT1ImageWithBrainMask,
                                                              input_names=['t1_image', 'brain_labels',
                                                                           'clipped_file_name'],
                                                              output_names=['clipped_file']),
                                           name=currentClipT1ImageWithBrainMaskName)
    ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'

    baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode, [('t1_average', 't1_image'),
                                                                ('inputLabels', 'brain_labels')])])

    currentA2SantsRegistrationPostABCSyN = 'A2SantsRegistrationPostABCSyN_' + str(subjectid) + "_" + str(sessionid)
    ## TODO: It would be great to update the BRAINSABC atlasToSubjectTransform at this point, but
    ##       That requires more testing, and fixes to ANTS to properly collapse transforms.
    ##       For now we are simply creating a dummy node to pass through


    A2SantsRegistrationPostABCSyN = pe.Node(interface=ants.Registration(), name=currentA2SantsRegistrationPostABCSyN)

    many_cpu_ANTsSyN_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 8, 8, 16),
                                           'overwrite': True}
    A2SantsRegistrationPostABCSyN.plugin_args = many_cpu_ANTsSyN_options_dictionary
    CommonANTsRegistrationSettings(
        antsRegistrationNode=A2SantsRegistrationPostABCSyN,
        registrationTypeDescription="A2SantsRegistrationPostABCSyN",
        output_transform_prefix='AtlasToSubjectPostBABC_SyN',
        output_warped_image='atlas2subjectPostBABC.nii.gz',
        output_inverse_warped_image='subject2atlasPostBABC.nii.gz',
        save_state='SavedInternalSyNStatePostBABC.h5',
        invert_initial_moving_transform=False,
        initial_moving_transform=None)

    ## TODO: Try multi-modal registration here
    baw200.connect([(inputsSpec, A2SantsRegistrationPostABCSyN, [('atlasToSubjectRegistrationState', 'restore_state'),
                                                                 ('t1_average', 'fixed_image'),
                                                                 ('template_t1', 'moving_image')])
                    ])

    myLocalSegWF = CreateBRAINSCutWorkflow(projectid,
                                           subjectid,
                                           sessionid,
                                           master_config['queue'],
#......... rest of the code omitted .........
Author: NIRALUser | Project: BRAINSTools | Code lines: 101 | Source: segmentation.py


Example 7: Workflow



######################
# WF
######################
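# NOTE: module-level fragment; os, config, Node, Workflow, util (nipype.interfaces.utility),
# nio (nipype.interfaces.io), distype and subjects_list are defined earlier in the source file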

wd_dir = '/scr/kansas1/data/lsd-lemon/lemon_wd_meanDist_%s' % distype
ds_dir = '/scr/kansas1/data/lsd-lemon/lemon_results_meanDist_%s' % distype

wf = Workflow(name='distconnect_meanDist_%s' % distype)
wf.base_dir = os.path.join(wd_dir)
nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': False,
                                                                   'remove_unnecessary_outputs': False,
                                                                   'job_finished_timeout': 120})
config.update_config(nipype_cfg)
wf.config['execution']['crashdump_dir'] = os.path.join(wd_dir, 'crash')

ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')


######################
# GET DATA
######################
# SUBJECTS ITERATOR
          
subjects_infosource = Node(util.IdentityInterface(fields=['subject_id']), name='subjects_infosource')
subjects_infosource.iterables = ('subject_id', subjects_list)

run_mean_dist = Node(util.Function(input_names=['sub'],
                                   output_names=[], 
#......... rest of the code omitted .........
Author: Lauckner | Project: distconnect | Code lines: 28 | Source: 5_meanDist_workflow.py


Example 8: learning_predict_data_2samp_wf

def learning_predict_data_2samp_wf(working_dir,
                                   ds_dir,
                                   in_data_name_list,
                                   subjects_selection_crit_dict,
                                   subjects_selection_crit_names_list,
                                   aggregated_subjects_dir,
                                   target_list,
                                   use_n_procs,
                                   plugin_name,
                                   confound_regression=[False, True],
                                   run_cv=False,
                                   n_jobs_cv=1,
                                   run_tuning=False,
                                   run_2sample_training=False,
                                   aggregated_subjects_dir_nki=None,
                                   subjects_selection_crit_dict_nki=None,
                                   subjects_selection_crit_name_nki=None,
                                   reverse_split=False,
                                   random_state_nki=666,
                                   run_learning_curve=False,
                                   life_test_size=0.5):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    from itertools import chain
    from learning_utils import aggregate_multimodal_metrics_fct, run_prediction_split_fct, \
        backproject_and_split_weights_fct, select_subjects_fct, select_multimodal_X_fct, learning_curve_plot
    import pandas as pd



    ###############################################################################################################
    # GENERAL SETTINGS

    wf = Workflow(name='learning_predict_data_2samp_wf')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'),
                      execution={'stop_on_first_crash': False,
                                 'remove_unnecessary_outputs': False,
                                 'job_finished_timeout': 120})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(), name='ds')
    ds.inputs.base_directory = os.path.join(ds_dir, 'group_learning_prepare_data')

    ds.inputs.regexp_substitutions = [
        # ('subject_id_', ''),
        ('_parcellation_', ''),
        ('_bp_freqs_', 'bp_'),
        ('_extraction_method_', ''),
        ('_subject_id_[A0-9]*/', '')
    ]
    ds_pdf = Node(nio.DataSink(), name='ds_pdf')
    ds_pdf.inputs.base_directory = os.path.join(ds_dir, 'pdfs')
    ds_pdf.inputs.parameterization = False



    ###############################################################################################################
    # ensure in_data_name_list is list of lists
    in_data_name_list = [i if type(i) == list else [i] for i in in_data_name_list]
    in_data_name_list_unique = list(set(chain.from_iterable(in_data_name_list)))



    ###############################################################################################################
    # SET ITERATORS

    in_data_name_infosource = Node(util.IdentityInterface(fields=['in_data_name']), name='in_data_name_infosource')
    in_data_name_infosource.iterables = ('in_data_name', in_data_name_list_unique)

    multimodal_in_data_name_infosource = Node(util.IdentityInterface(fields=['multimodal_in_data_name']),
                                              name='multimodal_in_data_name_infosource')
    multimodal_in_data_name_infosource.iterables = ('multimodal_in_data_name', in_data_name_list)

    subject_selection_infosource = Node(util.IdentityInterface(fields=['selection_criterium']),
                                        name='subject_selection_infosource')
    subject_selection_infosource.iterables = ('selection_criterium', subjects_selection_crit_names_list)

    target_infosource = Node(util.IdentityInterface(fields=['target_name']), name='target_infosource')
    target_infosource.iterables = ('target_name', target_list)



    ###############################################################################################################
    # COMPILE LIFE DATA
    ###############################################################################################################

    ###############################################################################################################
    # GET INFO AND SELECT FILES
    df_all_subjects_pickle_file = os.path.join(aggregated_subjects_dir, 'df_all_subjects_pickle_file/df_all.pkl')
    df = pd.read_pickle(df_all_subjects_pickle_file)

    # build lookup dict for unimodal data
    X_file_template = 'X_file/_in_data_name_{in_data_name}/vectorized_aggregated_data.npy'
    info_file_template = 'unimodal_backprojection_info_file/_in_data_name_{in_data_name}/unimodal_backprojection_info.pkl'
#......... rest of the code omitted .........
Author: fliem | Project: LeiCA_LIFE | Code lines: 101 | Source: learning_predict_data_wf.py


Example 9: open

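# NOTE: fragment from inside a larger loop; wf, c (a configuration object), resource,
# model and subject_list are defined earlier in the original source file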
        wf.config['execution'] = {'hash_method': 'timestamp', 'crashdump_dir': os.path.abspath(c.crashLogDirectory)}
        log_dir = os.path.join(c.outputDirectory, 'logs', 'group_analysis', resource, 'model_%s' % (os.path.basename(model)))
        try:
            os.makedirs(log_dir)
        except:
            print "log_dir already exist"
        



        # enable logging
    
        from nipype import config
        from nipype import logging
        
        config.update_config({'logging': {'log_directory': log_dir,
                              'log_to_file': True}})
        
        # Temporarily disable until solved
        #logging.update_logging(config)

        iflogger = logging.getLogger('interface')
    
    
        group_sublist = open(subject_list, 'r')

        #print >>diag, "> Opened subject list: ", subject_list
        #print >>diag, ""

        sublist_items = group_sublist.readlines()

        input_subject_list = [line.rstrip('\n') for line in sublist_items \
Author: ZhenYangCMI | Project: Colibazzi_code | Code lines: 32 | Source: cpac_group_analysis_pipeline.py


Example 10: RunSubjectWorkflow

def RunSubjectWorkflow(args):
    """
                           .-----------.
                       --- | Session 1 | ---> /project/subjectA/session1/phase/
                     /     *-----------*
    .-----------.   /
    | Subject A | <
    *-----------*   \
                     \     .-----------.
                       --- | Session 2 | ---> /project/subjectA/session2/phase/
                           *-----------*
    **** Replaces WorkflowT1T2.py ****
    """
    database, start_time, subject, master_config = args
    assert 'baseline' in master_config['components'] or 'longitudinal' in master_config['components'], "Baseline or Longitudinal is not in WORKFLOW_COMPONENTS!"
    # HACK:
    #    To avoid a "sqlite3.ProgrammingError: Base Cursor.__init__ not called" error
    #    using multiprocessing.map_async(), re-instantiate database
    # database.__init__(defaultDBName=database.dbName, subject_list=database.subjectList)
    #
    # END HACK
    import time

    from nipype import config, logging
    config.update_config(master_config)  # Set universal pipeline options
    assert config.get('execution', 'plugin') == master_config['execution']['plugin']
    # DEBUG
    # config.enable_debug_mode()
    # config.set('execution', 'stop_on_first_rerun', 'true')
    # END DEBUG
    logging.update_logging(config)

    import nipype.pipeline.engine as pe
    import nipype.interfaces.base as nbase
    import nipype.interfaces.io as nio
    from nipype.interfaces.utility import IdentityInterface, Function
    import traits

    from baw_exp import OpenSubjectDatabase
    from SessionDB import SessionDB
    from PipeLineFunctionHelpers import convertToList
    from atlasNode import MakeAtlasNode
    from utilities.misc import GenerateSubjectOutputPattern as outputPattern
    from utilities.misc import GenerateWFName

    while time.time() < start_time:
        time.sleep(start_time - time.time() + 1)
        print "Delaying start for {subject}".format(subject=subject)
    print("===================== SUBJECT: {0} ===========================".format(subject))

    subjectWorkflow = pe.Workflow(name="BAW_StandardWorkup_subject_{0}".format(subject))
    subjectWorkflow.base_dir = config.get('logging', 'log_directory')
    # subjectWorkflow.config['execution']['plugin'] = 'Linear'  # Hardcodeded in WorkupT1T2.py - why?
    # DEBUG
    # subjectWorkflow.config['execution']['stop_on_first_rerun'] = 'true'
    # END DEBUG
    atlasNode = MakeAtlasNode(master_config['atlascache'], 'BAtlas')

    sessionWorkflow = dict()
    inputsSpec = dict()
    sessions = database.getSessionsFromSubject(subject)
    # print "These are the sessions: ", sessions
    if 'baseline' in master_config['components']:
        current_phase = 'baseline'
        from baseline import create_baseline as create_wkfl
    elif 'longitudinal' in master_config['components']:
        current_phase = 'longitudinal'
        from longitudinal import create_longitudial as create_wkfl

    for session in sessions:  # TODO (future): Replace with iterable inputSpec node and add Function node for getAllFiles()
        project = database.getProjFromSession(session)
        pname = "{0}_{1}".format(session, current_phase)  # Long node names make graphs a pain to read/print
        # pname = GenerateWFName(project, subject, session, current_phase)
        print "Building session pipeline for {0}".format(session)
        inputsSpec[session] = pe.Node(name='inputspec_{0}'.format(session),
                                      interface=IdentityInterface(fields=['T1s', 'T2s', 'PDs', 'FLs', 'OTs']))
        inputsSpec[session].inputs.T1s = database.getFilenamesByScantype(session, ['T1-15', 'T1-30'])
        inputsSpec[session].inputs.T2s = database.getFilenamesByScantype(session, ['T2-15', 'T2-30'])
        inputsSpec[session].inputs.PDs = database.getFilenamesByScantype(session, ['PD-15', 'PD-30'])
        inputsSpec[session].inputs.FLs = database.getFilenamesByScantype(session, ['FL-15', 'FL-30'])
        inputsSpec[session].inputs.OTs = database.getFilenamesByScantype(session, ['OTHER-15', 'OTHER-30'])

        sessionWorkflow[session] = create_wkfl(project, subject, session, master_config,
                                               interpMode='Linear', pipeline_name=pname)

        subjectWorkflow.connect([(inputsSpec[session], sessionWorkflow[session], [('T1s', 'inputspec.T1s'),
                                                                                  ('T2s', 'inputspec.T2s'),
                                                                                  ('PDs', 'inputspec.PDs'),
                                                                                  ('FLs', 'inputspec.FLs'),
                                                                                  ('OTs', 'inputspec.OTHERs'),
                                                                                  ]),
                                 (atlasNode, sessionWorkflow[session], [('template_landmarks_50Lmks_fcsv',
                                                                         'inputspec.atlasLandmarkFilename'),
                                                                        ('template_weights_50Lmks_wts',
                                                                         'inputspec.atlasWeightFilename'),
                                                                        ('LLSModel_50Lmks_hdf5', 'inputspec.LLSModel'),
                                                                        ('T1_50Lmks_mdl', 'inputspec.inputTemplateModel')]),
                                ])
        if current_phase == 'baseline':
            subjectWorkflow.connect([(atlasNode, sessionWorkflow[session], [('template_t1', 'inputspec.template_t1'),
#......... rest of the code omitted .........
Author: lassoan | Project: BRAINSTools | Code lines: 101 | Source: singleSubject.py


Example 11: calc_local_metrics

def calc_local_metrics(brain_mask,
                       preprocessed_data_dir,
                       subject_id,
                       parcellations_dict,
                       bp_freq_list,
                       TR,
                       selectfiles_templates,
                       working_dir,
                       ds_dir,
                       use_n_procs,
                       plugin_name):
    import os
    from nipype import config
    from nipype.pipeline.engine import Node, Workflow, MapNode
    import nipype.interfaces.utility as util
    import nipype.interfaces.io as nio
    import nipype.interfaces.fsl as fsl
    from nipype.interfaces.freesurfer.preprocess import MRIConvert

    import CPAC.alff.alff as cpac_alff
    import CPAC.reho.reho as cpac_reho
    import CPAC.utils.utils as cpac_utils

    import utils as calc_metrics_utils
    from motion import calculate_FD_P, calculate_FD_J





    #####################################
    # GENERAL SETTINGS
    #####################################
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')

    wf = Workflow(name='LeiCA_LIFE_metrics')
    wf.base_dir = os.path.join(working_dir)

    nipype_cfg = dict(logging=dict(workflow_level='DEBUG'), execution={'stop_on_first_crash': True,
                                                                       'remove_unnecessary_outputs': True,
                                                                       'job_finished_timeout': 15})
    config.update_config(nipype_cfg)
    wf.config['execution']['crashdump_dir'] = os.path.join(working_dir, 'crash')

    ds = Node(nio.DataSink(base_directory=ds_dir), name='ds')
    ds.inputs.regexp_substitutions = [('MNI_resampled_brain_mask_calc.nii.gz', 'falff.nii.gz'),
                                      ('residual_filtered_3dT.nii.gz', 'alff.nii.gz'),
                                      ('_parcellation_', ''),
                                      ('_bp_freqs_', 'bp_'),
                                      ]



    #####################
    # ITERATORS
    #####################
    # PARCELLATION ITERATOR
    parcellation_infosource = Node(util.IdentityInterface(fields=['parcellation']), name='parcellation_infosource')
    parcellation_infosource.iterables = ('parcellation', parcellations_dict.keys())

    # BP FILTER ITERATOR
    bp_filter_infosource = Node(util.IdentityInterface(fields=['bp_freqs']), name='bp_filter_infosource')
    bp_filter_infosource.iterables = ('bp_freqs', bp_freq_list)

    selectfiles = Node(nio.SelectFiles(selectfiles_templates,
                                       base_directory=preprocessed_data_dir),
                       name='selectfiles')
    selectfiles.inputs.subject_id = subject_id

    # #####################
    # # FIX TR IN HEADER
    # #####################
    # tr_msec = int(TR * 1000)
    # tr_str = '-tr %s' % tr_msec
    #
    # fixed_tr_bp = Node(MRIConvert(out_type='niigz', args=tr_str), name='fixed_tr_bp')
    # wf.connect(selectfiles, 'epi_MNI_bp', fixed_tr_bp, 'in_file')
    #
    # fixed_tr_fullspectrum = Node(MRIConvert(out_type='niigz', args=tr_str), name='fixed_tr_fullspectrum')
    # wf.connect(selectfiles, 'epi_MNI_fullspectrum', fixed_tr_fullspectrum, 'in_file')

    #####################
    # calc FD
    #####################
    FD_P = Node(util.Function(input_names=['in_file'],
                              output_names=['FD_ts_file', 'mean_FD_file', 'max_FD_file'],
                              function=calculate_FD_P),
                name='FD_P')
    wf.connect(selectfiles, 'moco_parms_file', FD_P, 'in_file')
    # NOTE: the DataSink target strings in the next five connects were mangled by the
    # page's e-mail obfuscation; the '@name' parts are reconstructed from the output
    # names, and any '<folder>.' prefix is not recoverable from this copy
    wf.connect(FD_P, 'FD_ts_file', ds, '@FD')
    wf.connect(FD_P, 'mean_FD_file', ds, '@mean_FD')
    wf.connect(FD_P, 'max_FD_file', ds, '@max_FD')

    FD_J = Node(util.Function(input_names=['in_file'],
                              output_names=['FD_ts_file', 'mean_FD_file', 'max_FD_file'],
                              function=calculate_FD_J),
                name='FD_J')
    wf.connect(selectfiles, 'jenkinson_file', FD_J, 'in_file')
    wf.connect(FD_J, 'FD_ts_file', ds, '@FD_J')
    wf.connect(FD_J, 'mean_FD_file', ds, '@mean_FD_J')
#......... rest of the code omitted .........
Author: fliem | Project: LeiCA_LIFE | Code lines: 101 | Source: calc_metrics.py


Example 12: init_mriqc

def init_mriqc(opts, retval):
    """Build the workflow enumerator"""

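    # NOTE: Path (pathlib) and cpu_count (multiprocessing) are used below but imported
    # at the top of the original module, outside this excerpt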
    from bids.grabbids import BIDSLayout
    from nipype import config as ncfg
    from nipype.pipeline.engine import Workflow

    from ..utils.bids import collect_bids_data
    from ..workflows.core import build_workflow

    retval['workflow'] = None
    retval['plugin_settings'] = None

    # Build settings dict
    bids_dir = Path(opts.bids_dir).expanduser()
    output_dir = Path(opts.output_dir).expanduser()

    # Number of processes
    n_procs = opts.n_procs or cpu_count()

    settings = {
        'bids_dir': bids_dir.resolve(),
        'output_dir': output_dir.resolve(),
        'work_dir': opts.work_dir.expanduser().resolve(),
        'write_graph': opts.write_graph,
        'n_procs': n_procs,
        'testing': opts.testing,
        'hmc_afni': opts.hmc_afni,
        'hmc_fsl': opts.hmc_fsl,
        'fft_spikes_detector': opts.fft_spikes_detector,
        'ants_nthreads': opts.ants_nthreads,
        'ants_float': opts.ants_float,
        'verbose_reports': opts.verbose_reports or opts.testing,
        'float32': opts.float32,
        'ica': opts.ica,
        'no_sub': opts.no_sub,
        'email': opts.email,
        'fd_thres': opts.fd_thres,
        'webapi_url': opts.webapi_url,
        'webapi_port': opts.webapi_port,
        'upload_strict': opts.upload_strict,
    }

    if opts.hmc_afni:
        settings['deoblique'] = opts.deoblique
        settings['despike'] = opts.despike
        settings['correct_slice_timing'] = opts.correct_slice_timing
        if opts.start_idx:
            settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
            settings['stop_idx'] = opts.stop_idx

    if opts.ants_settings:
        settings['ants_settings'] = opts.ants_settings

    if opts.dsname:
        settings['dataset_name'] = opts.dsname

    log_dir = settings['output_dir'] / 'logs'

    # Create directories
    log_dir.mkdir(parents=True, exist_ok=True)
    settings['work_dir'].mkdir(parents=True, exist_ok=True)

    # Set nipype config
    ncfg.update_config({
        'logging': {'log_directory': str(log_dir), 'log_to_file': True},
        'execution': {
            'crashdump_dir': str(log_dir), 'crashfile_format': 'txt',
            'resource_monitor': opts.profile},
    })

    # Plugin configuration
    plugin_settings = {}
    if n_procs == 1:
        plugin_settings['plugin'] = 'Linear'

        if settings['ants_nthreads'] == 0:
            settings['ants_nthreads'] = 1
    else:
        plugin_settings['plugin'] = 'MultiProc'
        plugin_settings['plugin_args'] = {'n_procs': n_procs}
        if opts.mem_gb:
            plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb

        if settings['ants_nthreads'] == 0:
            # always leave one extra thread for non ANTs work,
            # don't use more than 8 threads - the speed up is minimal
            settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)

    # Overwrite options if --use-plugin provided
    if opts.use_plugin and opts.use_plugin.exists():
        from yaml import load as loadyml
        with opts.use_plugin.open() as pfile:
            plugin_settings.update(loadyml(pfile))

    # Process data types
    modalities = opts.modalities

    layout = BIDSLayout(str(settings['bids_dir']),
#......... rest of the code omitted .........
Author: oesteban | Project: mriqc | Code lines: 101 | Source: mriqc_run.py


Example 13: process

    def process(self):
        # Process time
        now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
       
        # Initialization
        if os.path.exists(os.path.join(self.base_directory,"LOG","pypeline.log")):
            os.unlink(os.path.join(self.base_directory,"LOG","pypeline.log"))
        config.update_config({'logging': {'log_directory': os.path.join(self.base_directory,"LOG"),
                                  'log_to_file': True},
                              'execution': {'remove_unnecessary_outputs': False}
                              })
        logging.update_logging(config)
        iflogger = logging.getLogger('interface')
       
        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(outfields = ['fMRI','T1','T2']), name='datasource')
        datasource.inputs.base_directory = os.path.join(self.base_directory,'NIFTI')
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
        datasource.inputs.field_template = dict(fMRI='fMRI.nii.gz',T1='T1.nii.gz',T2='T2.nii.gz')
        datasource.inputs.sort_filelist=False
       
        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="fMRI_sinker")
        sinker.inputs.base_directory = os.path.join(self.base_directory, "RESULTS")
        
        # Clear previous outputs
        self.clear_stages_outputs()
        
        # Create common_flow
        common_flow = self.create_common_flow()
        
        # Create fMRI flow
        
        fMRI_flow = pe.Workflow(name='fMRI_pipeline')
        fMRI_inputnode = pe.Node(interface=util.IdentityInterface(fields=["fMRI","T1","T2","subjects_dir","subject_id","wm_mask_file","roi_volumes","wm_e
#......... rest of the code omitted .........
