
Python searchlight.sphere_searchlight function code examples


This article collects typical usage examples of the Python function mvpa2.measures.searchlight.sphere_searchlight. If you are wondering what sphere_searchlight does, how to call it, or what real uses look like, the curated code examples here should help.



Below are 20 code examples of the sphere_searchlight function, sorted by popularity by default; a minimal call sketch precedes them for orientation.
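The sketch is assembled from the imports and dataset setup that recur in the examples below (it is a usage sketch, not one of the collected examples):

from mvpa2.clfs.gnb import GNB
from mvpa2.generators.partition import NFoldPartitioner
from mvpa2.measures.base import CrossValidation
from mvpa2.measures.searchlight import sphere_searchlight
from mvpa2.testing.datasets import datasets

ds = datasets['3dsmall'].copy()
ds.fa['voxel_indices'] = ds.fa.myspace  # searchlight needs spatial coordinates per feature
# the measure evaluated within each sphere -- here cross-validated GNB classification
cv = CrossValidation(GNB(), NFoldPartitioner())
sl = sphere_searchlight(cv, radius=1)   # a sphere of radius 1 around every feature
res = sl(ds)                            # result dataset: one column per sphere center

The result is itself a dataset, so it can be mapped back to a volume with map2nifti, as several of the examples below demonstrate.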

Example 1: test_preallocate_output

    def test_preallocate_output(self, nblocks):
        ds = datasets['3dsmall'].copy()[:, :25] # smaller copy
        ds.fa['voxel_indices'] = ds.fa.myspace
        ds.fa['feature_id'] = np.arange(ds.nfeatures)

        def measure(ds):
            # return more than one sample
            return np.repeat(ds.fa.feature_id, 10, axis=0)

        nprocs = [1, 2] if externals.exists('pprocess') else [1]
        enable_ca = ['roi_sizes', 'raw_results', 'roi_feature_ids']
        for nproc in nprocs:
            sl = sphere_searchlight(measure,
                                    radius=0,
                                    center_ids=np.arange(ds.nfeatures),
                                    nproc=nproc,
                                    enable_ca=enable_ca,
                                    nblocks=nblocks
                                    )
            sl_inplace = sphere_searchlight(measure,
                                    radius=0,
                                    preallocate_output=True,
                                    center_ids=np.arange(ds.nfeatures),
                                    nproc=nproc,
                                    enable_ca=enable_ca,
                                    nblocks=nblocks
                                    )
            out = sl(ds)
            out_inplace = sl_inplace(ds)

            for c in enable_ca:
                assert_array_equal(sl.ca[c].value, sl_inplace.ca[c].value)
            assert_array_equal(out.samples, out_inplace.samples)
            assert_array_equal(out.fa.center_ids, out_inplace.fa.center_ids)
Developer: PyMVPA, Project: PyMVPA, Lines: 34, Source: test_searchlight.py


Example 2: test_PDistTargetSimilaritySearchlight

def test_PDistTargetSimilaritySearchlight():
    # Test ability to use PDistTargetSimilarity in a searchlight
    from mvpa2.testing.datasets import datasets
    from mvpa2.mappers.fx import mean_group_sample
    from mvpa2.mappers.shape import TransposeMapper
    from mvpa2.measures.searchlight import sphere_searchlight
    ds = datasets['3dsmall'][:, :3]
    ds.fa['voxel_indices'] = ds.fa.myspace
    # use chunks values (4 of them) for targets
    ds.sa['targets'] = ds.sa.chunks
    ds = mean_group_sample(['chunks'])(ds)
    tdsm = np.arange(6)
    # We can run on full dataset
    tdcm1 = PDistTargetSimilarity(tdsm)
    a1 = tdcm1(ds)
    assert_array_equal(a1.fa.metrics, ['rho', 'p'])

    tdcm1_rho = PDistTargetSimilarity(tdsm, corrcoef_only=True)
    sl_rho = sphere_searchlight(tdcm1_rho)(ds)
    assert_array_equal(sl_rho.shape, (1, ds.nfeatures))

    # now with both but we need to transpose datasets
    tdcm1_both = PDistTargetSimilarity(tdsm, postproc=TransposeMapper())
    sl_both = sphere_searchlight(tdcm1_both)(ds)
    assert_array_equal(sl_both.shape, (2, ds.nfeatures))
    assert_array_equal(sl_both.sa.metrics, ['rho', 'p'])
    # rho must be exactly the same
    assert_array_equal(sl_both.samples[0], sl_rho.samples[0])
    # just because we are here and we can
    # Actually here for some reason assert_array_lequal gave trouble
    assert_true(np.all(sl_both.samples[1] <= 1.0))
    assert_true(np.all(0 <= sl_both.samples[1]))
Developer: beausievers, Project: PyMVPA, Lines: 32, Source: test_rsa.py


Example 3: test_partial_searchlight_with_full_report

    def test_partial_searchlight_with_full_report(self):
        ds = self.dataset.copy()
        center_ids = np.zeros(ds.nfeatures, dtype='bool')
        center_ids[[3, 50]] = True
        ds.fa['center_ids'] = center_ids
        # compute N-1 cross-validation for each sphere
        cv = CrossValidation(sample_clf_lin, NFoldPartitioner())
        # construct diameter 1 (or just radius 0) searchlight
        # one time give center ids as a list, the other one takes it from the
        # dataset itself
        sls = (sphere_searchlight(cv, radius=0, center_ids=[3, 50]),
               sphere_searchlight(cv, radius=0, center_ids='center_ids'))
        for sl in sls:
            # run searchlight
            results = sl(ds)
            # only two spheres but error for all CV-folds
            self.assertEqual(results.shape, (len(self.dataset.UC), 2))
        # test if we graciously puke if center_ids are out of bounds
        dataset0 = ds[:, :50]  # so we have no 50th feature
        self.assertRaises(IndexError, sls[0], dataset0)
        # but it should be fine on the one that gets the ids from the dataset
        # itself
        results = sl(dataset0)
        assert_equal(results.nfeatures, 1)
        # check whether roi_seeds are correct
        sl = sphere_searchlight(lambda x: np.vstack((x.fa.roi_seed, x.samples)),
                                radius=1, add_center_fa=True, center_ids=[12])
        res = sl(ds)
        assert_array_equal(res.samples[1:, res.samples[0].astype('bool')].squeeze(),
                           ds.samples[:, 12])
Developer: schoeke, Project: PyMVPA, Lines: 30, Source: test_searchlight.py


Example 4: test_chained_crossvalidation_searchlight

def test_chained_crossvalidation_searchlight():
    from mvpa2.clfs.gnb import GNB
    from mvpa2.clfs.meta import MappedClassifier
    from mvpa2.generators.partition import NFoldPartitioner
    from mvpa2.mappers.base import ChainMapper
    from mvpa2.mappers.base import Mapper
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.testing.datasets import datasets

    dataset = datasets['3dlarge'].copy()
    dataset.fa['voxel_indices'] = dataset.fa.myspace
    sample_clf = GNB()              # fast and deterministic

    class ZScoreFeaturesMapper(Mapper):
        """Very basic mapper which would take care about standardizing
        all features within each sample separately
        """
        def _forward_data(self, data):
            return (data - np.mean(data, axis=1)[:, None])/np.std(data, axis=1)[:, None]

    # only do partial to save time
    sl_kwargs = dict(radius=2, center_ids=[3, 50])
    clf_mapped = MappedClassifier(sample_clf, ZScoreFeaturesMapper())
    cv = CrossValidation(clf_mapped, NFoldPartitioner())
    sl = sphere_searchlight(cv, **sl_kwargs)
    results_mapped = sl(dataset)

    cv_chained = ChainMapper([ZScoreFeaturesMapper(auto_train=True),
                              CrossValidation(sample_clf, NFoldPartitioner())])
    sl_chained = sphere_searchlight(cv_chained, **sl_kwargs)
    results_chained = sl_chained(dataset)

    assert_array_equal(results_mapped, results_chained)
Developer: beausievers, Project: PyMVPA, Lines: 34, Source: test_usecases.py


Example 5: test_nblocks

    def test_nblocks(self):
        skip_if_no_external('pprocess')
        # just a basic test to see that we are getting the same
        # results with different nblocks
        ds = datasets['3dsmall'].copy(deep=True)[:, :13]
        ds.fa['voxel_indices'] = ds.fa.myspace
        cv = CrossValidation(GNB(), OddEvenPartitioner())
        res1 = sphere_searchlight(cv, radius=1, nproc=2)(ds)
        res2 = sphere_searchlight(cv, radius=1, nproc=2, nblocks=5)(ds)
        assert_array_equal(res1, res2)
Developer: kirty, Project: PyMVPA, Lines: 10, Source: test_searchlight.py


Example 6: test_usecase_concordancesl

    def test_usecase_concordancesl(self):
        import numpy as np
        from mvpa2.base.dataset import vstack
        from mvpa2.mappers.fx import mean_sample

        # Take our sample 3d dataset
        ds1 = datasets['3dsmall'].copy(deep=True)
        ds1.fa['voxel_indices'] = ds1.fa.myspace
        ds1.sa['subject'] = [1]  # not really necessary -- but let's keep it for clarity
        ds1 = mean_sample()(ds1) # so we get just a single representative sample

        def corr12(ds):
            corr = np.corrcoef(ds.samples)
            assert(corr.shape == (2, 2)) # for paranoid ones
            return corr[0, 1]

        for nsc, thr, thr_mean in (
            (0, 1.0, 1.0),
            (0.1, 0.3, 0.8)):   # just a bit of noise
            ds2 = ds1.copy(deep=True)    # make a copy for the 2nd subject
            ds2.sa['subject'] = [2]
            ds2.samples += nsc * np.random.normal(size=ds1.shape)

            # make sure that both have the same voxel indices
            assert(np.all(ds1.fa.voxel_indices == ds2.fa.voxel_indices))
            ds_both = vstack((ds1, ds2))# join 2 images into a single dataset
                                        # with .sa.subject distinguishing both

            sl = sphere_searchlight(corr12, radius=2)
            slmap = sl(ds_both)
            ok_(np.all(slmap.samples >= thr))
            ok_(np.mean(slmap.samples) >= thr_mean)
Developer: kirty, Project: PyMVPA, Lines: 32, Source: test_searchlight.py


Example 7: test_searchlight_cross_decoding

def test_searchlight_cross_decoding(path, subjects, conf_file, type, **kwargs):

    conf = read_configuration(path, conf_file, type)

    for arg in kwargs:
        conf[arg] = kwargs[arg]
        if arg == 'radius':
            radius = kwargs[arg]

    debug.active += ["SLC"]

    ds_merged = get_merged_ds(path, subjects, conf_file, type, **kwargs)

    clf = LinearCSVMC(C=1, probability=1, enable_ca=['probabilities'])
    cv = CrossValidation(clf, NFoldPartitioner(attr='task'))

    maps = []

    for ds in ds_merged:

        ds.targets[ds.targets == 'point'] = 'face'
        ds.targets[ds.targets == 'saccade'] = 'place'

        sl = sphere_searchlight(cv, radius, space='voxel_indices')

        sl_map = sl(ds)

        sl_map.samples *= -1
        sl_map.samples += 1

        nif = map2nifti(sl_map, imghdr=ds.a.imghdr)

        maps.append(nif)

    datetime = get_time()
    analysis = 'cross_searchlight'
    mask = conf['mask_area']
    task = type

    new_dir = datetime+'_'+analysis+'_'+mask+'_'+task
    command = 'mkdir '+os.path.join(path, '0_results', new_dir)
    os.system(command)

    parent_dir = os.path.join(path, '0_results', new_dir)

    for s, map in zip(subjects, maps):
        name = s
        command = 'mkdir '+os.path.join(parent_dir, name)
        os.system(command)

        results_dir = os.path.join(parent_dir, name)
        fname = name+'_radius_'+str(radius)+'_searchlight_map.nii.gz'
        map.to_filename(os.path.join(results_dir, fname))

    return maps
Developer: robbisg, Project: mvpa_itab_wu, Lines: 58, Source: test_wu.py


Example 8: test_searchlight_errors_per_trial

def test_searchlight_errors_per_trial():
    # To make sure that searchlight can return error/accuracy per trial
    from mvpa2.clfs.gnb import GNB
    from mvpa2.generators.partition import OddEvenPartitioner
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.measures.gnbsearchlight import sphere_gnbsearchlight
    from mvpa2.testing.datasets import datasets
    from mvpa2.misc.errorfx import prediction_target_matches

    dataset = datasets['3dsmall'].copy()
    # randomly permute samples so we break any random correspondence
    # to strengthen tests below
    sample_idx = np.arange(len(dataset))
    dataset = dataset[np.random.permutation(sample_idx)]

    dataset.sa.targets = ['L%d' % l for l in dataset.sa.targets]
    dataset.fa['voxel_indices'] = dataset.fa.myspace
    sample_clf = GNB()              # fast and deterministic

    part = OddEvenPartitioner()
    # only do partial to save time
    cv = CrossValidation(sample_clf, part, errorfx=None) #prediction_target_matches)
    # Just to compare error
    cv_error = CrossValidation(sample_clf, part)

    # Large searchlight radius so we get entire ROI, 2 centers just to make sure
    # that all stacking works correctly
    sl = sphere_searchlight(cv, radius=10, center_ids=[0, 1])
    results = sl(dataset)

    sl_gnb = sphere_gnbsearchlight(sample_clf, part, radius=10, errorfx=None,
                                   center_ids=[0, 1])
    results_gnbsl = sl_gnb(dataset)

    # inspect both results
    # verify that partitioning was done correctly
    partitions = list(part.generate(dataset))
    for res in (results, results_gnbsl):
        assert('targets' in res.sa.keys())  # should carry targets
        assert('cvfolds' in res.sa.keys())  # should carry cvfolds
        for ipart in xrange(len(partitions)):
            assert_array_equal(dataset[partitions[ipart].sa.partitions == 2].targets,
                               res.sa.targets[res.sa.cvfolds == ipart])

    assert_datasets_equal(results, results_gnbsl)

    # one "accuracy" per each trial
    assert_equal(results.shape, (len(dataset), 2))
    # with accuracies the same in both searchlights since the same
    # features were to be selected in both cases due to the large radii
    errors_dataset = cv(dataset)
    assert_array_equal(errors_dataset.samples[:, 0], results.samples[:, 0])
    assert_array_equal(errors_dataset.samples[:, 0], results.samples[:, 1])
    # and error matching (up to precision) the one if we run with default error function
    assert_array_almost_equal(np.mean(results.targets[:, None] != results.samples, axis=0)[0],
                              np.mean(cv_error(dataset)))
Developer: beausievers, Project: PyMVPA, Lines: 57, Source: test_usecases.py


Example 9: test_regression_with_additional_sa

    def test_regression_with_additional_sa(self):
        regr = regrswh[:][0]
        ds = datasets['3dsmall'].copy()
        ds.fa['voxel_indices'] = ds.fa.myspace

        # Create a new sample attribute which will be used along with
        # every searchlight
        ds.sa['beh'] = np.random.normal(size=(ds.nsamples, 2))

        # and now for fun -- let's create a custom linear regression
        # targets out of some random feature and beh linearly combined
        rfeature = np.random.randint(ds.nfeatures)
        ds.sa.targets = np.dot(
            np.hstack((ds.sa.beh,
                       ds.samples[:, rfeature:rfeature + 1])),
            np.array([0.3, 0.2, 0.3]))

        class CrossValidationWithBeh(CrossValidation):
            """An adapter for regular CV which would hstack
               sa.beh to the searchlighting ds"""
            def _call(self, ds):
                return CrossValidation._call(
                    self,
                    Dataset(np.hstack((ds, ds.sa.beh)),
                            sa=ds.sa))
        cvbeh = CrossValidationWithBeh(regr, OddEvenPartitioner(),
                                       errorfx=corr_error)
        # regular cv
        cv = CrossValidation(regr, OddEvenPartitioner(),
                             errorfx=corr_error)

        slbeh = sphere_searchlight(cvbeh, radius=1)
        slmapbeh = slbeh(ds)
        sl = sphere_searchlight(cv, radius=1)
        slmap = sl(ds)

        assert_equal(slmap.shape, (2, ds.nfeatures))
        # SL which had access to beh should have got for sure better
        # results especially in the vicinity of the chosen feature...
        features = sl.queryengine.query_byid(rfeature)
        assert_array_lequal(slmapbeh.samples[:, features],
                            slmap.samples[:, features])
Developer: kirty, Project: PyMVPA, Lines: 42, Source: test_searchlight.py


Example 10: test_swaroop_case

    def test_swaroop_case(self, preallocate_output):
        """Test hdf5 backend to pass results on Swaroop's usecase
        """
        skip_if_no_external('h5py')
        from mvpa2.measures.base import Measure
        class sw_measure(Measure):
            def __init__(self):
                Measure.__init__(self, auto_train=True)
            def _call(self, dataset):
                # For performance measures -- increase to 50-200
                # np.sum here is just to get some meaningful value in
                # them
                #return np.ones(shape=(2, 2))*np.sum(dataset)
                return Dataset(
                    np.array([{'d': np.ones(shape=(5, 5)) * np.sum(dataset)}],
                             dtype=object))
        results = []
        ds = datasets['3dsmall'].copy(deep=True)
        ds.fa['voxel_indices'] = ds.fa.myspace

        our_custom_prefix = tempfile.mktemp()
        for backend in ['native'] + \
                (externals.exists('h5py') and ['hdf5'] or []):
            sl = sphere_searchlight(sw_measure(),
                                    radius=1,
                                    tmp_prefix=our_custom_prefix,
                                    results_backend=backend,
                                    preallocate_output=preallocate_output)
            t0 = time.time()
            results.append(np.asanyarray(sl(ds)))
            # print "Done for backend %s in %d sec" % (backend, time.time() - t0)
        # because of swaroop's ad-hoc (who only could recommend such
        # a construct?) use case, and absent fancy working assert_objectarray_equal
        # let's compare manually
        #assert_objectarray_equal(*results)
        if not externals.exists('h5py'):
            self.assertRaises(RuntimeError,
                              sphere_searchlight,
                              sw_measure(),
                              results_backend='hdf5')
            raise SkipTest('h5py required for test of backend="hdf5"')
        assert_equal(results[0].shape, results[1].shape)
        results = [r.flatten() for r in results]
        for x, y in zip(*results):
            assert_equal(x.keys(), y.keys())
            assert_array_equal(x['d'], y['d'])
        # verify that no junk is left behind
        tempfiles = glob.glob(our_custom_prefix + '*')
        assert_equal(len(tempfiles), 0)
Developer: PyMVPA, Project: PyMVPA, Lines: 49, Source: test_searchlight.py


Example 11: searchlight

    def searchlight(self, ds, cvte):

        sl = sphere_searchlight(cvte,
                                radius=self._radius,
                                space='voxel_indices')
        sl_map = sl(ds)
        sl_map.samples *= -1
        sl_map.samples += 1

        map_ = map2nifti(sl_map, imghdr=ds.a.imghdr)
        map_ = ni.Nifti1Image(map_.get_data(), affine=ds.a.imgaffine)

        self.maps.append(map_)

        return map_
Developer: robbisg, Project: mvpa_itab_wu, Lines: 15, Source: __init__.py


Example 12: test_agreement_surface_volume

    def test_agreement_surface_volume(self):
        '''test agreement between volume-based and surface-based
        searchlights when using euclidean measure'''

        #import runner
        def sum_ds(ds):
            return np.sum(ds)

        radius = 3

        # make a small dataset with a mask
        sh = (10, 10, 10)
        msk = np.zeros(sh)
        for i in xrange(0, sh[0], 2):
            msk[i, :, :] = 1
        vg = volgeom.VolGeom(sh, np.identity(4), mask=msk)

        # make an image
        nt = 6
        img = vg.get_masked_nifti_image(nt)
        ds = fmri_dataset(img, mask=msk)


        # run the searchlight
        sl = sphere_searchlight(sum_ds, radius=radius)
        m = sl(ds)

        # now use surface-based searchlight
        v = volsurf.from_volume(ds)
        source_surf = v.intermediate_surface
        node_msk = np.logical_not(np.isnan(source_surf.vertices[:, 0]))

        # check that the mask matches with what we used earlier
        assert_array_equal(msk.ravel() + 0., node_msk.ravel() + 0.)

        source_surf_nodes = np.nonzero(node_msk)[0]

        sel = surf_voxel_selection.voxel_selection(v, float(radius),
                                        source_surf=source_surf,
                                        source_surf_nodes=source_surf_nodes,
                                        distance_metric='euclidean')

        qe = queryengine.SurfaceVerticesQueryEngine(sel)
        sl = Searchlight(sum_ds, queryengine=qe)
        r = sl(ds)

        # check whether they give the same results
        assert_array_equal(r.samples, m.samples)
Developer: Arthurkorn, Project: PyMVPA, Lines: 48, Source: test_surfing.py


Example 13: run_searchlight

def run_searchlight(ds, metric='correlation', radius=2, center_ids=None, n_cpu=None):

    if metric == 'cca_u':
        measure = cca_uncentered
    elif metric == 'cca':
        measure = cca
    elif metric == '1_to_many_cca':
        measure = cca_one_to_all
    elif metric == 'all_cca':
        measure = all_cca
    elif metric == 'cca_validate':
        measure = cca_validate
    elif metric == 'cca_validate_max':
        measure = cca_validate_max
    elif metric == 'correlation':
        measure = pearsons_average
    elif metric == 'all_pearsons':
        measure = all_pearsons_averages
    elif metric == 'pvalues':
        measure = pvalues
    elif metric == 'tvalues':
        measure = tvalues
    elif metric == "timepoint_double_corr":
        measure = timepoint_double_corr
    elif metric == "scene_based_double_corr":
        measure = scene_based_double_corr
    elif metric == "event_svm_cv":
        measure = event_svm_cross_validation
    elif metric =="event_svm_cv_cm":
        measure = event_svm_cross_validation_confusion_matrix
    elif metric == "cluster_scenes":
        measure = cluster_scenes
    elif metric == "cluster_scenes_track_indices":
        measure = cluster_scenes_track_indices
    elif metric == "cluster_scenes_return_indices":
        measure = cluster_scenes_return_indices
    else:
        measure = metric
        
    sl = sphere_searchlight(measure, radius=radius, center_ids=center_ids, nproc=n_cpu)

    searched_ds = sl(ds)
    searched_ds.fa = ds.fa
    searched_ds.a = ds.a

    return searched_ds
Developer: Zpeugh, Project: WagnerLab, Lines: 46, Source: dataset_utilities.py


Example 14: test_chi_square_searchlight

    def test_chi_square_searchlight(self):
        # only do partial to save time

        # Can't yet do this since test_searchlight isn't yet "under nose"
        #skip_if_no_external('scipy')
        if not externals.exists('scipy'):
            return

        from mvpa2.misc.stats import chisquare

        cv = CrossValidation(sample_clf_lin, NFoldPartitioner(),
                enable_ca=['stats'])


        def getconfusion(data):
            cv(data)
            return chisquare(cv.ca.stats.matrix)[0]

        sl = sphere_searchlight(getconfusion, radius=0,
                         center_ids=[3, 50])

        # run searchlight
        results = sl(self.dataset)
        self.assertTrue(results.nfeatures == 2)
Developer: kirty, Project: PyMVPA, Lines: 24, Source: test_searchlight.py


Example 15: test_custom_results_fx_logic

    def test_custom_results_fx_logic(self):
        # results_fx was introduced for the blow-up-the-memory-Swaroop
        # where keeping all intermediate results of the dark-magic SL
        # hyperalignment is not feasible.  So it is desired to split
        # searchlight computation in more blocks while composing the
        # target result "on-the-fly" from available so far results.
        #
        # Implementation relies on using generators feeding the
        # results_fx with fresh results whenever those become
        # available.
        #
        # This test/example's "measure" creates files which should be
        # handled by the results_fx function and removed in this case
        # to check if we indeed have desired high number of blocks while
        # only limited nproc.
        skip_if_no_external('pprocess')

        tfile = tempfile.mktemp('mvpa', 'test-sl')

        ds = datasets['3dsmall'].copy()[:, :71] # smaller copy
        ds.fa['voxel_indices'] = ds.fa.myspace
        ds.fa['feature_id'] = np.arange(ds.nfeatures)

        nproc = 3 # it is not about computing -- so we can
                  # start more processes than we have CPUs just to test
        nblocks = nproc * 7
        # figure out max number of features to be given to any proc_block
        # yoh: not sure why I had to +1 here... but now it became more robust and
        # still seems to be doing what was demanded so be it
        max_block = int(ceil(ds.nfeatures / float(nblocks)) + 1)

        def print_(s, *args):
            """For local debugging"""
            #print s, args
            pass

        def results_fx(sl=None, dataset=None, roi_ids=None, results=None):
            """It will "process" the results by removing those files
               generated inside the measure
            """
            res = []
            print_("READY")
            for x in results:
                ok_(isinstance(x, list))
                res.append(x)
                print_("R: ", x)
                for r in x:
                    # Can happen if we requested those .ca's enabled
                    # -- then automagically _proc_block would wrap
                    # results in a dataset... Originally detected by
                    # running with MVPA_DEBUG=.* which triggered
                    # enabling all ca's
                    if is_datasetlike(r):
                        r = np.asscalar(r.samples)
                    os.unlink(r)         # remove generated file
                print_("WAITING")
            return hstack(sum(res, []))

        def measure(ds):
            """The "measure" will check if a run with the same "index" from
               previous block has been processed by now
            """
            f = '%s+%03d' % (tfile, ds.fa.feature_id[0] % (max_block * nproc))
            print_("FID:%d f:%s" % (ds.fa.feature_id[0], f))

            # allow for up to few seconds to wait for the file to
            # disappear -- i.e. its result from previous "block" was
            # processed
            t0 = time.time()
            while os.path.exists(f) and time.time() - t0 < 4.:
                time.sleep(0.5) # so it does take time to compute the measure
            if os.path.exists(f):
                print_("ERROR: ", f)
                raise AssertionError("File %s must have been processed by now"
                                     % f)
            open(f, 'w').write('XXX')   # signal that we have computed this measure
            print_("RES: %s" % f)
            return f

        sl = sphere_searchlight(measure,
                                radius=0,
                                nproc=nproc,
                                nblocks=nblocks,
                                results_fx=results_fx,
                                center_ids=np.arange(ds.nfeatures)
                                )

        assert_equal(len(glob.glob(tfile + '*')), 0) # so no junk around
        try:
            res = sl(ds)
        finally:
            # remove those generated left-over files
            for f in glob.glob(tfile + '*'):
                os.unlink(f)
Developer: kirty, Project: PyMVPA, Lines: 94, Source: test_searchlight.py


Example 16: test_spatial_searchlight

    def test_spatial_searchlight(self, common_variance=True, do_roi=False):
        """Tests both generic and GNBSearchlight
        Test of GNBSearchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to inherent
        #      iterative process.  Therefore lets use something quick
        #      and pure Python
        gnb = GNB(common_variance=common_variance)
        cv = CrossValidation(gnb, NFoldPartitioner())

        ds = datasets['3dsmall'].copy()
        ds.fa['voxel_indices'] = ds.fa.myspace

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results'])

        if do_roi:
            # select some random set of features
            nroi = rnd.randint(1, ds.nfeatures)
            # and lets compute the full one as well once again so we have a reference
            # which will be excluded itself from comparisons but values will be compared
            # for selected roi_id
            sl_all = sphere_gnbsearchlight(gnb, NFoldPartitioner(cvtype=1),
                                           **skwargs)
            result_all = sl_all(ds)
            # select random features
            roi_ids = rnd.permutation(range(ds.nfeatures))[:nroi]
            skwargs['center_ids'] = roi_ids
        else:
            nroi = ds.nfeatures

        sls = [sphere_searchlight(cv, **skwargs),
               #GNBSearchlight(gnb, NFoldPartitioner(cvtype=1))
               sphere_gnbsearchlight(gnb, NFoldPartitioner(cvtype=1),
                                     indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ sphere_gnbsearchlight(gnb, NFoldPartitioner(cvtype=1),
                                           indexsum='sparse', **skwargs)]

        # Just test nproc whenever common_variance is True
        if externals.exists('pprocess') and common_variance:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]

        all_results = []
        for sl in sls:
            # run searchlight
            results = sl(ds)
            all_results.append(results)
            #print `sl`
            # check for correct number of spheres
            self.assertTrue(results.nfeatures == nroi)
            # and measures (one per xfold)
            self.assertTrue(len(results) == len(ds.UC))

            # check for chance-level performance across all spheres
            # makes sense only if number of features was big enough
            # to get some stable estimate of mean
            if not do_roi or nroi > 20:
                self.assertTrue(0.4 < results.samples.mean() < 0.6)

            mean_errors = results.samples.mean(axis=0)
            # that we do get different errors ;)
            self.assertTrue(len(np.unique(mean_errors)) > 3)

            # check reasonable sphere sizes
            self.assertTrue(len(sl.ca.roi_sizes) == nroi)
            if do_roi:
                # for roi we should relax conditions a bit
                self.assertTrue(max(sl.ca.roi_sizes) <= 7)
                self.assertTrue(min(sl.ca.roi_sizes) >= 4)
            else:
                self.assertTrue(max(sl.ca.roi_sizes) == 7)
                self.assertTrue(min(sl.ca.roi_sizes) == 4)

            # check base-class state
            self.assertEqual(sl.ca.raw_results.nfeatures, nroi)

            # Test if we got results correctly for 'selected' roi ids
            if do_roi:
                assert_array_equal(result_all[:, roi_ids], results)



        if len(all_results) > 1:
            # if we had multiple searchlights, we can check whether they all
            # gave the same result (they should have)
            aresults = np.array([a.samples for a in all_results])
            dresults = np.abs(aresults - aresults.mean(axis=0))
            dmax = np.max(dresults)
            self.assertTrue(dmax <= 1e-13)
Developer: schoeke, Project: PyMVPA, Lines: 94, Source: test_searchlight.py


Example 17: sphere_searchlight

plot_args = {
    'background' : os.path.join(sessionPath,'/home/brain/host/pymvpaniifiles/anat.nii.gz'),
    'background_mask' : os.path.join(sessionPath,'/home/brain/host/pymvpaniifiles/mask_brain.nii.gz'),
    'overlay_mask' : os.path.join(sessionPath,'analyze/structural/lc2ms_deskulled.hdr'),
    'do_stretch_colors' : False,
    'cmap_bg' : 'gray',
    'cmap_overlay' : 'autumn', # pl.cm.autumn
    'interactive' : cfg.getboolean('examples', 'interactive', True),
    }

for radius in [3]:
    # tell which one we are doing
    print 'Running searchlight with radius: %i ...' % radius

    sl = sphere_searchlight(foldwiseCvedAnovaSelectedSMLR, radius=radius, space='voxel_indices',
                            center_ids=center_ids,
                            postproc=mean_sample())

    ds = dataset.copy(deep=False,
                      sa=['targets', 'chunks'],
                      fa=['voxel_indices'],
                      a=['mapper'])

    sl_map = sl(ds)
    sl_map.samples *= -1
    sl_map.samples += 1

    niftiresults = map2nifti(sl_map, imghdr=dataset.a.imghdr)
    niftiresults.to_filename(os.path.join(sessionPath,'analyze/functional/%s-grey-searchlight.nii') % classificationName)
    print 'Best performing sphere error:', np.min(sl_map.samples)
Developer: veezbo, Project: bilingual-language-switching, Lines: 30, Source: LanguageThink_Searchlight.py


Example 18: test_partial_searchlight_with_confusion_matrix

    def test_partial_searchlight_with_confusion_matrix(self):
        ds = self.dataset
        from mvpa2.clfs.stats import MCNullDist
        from mvpa2.mappers.fx import mean_sample, sum_sample

        # compute N-1 cross-validation for each sphere
        cm = ConfusionMatrix(labels=ds.UT)
        cv = CrossValidation(
            sample_clf_lin, NFoldPartitioner(),
            # we have to assure that matrix does not get flatted by
            # first vstack in cv and then hstack in searchlight --
            # thus 2 leading dimensions
            # TODO: RF? make searchlight/crossval smarter?
            errorfx=lambda *a: cm(*a)[None, None, :])
        # construct diameter 2 (or just radius 1) searchlight
        sl = sphere_searchlight(cv, radius=1, center_ids=[3, 5, 50])

        # our regular searchlight -- to compare results
        cv_gross = CrossValidation(sample_clf_lin, NFoldPartitioner())
        sl_gross = sphere_searchlight(cv_gross, radius=1, center_ids=[3, 5, 50])

        # run searchlights
        res = sl(ds)
        res_gross = sl_gross(ds)

        # only three spheres but error for all CV-folds and complete confusion matrix
        assert_equal(res.shape, (len(ds.UC), 3, len(ds.UT), len(ds.UT)))
        assert_equal(res_gross.shape, (len(ds.UC), 3))

        # briefly inspect the confusion matrices
        mat = res.samples
        # since input dataset is probably balanced (otherwise adjust
        # to be per label): sum within columns (thus axis=-2) should
        # be identical to per-class/chunk number of samples
        samples_per_classchunk = len(ds) / (len(ds.UT) * len(ds.UC))
        ok_(np.all(np.sum(mat, axis= -2) == samples_per_classchunk))
        # and if we compute accuracies manually -- they should
        # correspond to the one from sl_gross
        assert_array_almost_equal(res_gross.samples,
                           # from accuracies to errors
                           1 - (mat[..., 0, 0] + mat[..., 1, 1]).astype(float)
                           / (2 * samples_per_classchunk))

        # and now for those who remained seated -- let's perform H0 MC
        # testing of this searchlight... just a silly one with minimal
        # number of permutations
        no_permutations = 10
        permutator = AttributePermutator('targets', count=no_permutations)

        # once again -- need explicit leading dimension to avoid
        # vstacking during cross-validation
        cv.postproc = lambda x: sum_sample()(x)[None, :]

        sl = sphere_searchlight(cv, radius=1, center_ids=[3, 5, 50],
                                null_dist=MCNullDist(permutator, tail='right',
                                                     enable_ca=['dist_samples']))
        res_perm = sl(ds)
        # XXX all of the res_perm, sl.ca.null_prob and
        #     sl.null_dist.ca.dist_samples carry a degenerate leading
        #     dimension which was probably due to introduced new axis
        #     above within cv.postproc
        assert_equal(res_perm.shape, (1, 3, 2, 2))
        assert_equal(sl.null_dist.ca.dist_samples.shape,
                     res_perm.shape + (no_permutations,))
        assert_equal(sl.ca.null_prob.shape, res_perm.shape)
        # just to make sure ;)
        ok_(np.all(sl.ca.null_prob.samples >= 0))
        ok_(np.all(sl.ca.null_prob.samples <= 1))

        # we should have got sums of hits across the splits
        assert_array_equal(np.sum(mat, axis=0), res_perm.samples[0])
Developer: kirty, Project: PyMVPA, Lines: 71, Source: test_searchlight.py


Example 19: test_spatial_searchlight

    def test_spatial_searchlight(self, lrn_sllrn_SL_partitioner, do_roi=False,
                                 results_backend='native'):
        """Tests both generic and ad-hoc searchlights (e.g. GNBSearchlight)
        Test of and adhoc searchlight anyways requires a ground-truth
        comparison to the generic version, so we are doing sweepargs here
        """
        lrn, sllrn, SL, partitioner, correction = lrn_sllrn_SL_partitioner
        ## if results_backend == 'hdf5' and not common_variance:
        ##     # no need for full combination of all possible arguments here
        ##     return

        if __debug__ and 'ENFORCE_CA_ENABLED' in debug.active \
           and  isinstance(lrn, ChainMapper):
            raise SkipTest("Known to fail while trying to enable "
                           "training_stats for the ChainMapper (M1NN here)")


        # e.g. for M1NN we need plain kNN(1) for m1nnsl, but to imitate m1nn
        #      "learner" we must use a chainmapper atm
        if sllrn is None:
            sllrn = lrn
        ds = datasets['3dsmall'].copy()
        # Let's test multiclass here, so boost # of labels
        ds[6:18].T += 2
        ds.fa['voxel_indices'] = ds.fa.myspace

        # To assure that users do not run into incorrect operation due to overflows
        ds.samples += 5000
        ds.samples *= 1000
        ds.samples = ds.samples.astype(np.int16)

        # compute N-1 cross-validation for each sphere
        # YOH: unfortunately sample_clf_lin is not guaranteed
        #      to provide exactly the same results due to inherent
        #      iterative process.  Therefore lets use something quick
        #      and pure Python
        cv = CrossValidation(lrn, partitioner)

        skwargs = dict(radius=1, enable_ca=['roi_sizes', 'raw_results',
                                            'roi_feature_ids'])

        if do_roi:
            # select some random set of features
            nroi = rnd.randint(1, ds.nfeatures)
            # and lets compute the full one as well once again so we have a reference
            # which will be excluded itself from comparisons but values will be compared
            # for selected roi_id
            sl_all = SL(sllrn, partitioner, **skwargs)
            result_all = sl_all(ds)
            # select random features
            roi_ids = rnd.permutation(range(ds.nfeatures))[:nroi]
            skwargs['center_ids'] = roi_ids
        else:
            nroi = ds.nfeatures
            roi_ids = np.arange(nroi)
            result_all = None

        if results_backend == 'hdf5':
            skip_if_no_external('h5py')

        sls = [sphere_searchlight(cv, results_backend=results_backend,
                                  **skwargs),
               #GNBSearchlight(gnb, NFoldPartitioner(cvtype=1))
               SL(sllrn, partitioner, indexsum='fancy', **skwargs)
               ]

        if externals.exists('scipy'):
            sls += [ SL(sllrn, partitioner, indexsum='sparse', **skwargs)]

        # Test nproc just once
        if externals.exists('pprocess') and not self._tested_pprocess:
            sls += [sphere_searchlight(cv, nproc=2, **skwargs)]
            self._tested_pprocess = True

        # Provide the dataset and all those searchlights for testing
        #self._test_searchlights(ds, s 
