
Python zscore.zscore Function Code Examples


This article collects and summarizes typical usage examples of the Python function mvpa2.mappers.zscore.zscore. If you are wondering how exactly to use zscore, what it is for, or what calling it looks like in practice, the curated code examples below should help.



A total of 20 code examples of the zscore function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
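Before the collected examples, here is a minimal usage sketch (not taken from any of the projects below; the dataset shape, attribute values, and chunk layout are purely illustrative). zscore standardizes each feature of a PyMVPA Dataset in place, either across all samples when chunks_attr=None, or separately within each chunk.

import numpy as np
from mvpa2.datasets import Dataset
from mvpa2.mappers.zscore import zscore

# toy dataset: 8 samples x 4 features, all assigned to a single chunk
ds = Dataset(np.arange(32.).reshape((8, -1)),
             sa=dict(targets=list(range(8)), chunks=[0] * 8))

# z-score every feature across all samples, modifying ds in place
zscore(ds, chunks_attr=None)

# alternatively, estimate mean/std separately within each chunk (the default
# uses the 'chunks' sample attribute):
# zscore(ds, chunks_attr='chunks')

print(ds.samples.mean(axis=0))  # approximately 0 for every feature
print(ds.samples.std(axis=0))   # approximately 1 for every feature

Most of the examples below follow this same in-place pattern, varying only whether chunk-wise estimation (chunks_attr='chunks') or parameter estimation from baseline samples (e.g. param_est=('targets', ['rest'])) is requested.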

Example 1: prepare_subject_for_hyperalignment

def prepare_subject_for_hyperalignment(subject_label, bold_fname, mask_fname, out_dir):
    print('Loading data %s with mask %s' % (bold_fname, mask_fname))
    ds = fmri_dataset(samples=bold_fname, mask=mask_fname)
    zscore(ds, chunks_attr=None)
    out_fname = os.path.join(out_dir, 'sub-%s_data.hdf5' % subject_label)
    print('Saving to %s' % out_fname)
    h5save(out_fname, ds)
Developer: BIDS-Apps, Project: hyperalignment, Lines: 7, Source: run.py


Example 2: test_mapper_vs_zscore

def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20,-1).T,
                targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
Developer: Anhmike, Project: PyMVPA, Lines: 35, Source: test_zscoremapper.py


Example 3: _get_seed_means

 def _get_seed_means(self, measure, queryengine, dataset, seed_indices):
     # Computing seed data as mean timeseries in each SL
     seed_data = Searchlight(measure, queryengine=queryengine,
                             nproc=self.params.nproc, roi_ids=seed_indices)
     seed_data = seed_data(dataset)
     zscore(seed_data, chunks_attr=None)
     return seed_data
Developer: PyMVPA, Project: PyMVPA, Lines: 7, Source: connectivity_hyperalignment.py


Example 4: test_zscore_withoutchunks

def test_zscore_withoutchunks():
    # just a smoke test to see if all issues of
    # https://github.com/PyMVPA/PyMVPA/issues/26
    # are fixed
    from mvpa2.datasets import Dataset
    ds = Dataset(np.arange(32).reshape((8,-1)), sa=dict(targets=range(8)))
    zscore(ds, chunks_attr=None)
    assert(np.any(ds.samples != np.arange(32).reshape((8,-1))))
    ds_summary = ds.summary()
    assert(ds_summary is not None)
Developer: Anhmike, Project: PyMVPA, Lines: 10, Source: test_zscoremapper.py


Example 5: compute_connectivity_profile_similarity

 def compute_connectivity_profile_similarity(self, dss):
     # from scipy.spatial.distance import pdist, squareform
     # conns = [1 - squareform(pdist(ds.samples.T, 'correlation')) for ds in dss]
     conns = [np.corrcoef(ds.samples.T) for ds in dss]
     conn_sum = np.sum(conns, axis=0)
     sim = np.zeros((len(dss), dss[0].shape[1]))
     for i, conn in enumerate(conns):
         conn_diff = conn_sum - conn
         zscore(conn_diff, chunks_attr=None)
         zscore(conn, chunks_attr=None)
         sim[i] = np.mean(conn_diff * conn, axis=0)
     return sim
Developer: PyMVPA, Project: PyMVPA, Lines: 12, Source: test_connectivity_hyperalignment.py


Example 6: create_betas_per_trial_with_pymvpa_roni

def create_betas_per_trial_with_pymvpa_roni(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well, not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print(mask_fname)
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if type(run_id) == str:
            continue

        # all_events = dhandle.get_bold_run_model(model, subj, run_id)
        all_events = get_bold_run_model(dhandle, 2, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], event["id"])
                run_events.append(event)
                i += 1

        # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa["id"] = [x[x.find("-") + 1 :] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)

        # If a trial was dropped (the subject pressed a button), then the counterpart trial from
        # the other condition should also be dropped
        for pair in conf.conditions_to_compare:
            cond_bool = np.array([c in pair for c in glm_dataset.sa["condition"]])
            sub_dataset = glm_dataset[cond_bool]
            c = Counter(sub_dataset.sa.id)
            for value in c:
                if c[value] < 2:
                    id_bool = np.array([value in cond_id for cond_id in glm_dataset.sa["id"]])
                    glm_dataset = glm_dataset[np.bitwise_not(np.logical_and(id_bool, cond_bool))]

        run_datasets.append(glm_dataset)

    return vstack(run_datasets, 0)
Developer: ronimaimon, Project: mvpa_analysis, Lines: 50, Source: ds_creation.py


Example 7: _level1

    def _level1(self, datasets, commonspace, ref_ds, mappers, residuals):
        params = self.params            # for quicker access ;)
        data_mapped = [ds.samples for ds in datasets]
        counts = 1  # number of datasets used so far for generating commonspace
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 1: ds #%i" % i)
            if i == ref_ds:
                continue
            # assign common space to ``space`` of the mapper, because this is
            # where it will be looking for it
            ds_new.sa[m.get_space()] = commonspace
            # find transformation of this dataset into the current common space
            m.train(ds_new)
            # remove common space attribute again to save on memory when the
            # common space is updated for the next iteration
            del ds_new.sa[m.get_space()]
            # project this dataset into the current common space
            ds_ = m.forward(ds_new.samples)
            if params.zscore_common:
                zscore(ds_, chunks_attr=None)
            # replace original dataset with mapped one -- only the reference
            # dataset will remain unchanged
            data_mapped[i] = ds_

            # compute first-level residuals wrt to the initial common space
            if residuals is not None:
                residuals[0, i] = np.linalg.norm(ds_ - commonspace)

            # Update the common space. This is an incremental update after
            # processing each 1st-level dataset. Maybe there should be a flag
            # to make a batch update after processing all 1st-level datasets
            # to an identical 1st-level common space
            # TODO: make just a function so we don't waste space
            if params.level1_equal_weight:
                commonspace = params.combiner1(ds_, commonspace,
                                               weights=(float(counts), 1.0))
            else:
                commonspace = params.combiner1(ds_, commonspace)
            counts += 1
            if params.zscore_common:
                zscore(commonspace, chunks_attr=None)
        return data_mapped
Developer: PyMVPA, Project: PyMVPA, Lines: 43, Source: hyperalignment.py


Example 8: test_hyper_input_dataset_check

 def test_hyper_input_dataset_check(self):
     # If supplied with only one dataset during training,
     # make sure it doesn't run multiple levels and crap out
     ha = Hyperalignment()
     ds_all = [datasets['uni4small'] for i in range(3)]
     # Make sure it raises TypeError if a list is not passed
     self.assertRaises(TypeError, ha, ds_all[0])
     self.assertRaises(TypeError, ha.train, ds_all[0])
     # And it doesn't crap out with a single dataset for training
     ha.train([ds_all[0]])
     zscore(ds_all[0], chunks_attr=None)
     assert_array_equal(ha.commonspace, ds_all[0].samples)
     # make sure it accepts tuple of ndarray
     ha = Hyperalignment()
     m = ha(tuple(ds_all))
     ha = Hyperalignment()
     dss_arr = np.empty(len(ds_all), dtype=object)
     for i in range(len(ds_all)):
         dss_arr[i] = ds_all[i]
     m = ha(dss_arr)
Developer: swaroopgj, Project: PyMVPA, Lines: 20, Source: test_hyperalignment.py


Example 9: detrend

def detrend(ds):
	#print ds.summary()
	ds.samples = ds.samples.astype('float')
	pl.figure()
	pl.subplot(221)
	plot_samples_distance(ds, sortbyattr='chunks')
	#plot_samples_distance(ds)
	pl.title('Sample distances (sorted by chunks)')
	poly_detrend(ds, polyord=2, chunks_attr='chunks')
	pl.subplot(222)
	plot_samples_distance(ds, sortbyattr='chunks')
	pl.show()
	zscore(ds, chunks_attr='chunks', dtype='float32')
	pl.subplot(223)
	plot_samples_distance(ds, sortbyattr='chunks')
	pl.subplot(224)
#	plot_samples_distance(ds, sortbyattr='targets')
	pl.title('Sample distances (sorted by condition)')
	pl.show()
	#poly_detrend(ds, polyord=1, chunks_attr='chunks')
	#zscore(ds, chunks_attr='chunks', dtype='float32')
	return ds
Developer: ronimaimon, Project: mvpa_analysis, Lines: 22, Source: visualize_data.py


Example 10: get_testdata

    def get_testdata(self):
        # rs = np.random.RandomState(0)
        rs = np.random.RandomState()
        nt = 200
        n_triangles = 4
        ns = 10
        nv = n_triangles * 3
        vertices = np.zeros((nv, 3))  # 4 separated triangles
        faces = []
        for i in range(n_triangles):
            vertices[i*3] = [i*2, 0, 0]
            vertices[i*3+1] = [i*2+1, 1/np.sqrt(3), 0]
            vertices[i*3+2] = [i*2+1, -1/np.sqrt(3), 0]
            faces.append([i*3, i*3+1, i*3+2])
        faces = np.array(faces)
        surface = Surface(vertices, faces)

        ds_orig = np.zeros((nt, nv))
        # add coarse-scale information
        for i in range(n_triangles):
            ds_orig[:, i*3:(i+1)*3] += rs.normal(size=(nt, 1))
        # add fine-scale information
        ds_orig += rs.normal(size=(nt, nv))
        dss_train, dss_test = [], []
        for i in range(ns):
            ds = np.zeros_like(ds_orig)
            for j in range(n_triangles):
                ds[:, j*3:(j+1)*3] = np.dot(ds_orig[:, j*3:(j+1)*3],
                                            get_random_rotation(3))
                                            # special_ortho_group.rvs(3, random_state=rs))
            ds = Dataset(ds)
            ds.fa['node_indices'] = np.arange(nv)
            ds_train, ds_test = ds[:nt//2, :], ds[nt//2:, :]
            zscore(ds_train, chunks_attr=None)
            zscore(ds_test, chunks_attr=None)
            dss_train.append(ds_train)
            dss_test.append(ds_test)
        return dss_train, dss_test, surface
Developer: PyMVPA, Project: PyMVPA, Lines: 38, Source: test_connectivity_hyperalignment.py


Example 11: test_linear_svm_weights_per_class

    def test_linear_svm_weights_per_class(self, svm):
        # assuming many defaults it is as simple as
        kwargs = dict(enable_ca=["sensitivities"])
        sana_split = svm.get_sensitivity_analyzer(
            split_weights=True, **kwargs)
        sana_full = svm.get_sensitivity_analyzer(
            force_train=False, **kwargs)

        # and lets look at all sensitivities
        ds2 = datasets['uni4large'].copy()
        zscore(ds2, param_est=('targets', ['L2', 'L3']))
        ds2 = ds2[np.logical_or(ds2.sa.targets == 'L0', ds2.sa.targets == 'L1')]

        senssplit = sana_split(ds2)
        sensfull = sana_full(ds2)

        self.assertEqual(senssplit.shape, (2, ds2.nfeatures))
        self.assertEqual(sensfull.shape, (1, ds2.nfeatures))

        # just to verify that we split properly and if we reconstruct
        # manually we obtain the same
        dmap = (-1 * senssplit.samples[1] + senssplit.samples[0]) \
               - sensfull.samples
        self.assertTrue((np.abs(dmap) <= 1e-10).all())
        #print "____"
        #print senssplit
        #print SMLR().get_sensitivity_analyzer(combiner=None)(ds2)

        # for now we can do split weights for binary tasks only, so
        # lets check if we raise a concern
        # we temporarily shutdown warning, since it is going to complain
        # otherwise, but we do it on purpose here
        handlers = warning.handlers
        warning.handlers = []
        self.assertRaises(NotImplementedError,
                              sana_split, datasets['uni3medium'])
        # reenable the warnings
        warning.handlers = handlers
Developer: andreirusu, Project: PyMVPA, Lines: 38, Source: test_datameasure.py


Example 12: test_connectivity_hyperalignment

    def test_connectivity_hyperalignment(self):
        skip_if_no_external('scipy')
        skip_if_no_external('hdf5')  # needed for default results backend hdf5

        dss_train, dss_test, surface = self.get_testdata()
        qe = SurfaceQueryEngine(surface, 10, fa_node_key='node_indices')
        cha = ConnectivityHyperalignment(
            mask_ids=[0, 3, 6, 9],
            seed_indices=[0, 3, 6, 9],
            seed_queryengines=qe,
            queryengine=qe)
        mappers = cha(dss_train)
        aligned_train = [mapper.forward(ds) for ds, mapper in zip(dss_train, mappers)]
        aligned_test = [mapper.forward(ds) for ds, mapper in zip(dss_test, mappers)]
        for ds in aligned_train + aligned_test:
            zscore(ds, chunks_attr=None)
        sim_train_before = self.compute_connectivity_profile_similarity(dss_train)
        sim_train_after = self.compute_connectivity_profile_similarity(aligned_train)
        sim_test_before = self.compute_connectivity_profile_similarity(dss_test)
        sim_test_after = self.compute_connectivity_profile_similarity(aligned_test)
        # ISC should be higher after CHA for both training and testing data
        self.assertTrue(sim_train_after.mean() > sim_train_before.mean())
        self.assertTrue(sim_test_after.mean() > sim_test_before.mean())
Developer: PyMVPA, Project: PyMVPA, Lines: 23, Source: test_connectivity_hyperalignment.py


Example 13: create_betas_per_trial_with_pymvpa

def create_betas_per_trial_with_pymvpa(study_path, subj, conf, mask_name, flavor, TR):
    dhandle = OpenFMRIDataset(study_path)
    model = 1
    task = 1
    # Do this for other tasks as well, not only the first
    mask_fname = _opj(study_path, "sub{:0>3d}".format(subj), "masks", conf.mvpa_tasks[0], "{}.nii.gz".format(mask_name))
    print(mask_fname)
    run_datasets = []
    for run_id in dhandle.get_task_bold_run_ids(task)[subj]:
        if type(run_id) == str:
            continue
        all_events = dhandle.get_bold_run_model(model, subj, run_id)
        run_events = []
        i = 0
        for event in all_events:
            if event["task"] == task:
                event["condition"] = "{}-{}".format(event["condition"], i)
                run_events.append(event)
                i += 1

        # load BOLD data for this run (with masking); add 0-based chunk ID
        run_ds = dhandle.get_bold_run_dataset(subj, task, run_id, flavor=flavor, chunks=run_id - 1, mask=mask_fname)
        # convert event info into a sample attribute and assign as 'targets'
        run_ds.sa.time_coords = run_ds.sa.time_indices * TR
        print(run_id)

        run_ds.sa["targets"] = events2sample_attr(run_events, run_ds.sa.time_coords, noinfolabel="rest")
        # additional time series preprocessing can go here
        poly_detrend(run_ds, polyord=1, chunks_attr="chunks")
        zscore(run_ds, chunks_attr="chunks", param_est=("targets", ["rest"]), dtype="float32")
        glm_dataset = fit_event_hrf_model(run_ds, run_events, time_attr="time_coords", condition_attr="condition")
        glm_dataset.sa["targets"] = [x[: x.find("-")] for x in glm_dataset.sa.condition]
        glm_dataset.sa.condition = glm_dataset.sa["targets"]
        glm_dataset.sa["chunks"] = [run_id - 1] * len(glm_dataset.samples)
        run_datasets.append(glm_dataset)
    return vstack(run_datasets, 0)
Developer: ronimaimon, Project: mvpa_analysis, Lines: 36, Source: ds_creation.py


Example 14: _level2

    def _level2(self, datasets, lvl1_data, mappers, residuals):
        params = self.params            # for quicker access ;)
        data_mapped = lvl1_data
        # aggregate all processed 1st-level datasets into a new 2nd-level
        # common space
        commonspace = params.combiner2(data_mapped)

        # XXX Why is this commented out? Who knows what combiner2 is doing and
        # whether it changes the distribution of the data
        #if params.zscore_common:
        #zscore(commonspace, chunks_attr=None)

        ndatasets = len(datasets)
        for loop in xrange(params.level2_niter):
            # 2nd-level alignment starts from the original/unprojected datasets
            # again
            for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
                if __debug__:
                    debug('HPAL_', "Level 2 (%i-th iteration): ds #%i" % (loop, i))

                # Optimization speed up heuristic
                # Slightly modify the common space towards other feature
                # spaces and reduce influence of this feature space for the
                # to-be-computed projection
                temp_commonspace = (commonspace * ndatasets - data_mapped[i]) \
                                    / (ndatasets - 1)

                if params.zscore_common:
                    zscore(temp_commonspace, chunks_attr=None)
                # assign current common space
                ds_new.sa[m.get_space()] = temp_commonspace
                # retrain the mapper for this dataset
                m.train(ds_new)
                # remove common space attribute again to save on memory when the
                # common space is updated for the next iteration
                del ds_new.sa[m.get_space()]
                # obtain the 2nd-level projection
                ds_ = m.forward(ds_new.samples)
                if params.zscore_common:
                    zscore(ds_, chunks_attr=None)
                # store for 2nd-level combiner
                data_mapped[i] = ds_
                # compute residuals
                if residuals is not None:
                    residuals[1+loop, i] = np.linalg.norm(ds_ - commonspace)

            commonspace = params.combiner2(data_mapped)

        # and again
        if params.zscore_common:
            zscore(commonspace, chunks_attr=None)

        # return the final common space
        return commonspace
Developer: adamatus, Project: PyMVPA, Lines: 54, Source: hyperalignment.py


Example 15: test_hypal_michael_caused_problem

    def test_hypal_michael_caused_problem(self):
        from mvpa2.misc import data_generators
        from mvpa2.mappers.zscore import zscore
        # Fake data
        ds = data_generators.normal_feature_dataset(nfeatures=20)
        ds_all = [data_generators.random_affine_transformation(ds) for i in range(3)]
        _ = [zscore(sd, chunks_attr=None) for sd in ds_all]
        # Making random data per subject for testing with bias added to first subject
        ds_test = [np.random.rand(1, ds.nfeatures) for i in range(len(ds_all))]
        ds_test[0] += np.arange(1, ds.nfeatures + 1) * 100
        assert(np.corrcoef(ds_test[2], ds_test[1])[0, 1] < 0.99)  # that would have been ridiculous if it was

        # Test with varying alpha so we for sure to not have that issue now
        for alpha in (0, 0.01, 0.5, 0.99, 1.0):
            hyper09 = Hyperalignment(alpha=alpha)
            mappers = hyper09([sd for sd in ds_all])
            ds_test_a = [m.forward(sd) for m, sd in zip(mappers, ds_test)]
            ds_test_a = [mappers[0].reverse(sd) for sd in ds_test_a]
            corr = np.corrcoef(ds_test_a[2], ds_test_a[1])[0, 1]
            assert(corr < 0.99)
Developer: hanke, Project: PyMVPA, Lines: 20, Source: test_hyperalignment.py


Example 16: _get_hypesvs

    def _get_hypesvs(self, sl_connectomes, local_common_model=None):
        '''
        Hyperalign connectomes and return mappers
        and trained SVDMapper of common space.

        Parameters
        ----------
        sl_connectomes: a list of connectomes to hyperalign
        local_common_model: a reference common model to be used.

        Returns
        -------
        a tuple (sl_hmappers, svm, local_common_model)
        sl_hmappers: a list of mappers corresponding to the input list, in that order.
        svm: an SVD mapper based on the input data. If given a common model, this is None.
        local_common_model: If local_common_model is provided as input, this will be None.
            Otherwise, local_common_model will be computed here and returned.
        '''
        # TODO Should we z-score sl_connectomes?
        return_model = False if self.params.save_model is None else True
        if local_common_model is not None:
            ha = Hyperalignment(level2_niter=0)
            if not is_datasetlike(local_common_model):
                local_common_model = Dataset(samples=local_common_model)
            ha.train([local_common_model])
            sl_hmappers = ha(sl_connectomes)
            return sl_hmappers, None, None
        ha = Hyperalignment()
        sl_hmappers = ha(sl_connectomes)
        sl_connectomes = [slhm.forward(slc) for slhm, slc in zip(sl_hmappers, sl_connectomes)]
        _ = [zscore(slc, chunks_attr=None) for slc in sl_connectomes]
        sl_connectomes = np.dstack(sl_connectomes).mean(axis=-1)
        svm = SVDMapper(force_train=True)
        svm.train(sl_connectomes)
        if return_model:
            local_common_model = svm.forward(sl_connectomes)
        else:
            local_common_model = None
        return sl_hmappers, svm, local_common_model
Developer: PyMVPA, Project: PyMVPA, Lines: 39, Source: connectivity_hyperalignment.py


Example 17: get_testdata

 def get_testdata(self):
     # get a dataset with some prominent trends in it
     ds4l = datasets['uni4large']
     # lets select for now only meaningful features
     ds_orig = ds4l[:, ds4l.a.nonbogus_features]
     zscore(ds_orig, chunks_attr=None)
     n = 4  # # of datasets to generate
     Rs, dss_rotated, dss_rotated_clean = [], [], []
     # now lets compose derived datasets by using some random
     # rotation(s)
     while len(dss_rotated_clean) < n:
         ds_ = random_affine_transformation(ds_orig, scale_fac=1.0, shift_fac=0.)
         if ds_.a.random_scale <= 0:
             continue
         Rs.append(ds_.a.random_rotation)
         zscore(ds_, chunks_attr=None)
         dss_rotated_clean.append(ds_)
         i = len(dss_rotated_clean) - 1
         ds_2 = hstack([ds_, ds4l[:, ds4l.a.bogus_features[i * 4: i * 4 + 4]]])
         zscore(ds_2, chunks_attr=None)
         dss_rotated.append(ds_2)
     return ds_orig, dss_rotated, dss_rotated_clean, Rs
Developer: VladimirBadalyan, Project: PyMVPA, Lines: 22, Source: test_searchlight_hyperalignment.py


Example 18: test_hyperalignment_measure

 def test_hyperalignment_measure(self):
     ref_ds = 0
     fsha = FeatureSelectionHyperalignment()
     ds_orig, dss_rotated, dss_rotated_clean, Rs = self.get_testdata()
     # Lets test two scenarios -- in one with no noise -- we should get
     # close to perfect reconstruction.  If noisy features were added -- not so good
     for noisy, dss in ((False, dss_rotated_clean),
                        (True, dss_rotated)):
         # to verify that original datasets didn't get changed by
         # Hyperalignment store their idhashes of samples
         idhashes = [idhash(ds.samples) for ds in dss]
         idhashes_targets = [idhash(ds.targets) for ds in dss]
         mappers = fsha(dss)
         mappers = [StaticProjectionMapper(proj=m, recon=m.T)
                    for m in mappers]
         idhashes_ = [idhash(ds.samples) for ds in dss]
         idhashes_targets_ = [idhash(ds.targets) for ds in dss]
         self.assertEqual(
             idhashes, idhashes_,
             msg="Hyperalignment must not change original data.")
         self.assertEqual(
             idhashes_targets, idhashes_targets_,
             msg="Hyperalignment must not change original data targets.")
         # Map data back
         dss_clean_back = [m.forward(ds_)
                           for m, ds_ in zip(mappers, dss)]
         _ = [zscore(sd, chunks_attr=None) for sd in dss_clean_back]
         nddss = []
         ndcss = []
         nf = ds_orig.nfeatures
         ds_norm = np.linalg.norm(dss[ref_ds].samples[:, :nf])
         ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                        * np.sign(dss_rotated_clean[ref_ds].a.random_scale)
         zscore(ds_orig_Rref, chunks_attr=None)
         for ds_back in dss_clean_back:
             ndcs = np.diag(np.corrcoef(ds_back.samples.T[:nf, ],
                                        ds_orig_Rref.T)[nf:, :nf], k=0)
             ndcss += [ndcs]
             dds = ds_back.samples[:, :nf] - ds_orig_Rref
             ndds = np.linalg.norm(dds) / ds_norm
             nddss += [ndds]
         # First compare correlations
         snoisy = ('clean', 'noisy')[int(noisy)]
         self.assertTrue(
             np.all(np.array(ndcss) >= (0.9, 0.85)[int(noisy)]),
             msg="Should have reconstructed original dataset more or"
             " less. Got correlations %s in %s case."
             % (ndcss, snoisy))
         # normed differences
         self.assertTrue(
             np.all(np.array(nddss) <= (.2, 3)[int(noisy)]),
             msg="Should have reconstructed original dataset more or"
             " less for all. Got normed differences %s in %s case."
             % (nddss, snoisy))
         self.assertTrue(
             nddss[ref_ds] <= (.1, 0.3)[int(noisy)],
             msg="Should have reconstructed original dataset quite "
             "well even with zscoring. Got normed differences %s "
             "in %s case." % (nddss, snoisy))
         self.assertTrue(
             np.all(np.array(nddss) / nddss[ref_ds] >= (0.95, 0.8)[int(noisy)]),
             msg="Should have reconstructed orig_ds best of all. "
             "Got normed differences %s in %s case with ref_ds=%d."
             % (nddss, snoisy, ref_ds))
     # Testing feature selection within the measure using fraction and count
     # same features
     fsha_fsf = FeatureSelectionHyperalignment(featsel=0.5)
     fsha_fsn = FeatureSelectionHyperalignment(featsel=4)
     fsha_fsf_same = FeatureSelectionHyperalignment(featsel=0.5, use_same_features=True)
     fsha = FeatureSelectionHyperalignment(full_matrix=False)
     # check for valueerror if full_matrix=False and no roi_seed fa
     self.assertRaises(ValueError, fsha, dss_rotated)
     fsha = FeatureSelectionHyperalignment()
     dss_rotated[ref_ds].fa['roi_seed'] = [1, 0, 0, 0, 0, 0, 0, 0]
     mappers_fsf = fsha_fsf(dss_rotated)
     mappers_fsf_same = fsha_fsf_same(dss_rotated)
     mappers_fsn = fsha_fsn(dss_rotated)
     mappers = fsha(dss_rotated_clean)
     mappers_diffsizedss = fsha_fsf([sd[:, nfs] for nfs, sd in
         zip([np.arange(5), np.random.permutation(np.arange(8)), np.arange(8)[::-1], np.arange(8)], dss_rotated)])
     # Testing that most of noisy features are eliminated from reference data
     assert_true(np.alltrue([np.sum(m[:4, :4].std(0) > 0) > 2 for m in mappers_fsf]))
     # using same features make it most likely to eliminate all noisy features
     assert_true(np.alltrue([np.sum(m[:4, :4].std(0) > 0) == 4 for m in mappers_fsf_same]))
     assert_true(np.alltrue([np.sum(m[:4, :4].std(0) > 0) > 2 for m in mappers_fsn]))
     # And it correctly maps the selected features if they are selected
     if np.alltrue([np.all(m[4:, :4] == 0) for m in mappers_fsf]):
         for m, mfs in zip(mappers, mappers_fsf):
             assert_array_equal(m, mfs[:4, :4])
     if np.alltrue([np.all(m[4:, :4] == 0) for m in mappers_fsf_same]):
         for m, mfs in zip(mappers, mappers_fsf_same):
             assert_array_equal(m, mfs[:4, :4])
     # testing roi_seed forces feature selection
     dss_rotated[ref_ds].fa['roi_seed'] = [0, 0, 0, 0, 0, 0, 0, 1]
     fsha_fsf = FeatureSelectionHyperalignment(featsel=0.5)
     mappers_fsf = fsha_fsf(dss_rotated)
     assert(np.alltrue([np.sum(m[7, :] == 0) == 4 for m in mappers_fsf]))
Developer: VladimirBadalyan, Project: PyMVPA, Lines: 97, Source: test_searchlight_hyperalignment.py


Example 19: test_custom_qas

    def test_custom_qas(self):
        # Test if we could provide custom QEs per each of the datasets
        skip_if_no_external('scipy')
        skip_if_no_external('hdf5')  # needed for default results backend hdf5

        ns, nf = 10, 4  # # of samples/features -- a very BIG dataset ;)
        ds0 = Dataset(np.random.normal(size=(ns, nf)))
        zscore(ds0, chunks_attr=None)
        ds1 = ds0[:, [3, 0, 1, 2]]  # features circular shifted to the right

        qe0 = FancyQE([[0], [1], [2], [3]])  # does nothing
        qe1 = FancyQE([[1], [2], [3], [0]])  # knows to look into the right

        def apply_slhyper(queryengine, dss=[ds0, ds1], return_mappers=False, **kw):
            """Helper for a common code to create/call slhyper"""
            slhyper = SearchlightHyperalignment(queryengine=queryengine, **kw)
            mappers = slhyper(dss)
            proj = [m.proj.todense() for m in mappers]
            return (proj, mappers) if return_mappers else proj

        # since this single qe resulted in trying to match non-matching time series
        # projections should be non-identity, but no offdiagonal elements
        assert_no_offdiag(apply_slhyper(qe0))

        # both are provided
        projs, mappers = apply_slhyper([qe0, qe1], return_mappers=True)
        tprojs_shifted = [np.eye(nf), np.roll(np.eye(nf), 1, axis=0)]
        assert_array_equal(projs[0], tprojs_shifted[0])  # must be identity since we made them so
        assert_array_equal(projs[1], tprojs_shifted[1])  # pretty much incorporating that shift

        # TODO -- not identity assert_array_equal(projs[0], np.eye(len(p)))  # must be identity since we made them so
        # and must restore data properly
        assert_array_almost_equal(mappers[0].forward(ds0), mappers[1].forward(ds1))

        # give more than the number of QEs
        assert_raises(ValueError,
                      SearchlightHyperalignment(queryengine=[qe0, qe1]),
                      [ds0, ds1, ds0])

        # The one having no voxels for the "1st" id in "subj1"
        qe1_ = FancyQE([[1], [], [3], [0]])  # knows to look into the right

        projs = apply_slhyper(qe1_)
        assert_no_offdiag(projs)
        for proj in projs:
            # assess that both have '2nd' one 0
            # but not the others!
            assert_array_equal(np.diagonal(proj) != 0, [True, True, False, True])

        # smoke test whenever combine is False
        # In this case should work ok
        apply_slhyper(qe0, combine_neighbormappers=False)
        # this one ok as well since needs only matching ones in ref_ds
        apply_slhyper([qe0, qe1], combine_neighbormappers=False)
        # here since features do not match node_ids -- should raise ValueError
        assert_raises(ValueError, apply_slhyper, qe1, combine_neighbormappers=False)
        assert_raises(ValueError, apply_slhyper, [qe0, qe1], ref_ds=1, combine_neighbormappers=False)

        # and now only one qe lacking for that id
        projs = apply_slhyper([qe0, qe1_])
        tproj0 = np.eye(nf)
        tproj0[1, 1] = 0
        tprojs_shifted_1st0 = [tproj0, np.roll(tproj0, 1, axis=0)]
        for proj, tproj in zip(projs, tprojs_shifted_1st0):
            # assess that both have '2nd' one 0
            # but not the others!
            assert_array_equal(proj, tproj)

        # And now a test with varying number of selected fids, no shift
        qe0 = FancyQE([[0], [1, 2], [1, 2, 3], [0, 1, 2, 3]])
        projs = apply_slhyper(qe0)
        # Test that in general we get larger coefficients for "correct" transformation
        for p, tproj in zip(projs, tprojs_shifted):
            assert(np.all(np.asarray(p)[tproj>0] >= 1.0))
            assert_array_lequal(np.mean(np.asarray(p)[tproj == 0]), 0.3)

        qe1 = FancyQE([[0, 1, 2, 3], [1, 2, 3], [2, 3], [3]])
        # Just a smoke test, for now TODO
        projs = apply_slhyper([qe0, qe1])
Developer: VladimirBadalyan, Project: PyMVPA, Lines: 79, Source: test_searchlight_hyperalignment.py


Example 20: test_basic_functioning

    def test_basic_functioning(self, ref_ds, zscore_common, zscore_all):
        ha = Hyperalignment(ref_ds=ref_ds,
                            zscore_all=zscore_all,
                            zscore_common=zscore_common)
        if ref_ds is None:
            ref_ds = 0                      # by default should be this one

        # get a dataset with some prominent trends in it
        ds4l = datasets['uni4large']
        # lets select for now only meaningful features
        ds_orig = ds4l[:, ds4l.a.nonbogus_features]
        nf = ds_orig.nfeatures
        n = 4 # # of datasets to generate
        Rs, dss_rotated, dss_rotated_clean, random_shifts, random_scales \
            = [], [], [], [], []

        # now lets compose derived datasets by using some random
        # rotation(s)
        for i in xrange(n):
            ## if False: # i == ref_ds:
            #     # Do not rotate the target space so we could check later on
            #     # if we transform back nicely
            #     R = np.eye(ds_orig.nfeatures)
            ## else:
            ds_ = random_affine_transformation(ds_orig, scale_fac=100, shift_fac=10)
            Rs.append(ds_.a.random_rotation)
            # reusing random data from dataset itself
            random_scales += [ds_.a.random_scale]
            random_shifts += [ds_.a.random_shift]
            random_noise = ds4l.samples[:, ds4l.a.bogus_features[:4]]

            ## if (zscore_common or zscore_all):
            ##     # for later on testing of "precise" reconstruction
            ##     zscore(ds_, chunks_attr=None)

            dss_rotated_clean.append(ds_)

            ds_ = ds_.copy()
            ds_.samples = ds_.samples + 0.1 * random_noise
            dss_rotated.append(ds_)

        # Lets test two scenarios -- in one with no noise -- we should get
        # close to perfect reconstruction.  If noise was added -- not so good
        for noisy, dss in ((False, dss_rotated_clean),
                           (True, dss_rotated)):
            # to verify that original datasets didn't get changed by
            # Hyperalignment store their idhashes of samples
            idhashes = [idhash(ds.samples) for ds in dss]
            idhashes_targets = [idhash(ds.targets) for ds in dss]

            mappers = ha(dss)

            idhashes_ = [idhash(ds.samples) for ds in dss]
            idhashes_targets_ = [idhash(ds.targets) for ds in dss]
            self.assertEqual(idhashes, idhashes_,
                msg="Hyperalignment must not change original data.")
            self.assertEqual(idhashes_targets, idhashes_targets_,
                msg="Hyperalignment must not change original data targets.")

            self.assertEqual(ref_ds, ha.ca.chosen_ref_ds)

            # Map data back

            dss_clean_back = [m.forward(ds_)
                              for m, ds_ in zip(mappers, dss_rotated_clean)]

            ds_norm = np.linalg.norm(dss[ref_ds].samples)
            nddss = []
            ndcss = []
            ds_orig_Rref = np.dot(ds_orig.samples, Rs[ref_ds]) \
                           * random_scales[ref_ds] \
                           + random_shifts[ref_ds]
            if zscore_common or zscore_all:
                zscore(Dataset(ds_orig_Rref), chunks_attr=None)
            for ds_back in dss_clean_back:
                # if we used zscoring of common, we cannot rely
                # that range/offset could be matched, so lets use
                # corrcoef
                ndcs = np.diag(np.corrcoef(ds_back.samples.T,
                                           ds_orig_Rref.T)[nf:, :nf], k=0)
                ndcss += [ndcs]
                dds = ds_back.samples - ds_orig_Rref
                ndds = np.linalg.norm(dds) / ds_norm
                nddss += [ndds]
            snoisy = ('clean', 'noisy')[int(noisy)]
            do_labile = cfg.getboolean('tests', 'labile', default='yes')
            if not noisy or do_labile:
                # First compare correlations
                self.assertTrue(np.all(np.array(ndcss)
                                       >= (0.9, 0.85)[int(noisy)]))
