
Python dataset.Dataset Class Code Examples


This article collects typical usage examples of the Python class neurosynth.base.dataset.Dataset. If you are wondering what the Dataset class is for, or how to use it in practice, the curated class code examples below should help.



The following presents 20 code examples of the Dataset class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
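
Before diving into the examples, here is a minimal end-to-end sketch of the typical Dataset workflow, assuming the standard Neurosynth database.txt and features.txt files sit in a local data/ directory (all paths and the 'emotion' term are illustrative placeholders, not taken from any example below):

from neurosynth.base.dataset import Dataset
from neurosynth.analysis import meta

# Build a Dataset from the activation database and attach term features.
dataset = Dataset('data/database.txt')
dataset.add_features('data/features.txt')

# Select studies associated with a term and run a meta-analysis over them.
ids = dataset.get_ids_by_features('emotion', threshold=0.001)  # placeholder term
ma = meta.MetaAnalysis(dataset, ids)
ma.save_results('output/emotion')

# Pickle the Dataset so later runs can skip the expensive parsing step.
dataset.save('data/dataset.pkl')
dataset = Dataset.load('data/dataset.pkl')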

Example 1: neurosynthInit

def neurosynthInit(dbsize):
    print("Initializing Neurosynth database...")
    dataset = Dataset('data/' + dbsize + 'terms/database.txt')
    dataset.add_features('data/' + dbsize + 'terms/features.txt')

    # print("Loading standard space brain...")
    # img = nb.load("data/MNI152_T1_2mm_brain.nii.gz")
    # standard = img.get_data()
    return dataset
Developer: vsoch, Project: neuro2gene, Lines: 9, Source: neuro2gene.py


Example 2: test_dataset_save_and_load

def test_dataset_save_and_load(self):
    # smoke test of saving and loading
    t = tempfile.mktemp()
    self.dataset.save(t, keep_mappables=True)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertIsNotNone(dataset)
    self.assertIsNotNone(dataset.mappables)
    self.assertEqual(len(dataset.mappables), 5)
    # Now with the mappables deleted
    dataset.save(t)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertEqual(len(dataset.mappables), 0)
    os.unlink(t)
Developer: chrisfilo, Project: Neurosynth, Lines: 15, Source: test_base.py


Example 3: _getdata

def _getdata():
    """Downloads data from neurosynth and returns it as a Dataset.

    Also pickles the dataset for future use."""
    LOG.warning("Downloading and processing Neurosynth database")

    os.makedirs("data", exist_ok=True)
    from neurosynth.base.dataset import download

    download(path="data", unpack=True)

    data = Dataset("data/database.txt")
    data.add_features("data/features.txt")
    data.save("data/dataset.pkl")
    return data
Developer: fredcallaway, Project: brain_matrix, Lines: 15, Source: brain_matrix.py


Example 4: create_voxel_x_feature_matrix

def create_voxel_x_feature_matrix(path_to_dataset, path_to_image_files):
    dataset = Dataset.load(path_to_dataset)
    feature_list = dataset.get_feature_names()
    vox_feat_matrix = zeros((dataset.volume.num_vox_in_mask, len(feature_list)), dtype=int16)
    for (i, feature) in enumerate(feature_list):
        image_path = path_to_image_files + feature + '_pFgA_z.nii.gz'
        vox_feat_matrix[:, i] = dataset.volume.mask(image_path)
    return vox_feat_matrix
Developer: acley, Project: neuro-data-matrix-factorization, Lines: 8, Source: voxel-x-feature-matrix.py


Example 5: generate_maps

def generate_maps(terms,output_dir):

    f,d = download_data()
    features = pandas.read_csv(f,sep="\t")  
    database = pandas.read_csv(d,sep="\t")  

    output_dir = "%s/maps" %(output_dir)

    print "Deriving pickled maps to extract relationships from..."
    dataset = Dataset(d)
    dataset.add_features(f)
    for t in range(len(terms)):
        term = terms[t]
        print "Generating P(term|activation) for term %s, %s of %s" %(term,t,len(terms))
        ids = dataset.get_ids_by_features(term)
        maps = meta.MetaAnalysis(dataset,ids)
        term_name = term.replace(" ","_")
        pickle.dump(maps.images["pFgA_z"],open("%s/%s_pFgA_z.pkl" %(output_dir,term_name),"wb"))
Developer: word-fish, Project: wordfish-plugins, Lines: 18, Source: functions.py


Example 6: test_dataset_save_and_load

def test_dataset_save_and_load(self):
    # smoke test of saving and loading
    t = tempfile.mktemp()
    self.dataset.save(t)
    self.assertTrue(os.path.exists(t))
    dataset = Dataset.load(t)
    self.assertIsNotNone(dataset)
    self.assertEqual(len(dataset.image_table.ids), 5)
    os.unlink(t)
Developer: MQMQ0229, Project: neurosynth, Lines: 9, Source: test_base.py


Example 7: __init__

    def __init__(self, db, dataset=None, studies=None, features=None,
                 reset_db=False, reset_dataset=False, download_data=True):
        """
        Initialize instance from a pickled Neurosynth Dataset instance or a
        pair of study and analysis .txt files.

        Args:
            db: the SQLAlchemy database connection to use.
            dataset: an optional filename of a pickled neurosynth Dataset
                instance.
                Note that the Dataset must contain the list of Mappables (i.e.,
                    save() must have been called with keep_mappables set to
                    True).
            studies: name of file containing activation data. If passed, a new
                Dataset instance will be constructed.
            features: name of file containing feature data.
            reset_db: if True, will drop and re-create all database tables
                before adding new content. If False (default), will add content
                incrementally.
            reset_dataset: if True, will regenerate the pickled Neurosynth
                dataset.
            download_data: if True, ignores any existing files and downloads
                the latest Neurosynth data files from GitHub.
        """

        if (studies is not None and not os.path.exists(studies)) \
                or settings.RESET_ASSETS:
            print "WARNING: RESETTING ALL NEUROSYNTH ASSETS!"
            self.reset_assets(download_data)

        # Load or create Neurosynth Dataset instance
        if dataset is None or reset_dataset or (isinstance(dataset, str) and not os.path.exists(dataset)):

            print "\tInitializing a new Dataset..."
            if (studies is None) or (features is None):
                raise ValueError(
                    "To generate a new Dataset instance, both studies and "
                    "analyses must be provided.")
            dataset = Dataset(studies)
            dataset.add_features(features)
            dataset.save(settings.PICKLE_DATABASE, keep_mappables=True)
        else:
            print "\tLoading existing Dataset..."
            dataset = Dataset.load(dataset)
            if features is not None:
                dataset.add_features(features)

        self.dataset = dataset
        self.db = db

        if reset_db:
            print "WARNING: RESETTING DATABASE!!!"
            self.reset_database()
Developer: UCL-CS35, Project: incdb-poc, Lines: 53, Source: database_builder.py
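
A hypothetical invocation of this constructor, for illustration only: the enclosing class name is not shown in the excerpt, so DatabaseBuilder is assumed here, along with a SQLAlchemy connection db and the standard Neurosynth text files:

# DatabaseBuilder is an assumed name; the excerpt only shows __init__.
builder = DatabaseBuilder(db,
                          studies='data/database.txt',
                          features='data/features.txt',
                          reset_db=False,
                          reset_dataset=False)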


Example 8: extract_relations

def extract_relations(terms,maps_dir,output_dir):

    if isinstance(terms,str):
        terms = [terms]

    f,d = download_data()
    features = pandas.read_csv(f,sep="\t")  
    database = pandas.read_csv(d,sep="\t")  
    allterms = features.columns.tolist()
    allterms.pop(0)  #pmid

    dataset = Dataset(d)
    dataset.add_features(f)
    image_matrix = pandas.DataFrame(columns=range(228453))
    for t in range(len(allterms)):
        term = allterms[t]
        term_name = term.replace(" ","_")
        pickled_map = "%s/%s_pFgA_z.pkl" %(maps_dir,term_name)
        if not os.path.exists(pickled_map):
            print "Generating P(term|activation) for term %s" %(term)
            ids = dataset.get_ids_by_features(term)
            maps = meta.MetaAnalysis(dataset,ids)
            pickle.dump(maps.images["pFgA_z"],open(pickled_map,"wb"))
        map_data = pickle.load(open(pickled_map,"rb"))
        image_matrix.loc[term] = map_data

    sims = pandas.DataFrame(columns=image_matrix.index)
    tuples = []
    for t1 in range(len(terms)):
        term1 = terms[t1]
        print "Extracting NeuroSynth relationships for term %s..." %(term1)
        for t2 in range(len(terms)):
            term2 = terms[t2]
            if t1<t2:
                score = pearsonr(image_matrix.loc[term1],image_matrix.loc[term2])[0]
                tuples.append((term1,term2,score))

    save_relations(output_dir=output_dir,relations=tuples)
Developer: word-fish, Project: wordfish-plugins, Lines: 38, Source: functions.py


Example 9: TestAnalysis

class TestAnalysis(unittest.TestCase):

  def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
  
  def test_meta_analysis(self):
    """ Test full meta-analysis stream. """
    pass

  def test_decoder(self):
    pass

  def test_coactivation(self):
    """ Test seed-based coactivation. """ 
    pass

  def test_roi_averaging(self):
    pass

  def test_get_random_voxels(self):
    pass
Developer: wanirepo, Project: Neurosynth, Lines: 23, Source: test_analysis.py


Example 10: import_neurosynth_git

    def import_neurosynth_git(self):
        # Add the appropriate neurosynth git folder to the python path. 
        sys.path.append(self.npath)
        from neurosynth.base.dataset import Dataset
        from neurosynth.analysis import meta

        # Try to load a pickle if it exists. Create a new dataset instance 
        # if it doesn't.
        try:
            self.dataset = cPickle.load(
                open(self.npath+os.sep+'data/dataset.pkl', 'rb'))
        except IOError:
        # Create Dataset instance from a database file.
            self.dataset = Dataset(self.npath+os.sep+'data/database.txt')

        # Load features from file
        self.dataset.add_features(self.npath+os.sep+'data/features.txt')

        # Get names of features. 
        self.feature_list = self.dataset.get_feature_names()
Developer: law826, Project: Neurosynth_SNA, Lines: 20, Source: Neurosynth_SNA.py


Example 11: __init__

    def __init__(
        self,
        metric="emd",
        image_type="pAgF",
        name=None,
        multi=True,
        image_transform="block_reduce",
        downsample=8,
        auto_save=True,
    ):
        self.image_type = image_type
        self.multi = multi
        self.downsample = downsample
        self.auto_save = auto_save

        if callable(metric):
            self.metric = metric
        elif metric == "emd":
            self.metric = euclidean_emd
        else:
            raise ValueError("{metric} is not a valid metric".format(**locals()))

        if callable(image_transform):
            self.image_transform = image_transform
        elif image_transform == "block_reduce":
            from functools import partial

            self.image_transform = partial(block_reduce, factor=downsample)
            # def block_reduce_transform(image):
            # """The default transformation."""
            # return block_reduce(image, downsample, blur)
            # self.image_transform = block_reduce_transform
        else:
            raise ValueError(("{image_transform} is not a valid" "transform function").format(**locals()))
        self.name = name if name else time.strftime("analysis_from_%m-%d_%H-%M-%S")

        try:
            self.data = Dataset.load("data/dataset.pkl")
        except FileNotFoundError:
            self.data = _getdata()
Developer: fredcallaway, Project: brain_matrix, Lines: 40, Source: brain_matrix.py


Example 12: Masker

from sklearn.cluster import KMeans, DBSCAN, MiniBatchKMeans
from sklearn import metrics
from scipy import stats

base_path = '/home/pauli/Development/neurobabel/'
test_data_path = base_path + 'ACE/'
masker_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_one_sm_v2.nii.gz'
atlas_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_atlas_brain_sm_v2.nii.gz'
mask = nib.load(masker_filename)
masker = Masker(mask)
r = 1.0
transform = {'BREGMA': transformations.bregma_to_whs()}
target = 'WHS'

# load data set
dataset = Dataset(os.path.join(test_data_path, 'db_bregma_cog_atlas_export.txt'), masker=masker_filename, r=r, transform=transform, target=target)
dataset.feature_table = FeatureTable(dataset)
dataset.add_features(os.path.join(test_data_path, "db_bregma_cog_atlas_features.txt")) # add features
fn = dataset.get_feature_names()
features = dataset.get_feature_data()

n_xyz, n_articles = dataset.image_table.data.shape
# do topic modeling (LSA)
n_components = 20
svd = TruncatedSVD(n_components=n_components)
X = svd.fit_transform(features)
X_orig = X.copy()

X = StandardScaler().fit_transform(X_orig)

# db = DBSCAN(eps=10.0, min_samples=10).fit(X)
Developer: wmpauli, Project: neurosynth, Lines: 31, Source: cluster_analysis.py


Example 13: get_test_dataset

def get_test_dataset():
    test_data_path = get_test_data_path()
    dataset = Dataset(test_data_path + 'test_dataset.txt')
    dataset.add_features(test_data_path + 'test_features.txt')
    return dataset
Developer: jdnc, Project: ml-project, Lines: 5, Source: utils.py


Example 14: setUp

def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
Developer: poldrack, Project: Neurosynth, Lines: 4, Source: test_base.py


Example 15: TestBase

class TestBase(unittest.TestCase):

  def setUp(self):
    """ Create a new Dataset and add features. """
    self.dataset = Dataset('data/test_dataset.txt')
    self.dataset.add_features('data/test_features.txt')
  
  def test_dataset_initializes(self):
    """ Test whether dataset initializes properly. """
    self.assertIsNotNone(self.dataset.volume)
    self.assertIsNotNone(self.dataset.image_table)
    self.assertEqual(len(self.dataset.mappables), 5)
    self.assertIsNotNone(self.dataset.volume)
    self.assertIsNotNone(self.dataset.r)

  def test_image_table_loads(self):
    """ Test ImageTable initialization. """
    self.assertIsNotNone(self.dataset.image_table)
    it = self.dataset.image_table
    self.assertEqual(len(it.ids), 5)
    self.assertIsNotNone(it.volume)
    self.assertIsNotNone(it.r)
    self.assertEqual(it.data.shape, (228453, 5))
    # Add tests for values in table

  def test_feature_table_loads(self):
    """ Test FeatureTable initialization. """
    tt = self.dataset.feature_table
    self.assertIsNotNone(tt)
    self.assertEqual(len(self.dataset.list_features()), 5)
    self.assertEqual(tt.data.shape, (5,5))
    self.assertEqual(tt.feature_names[3], 'f4')
    self.assertEqual(tt.data[0,0], 0.0003)

  def test_feature_search(self):
    """ Test feature-based Mappable search. Tests both the FeatureTable method 
    and the Dataset wrapper. """
    tt = self.dataset.feature_table
    features = tt.search_features(['f*'])
    self.assertEqual(len(features), 4)
    d = self.dataset
    ids = d.get_ids_by_features(['f*'], threshold=0.001)
    self.assertEqual(len(ids), 4)
    img_data = d.get_ids_by_features(['f1', 'f3', 'g1'], 0.001, func='max', get_image_data=True)
    self.assertEqual(img_data.shape, (228453, 5))

  def test_selection_by_mask(self):
    """ Test mask-based Mappable selection.
    Only one peak in the test dataset (in study5) should be within the sgACC. """
    ids = self.dataset.get_ids_by_mask('data/sgacc_mask.nii.gz')
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])

  def test_selection_by_peaks(self):
    """ Test peak-based Mappable selection. """
    ids = self.dataset.get_ids_by_peaks(np.array([[3, 30, -9]]))
    self.assertEqual(len(ids), 1)
    self.assertEqual('study5', ids[0])
  
  # def test_invalid_coordinates_ignored(self):
    """ Test dataset contains 3 valid coordinates and one outside mask. But this won't work 
Developer: poldrack, Project: Neurosynth, Lines: 61, Source: test_base.py


Example 16: shuffle_data

###
# This script shuffles the classification labels and reruns classification many times to get data for calculating a confidence interval around the null hypothesis

from sklearn.linear_model import RidgeClassifier
from base.classifiers import OnevsallClassifier
from neurosynth.base.dataset import Dataset
from sklearn.metrics import roc_auc_score
import pickle
from random import shuffle

def shuffle_data(classifier):
	for region in classifier.c_data:
		shuffle(region[1])


d_abs_topics_filt = Dataset.load('../data/datasets/abs_topics_filt_july.pkl')

results = []

clf = OnevsallClassifier(d_abs_topics_filt, '../masks/Ward/50.nii.gz', cv='4-Fold',
	 thresh=10, thresh_low=0, memsave=True, classifier=RidgeClassifier())
clf.load_data(None, None)
clf.initalize_containers(None, None, None)


for i in range(0, 500):
	shuffle_data(clf)
	clf.classify(scoring=roc_auc_score, processes=8, class_weight=None)
	results = list(clf.class_score) + results
	print(i, end=' ')
Developer: margulies, Project: NS_Classify, Lines: 30, Source: resample_ova.py


Example 17: Masker

from neurosynth.analysis import meta

base_path = '/home/pauli/Development/neurobabel/'
test_data_path = base_path + 'ACE/'
masker_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_one_sm_v2.nii.gz'
atlas_filename = base_path + 'atlases/whs_sd/WHS_SD_rat_atlas_brain_sm_v2.nii.gz'
mask = nb.load(masker_filename)
masker = Masker(mask)
r = 1.0
# transform = {'BREGMA': transformations.bregma_to_whs()}
#transform = {'BREGMA': transformations.identity()}
transform = {'BREGMA': transformations.bregma_to_whs()}
target = 'WHS'

# load data set
dataset = Dataset(os.path.join(test_data_path, 'db_bregma_export.txt'), masker=masker_filename, r=r, transform=transform, target=target)
dataset.feature_table = FeatureTable(dataset)
dataset.add_features(os.path.join(test_data_path, "db_bregma_features.txt")) # add features
fn = dataset.get_feature_names()

def get_whs_labels(filename=os.path.join(base_path, "atlases/whs_sd/WHS_SD_rat_atlas_v2.label")):
    ''' load the names of all labelled areas in the atlas (e.g. brainstem), return list of them '''
    in_file = open(filename, 'r')
    lines = in_file.readlines()
    labels = {}
    for line in lines:
        start = line.find("\"") + 1
        if start > 0:
            stop = line.find("\"", start)
            label = line[start:stop]
            idx = line.split()[0]
Developer: wmpauli, Project: neurosynth, Lines: 31, Source: create_bregma_dataset.py


Example 18: __init__

class NeurosynthMerge:
    def __init__(self, thesaurus, npath, outdir, test_mode=False):
        """
        Generates a new set of images using the neurosynth repository,
        combining across terms in a thesaurus.

        Args:
            - thesaurus: A list of tuples of the form [('term that will be
                the name of the file', 'the other term', 'expression
                combining the terms')]
                    - the expression is alphanumeric, combining terms with
                      & (and), &~ (and not), and | (or)
            - npath: directory where the neurosynth git repository lives
            locally on your machine (https://github.com/neurosynth/neurosynth)
            - outdir: directory where the generated images will be saved
            - test_mode: when true, the code will run an abridged version for 
            test purposes (as implemented by test.Neurosynth.py)
        """
        self.thesaurus = thesaurus
        self.npath = npath
        self.outdir = outdir

        self.import_neurosynth_git()
        from neurosynth.analysis import meta

        # Take out first two terms from the feature_list and insert the third 
        # term from the tuple.
        for triplet in thesaurus:
            self.feature_list = [feature for feature in self.feature_list \
            if feature not in triplet]
            self.feature_list.append(triplet[-1])

        # This makes an abridged version of feature_list for testing purposes. 
        if test_mode:
            self.feature_list = [triplet[-1] for triplet in thesaurus]

        # Run meta-analyses on the new feature set and save the results to
        # the outdir.
        for feature in self.feature_list:
            self.ids = self.dataset.get_ids_by_expression(feature, 
                threshold=0.001)
            ma = meta.MetaAnalysis(self.dataset, self.ids)

            # Parse the feature name (to avoid conflicts with illegal 
                #characters as file names)
            regex = re.compile(r'\W+')
            split = re.split(regex, feature)
            feat_fname = split[0] 

            # Save the results (many different types of files)
            ma.save_results(self.outdir+os.sep+feat_fname)

    def import_neurosynth_git(self):
        # Add the appropriate neurosynth git folder to the python path. 
        sys.path.append(self.npath)
        from neurosynth.base.dataset import Dataset
        from neurosynth.analysis import meta

        # Try to load a pickle if it exists. Create a new dataset instance 
        # if it doesn't.
        try:
            self.dataset = cPickle.load(
                open(self.npath+os.sep+'data/dataset.pkl', 'rb'))
        except IOError:
        # Create Dataset instance from a database file.
            self.dataset = Dataset(self.npath+os.sep+'data/database.txt')

        # Load features from file
        self.dataset.add_features(self.npath+os.sep+'data/features.txt')

        # Get names of features. 
        self.feature_list = self.dataset.get_feature_names()
Developer: law826, Project: Neurosynth_SNA, Lines: 72, Source: Neurosynth_SNA.py
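
Given the expression syntax described in the docstring (& for and, &~ for and-not, | for or), a minimal hypothetical call might look like this (the thesaurus entry and all paths are placeholders):

# Merge two related terms into a single OR expression.
thesaurus = [('emotion', 'affect', 'emotion | affect')]
nsm = NeurosynthMerge(thesaurus,
                      npath='/path/to/neurosynth',
                      outdir='output/',
                      test_mode=True)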


Example 19: __init__

# -*- coding: utf-8 -*-

# Here I use Yeo to test Neurosynth's classify functions
from neurosynth.base.dataset import Dataset
from neurosynth.analysis import classify
import os
import itertools
import re
import numpy as np
import pdb
import sys
from nipype.interfaces import fsl
from sklearn.ensemble import GradientBoostingClassifier


dataset = Dataset.load('../data/pickled.pkl')

masklist = ['7Networks_Liberal_1.nii.gz', '7Networks_Liberal_2.nii.gz',
            '7Networks_Liberal_3.nii.gz', '7Networks_Liberal_4.nii.gz',
            '7Networks_Liberal_5.nii.gz', '7Networks_Liberal_6.nii.gz',
            '7Networks_Liberal_7.nii.gz']

rootdir = '../masks/Yeo_JNeurophysiol11_MNI152/standardized/'


class maskClassifier:
    def __init__(self, classifier=GradientBoostingClassifier(),
                 param_grid={'max_features': np.arange(2, 140, 44),
                             'n_estimators': np.arange(5, 141, 50),
                             'learning_rate': np.arange(0.05, 1, 0.1)},
                 thresh=0.08):


diffs = {}
Developer: margulies, Project: NS_Classify, Lines: 31, Source: Yeo_Test.py


Example 20: Exception

from sklearn.metrics import roc_auc_score
import sys
from base.mv import bootstrap_mv_full
from neurosynth.base.dataset import Dataset
dataset = Dataset.load("../permutation_clustering/abs_60topics_filt_jul.pkl")

from sklearn.linear_model import LassoLarsIC

print(sys.argv)
try:
	cmd, iterations, job_id = sys.argv
except:
	raise Exception("Incorrect number of arguments")

import csv
cognitive_topics = ['topic' + topic[0] for topic in csv.reader(open('topic_keys60-july_cognitive.csv', 'r')) if topic[1] == "T"]

results = bootstrap_mv_full(dataset, LassoLarsIC(), roc_auc_score, 
	'../permutation_clustering/results/medial_fc_30_kmeans/kmeans_k9/cluster_labels.nii.gz', features=cognitive_topics, processes=None, 
	boot_n=int(iterations), outfile='results/bootstrap_full_mv_' + str(iterations) + '_mFC__LASSO_LARS_60_ ' + str(job_id) + '.csv')

Developer: csddzh, Project: NS_Classify, Lines: 20, Source: perm_complexity.py



Note: The neurosynth.base.dataset.Dataset class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various programmers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Please do not repost without permission.

