Python nibabel.concat_images Function Code Examples


This article collects typical usage examples of the Python nibabel.concat_images function. If you are wondering what concat_images does, how to call it, or what real-world uses look like, the hand-picked examples below should help.



Twenty code examples of the concat_images function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
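
Before the project-specific examples, here is a minimal self-contained sketch of the basic pattern. It is not taken from any of the projects below, and the file names are hypothetical: concat_images accepts a list of 3D images (image objects or file names) and stacks them into a single 4D image, checking that the affines agree unless check_affines=False is passed.

import nibabel as nib

# Hypothetical input files, each holding one 3D volume.
vol_files = ["vol_0001.nii.gz", "vol_0002.nii.gz", "vol_0003.nii.gz"]

# Load the volumes and stack them along a new fourth (time) axis.
vols = [nib.load(f) for f in vol_files]
bold_4d = nib.concat_images(vols)      # nib.concat_images(vol_files) also works

print(bold_4d.shape)                   # e.g. (64, 64, 36, 3)
nib.save(bold_4d, "bold_4d.nii.gz")

Several of the examples below pass check_affines=False to skip the affine check, for instance when stacking statistical maps already known to share the same grid.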

Example 1: _file_exists

def _file_exists(url, output_dir):
    """
    Checks whether to-be-downloaded stuff already exists locally

    """

    if url in DONE_URLS:
        return True

    output_filename = os.path.join(output_dir, os.path.basename(url))
    if not os.path.exists(output_filename):
        return False

    for ext in [".txt", ".mat"]:
        if output_filename.endswith(ext):
            if os.path.isfile(output_filename):
                # print "Skipping existing file: %s" % output_filename
                DONE_URLS.append(url)
                return True

    if output_filename.endswith(".nii"):
        try:
            nibabel.load(output_filename)
            nibabel.concat_images([output_filename])
            # print "Skipping existing file: %s" % output_filename
            DONE_URLS.append(url)
            return True
        except Exception as e:
            print("nibabel.load(...) error:", e)
            print()
            print("Corrupt image %s; redownloading" % output_filename)
            print(subprocess.getoutput("rm -f %s*" % output_filename))
            return False
Developer: schwarty, Project: pypreprocess, Lines of code: 33, Source: fetch_brainhack_data.py


Example 2: _openfmri_preproc

def _openfmri_preproc(out_dir, doc, metadata=None, verbose=1):
    """
        Parameters
        ----------
        metadata: dict
            - run_key: naming the sessions

        Examples
        --------
        {'run_key': ['task001 run001', 'task001 run002',
                     'task002 run001', 'task002 run002']}

    """
    if 'study_id' in doc:
        study_dir = os.path.join(out_dir, doc['study_id'])
    else:
        study_dir = out_dir

    if verbose > 0:
        print('%s@%s: dumping preproc' % (doc['subject_id'], doc['study_id']))

    subject_dir = os.path.join(study_dir, doc['subject_id'])
    anatomy_dir = os.path.join(subject_dir, 'anatomy')

    if not os.path.exists(anatomy_dir):
        os.makedirs(anatomy_dir)

    anatomy = doc['preproc']['anatomy']
    wm_anatomy = doc['final']['anatomy']

    anatomy = nb.load(anatomy)
    wm_anatomy = nb.load(wm_anatomy)

    nb.save(anatomy, os.path.join(anatomy_dir, 'highres001.nii.gz'))
    nb.save(wm_anatomy, os.path.join(anatomy_dir,
                                     'normalized_highres001.nii.gz'))

    bold_dir = os.path.join(subject_dir, 'BOLD')

    for session, run_key in zip(
            doc['slice_timing']['bold'], metadata['run_key']):

        bold = nb.concat_images(session)
        session_dir = os.path.join(bold_dir, run_key.replace(' ', '_'))
        if not os.path.exists(session_dir):
            os.makedirs(session_dir)
        nb.save(bold, os.path.join(session_dir, 'bold.nii.gz'))

    for session, motion, run_key in zip(doc['final']['bold'],
                                        doc['realign']['motion'],
                                        metadata['run_key']):

        bold = nb.concat_images(session)
        session_dir = os.path.join(bold_dir, run_key.replace(' ', '_'))
        if not os.path.exists(session_dir):
            os.makedirs(session_dir)
        nb.save(bold, os.path.join(session_dir, 'normalized_bold.nii.gz'))
        shutil.copyfile(motion, os.path.join(session_dir, 'motion.txt'))
Developer: fabianp, Project: pypreprocess, Lines of code: 58, Source: openfmri.py


Example 3: prep_data

    def prep_data(self, nifti1, bval1, bvec1, nifti2, bval2, bvec2):
        ''' Load the reconstructed image files and generate the files that TOPUP needs. '''
        ni1 = nb.load(nifti1)
        ni2 = nb.load(nifti2)
        phase_dim1 = ni1.get_header().get_dim_info()[1]
        phase_dim2 = ni2.get_header().get_dim_info()[1]

        bvals1 = np.loadtxt(bval1)
        bvals2 = np.loadtxt(bval2)
        bvecs1 = np.loadtxt(bvec1)
        bvecs2 = np.loadtxt(bvec2)

        nondwi1 = [im for i,im in enumerate(nb.four_to_three(ni1)) if bvals1[i]<10 and i<self.num_vols]
        nondwi2 = [im for i,im in enumerate(nb.four_to_three(ni2)) if bvals2[i]<10 and i<self.num_vols]

        b0 = nb.concat_images(nondwi1+nondwi2)
        # Topup requires an even number of slices
        if b0.shape[2]%2:
            d = b0.get_data()
            d = np.concatenate((d,np.zeros((d.shape[0],d.shape[1],1,d.shape[3]), dtype=d.dtype)),axis=2)
            b0 = nb.Nifti1Image(d, b0.get_affine())

        nb.save(b0, self.b0_file)
        with open(self.acq_file, 'w') as f:
            for i in range(len(nondwi1)):
                row = ['0','0','0',str(self.readout_time1),'\n']
                row[phase_dim1] = str(self.pe_dir1)
                f.write(' '.join(row))
            for i in range(len(nondwi2)):
                row = ['0','0','0',str(self.readout_time2),'\n']
                row[phase_dim2] = str(self.pe_dir2)
                f.write(' '.join(row))

        mux_ims1 = nb.four_to_three(ni1)[self.num_cal1:]
        mux_ims2 = nb.four_to_three(ni2)[self.num_cal2:]
        all_ims = nb.concat_images(mux_ims1 + mux_ims2)
        if all_ims.shape[2]%2:
            d = all_ims.get_data()
            d = np.concatenate((d,np.zeros((d.shape[0],d.shape[1],1,d.shape[3]), dtype=d.dtype)),axis=2)
            all_ims = nb.Nifti1Image(d, all_ims.get_affine())

        nb.save(all_ims, self.dwi_base+'.nii.gz')

        indices = ['1' for i in range(len(mux_ims1))] + [str(len(nondwi1)+1) for i in range(len(mux_ims2))]
        with open(self.index_file, 'w') as f:
            f.write(' '.join(indices))

        bvals = np.concatenate((bvals1[self.num_cal1:],bvals2[self.num_cal2:]), axis=0)
        bvecs = np.concatenate((bvecs1[:,self.num_cal1:],bvecs2[:,self.num_cal2:]), axis=1)
        with open(self.bval_file, 'w') as f:
            f.write(' '.join(['%0.1f' % value for value in bvals]))
        with open(self.bvec_file, 'w') as f:
            f.write(' '.join(['%0.4f' % value for value in bvecs[0,:]]) + '\n')
            f.write(' '.join(['%0.4f' % value for value in bvecs[1,:]]) + '\n')
            f.write(' '.join(['%0.4f' % value for value in bvecs[2,:]]) + '\n')
Developer: bryancort, Project: nims, Lines of code: 55, Source: pepolar_unwarp.py


Example 4: t_test_2sample

def t_test_2sample(sample_a, sample_b, equal_var=True):
    """t-statistics are positive if a > b"""
    a_stack = nib.concat_images(sample_a, check_affines=False)
    b_stack = nib.concat_images(sample_b, check_affines=False)

    tstats, pvalues = sp.stats.ttest_ind(a_stack.get_data(),
                                         b_stack.get_data(), axis=3,
                                         equal_var=equal_var)
    reject, corrected = mne.stats.fdr_correction(pvalues)

    return (image_like(a_stack, tstats), image_like(a_stack, pvalues),
            image_like(a_stack, corrected))
Developer: effigies, Project: sandbox, Lines of code: 12, Source: images.py


Example 5: _run_interface

    def _run_interface(self, runtime):
        in_files = self.inputs.in_files

        indices = list(range(len(in_files)))
        if isdefined(self.inputs.indices):
            indices = self.inputs.indices

        if len(self.inputs.in_files) < 2:
            self._results['out_file'] = in_files[0]
            return runtime

        first_fname = in_files[indices[0]]
        if len(indices) == 1:
            self._results['out_file'] = first_fname
            return runtime

        im = nb.concat_images([in_files[i] for i in indices])
        data = im.get_data().astype(float).sum(axis=3)
        data = np.clip(data, a_min=0.0, a_max=1.0)

        out_file = fname_presuffix(first_fname, suffix='_tpmsum',
                                   newpath=runtime.cwd)
        newnii = im.__class__(data, im.affine, im.header)
        newnii.set_data_dtype(np.float32)

        # Set visualization thresholds
        newnii.header['cal_max'] = 1.0
        newnii.header['cal_min'] = 0.0
        newnii.to_filename(out_file)
        self._results['out_file'] = out_file

        return runtime
Developer: poldracklab, Project: niworkflows, Lines of code: 32, Source: utils.py


Example 6: _merge_subject_images

 def _merge_subject_images(self, images, good_indices=None):
     """Stack a list of 3D images into 4D image."""
     if good_indices is None:
         good_indices = range(len(images))
     images = [img for i, img in enumerate(images) if i in good_indices]
     out_img = nib.concat_images(images)
     return out_img
Developer: sgagnon, Project: lyman, Lines of code: 7, Source: mixedfx.py


Example 7: transform

 def transform(self, raw_data=None, output_dir=None,
               affine=None, prefix='a', basenames=None, ext=None):
     self.output_data_ = STC.transform(self, raw_data=raw_data)
     if not basenames is None:
         self.basenames_ = basenames
     if not affine is None:
         self.affine_ = affine
     if not output_dir is None:
         if not os.path.exists(output_dir):
             os.makedirs(output_dir)
     if hasattr(self, 'affine_'):
         if isinstance(self.affine_, list):
             self.output_data_ = [nibabel.Nifti1Image(
                 self.output_data_[..., t], self.affine_[t])
                 for t in range(self.output_data_.shape[-1])]
             if output_dir is None:
                 self.output_data_ = nibabel.concat_images(
                     self.output_data_, check_affines=False)
         else:
             self.output_data_ = nibabel.Nifti1Image(self.output_data_,
                                                     self.affine_)
         if not output_dir is None:
             self.output_data_ = save_vols(
                 self.output_data_,
                 output_dir, prefix=prefix,
                 basenames=get_basenames(self.basenames_, ext=ext))
     return self.output_data_
Developer: AlexandreAbraham, Project: pypreprocess, Lines of code: 27, Source: slice_timing.py


Example 8: t_test_images

def t_test_images(images, popmean=0.0):
    """Perform per-entry t-test on nibabel spatial images"""
    stack = nib.concat_images(images, check_affines=False)

    tstats, pvalues = sp.stats.ttest_1samp(stack.get_data(), popmean, axis=3)
    reject, corrected = mne.stats.fdr_correction(pvalues)

    return (image_like(stack, tstats), image_like(stack, pvalues),
            image_like(stack, corrected))
Developer: effigies, Project: sandbox, Lines of code: 9, Source: images.py


Example 9: mean_images

def mean_images(overlays):
    """Read set of spatial files, write average (ignoring zeros) to new file"""

    stack = nib.concat_images(overlays, check_affines=False)

    sums = np.sum(stack.get_data(), axis=3)
    counts = np.sum(stack.get_data() != 0, axis=3)
    counts[counts == 0] = 1

    return image_like(stack, sums / counts)
Developer: effigies, Project: sandbox, Lines of code: 10, Source: images.py


Example 10: check_niimgs

def check_niimgs(niimgs):
    niimgs_ = []
    for niimg in niimgs:
        if isinstance(niimg, str):
            niimgs_.append(Niimg(niimg))
        elif isinstance(niimg, list):
            niimgs_.append(nb.concat_images(niimg))
        else:
            niimgs_.append(niimg)
    return niimgs_
Developer: JohnGriffiths, Project: spym, Lines of code: 10, Source: utils.py


Example 11: compute_mean_image

def compute_mean_image(images, output_filename=None, threeD=False):
    """Computes the mean of --perhaps differently shaped-- images

    Parameters
    ----------
    images: string/image object, or list (-like) of
        image(s) whose mean we seek

    output_filename: string, optional (default None)
        output file where computed mean image will be written

    Returns
    -------
    mean nifti image object

    """

    # sanitize
    if not hasattr(images, '__iter__') or isinstance(images, str):
        images = [images]

    # make list of data and affines
    all_data = []
    all_affine = []
    for image in images:
        if not is_niimg(image):
            if isinstance(image, str):
                image = nibabel.load(image)
            else:
                image = nibabel.concat_images(image,
                                              check_affines=False
                                              )
        data = image.get_data()

        if threeD:
            if is_4D(image):
                data = data.mean(-1)

        all_data.append(data)
        all_affine.append(image.get_affine())

    # compute mean
    mean_data = np.mean(all_data, axis=0)

    # XXX I'm assuming all the affines are equal
    mean_affine = all_affine[0]

    mean_image = nibabel.Nifti1Image(mean_data, mean_affine)

    # save mean image
    if output_filename:
        nibabel.save(mean_image, output_filename)

    # return result
    return mean_image
Developer: VirgileFritsch, Project: pypreprocess, Lines of code: 55, Source: io_utils.py


Example 12: is_3D

def is_3D(image):
    """Check whether image is 3D"""

    if isinstance(image, str):
        image = nibabel.load(image)
    elif isinstance(image, list):
        image = nibabel.concat_images(image,
                                      check_affines=False
                                      )

    return len(image.shape) == 3
Developer: VirgileFritsch, Project: pypreprocess, Lines of code: 11, Source: io_utils.py


Example 13: _load_session

    def _load_session(x):
        if isinstance(x, str):
            x = nibabel.load(x).get_data()
        else:
            x = nibabel.concat_images(x).get_data()

        if x.ndim == 5:
            x = x[:, :, :, 0, :]
        else:
            assert x.ndim == 4, x.shape

        return x
Developer: fabianp, Project: pypreprocess, Lines of code: 12, Source: preproc_reporter.py


Example 14: _get_timeseries

def _get_timeseries(data, row_mask, affine=None):
    if isinstance(data, list):
        return nb.concat_images(np.array(data)[row_mask])
    elif isinstance(data, str):
        img = nb.load(data)
        return nb.Nifti1Image(img.get_data()[row_mask, :], img.get_affine())
    elif isinstance(data, (np.ndarray, np.memmap)):
        if affine is None:
            raise Exception("The affine is not optional " "when data is an array")
        return nb.Nifti1Image(data[row_mask, :], affine)
    else:
        raise ValueError('Data type "%s" not supported' % type(data))
Developer: chrplr, Project: pypreprocess, Lines of code: 12, Source: utils.py


Example 15: is_3D

def is_3D(image):
    """Check whether image is 3D"""

    if isinstance(image, str):
        image = nibabel.load(image)
    elif isinstance(image, list):
        image = nibabel.concat_images(image)

    if len(image.shape) == 3:
        return True
    else:
        return len(image.shape) == 4 and image.shape[-1] == 1
Developer: jcketz, Project: pypreprocess, Lines of code: 12, Source: io_utils.py


Example 16: __init__

    def __init__(self, data=None, Y=None, X=None, mask=None, output_file=None, **kwargs):
        if mask is not None:
            if not isinstance(mask, nib.Nifti1Image):
                if type(mask) is str:
                    if os.path.isfile(mask):
                        mask = nib.load(mask)
                else:
                    raise ValueError("mask is not a nibabel instance")
            self.mask = mask
        else:
            self.mask = nib.load(os.path.join(get_resource_path(),'MNI152_T1_2mm_brain_mask.nii.gz'))
        self.nifti_masker = NiftiMasker(mask_img=self.mask)

        if data is not None:
            if type(data) is str:
                data=nib.load(data)
            elif type(data) is list:
                data=nib.concat_images(data)
            elif not isinstance(data, nib.Nifti1Image):
                raise ValueError("data is not a nibabel instance")
            self.data = self.nifti_masker.fit_transform(data)

            # Collapse any extra dimension
            if any([x==1 for x in self.data.shape]):
                self.data=self.data.squeeze()
        else:
            self.data = np.array([])

        if Y is not None:
            if type(Y) is str:
                if os.path.isfile(Y):
                    Y=pd.read_csv(Y,header=None,index_col=None)
            if isinstance(Y, pd.DataFrame):
                if self.data.shape[0]!= len(Y):
                    raise ValueError("Y does not match the correct size of data")
                self.Y = Y
            else:
                raise ValueError("Make sure Y is a pandas data frame.")
        else:
            self.Y = pd.DataFrame()

        if X is not None:
            if self.data.shape[0]!= X.shape[0]:
                raise ValueError("X does not match the correct size of data")
            self.X = X
        else:
            self.X = pd.DataFrame()

        if output_file is not None:
            self.file_name = output_file
        else:
            self.file_name = []
Developer: Xen4n, Project: neurolearn, Lines of code: 52, Source: data.py


Example 17: _flatten_split_merge

def _flatten_split_merge(in_files):
    from builtins import bytes, str

    if isinstance(in_files, (bytes, str)):
        in_files = [in_files]

    nfiles = len(in_files)

    all_nii = []
    for fname in in_files:
        nii = nb.squeeze_image(nb.load(fname))

        if nii.get_data().ndim > 3:
            all_nii += nb.four_to_three(nii)
        else:
            all_nii.append(nii)

    if len(all_nii) == 1:
        LOGGER.warn('File %s cannot be split', all_nii[0])
        return in_files[0], in_files

    if len(all_nii) == nfiles:
        flat_split = in_files
    else:
        splitname = genfname(in_files[0], suffix='split%04d')
        flat_split = []
        for i, nii in enumerate(all_nii):
            flat_split.append(splitname % i)
            nii.to_filename(flat_split[-1])

    # Only one 4D file was supplied
    if nfiles == 1:
        merged = in_files[0]
    else:
        # More than one in_files - need merge
        merged = genfname(in_files[0], suffix='merged')
        nb.concat_images(all_nii).to_filename(merged)

    return merged, flat_split
Developer: rwblair, Project: preprocessing-workflow, Lines of code: 39, Source: images.py


Example 18: __init__

    def __init__(self, data, Y, algorithm=None, cv_dict=None, mask=None,
                 output_dir='.', **kwargs):
        """ Initialize Predict.

        Args:
            data: nibabel data instance
            Y: vector of training labels
            subject_id: vector of labels corresponding to each subject
            algorithm: Algorithm to use for prediction.  Must be one of 'svm', 'svr',
                'linear', 'logistic', 'lasso', 'ridge', 'ridgeClassifier','randomforest',
                or 'randomforestClassifier'
            cv_dict: Type of cross_validation to use. A dictionary of
                {'type': 'kfolds', 'n_folds': n},
                {'type': 'kfolds', 'n_folds': n, 'subject_id': holdout}, or
                {'type': 'loso', 'subject_id': holdout},
                where n = number of folds, and subject = vector of subject ids that corresponds to self.Y
            mask: binary nibabel mask
            output_dir: Directory to use for writing all outputs
            **kwargs: Additional keyword arguments to pass to the prediction algorithm

        """

        self.output_dir = output_dir

        if mask is not None:
            if type(mask) is not nib.nifti1.Nifti1Image:
                raise ValueError("mask is not a nibabel instance")
            self.mask = mask
        else:
            self.mask = nib.load(os.path.join(get_resource_path(),'MNI152_T1_2mm_brain_mask.nii.gz'))

        if type(data) is list:
            data=nib.concat_images(data)

        if not isinstance(data,(nib.nifti1.Nifti1Image, nib.nifti1.Nifti1Pair)):
            raise ValueError("data is not a nibabel instance")
        self.nifti_masker = NiftiMasker(mask_img=self.mask)
        self.data = self.nifti_masker.fit_transform(data)

        if type(Y) is list:
            Y=np.array(Y)
        if self.data.shape[0]!= len(Y):
            raise ValueError("Y does not match the correct size of data")
        self.Y = Y

        if algorithm is not None:
            self.set_algorithm(algorithm, **kwargs)

        if cv_dict is not None:
            self.cv = set_cv(cv_dict)
Developer: Xen4n, Project: neurolearn, Lines of code: 50, Source: analysis.py


Example 19: test_simulator

def test_simulator(tmpdir):
    sim = Simulator()
    r = 10
    sigma = 1
    y = [0, 1]
    n_reps = 3
    output_dir = str(tmpdir)
    sim.create_data(y, sigma, reps=n_reps, output_dir=output_dir)
    flist = glob.glob(str(tmpdir.join("centered*nii.gz")))

    shape = (91, 109, 91)
    sim_img = nb.concat_images(flist)
    assert len(sim.data) == n_reps * len(y)
    assert sim_img.shape[0:3] == shape
Developer: burnash, Project: neurolearn, Lines of code: 14, Source: test_simulator.py


Example 20: save_raw

def save_raw(subject_dir, doc):
    if 'bold' in doc:
        run_key = doc['runs']
        for label, session_data in zip(run_key, doc['bold']):
            if isinstance(session_data, (list, np.ndarray)):
                img = nb.concat_images(session_data, check_affines=False)
            else:
                img = nb.load(session_data)
            session_dir = make_dir(subject_dir, 'BOLD', label, strict=False)
            nb.save(img, os.path.join(session_dir, 'bold.nii.gz'))
    if 'anatomy' in doc:
        anat_dir = make_dir(subject_dir, 'anatomy', strict=False)
        img = nb.load(doc['anatomy'])
        nb.save(img, os.path.join(anat_dir, 'highres001.nii.gz'))
Developer: GaelVaroquaux, Project: nignore, Lines of code: 14, Source: openfmri.py



Note: The nibabel.concat_images examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's license. Do not repost without permission.

