Python nibabel.load Function Code Examples


This article collects and summarizes typical code examples of the nibabel.load function in Python. If you are struggling with questions such as how exactly the load function is used, how to call it, or what real-world usage looks like, the hand-picked examples below should help.

A total of 20 code examples of the load function are shown below, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code examples.
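
Before the individual examples, here is a minimal sketch of the common nibabel.load pattern that most of them share. The file name is a hypothetical placeholder, and note that the snippets in this article use the older get_data()/get_affine() API, while recent nibabel releases prefer get_fdata() and the .affine/.header attributes.

import nibabel as nib

# Load a NIfTI image from disk (the path is a placeholder for illustration).
img = nib.load('example_dwi.nii.gz')

# Access the voxel array, the voxel-to-world affine, and the header.
data = img.get_fdata()    # the snippets below call the older img.get_data()
affine = img.affine       # older code calls img.get_affine()
header = img.header

print(data.shape, affine.shape)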

Example 1: tensor_fitting

def tensor_fitting(in_file, bvals, bvecs, mask_file=None):
    """
    Use dipy to fit DTI

    Parameters
    ----------
    in_file : str
        Full path to a DWI data file.
    bvals : str
        Full path to a file containing gradient magnitude information (b-values).
    bvecs : str
        Full path to a file containing gradient direction information (b-vectors).
    mask_file : str, optional
        Full path to a file containing a binary mask. Defaults to use the entire volume.

    Returns
    -------
    TensorFit object, affine
    """
    img = nb.load(in_file)
    data = img.get_data()
    affine = img.get_affine()
    if mask_file is not None:
        mask = nb.load(mask_file).get_data()
    else:
        mask = None

    # Load information about the gradients:
    gtab = grad.gradient_table(bvals, bvecs)

    # Fit it
    tenmodel = dti.TensorModel(gtab)
    return tenmodel.fit(data, mask), affine
Developer ID: amoliu, Project: nipype, Lines: 33, Source: tensors.py


Example 2: _run_interface

    def _run_interface(self, runtime):
        from dipy.reconst import dki
        from dipy.io.utils import nifti1_symmat
        gtab = self._get_gradient_table()

        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.affine
        mask = None
        if isdefined(self.inputs.mask_file):
            mask = nb.load(self.inputs.mask_file).get_data()

        # Fit the DKI model
        kurtosis_model = dki.DiffusionKurtosisModel(gtab)
        kurtosis_fit = kurtosis_model.fit(data, mask)
        lower_triangular = kurtosis_fit.lower_triangular()
        img = nifti1_symmat(lower_triangular, affine)
        out_file = self._gen_filename('dki')
        nb.save(img, out_file)
        IFLOGGER.info('DKI parameters image saved as {i}'.format(i=out_file))

        # FA, MD, RD, and AD
        for metric in ['fa', 'md', 'rd', 'ad', 'mk', 'ak', 'rk']:
            data = getattr(kurtosis_fit, metric).astype('float32')
            out_name = self._gen_filename(metric)
            nb.Nifti1Image(data, affine).to_filename(out_name)
            IFLOGGER.info('DKI {metric} image saved as {i}'.format(i=out_name,
                                                                   metric=metric))

        return runtime
Developer ID: jamespooley, Project: nipype, Lines: 30, Source: tensors.py


Example 3: extract_subrois

def extract_subrois(timeseries_file, label_file, indices):
    """Extract voxel time courses for each subcortical roi index

    Parameters
    ----------

    timeseries_file: a 4D Nifti file
    label_file: a 3D file containing rois in the same space/size of the 4D file
    indices: a list of indices for ROIs to extract.

    Returns
    -------
    out_file: a text file containing time courses for each voxel of each roi
        The first four columns are: freesurfer index, i, j, k positions in the
        label file
    """
    img = nb.load(timeseries_file)
    data = img.get_data()
    roiimg = nb.load(label_file)
    rois = roiimg.get_data()
    prefix = split_filename(timeseries_file)[1]
    out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
    with open(out_ts_file, 'wt') as fp:
        for fsindex in indices:
            ijk = np.nonzero(rois == fsindex)
            ts = data[ijk]
            for i0, row in enumerate(ts):
                fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
                                           ijk[1][i0], ijk[2][i0]) +
                         ','.join(['%.10f' % val for val in row]) + '\n')
    return out_ts_file
Developer ID: Conxz, Project: nipype, Lines: 31, Source: rsfmri_vol_surface_preprocessing_nipy.py


Example 4: intersubjectconsensus

def intersubjectconsensus():
    """Compute inter-subjects clustering consensus.

    """
    base_dir = r'/nfs/h1/workingshop/huanglijie/uni_mul_analysis'
    db_dir = os.path.join(base_dir, 'multivariate', 'detection', 'mvpcluster')

    n_clusters = 60

    mask_file = os.path.join(base_dir, 'multivariate', 'detection',
                             'mask.nii.gz')
    mask = nib.load(mask_file).get_data()

    for n in range(2, n_clusters + 1):
        merged_file = os.path.join(db_dir, 'merged_cluster_'+str(n)+'.nii.gz')
        merged_data = nib.load(merged_file).get_data()
        n_subjs = merged_data.shape[3]
        mtx = np.zeros((n_subjs, n_subjs))
        for i in range(n_subjs):
            for j in range(n_subjs):
                data_i = merged_data[..., i]
                data_j = merged_data[..., j]
                vtr_i = data_i[np.nonzero(mask)]
                vtr_j = data_j[np.nonzero(mask)]
                tmp = metrics.adjusted_mutual_info_score(vtr_i, vtr_j)
                mtx[i, j] = tmp
        outfile = os.path.join(db_dir, 'consensus_'+str(n)+'.csv')
        np.savetxt(outfile, mtx, delimiter=',')
Developer ID: sealhuang, Project: mvpclustering, Lines: 29, Source: util.py


Example 5: get_mean_timeseries

def get_mean_timeseries(infile,roi,mask):
    import os
    import nibabel as nib
    from nipype.utils.filemanip import fname_presuffix, split_filename
    import numpy as np

    img = nib.load(infile)
    data, aff = img.get_data(), img.get_affine()

    roi_img = nib.load(roi) 
    roi_data, roi_affine = roi_img.get_data(), roi_img.get_affine()

    if len(roi_data.shape) > 3:
        roi_data = roi_data[:,:,:,0]

    mask = nib.load(mask).get_data()
    roi_data = (roi_data > 0).astype(int) + (mask>0).astype(int)

    _,roiname,_ = split_filename(roi)
    outfile = fname_presuffix(infile,"%s_"%roiname,'.txt',newpath=os.path.abspath('.'),use_ext=False)
    
    out_data = np.mean(data[roi_data>1,:],axis=0)
    print(out_data.shape)
    
    np.savetxt(outfile,out_data)

    return outfile, roiname
Developer ID: INCF, Project: BrainImagingPipelines, Lines: 27, Source: seed_based_connectivity2.py


Example 6: slicerGifCompare

    def slicerGifCompare(self, source1, source2, target, gifSpeed=100, vmax=None, boundaries=None):
        """Create a animated gif from a 4d NIfTI
        Args:
            source: 4D NIfTI image
            target: outputfile gif name
            gifSpeed: delay between images (tens of ms), default=30
        """
        gifId = self.__idGenerator()

        image1 = nibabel.load(source1)
        imageData1 = image1.get_data()

        image2 = nibabel.load(source2)
        imageData2 = image2.get_data()

        if vmax is None:
            vmax=numpy.percentile(imageData1, 99)

        imageList = []
        for num, image in enumerate([imageData1, imageData2]):
            output = gifId + '{0:04}.png'.format(num)
            self.slicerPng(image[:,:,:,2], output, vmax=vmax, isData=True, boundaries=boundaries)
            imageList.append(output)

        self.__imageList2Gif(imageList, target, gifSpeed)
        #Cleaning temp files
        cmd = 'rm {}*.png'.format(gifId)
        self.launchCommand(cmd)

        return target
Developer ID: sbrambati, Project: toad, Lines: 30, Source: qa.py


Example 7: mrtrix_spherical_functions

def mrtrix_spherical_functions():
    """Spherical functions represented by spherical harmonic coefficients and
    evaluated on a discrete sphere.

    Returns
    -------
    func_coef : array (2, 3, 4, 45)
        Functions represented by the coefficients associated with the
        mrtrix spherical harmonic basis of order 8.
    func_discrete : array (2, 3, 4, 81)
        Functions evaluated on `sphere`.
    sphere : Sphere
        The discrete sphere, points on the surface of a unit sphere, used to
        evaluate the functions.

    Notes
    -----
    These coefficients were obtained by using the dwi2SH command of mrtrix.

    """
    func_discrete = load(pjoin(THIS_DIR, "func_discrete.nii.gz")).get_data()
    func_coef = load(pjoin(THIS_DIR, "func_coef.nii.gz")).get_data()
    gradients = np.loadtxt(pjoin(THIS_DIR, "sphere_grad.txt"))
    # gradients[0] and the first volume of func_discrete, 
    # func_discrete[..., 0], are associated with the b=0 signal.
    # gradients[:, 3] are the b-values for each gradient/volume.
    sphere = Sphere(xyz=gradients[1:, :3])
    return func_coef, func_discrete[..., 1:], sphere
Developer ID: bevlin510, Project: dipy, Lines: 28, Source: __init__.py


Example 8: read_mni_template

def read_mni_template(contrast="T2"):
    """
    Read the MNI template from disk

    Parameters
    ----------
    contrast : list or string, optional
        Which of the contrast templates to read. Two contrasts are available:
        "T1" and "T2", so you can either enter one of these strings as input,
        or a list containing both of them.

    Returns
    -------
    list : contains the nibabel.Nifti1Image objects requested, according to the
        order they were requested in the input.

    Examples
    --------
    Get only the T2 file:
    >>> T2_nifti = read_mni_template("T2") # doctest: +SKIP
    Get both files in this order:
    >>> T1_nifti, T2_nifti = read_mni_template(["T1", "T2"]) # doctest: +SKIP
    """
    files, folder = fetch_mni_template()
    file_dict = {"T1": pjoin(folder, 'mni_icbm152_t1_tal_nlin_asym_09a.nii'),
                 "T2": pjoin(folder, 'mni_icbm152_t2_tal_nlin_asym_09a.nii')}
    if isinstance(contrast, str):
        return nib.load(file_dict[contrast])
    else:
        out_list = []
        for k in contrast:
            out_list.append(nib.load(file_dict[k]))
    return out_list
Developer ID: theaverageguy, Project: dipy, Lines: 33, Source: fetcher.py


Example 9: export

    def export(self, nidm_version, export_dir):
        """
        Create prov graph.
        """
        if self.expl_mean_sq_file is None:
            # Create Contrast Explained Mean Square Map as fstat<num>.nii.gz
            # multiplied by sigmasquareds.nii.gz and save it in export_dir
            fstat_img = nib.load(self.stat_file)
            fstat = fstat_img.get_data()

            sigma_sq_img = nib.load(self.sigma_sq_file)
            sigma_sq = sigma_sq_img.get_data()

            expl_mean_sq = nib.Nifti1Image(
                fstat*sigma_sq, fstat_img.get_qform())

            self.filename = ("ContrastExplainedMeanSquareMap" +
                             self.num + ".nii.gz")
            self.expl_mean_sq_file = os.path.join(
                export_dir, self.filename)
            nib.save(expl_mean_sq, self.expl_mean_sq_file)

        self.file = NIDMFile(self.id, self.expl_mean_sq_file,
                             filename=self.filename,
                             sha=self.sha, fmt=self.fmt)

        # Contrast Explained Mean Square Map entity
        path, filename = os.path.split(self.expl_mean_sq_file)
        self.add_attributes((
            (PROV['type'], self.type),
            (NIDM_IN_COORDINATE_SPACE, self.coord_space.id),
            (PROV['label'], self.label)))
Developer ID: cmaumet, Project: nidmresults, Lines: 32, Source: contrast.py


Example 10: _run_interface

    def _run_interface(self, runtime):
        img = nb.load(self.inputs.in_file[0])
        header = img.get_header().copy()
        vollist = [nb.load(filename) for filename in self.inputs.in_file]
        data = np.concatenate([vol.get_data().reshape(
            vol.get_shape()[:3] + (-1,)) for vol in vollist], axis=3)
        if data.dtype.kind == 'i':
            header.set_data_dtype(np.float32)
            data = data.astype(np.float32)
        if isdefined(self.inputs.regress_poly):
            timepoints = img.get_shape()[-1]
            X = np.ones((timepoints, 1))
            for i in range(self.inputs.regress_poly):
                X = np.hstack((X, legendre(
                    i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
            betas = np.dot(np.linalg.pinv(X), np.rollaxis(data, 3, 2))
            datahat = np.rollaxis(np.dot(X[:, 1:],
                                         np.rollaxis(
                                             betas[1:, :, :, :], 0, 3)),
                                  0, 4)
            data = data - datahat
            img = nb.Nifti1Image(data, img.get_affine(), header)
            nb.save(img, self._gen_output_file_name('detrended'))
        meanimg = np.mean(data, axis=3)
        stddevimg = np.std(data, axis=3)
        tsnr = meanimg / stddevimg
        img = nb.Nifti1Image(tsnr, img.get_affine(), header)
        nb.save(img, self._gen_output_file_name())
        img = nb.Nifti1Image(meanimg, img.get_affine(), header)
        nb.save(img, self._gen_output_file_name('mean'))
        img = nb.Nifti1Image(stddevimg, img.get_affine(), header)
        nb.save(img, self._gen_output_file_name('stddev'))
        return runtime
Developer ID: cdla, Project: nipype, Lines: 33, Source: misc.py


Example 11: check_images

def check_images(file1, file2):
    """Check that 2 images have the same affines and data shapes.

    Parameters
    ----------
    file1 : str
        Path to the first nifti image

    file2 : str
        Path to the second nifti image
    """
    img = nibabel.load(file1)
    shape1 = np.shape(img.get_data())
    affine1 = img.get_affine()
    img = nibabel.load(file2)
    shape2 = np.shape(img.get_data())
    affine2 = img.get_affine()
    if shape1 != shape2:
        raise ValueError('Images got different shapes: {0} of shape {1}, {2} '
                         'of shape {3}'.format(file1, shape1, file2, shape2))

    if np.any(affine1 != affine2):
        raise ValueError('Images got different affines: {0} has affine {1}, '
                         '{2} has affine {3}'.format(file1, affine1,
                                                     file2, affine2))
Developer ID: salma1601, Project: process-asl, Lines: 25, Source: _utils.py


Example 12: test_split_and_merge

def test_split_and_merge(tmpdir):
    import numpy as np
    import nibabel as nb
    import os.path as op
    import os

    from nipype.algorithms.misc import split_rois, merge_rois

    in_mask = example_data('tpms_msk.nii.gz')
    dwfile = op.join(str(tmpdir), 'dwi.nii.gz')
    mskdata = nb.load(in_mask).get_data()
    aff = nb.load(in_mask).affine

    dwshape = (mskdata.shape[0], mskdata.shape[1], mskdata.shape[2], 6)
    dwdata = np.random.normal(size=dwshape)
    os.chdir(str(tmpdir))
    nb.Nifti1Image(dwdata.astype(np.float32),
                   aff, None).to_filename(dwfile)

    resdw, resmsk, resid = split_rois(dwfile, in_mask, roishape=(20, 20, 2))
    merged = merge_rois(resdw, resid, in_mask)
    dwmerged = nb.load(merged).get_data()

    dwmasked = dwdata * mskdata[:, :, :, np.newaxis]

    assert np.allclose(dwmasked, dwmerged)
Developer ID: shoshber, Project: nipype, Lines: 26, Source: test_splitmerge.py


Example 13: test_median_otsu_flow

def test_median_otsu_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_data('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        mo_flow = MedianOtsuFlow()
        mo_flow.run(data_path, out_dir=out_dir, save_masked=save_masked,
                             median_radius=median_radius, numpass=numpass,
                             autocrop=autocrop, vol_idx=vol_idx, dilate=dilate)

        mask_name = mo_flow.last_generated_outputs['out_mask']
        masked_name = mo_flow.last_generated_outputs['out_masked']

        masked, mask = median_otsu(volume, median_radius,
                                   numpass, autocrop,
                                   vol_idx, dilate)

        result_mask_data = nib.load(join(out_dir, mask_name)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        result_masked_data = nib.load(join(out_dir, masked_name)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
Developer ID: MarcCote, Project: dipy, Lines: 28, Source: test_segment.py


Example 14: _run_interface

    def _run_interface(self, runtime):
        src_nii = nb.load(self.inputs.src_file)
        src = NiftiWrapper(src_nii, make_empty=True)
        dest_nii = nb.load(self.inputs.dest_file)
        dest = NiftiWrapper(dest_nii, make_empty=True)
        classes = src.meta_ext.get_valid_classes()
        if self.inputs.include_classes:
            classes = [cls
                       for cls in classes
                       if cls in self.inputs.include_classes
                      ]
        if self.inputs.exclude_classes:
            classes = [cls
                       for cls in classes
                       if cls not in self.inputs.exclude_classes
                      ]

        for cls in classes:
            src_dict = src.meta_ext.get_class_dict(cls)
            dest_dict = dest.meta_ext.get_class_dict(cls)
            dest_dict.update(src_dict)
        # Update the shape and slice dimension to reflect the meta extension update.
        dest.meta_ext.slice_dim = src.meta_ext.slice_dim
        dest.meta_ext.shape = src.meta_ext.shape

        self.out_path = path.join(os.getcwd(),
                                  path.basename(self.inputs.dest_file))
        dest.to_filename(self.out_path)

        return runtime
Developer ID: Alunisiira, Project: nipype, Lines: 30, Source: dcmstack.py


Example 15: reorient_bvecs

def reorient_bvecs(in_dwi, old_dwi, in_bvec):
    """
    Checks reorientations of ``in_dwi`` w.r.t. ``old_dwi`` and
    reorients the in_bvec table accordingly.
    """
    import os
    import numpy as np
    import nibabel as nb

    name, fext = os.path.splitext(os.path.basename(in_bvec))
    if fext == '.gz':
        name, _ = os.path.splitext(name)
    out_file = os.path.abspath('%s_reorient.bvec' % name)
    bvecs = np.loadtxt(in_bvec).T
    new_bvecs = []

    N = nb.load(in_dwi).get_affine()
    O = nb.load(old_dwi).get_affine()
    RS = N.dot(np.linalg.inv(O))[:3, :3]
    sc_idx = np.where((np.abs(RS) != 1) & (RS != 0))
    S = np.ones_like(RS)
    S[sc_idx] = RS[sc_idx]
    R = RS/S

    new_bvecs = [R.dot(b) for b in bvecs]
    np.savetxt(out_file, np.array(new_bvecs).T, fmt='%0.15f')
    return out_file
Developer ID: Alunisiira, Project: nipype, Lines: 27, Source: utils.py


Example 16: data_generator

def data_generator(inputsamplelinks, inputlabellinks, conf, batch_size, rdm=True):
    while True:
        images = inputsamplelinks
        targets = inputlabellinks
        index = [i for i in range(len(images))]

        if rdm:
            rng_state = numpy.random.get_state()
            numpy.random.shuffle(images)
            numpy.random.set_state(rng_state)
            numpy.random.shuffle(targets)

        sample_in_batch = 0
        all_img = numpy.zeros((2,2))
        all_targ = numpy.zeros((2,2))
        for i in range(len(images)):
            img = nibabel.load(images[i]).get_data()
            targ = nibabel.load(targets[i]).get_data()
            #targ = targ/255
            img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2], img.shape[3]))
            targ = targ.reshape((1, targ.shape[0], targ.shape[1], targ.shape[2]))
            targ = multiclass(targ, conf)
            if sample_in_batch == 0:
                all_img = img
                all_targ = targ
                sample_in_batch += 1
            else:
                all_img = numpy.concatenate((all_img, img))
                all_targ = numpy.concatenate((all_targ, targ))
                sample_in_batch += 1
            if sample_in_batch == batch_size:
                sample_in_batch = 0
                yield (numpy.copy(all_img), numpy.copy(all_targ))
Developer ID: ZheweiMedia, Project: DL_experiments, Lines: 33, Source: processData.py


Example 17: enhance

def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from skimage import exposure, img_as_int

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_enh.nii.gz' % fname)

    im = nb.load(in_file)
    imdata = im.get_data()
    imshape = im.get_shape()

    if in_mask is not None:
        msk = nb.load(in_mask).get_data()
        msk[msk > 0] = 1
        msk[msk < 1] = 0
        imdata = imdata * msk

    immin = imdata.min()
    imdata = (imdata - immin).astype(np.uint16)

    adapted = exposure.equalize_adapthist(imdata.reshape(imshape[0], -1),
                                          clip_limit=clip_limit)

    nb.Nifti1Image(adapted.reshape(imshape), im.get_affine(),
                   im.get_header()).to_filename(out_file)

    return out_file
Developer ID: Alunisiira, Project: nipype, Lines: 32, Source: utils.py


Example 18: extract_noise_components

def extract_noise_components(realigned_file, noise_mask_file, num_components):
    """Derive components most reflective of physiological noise
    """
    import os
    from nibabel import load
    import numpy as np
    import scipy as sp
    imgseries = load(realigned_file)
    components = None
    mask = load(noise_mask_file).get_data()
    voxel_timecourses = imgseries.get_data()[mask > 0]
    voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
    # remove mean and normalize by variance
    # voxel_timecourses.shape == [nvoxels, time]
    X = voxel_timecourses.T
    stdX = np.std(X, axis=0)
    stdX[stdX == 0] = 1.
    stdX[np.isnan(stdX)] = 1.
    stdX[np.isinf(stdX)] = 1.
    X = (X - np.mean(X, axis=0))/stdX
    u, _, _ = sp.linalg.svd(X, full_matrices=False)
    if components is None:
        components = u[:, :num_components]
    else:
        components = np.hstack((components, u[:, :num_components]))
    components_file = os.path.join(os.getcwd(), 'noise_components.txt')
    np.savetxt(components_file, components, fmt="%.10f")
    return components_file
Developer ID: Alunisiira, Project: nipype, Lines: 28, Source: resting.py


Example 19: _run_interface

    def _run_interface(self, runtime):
        maskdata = nb.load(self.inputs.mask).get_data()
        maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata)))

        session_datas = [[nb.load(fname).get_data()[maskdata].reshape(-1, 1) for fname in sessions] for sessions in self.inputs.subjects_sessions]
        list_of_sessions = [np.dstack(session_data) for session_data in session_datas]
        all_data = np.hstack(list_of_sessions)
        icc = np.zeros(session_datas[0][0].shape)
        session_F = np.zeros(session_datas[0][0].shape)
        session_var = np.zeros(session_datas[0][0].shape)
        subject_var = np.zeros(session_datas[0][0].shape)

        for x in range(icc.shape[0]):
            Y = all_data[x, :, :]
            icc[x], subject_var[x], session_var[x], session_F[x], _, _ = ICC_rep_anova(Y)

        nim = nb.load(self.inputs.subjects_sessions[0][0])
        new_data = np.zeros(nim.shape)
        new_data[maskdata] = icc.reshape(-1,)
        new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
        nb.save(new_img, 'icc_map.nii')

        new_data = np.zeros(nim.shape)
        new_data[maskdata] = session_var.reshape(-1,)
        new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
        nb.save(new_img, 'session_var_map.nii')

        new_data = np.zeros(nim.shape)
        new_data[maskdata] = subject_var.reshape(-1,)
        new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
        nb.save(new_img, 'subject_var_map.nii')

        return runtime
Developer ID: DimitriPapadopoulos, Project: nipype, Lines: 33, Source: icc.py


Example 20: readDataset

def readDataset(niifilename, niiBrainMaskFilename, btablefilename, parcellationfilename=None):
     
    # load the masked diffusion dataset
    diffusionData = nib.load(niifilename).get_data()
    affine        = nib.load(niifilename).get_affine()
    
    # load the brain mask
    mask    = nib.load(niiBrainMaskFilename).get_data()
    
    rows, cols, nSlices, nDirections = diffusionData.shape
    
    bvals, bvecs = readbtable(btablefilename)
    gtable       = gradient_table(bvals, bvecs)
    
    if parcellationfilename is not None:
        #parcellation = nib.load(parcellationfilename).get_data()
        parcellation,_ = nrrd.read(parcellationfilename)
    
        if parcellation.shape[2] != nSlices:  # for the second phantom (unc_res)
            parcellation = parcellation[:,:,parcellation.shape[2]-nSlices:]        
        parcellation = np.squeeze(parcellation)
    else:
        parcellation = None
    
    return diffusionData, mask, affine, gtable, parcellation
Developer ID: Nick3869, Project: Commandline, Lines: 25, Source: io.py



Note: The nibabel.load examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.

