This article collects typical usage examples of the Python function nilearn.image.iter_img. If you have been wondering what iter_img does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
Twenty code examples of iter_img are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
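Before the project examples, here is a minimal, self-contained sketch (synthetic data, illustrative only) of what iter_img does: given a 4D image, it lazily yields one 3D volume at a time.

import numpy as np
import nibabel as nib
from nilearn.image import iter_img

# Build a small synthetic 4D image: 5 volumes of shape (4, 4, 4).
data = np.random.RandomState(0).rand(4, 4, 4, 5)
img_4d = nib.Nifti1Image(data, affine=np.eye(4))

# iter_img lazily yields each 3D volume of the 4D image in turn.
for i, vol in enumerate(iter_img(img_4d)):
    print(i, vol.shape)  # each vol is a 3D Nifti1Image of shape (4, 4, 4)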
Example 1: test_component_sign
def test_component_sign():
    # We should have a heuristic that flips the sign of components in
    # CanICA to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.

    # make data (SVD)
    rng = np.random.RandomState(0)
    shape = (20, 10, 1)
    affine = np.eye(4)
    components = _make_canica_components(shape)

    # make +ve
    for mp in components:
        mp[rng.randn(*mp.shape) > .8] *= -5.
        assert_less_equal(mp.max(), -mp.min())  # goal met ?

    # synthesize data with given components
    data = _make_data_from_components(components, affine, shape, rng=rng,
                                      n_subjects=2)
    mask_img = nibabel.Nifti1Image(np.ones(shape, dtype=np.int8), affine)

    # run CanICA many times (this is known to produce different results)
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img)
    for _ in range(3):
        canica.fit(data)
        for mp in iter_img(canica.masker_.inverse_transform(
                canica.components_)):
            mp = mp.get_data()
            assert_less_equal(-mp.min(), mp.max())

Developer ID: salma1601, Project: nilearn, Lines of code: 29, Source file: test_canica.py
Example 2: filter_ics
def filter_ics(comps_img, mask, zscore=2., mode='+-'):
    """
    Generator for masking and thresholding each IC spatial map.

    Parameters
    ----------
    comps_img: img-like
        The 'raw' ICC maps image.

    mask: img-like
        If not None, this mask will be applied at the end of the process.

    zscore: float
        The threshold value; each ICC is z-scored before being thresholded
        at this value.

    mode: str
        Choices: '+' for positive threshold,
                 '+-' for positive and negative threshold and
                 '-' for negative threshold.

    Returns
    -------
    icc_filts: list of nibabel.NiftiImage
        Thresholded and masked ICCs.
    """
    # store the average value of the blob in a list
    mask = niimg.load_img(mask)
    for i, icimg in enumerate(iter_img(comps_img)):
        yield filter_icc(icimg, mask=mask, thr=zscore, zscore=True, mode=mode)

Developer ID: Neurita, Project: pypes, Lines of code: 32, Source file: utils.py
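A minimal usage sketch for the generator above; the file paths are hypothetical, and filter_ics / filter_icc are assumed to be importable from the pypes module shown in this example.

from nilearn import image as niimg

# Hypothetical inputs: an ICA components image and a grey-matter mask.
comps_img = niimg.load_img('canica_components.nii.gz')
thresholded_ics = list(filter_ics(comps_img, mask='gm_mask.nii.gz',
                                  zscore=2.0, mode='+-'))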
Example 3: load_vols
def load_vols(niimgs):
    """Loads a nifti image (or a bag of them) into a list of 3D volumes.

    Parameters
    ----------
    niimgs: 3 or 4D Niimg-like object
        If niimgs is an iterable, checks if data is really 4D. Then,
        considering that it is a list of niimgs, load them one by one.
        If niimg is a string, consider it as a path to a Nifti image and
        call nibabel.load on it. If it is an object, check if get_data
        and get_affine methods are present, raise an Exception otherwise.

    Returns
    -------
    niimgs_: list of nifti image objects
        The loaded volumes.
    """
    # try loading 4d
    try:
        niimgs = list(check_niimg_4d(niimgs, return_iterator=True))
    except TypeError:
        # probably not 4d
        niimgs = [check_niimg(niimgs)]
    except ValueError:
        # probably inconsistent affines
        pass
    try:
        # try loading volumes one-by-one
        if isinstance(niimgs, _basestring):
            niimgs = [niimgs]
        return [check_niimg(niimg, ensure_ndim=3) for niimg in niimgs]
    except TypeError:
        pass

    # collect the loaded volumes into a list
    if is_niimg(niimgs):
        # should be 3d, squash 4th dimension otherwise
        if niimgs.shape[-1] == 1:
            return [nibabel.Nifti1Image(niimgs.get_data()[:, :, :, 0],
                                        niimgs.get_affine())]
        else:
            return list(iter_img(niimgs))
    else:
        niimgs = list(niimgs)
        if len(niimgs) == 1:
            niimgs = niimgs[0]
        return list(iter_img(niimgs))

Developer ID: chrplr, Project: pypreprocess, Lines of code: 45, Source file: io_utils.py
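A usage sketch for load_vols; the path is hypothetical. Whatever the input form (a 4D file, a 3D image, or a list of them), the helper returns a flat list of 3D volumes.

# Hypothetical 4D functional run; the result is a list of 3D Nifti1Image objects.
vols = load_vols('func_4d.nii.gz')
print(len(vols), vols[0].shape)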
Example 4: _filter_ic_imgs
def _filter_ic_imgs(self, ic_file):
    if self.zscore > 0:
        do_zscore = True
    else:
        do_zscore = False

    mask = niimg.load_img(self.mask_file)
    return [filter_icc(icimg, mask=mask, thr=self.zscore, zscore=do_zscore, mode=self.mode)
            for icimg in iter_img(ic_file)]

Developer ID: Neurita, Project: pypes, Lines of code: 9, Source file: plotting.py
Example 5: run_mini_pipeline
def run_mini_pipeline():
    atlas = datasets.fetch_atlas_msdl()
    atlas_img = atlas['maps']
    labels = pd.read_csv(atlas['labels'])['name']

    masker = NiftiMapsMasker(maps_img=atlas_img, standardize=True,
                             memory='/tmp/nilearn', verbose=0)

    data = datasets.fetch_adhd(number_subjects)
    figures_folder = '../figures/'
    count = 0

    for func_file, confound_file in zip(data.func, data.confounds):

        # fit the data to the atlas mask, regress out confounds
        time_series = masker.fit_transform(func_file, confounds=confound_file)

        correlation = np.corrcoef(time_series.T)

        # plotting starts here
        plt.figure(figsize=(10, 10))
        plt.imshow(correlation, interpolation="nearest")
        x_ticks = plt.xticks(range(len(labels)), labels, rotation=90)
        y_ticks = plt.yticks(range(len(labels)), labels)
        corr_file = figures_folder + 'subject_number_' + str(count) + '_correlation.pdf'
        plt.savefig(corr_file)

        atlas_region_coords = [plotting.find_xyz_cut_coords(img) for img in image.iter_img(atlas_img)]
        threshold = 0.6
        plotting.plot_connectome(correlation, atlas_region_coords, edge_threshold=threshold)
        connectome_file = figures_folder + 'subject_number_' + str(count) + '_connectome.pdf'
        plt.savefig(connectome_file)

        # graph setup
        # binarize correlation matrix
        correlation[correlation < threshold] = 0
        correlation[correlation != 0] = 1
        graph = nx.from_numpy_matrix(correlation)

        partition = louvain.best_partition(graph)

        values = [partition.get(node) for node in graph.nodes()]
        plt.figure()
        nx.draw_spring(graph, cmap=plt.get_cmap('jet'), node_color=values,
                       node_size=30, with_labels=True)
        graph_file = figures_folder + 'subject_number_' + str(count) + '_community.pdf'
        plt.savefig(graph_file)

        count += 1
        plt.close('all')

Developer ID: flrgsr, Project: Mini-Pipeline-Community, Lines of code: 54, Source file: nilearn_pipeline.py
Example 6: _process_inputs
def _process_inputs(self):
    ''' validate and process inputs into useful form.
    Returns a list of nilearn maskers and the list of corresponding label
    names.'''
    import nilearn.input_data as nl
    import nilearn.image as nli

    label_data = nli.concat_imgs(self.inputs.label_files)
    maskers = []

    # determine form of label files, choose appropriate nilearn masker
    if np.amax(label_data.get_data()) > 1:  # 3d label file
        n_labels = np.amax(label_data.get_data())
        maskers.append(nl.NiftiLabelsMasker(label_data))
    else:  # 4d labels
        n_labels = label_data.get_data().shape[3]
        if self.inputs.incl_shared_variance:  # independent computation
            for img in nli.iter_img(label_data):
                maskers.append(
                    nl.NiftiMapsMasker(
                        self._4d(img.get_data(), img.affine)))
        else:  # one computation fitting all
            maskers.append(nl.NiftiMapsMasker(label_data))

    # check label list size
    if not np.isclose(int(n_labels), n_labels):
        raise ValueError(
            'The label files {} contain invalid value {}. Check input.'
            .format(self.inputs.label_files, n_labels))

    if len(self.inputs.class_labels) != n_labels:
        raise ValueError('The length of class_labels {} does not '
                         'match the number of regions {} found in '
                         'label_files {}'.format(self.inputs.class_labels,
                                                 n_labels,
                                                 self.inputs.label_files))

    if self.inputs.include_global:
        global_label_data = label_data.get_data().sum(
            axis=3)  # sum across all regions
        global_label_data = np.rint(global_label_data).astype(int).clip(
            0, 1)  # binarize
        global_label_data = self._4d(global_label_data, label_data.affine)
        global_masker = nl.NiftiLabelsMasker(
            global_label_data, detrend=self.inputs.detrend)
        maskers.insert(0, global_masker)
        self.inputs.class_labels.insert(0, 'GlobalSignal')

    for masker in maskers:
        masker.set_params(detrend=self.inputs.detrend)

    return maskers

Developer ID: TheChymera, Project: nipype, Lines of code: 52, Source file: nilearn.py
Example 7: test_threshold_img
def test_threshold_img():
    # to check whether passes with valid threshold inputs
    shape = (10, 20, 30)
    maps, _ = data_gen.generate_maps(shape, n_regions=4)
    affine = np.eye(4)
    mask_img = nibabel.Nifti1Image(np.ones((shape), dtype=np.int8), affine)

    for img in iter_img(maps):
        # when threshold is a float value
        thr_maps_img = threshold_img(img, threshold=0.8)
        # when we provide mask image
        thr_maps_percent = threshold_img(img, threshold=1, mask_img=mask_img)
        # when threshold is a percentile
        thr_maps_percent2 = threshold_img(img, threshold='2%')

Developer ID: jeromedockes, Project: nilearn, Lines of code: 14, Source file: test_image.py
Example 8: plot_ica_components
def plot_ica_components(components_img, **kwargs):
    """ Plot the components IC spatial maps in a grid."""
    import math
    from nilearn.image import iter_img
    from nilearn.plotting import plot_stat_map
    from matplotlib import pyplot as plt
    from matplotlib import gridspec

    n_ics = len(list(iter_img(components_img)))
    n_rows = math.ceil(n_ics / 2)
    fig = plt.figure(figsize=(6, 3 * n_rows), facecolor='black')
    gs = gridspec.GridSpec(n_rows, 2)

    plots = []
    for i, ic_img in enumerate(iter_img(components_img)):
        ax = plt.subplot(gs[i])
        p = plot_stat_map(ic_img, display_mode="z", title="IC {}".format(i + 1),
                          cut_coords=1, colorbar=False, figure=fig, axes=ax, **kwargs)
        plots.append(p)

    for p in plots:
        p.close()

    return fig

Developer ID: Neurita, Project: pypes, Lines of code: 24, Source file: plot.py
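A brief usage sketch for the grid-plotting helper above; the input file name is hypothetical, and extra keyword arguments are forwarded to plot_stat_map.

# Hypothetical ICA output; save the grid of IC maps to disk.
fig = plot_ica_components('canica_components.nii.gz', threshold=2.0)
fig.savefig('ic_grid.png', facecolor=fig.get_facecolor(), edgecolor='none')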
Example 9: get_largest_blobs
def get_largest_blobs(ic_maps):
    """ Generator for the largest blobs in each IC spatial map.
    These should be masked and thresholded.

    Parameters
    ----------
    ic_maps: sequence of niimg-like

    Returns
    -------
    blobs: generator of niimg-like
    """
    # store the average value of the blob in a list
    for i, icimg in enumerate(iter_img(ic_maps)):
        yield niimg.new_img_like(icimg, largest_connected_component(icimg.get_data()))

Developer ID: Neurita, Project: pypes, Lines of code: 15, Source file: utils.py
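Since the docstring notes that the input maps should already be masked and thresholded, a plausible sketch (hypothetical paths; filter_ics and get_largest_blobs taken from the pypes examples above) chains the two generators:

# Threshold/mask each IC first, then keep only its largest connected blob.
thresholded = filter_ics('canica_components.nii.gz', mask='gm_mask.nii.gz', zscore=2.0)
blobs = list(get_largest_blobs(thresholded))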
Example 10: test_component_sign
def test_component_sign():
    # We should have a heuristic that flips the sign of components in
    # CanICA to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=2,
                                                             noisy=True)

    # run CanICA many times (this is known to produce different results)
    canica = CanICA(n_components=4, random_state=rng, mask=mask_img)
    for _ in range(3):
        canica.fit(data)
        for mp in iter_img(canica.components_img_):
            mp = mp.get_data()
            assert_less_equal(-mp.min(), mp.max())

Developer ID: bthirion, Project: nilearn, Lines of code: 15, Source file: test_canica.py
Example 11: test_component_sign
def test_component_sign():
    # Regression test
    # We should have a heuristic that flips the sign of components in
    # DictLearning to have more positive values than negative values, for
    # instance by making sure that the largest value is positive.
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=2, noisy=True)

    for mp in components:
        assert_less_equal(-mp.min(), mp.max())

    dict_learning = DictLearning(n_components=4, random_state=rng, mask=mask_img,
                                 smoothing_fwhm=0.0, alpha=1)
    dict_learning.fit(data)
    for mp in iter_img(dict_learning.masker_.inverse_transform(dict_learning.components_)):
        mp = mp.get_data()
        assert_less_equal(np.sum(mp[mp <= 0]), np.sum(mp[mp > 0]))

Developer ID: CandyPythonFlow, Project: nilearn, Lines of code: 15, Source file: test_dict_learning.py
Example 12: plot_icmaps
def plot_icmaps(self, outtype='png', **kwargs):
    """ Plot the thresholded IC spatial maps and store the outputs in the ICA results folder.

    Parameters
    ----------
    outtype: str
        Extension (without the '.') of the output files; specifies the plot image format you want.

    Returns
    -------
    all_icc_plot_f: str

    iccs_plot_f: str

    sliced_ic_plots: list of str
    """
    # specify the file paths
    all_icc_plot_f = op.join(self.ica_dir, 'all_components_zscore_{}.{}'.format(self.zscore, outtype))
    iccs_plot_f = op.join(self.ica_dir, 'ic_components_zscore_{}.{}'.format(self.zscore, outtype))
    icc_multi_slice = op.join(self.ica_dir, 'ic_map_{}_zscore_{}.{}')

    # make the plots
    fig1 = plot_ica_components(self._icc_imgs, **kwargs)
    fig1.savefig(iccs_plot_f, facecolor=fig1.get_facecolor(), edgecolor='none')

    fig2 = plot_all_components(self._icc_imgs, **kwargs)
    fig2.savefig(all_icc_plot_f, facecolor=fig2.get_facecolor(), edgecolor='none')

    # make the multi sliced IC plots
    sliced_ic_plots = []
    for i, img in enumerate(iter_img(self._icc_imgs)):
        fig3 = plot_multi_slices(img,
                                 cut_dir="z",
                                 n_cuts=24,
                                 n_cols=4,
                                 title="IC {}\n(z-score {})".format(i + 1, self.zscore),
                                 title_fontsize=32,
                                 plot_func=None,
                                 **kwargs)

        # prepare the output file name/path
        out_f = icc_multi_slice.format(i + 1, self.zscore, outtype)
        fig3.savefig(out_f, facecolor=fig3.get_facecolor(), edgecolor='none')
        sliced_ic_plots.append(out_f)

    return all_icc_plot_f, iccs_plot_f, sliced_ic_plots

Developer ID: Neurita, Project: pypes, Lines of code: 45, Source file: plotting.py
Example 13: split_bilateral_rois
def split_bilateral_rois(maps_img):
    """Convenience function for splitting bilateral ROIs
    into two unilateral ROIs"""
    new_rois = []

    for map_img in iter_img(maps_img):
        for hemi in ["L", "R"]:
            hemi_mask = HemisphereMasker(hemisphere=hemi)
            hemi_mask.fit(map_img)
            if hemi_mask.mask_img_.get_data().sum() > 0:
                hemi_vectors = hemi_mask.transform(map_img)
                hemi_img = hemi_mask.inverse_transform(hemi_vectors)
                new_rois.append(hemi_img.get_data())

    new_maps_data = np.concatenate(new_rois, axis=3)
    new_maps_img = new_img_like(maps_img, data=new_maps_data, copy_header=True)
    print("Changed from %d ROIs to %d ROIs" % (maps_img.shape[-1],
                                               new_maps_img.shape[-1]))
    return new_maps_img

Developer ID: atsuch, Project: lateralized-components, Lines of code: 19, Source file: masking.py
Example 14: test_iterator_generator
def test_iterator_generator():
    # Create a list of random images
    l = [Nifti1Image(np.random.random((10, 10, 10)), np.eye(4))
         for i in range(10)]
    cc = _utils.concat_niimgs(l)
    assert_equal(cc.shape[-1], 10)
    assert_array_almost_equal(cc.get_data()[..., 0], l[0].get_data())

    # Same with iteration
    i = image.iter_img(l)
    cc = _utils.concat_niimgs(i)
    assert_equal(cc.shape[-1], 10)
    assert_array_almost_equal(cc.get_data()[..., 0], l[0].get_data())

    # Now, a generator
    b = []
    g = nifti_generator(b)
    cc = _utils.concat_niimgs(g)
    assert_equal(cc.shape[-1], 10)
    assert_equal(len(b), 10)

Developer ID: carlosf, Project: nilearn, Lines of code: 19, Source file: test_niimg_conversions.py
Example 15: plot_components
def plot_components(ica_image, hemi='', out_dir=None,
                    bg_img=datasets.load_mni152_template()):
    print("Plotting %s components..." % hemi)

    # Determine threshold and vmax for all the plots
    # get nonzero part of the image for proper thresholding of
    # r- or l- only component
    nonzero_img = ica_image.get_data()[np.nonzero(ica_image.get_data())]
    thr = stats.scoreatpercentile(np.abs(nonzero_img), 90)
    vmax = stats.scoreatpercentile(np.abs(nonzero_img), 99.99)
    for ci, ic_img in enumerate(iter_img(ica_image)):

        title = _title_from_terms(terms=ica_image.terms, ic_idx=ci, label=hemi)
        fh = plt.figure(figsize=(14, 6))
        plot_stat_map(ic_img, axes=fh.gca(), threshold=thr, vmax=vmax,
                      colorbar=True, title=title, black_bg=True, bg_img=bg_img)

        # Save images instead of displaying
        if out_dir is not None:
            save_and_close(out_path=op.join(
                out_dir, '%s_component_%i.png' % (hemi, ci)))

Developer ID: atsuch, Project: lateralized-components, Lines of code: 21, Source file: plotting.py
Example 16: plot_components_summary
def plot_components_summary(ica_image, hemi='', out_dir=None,
                            bg_img=datasets.load_mni152_template()):
    print("Plotting %s components summary..." % hemi)

    n_components = ica_image.get_data().shape[3]

    # Determine threshold and vmax for all the plots
    # get nonzero part of the image for proper thresholding of
    # r- or l- only component
    nonzero_img = ica_image.get_data()[np.nonzero(ica_image.get_data())]
    thr = stats.scoreatpercentile(np.abs(nonzero_img), 90)
    vmax = stats.scoreatpercentile(np.abs(nonzero_img), 99.99)

    for ii, ic_img in enumerate(iter_img(ica_image)):
        ri = ii % 5  # row i
        ci = (ii / 5) % 5  # column i
        pi = ii % 25 + 1  # plot i
        fi = ii / 25  # figure i

        if ri == 0 and ci == 0:
            fh = plt.figure(figsize=(30, 20))
            print('Plot %03d of %d' % (fi + 1, np.ceil(n_components / 25.)))
        ax = fh.add_subplot(5, 5, pi)

        title = _title_from_terms(terms=ica_image.terms, ic_idx=ii, label=hemi)

        colorbar = ci == 4
        plot_stat_map(
            ic_img, axes=ax, threshold=thr, vmax=vmax, colorbar=colorbar,
            title=title, black_bg=True, bg_img=bg_img)

        if (ri == 4 and ci == 4) or ii == n_components - 1:
            out_path = op.join(
                out_dir, '%s_components_summary%02d.png' % (hemi, fi + 1))
            save_and_close(out_path)

Developer ID: atsuch, Project: lateralized-components, Lines of code: 36, Source file: plotting.py
Example 17: len
rois = labels['name'].T
n_r = len(rois)
l = 360. / n_r  # roi label size in figures
visu = atlas_filename
all_ntwks = range(n_r)
networks = {'Auditory': [0, 1], 'striate': [2], 'DMN': [3, 4, 5, 6], 'Occ post': [7],
            'Motor': [8], 'Attentional': [9, 10, 11, 12, 14, 15, 16, 17, 18],
            'Basal': [13], 'Visual secondary': [19, 20, 21], 'Salience': [22, 23, 24],
            'Temporal(STS)': [25, 26], 'Langage': [27, 28, 29, 30, 31], 'Cereb': [32],
            'Dors PCC': [33], 'cing ins': [34, 35, 36], 'Ant IPS': [37, 38],
            'All ROIs': all_ntwks}

# choose representative coordinates for the regions; otherwise they are
# computed with find_xyz_cut_coords
coords = []
# coords = np.vstack((labels['x'], labels['y'], labels['z'])).T
if not coords:
    coords = [plotting.find_xyz_cut_coords(roi) for roi in image.iter_img(atlas_filename)]

root = '/neurospin/grip/protocols/MRI/AVCnn_Dhaif_2016/AVCnn/AVCnn_data/'  # registered and concatenated files ready for analysis
func_type_list = ['controlRSc', 'patientsRSc_LD', 'patientsRSc_LG']  # name of each group's directory for functional images
reg_dirs = [root + 'rgt']  # name of each group's directory for regressors (regressors have to be .txt files)
reg_prefix = 'art_mv_fmv_wm_vent_ext_hv_'  # regressor prefix (regressors must have the corresponding functional file name after the prefix: swars_ab_123456.nii and reg1_reg2_swars_ab_123456.txt)
common = 4  # number of initial characters by which regressor and functional file names differ

# choose report directory and name (default location is in root, default name is based on the atlas name)
main_title = 'AVCnn_Cont_LG_LD_' + MC_correction
save_dir = root + 'reports_test/'
try:
    os.makedirs(save_dir)
except OSError:
    print('Warning: could not make dir ' + save_dir)

Developer ID: Dhaif, Project: Functional_connectivity_python-, Lines of code: 31, Source file: rs_group_comp_test_dhaif.py
Example 18: plot_all
def plot_all(self):
    names = self.network_names
    for idx, rsn in enumerate(niimg.iter_img(self._img)):
        disp = niplot.plot_roi(rsn, title=names.get(idx, None))

Developer ID: Neurita, Project: pypes, Lines of code: 4, Source file: rsn_atlas.py
Example 19: spatclust
def spatclust(img, min_cluster_size, threshold=None, index=None, mask=None):
    """
    Spatially clusters `img`

    Parameters
    ----------
    img : str or img_like
        Image file or object to be clustered
    min_cluster_size : int
        Minimum cluster size (in voxels)
    threshold : float, optional
        Whether to threshold `img` before clustering
    index : array_like, optional
        Whether to extract volumes from `img` for clustering
    mask : (S,) array_like, optional
        Boolean array for masking resultant data array

    Returns
    -------
    clustered : :obj:`numpy.ndarray`
        Boolean array of clustered (and thresholded) `img` data
    """
    # we need a 4D image for `niimg.iter_img`, below
    img = niimg.copy_img(check_niimg(img, atleast_4d=True))

    # temporarily set voxel sizes to 1mm isotropic so that `min_cluster_size`
    # represents the minimum number of voxels we want to be in a cluster,
    # rather than the minimum size of the desired clusters in mm^3
    if not np.all(np.abs(np.diag(img.affine)) == 1):
        img.set_sform(np.sign(img.affine))

    # grab desired volumes from provided image
    if index is not None:
        if not isinstance(index, list):
            index = [index]
        img = niimg.index_img(img, index)

    # threshold image
    if threshold is not None:
        img = niimg.threshold_img(img, float(threshold))

    clout = []
    for subbrick in niimg.iter_img(img):
        # `min_region_size` is not inclusive (as in AFNI's `3dmerge`)
        # subtract one voxel to ensure we aren't hitting this thresholding issue
        try:
            clsts = connected_regions(subbrick,
                                      min_region_size=int(min_cluster_size) - 1,
                                      smoothing_fwhm=None,
                                      extract_type='connected_components')[0]
        # if no clusters are detected we get a TypeError; create a blank 4D
        # image object as a placeholder instead
        except TypeError:
            clsts = niimg.new_img_like(subbrick,
                                       np.zeros(subbrick.shape + (1,)))
        # if multiple clusters detected, collapse into one volume
        clout += [niimg.math_img('np.sum(a, axis=-1)', a=clsts)]

    # convert back to data array and make boolean
    clustered = utils.load_image(niimg.concat_imgs(clout).get_data()) != 0

    # if mask provided, mask output
    if mask is not None:
        clustered = clustered[mask]

    return clustered

Developer ID: TomMaullin, Project: tedana, Lines of code: 67, Source file: fit.py
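A hedged usage sketch for spatclust: the file name and parameter values are hypothetical, and the call relies on the tedana helpers (utils, check_niimg, connected_regions) imported in the module the function comes from.

# Keep clusters of at least 20 voxels after thresholding each volume at 1.96.
clustered = spatclust('component_maps.nii.gz', min_cluster_size=20, threshold=1.96)
print(clustered.shape)  # boolean array marking voxels that belong to a surviving cluster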
Example 20: _apply_mask_to_4dimg
def _apply_mask_to_4dimg(self, imgs, **kwargs):
    masker = NiftiMasker(mask_img=self.load_mask(), **kwargs)
    return (masker.fit_transform(img) for img in iter_img(imgs))

Developer ID: Neurita, Project: pypes, Lines of code: 3, Source file: plotting.py
Note: the nilearn.image.iter_img examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and any further use or distribution should follow the corresponding project's license. Do not reproduce without permission.