This page collects typical usage examples of the Python function nipy.modalities.fmri.design_matrix.make_dmtx. If you are unsure what make_dmtx does, or how and where to use it, the curated examples below may help.
The following 20 code examples of make_dmtx are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
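Before the examples, here is a minimal, self-contained sketch of the typical make_dmtx call, based on nipy's legacy GLM API; the scan count, TR, condition names, and onsets are made-up illustration values:

import numpy as np
from nipy.modalities.fmri.design_matrix import make_dmtx
from nipy.modalities.fmri.experimental_paradigm import EventRelatedParadigm

# hypothetical acquisition parameters, for illustration only
n_scans, tr = 100, 2.0
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
# two conditions with made-up onsets (in seconds)
paradigm = EventRelatedParadigm(['c0', 'c1', 'c0', 'c1'],
                                [10., 30., 50., 70.])
# make_dmtx returns a DesignMatrix object: .matrix is an
# (n_scans, n_regressors) array, .names holds one label per column
# (condition regressors first, then drift terms)
dmtx = make_dmtx(frametimes, paradigm,
                 hrf_model='canonical', drift_model='cosine', hfcut=128)
print(dmtx.names)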
Example 1: make_design_matrices
def make_design_matrices(onsets, n_scans, tr, motion=None,
                         hrf_model='canonical with derivative',
                         drift_model='cosine', orthogonalize=None):
    design_matrices = []
    for i, onset in enumerate(onsets):
        if n_scans[i] == 0:
            # empty session: record a placeholder and skip it (without this
            # `continue`, the session would be processed anyway and appended
            # a second time below)
            design_matrices.append(None)
            continue
        onset = np.array(onset)
        labels = onset[:, 0]
        time = onset[:, 1].astype('float')
        duration = onset[:, 2].astype('float')
        amplitude = onset[:, 3].astype('float')
        # make a block or event paradigm depending on stimulus duration
        if duration.sum() == 0:
            paradigm = EventRelatedParadigm(labels, time, amplitude)
        else:
            paradigm = BlockParadigm(labels, time, duration, amplitude)
        frametimes = np.linspace(0, (n_scans[i] - 1) * tr, n_scans[i])
        if motion is not None:
            add_regs = np.array(motion[i]).astype('float')
            add_reg_names = ['motion_%i' % r
                             for r in range(add_regs.shape[1])]
            design_matrix = make_dmtx(
                frametimes, paradigm, hrf_model=hrf_model,
                drift_model=drift_model,
                add_regs=add_regs, add_reg_names=add_reg_names)
        else:
            design_matrix = make_dmtx(
                frametimes, paradigm, hrf_model=hrf_model,
                drift_model=drift_model)
        if orthogonalize is not None:
            if 'derivative' in hrf_model:
                raise Exception(
                    'Orthogonalization not supported with hrf derivative.')
            orth = orthogonalize[i]
            if orth is not None:
                for x, y in orth:
                    x_ = design_matrix.matrix[:, x]
                    y_ = design_matrix.matrix[:, y]
                    z = orthogonalize_vectors(x_, y_)
                    design_matrix.matrix[:, x] = z
        design_matrices.append(design_matrix.matrix)
    return design_matrices
Developer: schwarty, Project: nignore, Lines of code: 48, Source file: spm.py
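The helper orthogonalize_vectors is not included in this excerpt. As a hedged sketch (the actual helper in the nignore project may differ), a common implementation residualizes the first regressor against the second with a single Gram-Schmidt step:

import numpy as np

def orthogonalize_vectors(x, y):
    # remove from x its projection onto y, so the result is orthogonal to y
    y = y.astype(float)
    return x - y * (np.dot(x, y) / np.dot(y, y))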
Example 2: make_design_matrices
def make_design_matrices(events, n_scans, tr, hrf_model="canonical", drift_model="cosine", motion=None, orth=None):
    design_matrices = []
    n_sessions = len(n_scans)
    for i in range(n_sessions):
        onsets = events[i][0][:, 0]
        duration = events[i][0][:, 1]
        amplitude = events[i][0][:, 2]
        trials = events[i][1]
        order = np.argsort(onsets)
        # make a block or event paradigm depending on stimulus duration
        if duration.sum() == 0:
            paradigm = EventRelatedParadigm(trials[order], onsets[order], amplitude[order])
        else:
            paradigm = BlockParadigm(trials[order], onsets[order], duration[order], amplitude[order])
        frametimes = np.linspace(0, (n_scans[i] - 1) * tr, n_scans[i])
        if motion is not None:
            add_regs = np.array(motion[i]).astype("float")
            add_reg_names = ["motion_%i" % r for r in range(add_regs.shape[1])]
            design_matrix = make_dmtx(
                frametimes,
                paradigm,
                hrf_model=hrf_model,
                drift_model=drift_model,
                add_regs=add_regs,
                add_reg_names=add_reg_names,
            )
        else:
            design_matrix = make_dmtx(frametimes, paradigm, hrf_model=hrf_model, drift_model=drift_model)
        if orth is not None:
            session_orth = orth[i]
            if session_orth is not None:
                for x, y in session_orth:
                    reg = orthogonalize_regressors(design_matrix.matrix[:, x], design_matrix.matrix[:, y])
                    design_matrix.matrix[:, x] = reg
        design_matrices.append(design_matrix)
    return design_matrices
Developer: schwarty, Project: spym, Lines of code: 44, Source file: parsing_openfmri.py
Example 3: make_design_matrices
def make_design_matrices(events, n_scans, tr, hrf_model='canonical',
                         drift_model='cosine', motion=None):
    design_matrices = []
    n_sessions = len(n_scans)
    for i in range(n_sessions):
        onsets = events[i][0][:, 0]
        duration = events[i][0][:, 1]
        amplitude = events[i][0][:, 2]
        cond_id = events[i][1]
        order = np.argsort(onsets)
        # make a block or event paradigm depending on stimulus duration
        if duration.sum() == 0:
            paradigm = EventRelatedParadigm(cond_id[order],
                                            onsets[order],
                                            amplitude[order])
        else:
            paradigm = BlockParadigm(cond_id[order], onsets[order],
                                     duration[order], amplitude[order])
        frametimes = np.linspace(0, (n_scans[i] - 1) * tr, n_scans[i])
        if motion is not None:
            add_regs = np.array(motion[i]).astype('float')
            add_reg_names = ['motion_%i' % r
                             for r in range(add_regs.shape[1])]
            design_matrix = make_dmtx(
                frametimes, paradigm, hrf_model=hrf_model,
                drift_model=drift_model,
                add_regs=add_regs, add_reg_names=add_reg_names)
        else:
            design_matrix = make_dmtx(
                frametimes, paradigm, hrf_model=hrf_model,
                drift_model=drift_model)
        design_matrices.append(design_matrix.matrix)
    return design_matrices
Developer: fabianp, Project: pypreprocess, Lines of code: 42, Source file: openfmri.py
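In Examples 2 and 3, each entry of events pairs a numeric trial array with the condition labels. A hypothetical single-session input illustrating the expected layout (all names and values are made up):

import numpy as np

# columns: onset (s), duration (s), amplitude; one row per trial
trial_data = np.array([[10.0, 0.0, 1.0],
                       [25.0, 0.0, 1.0],
                       [40.0, 0.0, 1.0]])
trial_labels = np.array(['house', 'face', 'house'])
events = [(trial_data, trial_labels)]  # one (array, labels) pair per session

design_matrices = make_design_matrices(events, n_scans=[100], tr=2.0)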
Example 4: make_dmtx_from_timing_files
def make_dmtx_from_timing_files(timing_files, condition_ids=None,
                                frametimes=None, n_scans=None, tr=None,
                                add_regs_file=None,
                                add_reg_names=None,
                                **make_dmtx_kwargs):
    # make paradigm
    paradigm = make_paradigm_from_timing_files(timing_files,
                                               condition_ids=condition_ids)
    # make frametimes
    if frametimes is None:
        assert n_scans is not None, ("frametimes not specified, expecting a "
                                     "value for n_scans")
        assert tr is not None, ("frametimes not specified, expecting a "
                                "value for tr")
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
    else:
        assert n_scans is None, ("frametimes specified, not expecting a "
                                 "value for n_scans")
        assert tr is None, ("frametimes specified, not expecting a "
                            "value for tr")
    # load additional regressors from file
    if add_regs_file is not None:
        if isinstance(add_regs_file, np.ndarray):
            add_regs = add_regs_file
        else:
            assert os.path.isfile(add_regs_file), (
                "add_regs_file %s doesn't exist" % add_regs_file)
            add_regs = np.loadtxt(add_regs_file)
        assert add_regs.ndim == 2, (
            "Bad add_regs_file: %s (must contain a 2D array, each column "
            "representing the values of a single regressor)" % add_regs_file)
        if add_reg_names is None:
            add_reg_names = ["R%i" % (col + 1) for col in range(
                add_regs.shape[-1])]
        else:
            assert len(add_reg_names) == add_regs.shape[1], (
                "Expecting %i regressor names, got %i" % (
                    add_regs.shape[1], len(add_reg_names)))
        make_dmtx_kwargs["add_reg_names"] = add_reg_names
        make_dmtx_kwargs["add_regs"] = add_regs
    # make design matrix
    design_matrix = make_dmtx(frametimes, paradigm, **make_dmtx_kwargs)
    # return output
    return design_matrix, paradigm, frametimes
Developer: banilo, Project: pypreprocess, Lines of code: 49, Source file: fsl_to_nipy.py
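A hypothetical call to this helper, assuming FSL-style three-column timing files (one file per condition; the paths are placeholders) and passing extra make_dmtx keyword arguments through **make_dmtx_kwargs:

# each timing file holds rows of: onset duration amplitude
timing_files = ['cond_faces.txt', 'cond_houses.txt']  # placeholder paths
design_matrix, paradigm, frametimes = make_dmtx_from_timing_files(
    timing_files, condition_ids=['faces', 'houses'],
    n_scans=128, tr=2.0,
    hrf_model='canonical', drift_model='cosine')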
Example 5: _fit_hrf_event_model
def _fit_hrf_event_model(
        ds, events, time_attr, condition_attr="targets", design_kwargs=None,
        glmfit_kwargs=None, regr_attrs=None):
    if externals.exists("nipy", raise_=True):
        from nipy.modalities.fmri.design_matrix import make_dmtx
        from mvpa2.mappers.glm import NiPyGLMMapper
    # Decide on the condition attribute on which the GLM will actually be done
    if isinstance(condition_attr, basestring):
        # must be a list/tuple/array for the logic below
        condition_attr = [condition_attr]
    glm_condition_attr = "regressor_names"  # actual regressors
    glm_condition_attr_map = dict([(con, dict()) for con in condition_attr])
    # to map back to original conditions
    events = copy.deepcopy(events)  # since we are modifying in place
    for event in events:
        if glm_condition_attr in event:
            raise ValueError(
                "Event %s already has %s defined. Should not "
                "happen. Choose another name if you defined it"
                % (event, glm_condition_attr))
        compound_label = event[glm_condition_attr] = \
            "glm_label_" + "+".join(str(event[con]) for con in condition_attr)
        # and mapping back to original values, without str(),
        # for each condition:
        for con in condition_attr:
            glm_condition_attr_map[con][compound_label] = event[con]
    evvars = _events2dict(events)
    add_paradigm_kwargs = {}
    if "amplitude" in evvars:
        add_paradigm_kwargs["amplitude"] = evvars["amplitude"]
    # create paradigm
    if "duration" in evvars:
        from nipy.modalities.fmri.experimental_paradigm import BlockParadigm
        # NiPy considers everything with a duration as a block paradigm
        paradigm = BlockParadigm(
            con_id=evvars[glm_condition_attr], onset=evvars["onset"],
            duration=evvars["duration"], **add_paradigm_kwargs)
    else:
        from nipy.modalities.fmri.experimental_paradigm \
            import EventRelatedParadigm
        paradigm = EventRelatedParadigm(
            con_id=evvars[glm_condition_attr], onset=evvars["onset"],
            **add_paradigm_kwargs)
    # create design matrix -- all kinds of fancy additional regressors can be
    # auto-generated
    if design_kwargs is None:
        design_kwargs = {}
    if regr_attrs is not None:
        names = []
        regrs = []
        for attr in regr_attrs:
            names.append(attr)
            regrs.append(ds.sa[attr].value)
        if len(regrs) < 2:
            regrs = [regrs]
        regrs = np.hstack(regrs).T
        if "add_regs" in design_kwargs:
            design_kwargs["add_regs"] = np.hstack(
                (design_kwargs["add_regs"], regrs))
        else:
            design_kwargs["add_regs"] = regrs
        if "add_reg_names" in design_kwargs:
            design_kwargs["add_reg_names"].extend(names)
        else:
            design_kwargs["add_reg_names"] = names
    design_matrix = make_dmtx(ds.sa[time_attr].value, paradigm,
                              **design_kwargs)
    # push design into source dataset
    glm_regs = [(reg, design_matrix.matrix[:, i])
                for i, reg in enumerate(design_matrix.names)]
    # GLM
    glm = NiPyGLMMapper(
        [],
        glmfit_kwargs=glmfit_kwargs,
        add_regs=glm_regs,
        return_design=True,
        return_model=True,
        space=glm_condition_attr,
    )
    model_params = glm(ds)
    # some regressors might not correspond to the original condition_attr,
    # so let's separate them out
    regressor_names = model_params.sa[glm_condition_attr].value
    condition_regressors = np.array(
        [v in glm_condition_attr_map.values()[0] for v in regressor_names])
    assert condition_regressors.dtype == np.bool
    if not np.all(condition_regressors):
        # some regressors do not correspond to conditions and would need
        # to be taken into a separate dataset
        model_params.a["add_regs"] = model_params[~condition_regressors]
        # then we process the rest
        model_params = model_params[condition_regressors]
        regressor_names = model_params.sa[glm_condition_attr].value
    # now define proper condition sa's
    for con, con_map in glm_condition_attr_map.iteritems():
        model_params.sa[con] = [con_map[v] for v in regressor_names]
    model_params.sa.pop(glm_condition_attr)  # remove generated one
    return model_params
Developer: neurosbh, Project: PyMVPA, Lines of code: 100, Source file: eventrelated.py
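The events argument expected by this PyMVPA helper is a list of plain dictionaries, one per event. A made-up two-event example:

events = [
    {'onset': 0.0, 'duration': 8.0, 'targets': 'face', 'chunks': 0},
    {'onset': 16.0, 'duration': 8.0, 'targets': 'house', 'chunks': 0},
]
# with condition_attr=['targets', 'chunks'], the helper builds compound
# regressor labels such as 'glm_label_face+0'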
Example 6: do_subject_glm
def do_subject_glm(subject_data):
    """FE analysis for a single subject."""
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    tr = subject_data['TR']
    time_units = subject_data['time_units'].lower()
    assert time_units in ["seconds", "tr", "milliseconds"]
    drift_model = subject_data['drift_model']
    hrf_model = subject_data["hrf_model"]
    hfcut = subject_data["hfcut"]
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if 0:  # optional preprocessing, disabled here
        subject_data = mem.cache(do_subject_preproc)(
            dict(func=func_files, anat=anat, output_dir=output_dir))
        func_files = subject_data['func']
        anat = subject_data['anat']
        # reslice func images
        func_files = [mem.cache(reslice_vols)(
            sess_func,
            target_affine=nibabel.load(sess_func[0]).get_affine())
            for sess_func in func_files]
    ### GLM: loop on (session_bold, onset_file) pairs over the various sessions
    design_matrices = []
    for func_file, onset_file in zip(func_files, onset_files):
        if isinstance(func_file, str):
            bold = nibabel.load(func_file)
        else:
            if len(func_file) == 1:
                func_file = func_file[0]
                bold = nibabel.load(func_file)
                assert len(bold.shape) == 4
                n_scans = bold.shape[-1]
                del bold
            else:
                n_scans = len(func_file)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        # convert timing to seconds
        if time_units == "tr":
            onsets *= tr
            durations *= tr
        elif time_units in ["milliseconds"]:
            onsets *= 1e-3
            durations *= 1e-3
        paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                                 duration=durations, amplitude=amplitudes)
        design_matrices.append(make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut))
    # specify contrasts; the 2 * i stride assumes two regressors per
    # condition (HRF and its derivative)
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in range(paradigm.n_conditions):
        contrasts['%s' % design_matrices[0].names[2 * i]
                  ] = np.eye(n_columns)[2 * i]
    # effects of interest F-test
    diff_contrasts = []
    for i in range(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts
    # fit GLM
    print('Fitting a GLM (this takes time)...')
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func, check_affines=False)
         for sess_func in func_files],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print("Saving mask image %s" % mask_path)
    nibabel.save(fmri_glm.mask, mask_path)
    # compute contrasts
    z_maps = {}
    effects_maps = {}
    for contrast_id, contrast_val in contrasts.items():
        print("\tcontrast id: %s" % contrast_id)
        # a 1D contrast vector is a t-contrast; a 2D array encodes an
        # F-contrast (the excerpt had the two labels swapped)
        if np.ndim(contrast_val) > 1:
            contrast_type = "F"
        else:
            contrast_type = "t"
        z_map, t_map, effects_map, var_map = fmri_glm.contrast(
# ......... (rest of the code omitted) .........
Developer: chrplr, Project: pypreprocess, Lines of code: 101, Source file: glm_utils.py
Example 7: print
img = nb.load(smooth_file)
#-----------------------------------------------------------------
# Construct a design matrix for each test
#-----------------------------------------------------------------
print('  Make design matrix...')
print('    Conditions:\n  {}'.format(conditions))
print('    Amplitudes:\n  {}'.format(amplitudes))
print('    Onsets:\n  {}'.format(onsets))
print('    Durations:\n  {}'.format(durations))
paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                         duration=durations, amplitude=amplitudes)
frametimes = np.linspace(0, n_images - 1, n_images)
if ntest < 3:
    dmtx = make_dmtx(frametimes, paradigm, hrf_model='FIR',
                     drift_model='polynomial', drift_order=2, hfcut=np.inf)
else:
    dmtx = make_dmtx(frametimes, paradigm, hrf_model='FIR', hfcut=np.inf)
design_matrix = dmtx.matrix
# Plot the design matrix
if plot_design_matrix:
    fig1 = mp.figure(figsize=(10, 6))
    dmtx.show()
    mp.title(desc)
    fig1_file = os.path.join(out_path, label + 'design_matrix_test' +
                             str(ntest) + '.png')
    mp.savefig(fig1_file)
#-----------------------------------------------------------------
# Mean-scale, de-mean and multiply data by 100
Developer: binarybottle, Project: beebrains, Lines of code: 32, Source file: imageB.py
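The code after the last comment is truncated in this excerpt. As a hedged sketch of what "mean-scale, de-mean and multiply data by 100" typically means for an (n_scans, n_voxels) data matrix Y (the variable name is assumed):

Y = Y * 100. / Y.mean()  # grand-mean scale so the global mean is 100
Y = Y - Y.mean(axis=0)   # de-mean each voxel's time course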
Example 8: load_paradigm_from_csv_file
# timing
n_scans = 128
tr = 2.4
# paradigm
frametimes = np.linspace(0.5 * tr, (n_scans - .5) * tr, n_scans)
fmri_data = nibabel.load('s12069_swaloc1_corr.nii.gz')
########################################
# Design matrix
########################################
paradigm = load_paradigm_from_csv_file('localizer_paradigm.csv')['0']
design_matrix = make_dmtx(frametimes, paradigm,
                          hrf_model='canonical with derivative',
                          drift_model="cosine", hfcut=128)
#########################################
# Specify the contrasts
#########################################
# simplest ones
contrasts = {}
n_columns = len(design_matrix.names)
for i in range(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]] = np.eye(n_columns)[2 * i]
# Our contrast of interest
reading_vs_visual = contrasts["phrasevideo"] - contrasts["damier_H"]
Developer: NanoResearch, Project: python-cogstats, Lines of code: 30, Source file: plot_permutation.py
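The snippet stops after defining the contrast of interest. A sketch of the usual next step with nipy's FMRILinearModel, mirroring Examples 6 and 10 (the output filename is a placeholder):

from nipy.modalities.fmri.glm import FMRILinearModel

fmri_glm = FMRILinearModel(fmri_data, design_matrix.matrix, mask='compute')
fmri_glm.fit(do_scaling=True, model='ar1')
out = fmri_glm.contrast(reading_vs_visual, con_id='reading_vs_visual',
                        output_z=True)
# depending on the nipy version, contrast() returns the z-map alone
# or wrapped in a sequence
z_map = out[0] if isinstance(out, (list, tuple)) else out
nibabel.save(z_map, 'reading_vs_visual_z_map.nii')  # placeholder path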
Example 9: load_glm_params
def load_glm_params(data_dir, model_id,
                    subject_ids=None,
                    hrf_model='canonical',
                    drift_model='cosine',
                    motion_params=None,
                    is_event_paradigm=True):
    """Load GLM parameters

    XXX document this code!

    """
    docs = []
    study_id = os.path.split(data_dir)[-1]
    tr = float(open(os.path.join(data_dir, 'scan_key.txt')).read().split()[1])
    if subject_ids is None:
        subject_dirs = sorted(glob.glob(os.path.join(data_dir, 'sub*')))
    else:
        subject_dirs = sorted([os.path.join(data_dir, subject_id)
                               for subject_id in subject_ids])
    for subject_dir in subject_dirs:
        params = {}
        subject_id = os.path.split(subject_dir)[1]
        params['study'] = study_id
        params['subject'] = subject_id
        # subject's models
        model_dir = os.path.join(subject_dir, 'model', model_id)
        session_dirs = sorted(glob.glob(
            os.path.join(model_dir, 'onsets', '*')))
        for i, session_dir in enumerate(session_dirs):
            session_id = os.path.split(session_dir)[1]
            img = nb.load(os.path.join(data_dir, subject_id, 'BOLD',
                                       session_id, 'bold.nii.gz'))
            n_scans = img.shape[-1]
            params.setdefault('sessions', []).append(session_id)
            params.setdefault('n_scans', []).append(n_scans)
            onsets = None
            cond_vect = None
            cond_files = sorted(glob.glob(os.path.join(session_dir, 'cond*')))
            for cond_file in cond_files:
                cond_id = int(re.findall('cond(.*).txt',
                                         os.path.split(cond_file)[1])[0])
                # load paradigm
                # replace whitespace characters by spaces before parsing
                cond = open(cond_file, 'rb').read()
                iterable = [
                    row.strip()
                    for row in re.sub('[\t\r\f\v]', ' ', cond).split('\n')]
                reader = csv.reader(iterable, delimiter=' ')
                rows = list(reader)
                if onsets is None:
                    onsets = [float(row[0]) for row in rows if row != []]
                    cond_vect = [cond_id] * len(rows)
                else:
                    onsets += [float(row[0]) for row in rows if row != []]
                    cond_vect += [cond_id] * len(rows)
            onsets = np.array(onsets)
            cond_vect = np.array(cond_vect)
            order = np.argsort(onsets)
            if is_event_paradigm:
                paradigm = EventRelatedParadigm(
                    cond_vect[order], onsets[order])
            else:
                paradigm = BlockParadigm(cond_vect[order], onsets[order])
            frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
            if motion_params is None or subject_id not in motion_params:
                design_matrix = make_dmtx(
                    frametimes, paradigm, hrf_model=hrf_model,
                    drift_model=drift_model)
            else:
                add_regs = motion_params[subject_id][i]
                add_reg_names = ['motion_%i' % r
                                 for r in range(add_regs.shape[1])]
                design_matrix = make_dmtx(
                    frametimes, paradigm, hrf_model=hrf_model,
                    drift_model=drift_model,
                    add_regs=add_regs, add_reg_names=add_reg_names)
            # plot and save design matrix
            ax = design_matrix.show()
            ax.set_position([.05, .25, .9, .65])
            ax.set_title('Design matrix (%s, %s)' % (subject_id,
                                                     session_id))
            dmat_outfile = os.path.join(session_dir, 'design_matrix.png')
            print "Saving design matrix %s" % dmat_outfile
            pl.savefig(dmat_outfile)
            params.setdefault('design_matrices', []).append(
                design_matrix.matrix)
        docs.append(params)
    # study model
    n_regressors = [dm.shape[-1] for dm in params['design_matrices']]
# ......... (rest of the code omitted) .........
Developer: fabianp, Project: pypreprocess, Lines of code: 101, Source file: nipy_glm_openfmri.py
Example 10: do_subject_glm
def do_subject_glm(subject_data):
    """FE analysis for a single subject."""
    subject_id = subject_data['subject_id']
    output_dir = subject_data["output_dir"]
    func_files = subject_data['func']
    anat = subject_data['anat']
    onset_files = subject_data['onset']
    # subject_id = os.path.basename(subject_dir)
    # subject_output_dir = os.path.join(output_dir, subject_id)
    mem = Memory(os.path.join(output_dir, "cache"))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # glob files: anat, session func files, session onset files
    # anat = glob.glob(os.path.join(subject_dir, anat_wildcard))
    # assert len(anat) == 1
    # anat = anat[0]
    # onset_files = sorted([glob.glob(os.path.join(subject_dir, session))[0]
    #                       for session in session_onset_wildcards])
    # func_files = sorted([sorted(glob.glob(os.path.join(subject_dir, session)))
    #                      for session in session_func_wildcards])
    ### Preprocess data #######################################################
    if 0:  # optional preprocessing, disabled here
        subject_data = mem.cache(do_subject_preproc)(
            dict(func=func_files, anat=anat, output_dir=output_dir))
        func_files = subject_data['func']
        anat = subject_data['anat']
        # reslice func images
        func_files = [mem.cache(reslice_vols)(
            sess_func,
            target_affine=nibabel.load(sess_func[0]).get_affine())
            for sess_func in func_files]
    ### GLM: loop on (session_bold, onset_file) pairs over the various sessions
    design_matrices = []
    for session, (func_file, onset_file) in enumerate(zip(func_files,
                                                          onset_files)):
        if isinstance(func_file, str):
            bold = nibabel.load(func_file)
        else:
            if len(func_file) == 1:
                func_file = func_file[0]
                bold = nibabel.load(func_file)
                assert len(bold.shape) == 4
                n_scans = bold.shape[-1]
                del bold
            else:
                n_scans = len(func_file)
        frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
        conditions, onsets, durations, amplitudes = parse_onset_file(
            onset_file)
        onsets *= tr
        durations *= tr
        paradigm = BlockParadigm(con_id=conditions, onset=onsets,
                                 duration=durations, amplitude=amplitudes)
        design_matrices.append(make_dmtx(frametimes,
                                         paradigm, hrf_model=hrf_model,
                                         drift_model=drift_model,
                                         hfcut=hfcut))
    # specify contrasts
    n_columns = len(design_matrices[0].names)
    contrasts = {}
    for i in xrange(paradigm.n_conditions):
        contrasts['%s' % design_matrices[0].names[2 * i]
                  ] = np.eye(n_columns)[2 * i]
    # more interesting contrasts
    contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
    contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
    contrasts['effects_of_interest'] = contrasts['faces'] + contrasts['scrambled']
    # effects of interest F-test
    diff_contrasts = []
    for i in xrange(paradigm.n_conditions - 1):
        a = contrasts[design_matrices[0].names[2 * i]]
        b = contrasts[design_matrices[0].names[2 * (i + 1)]]
        diff_contrasts.append(a - b)
    contrasts["diff"] = diff_contrasts
    # fit GLM
    print 'Fitting a GLM (this takes time)...'
    fmri_glm = FMRILinearModel(
        [nibabel.concat_images(sess_func, check_affines=False)
         for sess_func in func_files],
        [design_matrix.matrix for design_matrix in design_matrices],
        mask='compute')
    fmri_glm.fit(do_scaling=True, model='ar1')
    # save computed mask
    mask_path = os.path.join(output_dir, "mask.nii.gz")
    print "Saving mask image %s" % mask_path
    nibabel.save(fmri_glm.mask, mask_path)
# ......... (rest of the code omitted) .........
Developer: AlexandreAbraham, Project: pypreprocess, Lines of code: 101, Source file: sprint.py
Example 11: fit_event_hrf_model
def fit_event_hrf_model(
        ds, events, time_attr, condition_attr='targets', design_kwargs=None,
        glmfit_kwargs=None, regr_attrs=None, return_model=False):
    """Fit a GLM with HRF regressor and yield a dataset with model parameters

    A univariate GLM is fitted for each feature and model parameters are
    returned as samples. Model parameters are returned for each regressor in
    the design matrix. Using functionality from NiPy, design matrices can be
    generated from event definitions with a variety of customizations (HRF
    model, confound regressors, ...).

    Events need to be specified as a list of dictionaries
    (see :class:`~mvpa2.misc.support.Event` for a helper class). Each
    dictionary contains all relevant attributes to describe an event.

    HRF event model details
    -----------------------
    The event specifications are used to generate a design matrix for all
    present conditions. In addition to the mandatory ``onset`` information
    each event definition needs to include a label in order to associate
    individual events with conditions (the design matrix will contain at
    least one regressor for each condition). The name of this label attribute
    must be specified too (see the ``condition_attr`` argument).

    NiPy is used to generate the actual design matrix. It is required to
    specify a dataset sample attribute that contains time stamps for all
    input data samples (see ``time_attr``). NiPy's operation can be
    customized (see the ``design_kwargs`` argument). Additional regressors
    from sample attributes of the input dataset can be included in the
    design matrix (see ``regr_attrs``).

    The actual GLM fit is also performed by NiPy and can be fully customized
    (see ``glmfit_kwargs``).

    Parameters
    ----------
    ds : Dataset
      The samples of this input dataset have to be in ascending temporal
      order.
    events : list
      Each event definition has to specify ``onset`` and ``duration``. All
      other attributes will be passed on to the sample attributes collection
      of the returned dataset.
    time_attr : str
      Attribute with dataset sample time stamps.
      Its values will be treated as being in the same unit and are used to
      determine corresponding samples from real-value onset and duration
      definitions. For HRF modeling this argument is mandatory.
    condition_attr : str
      Name of the event attribute with the condition labels.
      Can be a list of those (e.g. ['targets', 'chunks']), a combination of
      which would constitute a condition.
    design_kwargs : dict
      Arbitrary keyword arguments for NiPy's make_dmtx() used for design
      matrix generation. Choose HRF model, confound regressors, etc.
    glmfit_kwargs : dict
      Arbitrary keyword arguments for NiPy's GeneralLinearModel.fit() used
      for estimating model parameters. Choose the fitting algorithm: OLS or
      AR1.
    regr_attrs : list
      List of dataset sample attribute names that shall be extracted from
      the input dataset and used as additional regressors in the design
      matrix.
    return_model : bool
      Flag whether to include the fitted GLM model in the returned dataset.
      For large input data this can be problematic, as the model may contain
      the residuals (same size as the input data), and hence multiplies the
      memory demand. Off by default.

    Returns
    -------
    Dataset
      One sample for each regressor/condition in the design matrix is
      returned. The condition names are included as a sample attribute with
      the name specified by the ``condition_attr`` argument. The actual
      design regressors are included as a ``regressors`` sample attribute.
      If enabled, an instance with the fitted NiPy GLM results is included
      as a dataset attribute ``model``, and can be used for computing
      contrasts subsequently.
    """
    if externals.exists('nipy', raise_=True):
        from nipy.modalities.fmri.design_matrix import make_dmtx
        from mvpa2.mappers.glm import NiPyGLMMapper
    # Decide on the condition attribute on which the GLM will actually be done
    if isinstance(condition_attr, basestring):
        # must be a list/tuple/array for the logic below
        condition_attr = [condition_attr]
    glm_condition_attr = 'regressor_names'  # actual regressors
    glm_condition_attr_map = dict([(con, dict()) for con in condition_attr])
    # to map back to original conditions
    events = copy.deepcopy(events)  # since we are modifying in place
    for event in events:
        if glm_condition_attr in event:
            raise ValueError("Event %s already has %s defined. Should not "
                             "happen. Choose another name if you defined it"
                             % (event, glm_condition_attr))
        compound_label = event[glm_condition_attr] = \
            'glm_label_' + '+'.join(
                str(event[con]) for con in condition_attr)
        # and mapping back to original values, without str(),
        # for each condition:
# ......... (rest of the code omitted) .........
Developer: roshan-srin, Project: nidata, Lines of code: 101, Source file: eventrelated.py
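Given the docstring above, a hypothetical call (the dataset ds, the events list, and all attribute names are illustrative):

evds = fit_event_hrf_model(
    ds, events, time_attr='time_coords', condition_attr='targets',
    design_kwargs=dict(hrf_model='canonical', drift_model='blank'),
    glmfit_kwargs=dict(model='ar1'),
    return_model=True)
# evds has one sample per design-matrix regressor; the condition labels
# are available as evds.sa['targets']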
Example 12: _preprocess_and_analysis_subject
def _preprocess_and_analysis_subject(subject_data,
                                     do_normalize=False,
                                     fwhm=0.,
                                     slicer='z',
                                     cut_coords=6,
                                     threshold=3.,
                                     cluster_th=15
                                     ):
    """
    Preprocesses the subject and then fits a (mass-univariate) GLM thereupon.

    """
    # sanitize run_ids:
    # Sub14/BOLD/Run_02/fMR09029-0004-00010-000010-01.nii is garbage,
    # for example
    run_ids = range(9)
    if subject_data['subject_id'] == "Sub14":
        run_ids = [0] + range(2, 9)
        subject_data['func'] = [subject_data['func'][0]] + subject_data[
            'func'][2:]
        subject_data['session_id'] = [subject_data['session_id'][0]
                                      ] + subject_data['session_id'][2:]
    # sanitize subject output dir
    if 'output_dir' not in subject_data:
        subject_data['output_dir'] = os.path.join(
            output_dir, subject_data['subject_id'])
    # preprocess the data
    subject_data = do_subject_preproc(SubjectData(**subject_data),
                                      do_realign=True,
                                      do_coreg=True,
                                      do_report=False,
                                      do_cv_tc=False
                                      )
    assert subject_data.anat is not None
    # normalization
    if do_normalize:
        subject_data = nipype_do_subject_preproc(
            subject_data,
            do_realign=False,
            do_coreg=False,
            do_segment=True,
            do_normalize=True,
            func_write_voxel_sizes=[3, 3, 3],
            anat_write_voxel_sizes=[2, 2, 2],
            fwhm=fwhm,
            hardlink_output=False,
            do_report=False
            )
    # chronometry
    stats_start_time = pretty_time()
    # to-be-merged lists, one item per run
    paradigms = []
    frametimes_list = []
    design_matrices = []  # one per run
    list_of_contrast_dicts = []  # one dict per run
    n_scans = []
    for run_id in run_ids:
        _n_scans = len(subject_data.func[run_id])
        n_scans.append(_n_scans)
        # make paradigm
        paradigm = make_paradigm(getattr(subject_data, 'timing')[run_id])
        # make design matrix
        tr = 2.
        drift_model = 'Cosine'
        hrf_model = 'Canonical With Derivative'
        hfcut = 128.
        frametimes = np.linspace(0, (_n_scans - 1) * tr, _n_scans)
        design_matrix = make_dmtx(
            frametimes,
            paradigm, hrf_model=hrf_model,
            drift_model=drift_model, hfcut=hfcut,
            add_regs=np.loadtxt(getattr(subject_data,
                                        'realignment_parameters')[run_id]),
            add_reg_names=[
                'Translation along x axis',
                'Translation along y axis',
                'Translation along z axis',
                'Rotation along x axis',
                'Rotation along y axis',
                'Rotation along z axis'
                ]
            )
        # import matplotlib.pyplot as plt
        # design_matrix.show()
        # plt.show()
        paradigms.append(paradigm)
        design_matrices.append(design_matrix)
        frametimes_list.append(frametimes)
        n_scans.append(_n_scans)  # note: _n_scans was already appended above
# ......... (rest of the code omitted) .........
Developer: VirgileFritsch, Project: pypreprocess, Lines of code: 101, Source file: br41nH4ck_purepython.py
Example 13: len
# (tail of a truncated call -- the squeeze_me/struct_as_record keywords
# belong to a scipy.io.loadmat call whose beginning is omitted)
                  squeeze_me=True, struct_as_record=False)
faces_onsets = timing['onsets'][0].ravel()
scrambled_onsets = timing['onsets'][1].ravel()
onsets = np.hstack((faces_onsets, scrambled_onsets))
onsets *= tr  # because onsets were reported in 'scans' units
conditions = ['faces'] * len(faces_onsets) + ['scrambled'] * len(
    scrambled_onsets)
paradigm = EventRelatedParadigm(conditions, onsets)
# build design matrix
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
design_matrix = make_dmtx(
    frametimes,
    paradigm, hrf_model=hrf_model,
    drift_model=drift_model, hfcut=hfcut,
    add_reg_names=['tx', 'ty', 'tz', 'rx', 'ry', 'rz'],
    add_regs=np.loadtxt(subject_data.realignment_parameters[x])
    )
# specify contrasts
contrasts = {}
n_columns = len(design_matrix.names)
for i in xrange(paradigm.n_conditions):
    contrasts['%s' % design_matrix.names[2 * i]
              ] = np.eye(n_columns)[2 * i]
# more interesting contrasts
contrasts['faces-scrambled'] = contrasts['faces'] - contrasts['scrambled']
contrasts['scrambled-faces'] = -contrasts['faces-scrambled']
Developer: fabianp, Project: pypreprocess, Lines of code: 31, Source file: spm_multimodal_fmri.py
Example 14: len
# (beginning of the surrounding loop truncated in the excerpt)
        empty_conditions.append(c)
    elif len(conditions[c]) < 2:
        pseudo_empty_conditions.append(c)
    condition += [c] * len(conditions[c])
    onset = np.hstack([onset, conditions[c]])
    duration += [durations[c]] * len(conditions[c])
paradigm = BlockParadigm(con_id=condition,
                         onset=onset,
                         duration=duration)
frametimes = np.linspace(0, (N_SCANS - 1) * TR / 1000., num=N_SCANS)
design_mat = design_matrix.make_dmtx(frametimes, paradigm,
                                     hrf_model='Canonical with derivative',
                                     add_regs=regressors,
                                     drift_model='Cosine',
                                     hfcut=128)
# Fill empty conditions
for c in empty_conditions:
    if c == 'anticip_missed_largewin':
        ind = 6
    elif c == 'feedback_missed_largewin':
        ind = 18
    elif c == 'anticip_missed_smallwin':
        ind = 10
    elif c == 'feedback_missed_smallwin':
        ind = 20
    elif c == 'anticip_missed_nowin':
Developer: mrahim, Project: eprime_parser, Lines of code: 31, Source file: generate_design_matrix_original.py
Example 15: load
mask_array = load(mask_file).get_data()
m = np.where(mask_array > 0)
pyhrf.verbose(1, 'Mask: shape=%s, nb vox in mask=%d'
              % (str(mask_array.shape), mask_array.sum()))
# BOLD data
fmri_image = load(bold_file)
# pyhrf.verbose(1, 'BOLD shape : %s' % str(fmri_image.get_shape()))
Y = fmri_image.get_data()[m[0], m[1], m[2], :]
n_scans = Y.shape[1]
pyhrf.verbose(1, 'Loaded BOLD: nvox=%d, nscans=%d' % Y.shape)
# Design matrix
frametimes = np.linspace(0, (n_scans - 1) * tr, n_scans)
design_matrix = dm.make_dmtx(frametimes, paradigm,
                             hrf_model=hrf_model,
                             drift_model=drift_model, hfcut=hfcut,
                             fir_delays=fir_delays)
ns, nr = design_matrix.matrix.shape
pyhrf.verbose(2, 'Design matrix built with %d regressors:' % nr)
for rn in design_matrix.names:
    pyhrf.verbose(2, '    - %s' % rn)
cdesign_matrix = xndarray(design_matrix.matrix,
                          axes_names=['time', 'regressor'],
                          axes_domains={'time': np.arange(ns) * tr,
                                        'regressor': design_matrix.names})
cdesign_matrix.save(op.join(output_dir, 'design_matrix.nii'))
import cPickle
f = open(op.join(output_dir, 'design_matrix.pck'), 'w')
Developer: Solvi, Project: pyhrf, Lines of code: 32, Source file: glm.py
Example 16: runsub
def runsub(sub, thisContrast, thisContrastStr,
           filterLen, filterOrd,
           paramEst, chunklen, alphas=np.logspace(0, 3, 20), debug=False,
           write=False, roi='grayMatter'):
    thisSub = {sub: subList[sub]}
    mc_params = lmvpa.loadmotionparams(paths, thisSub)
    beta_events = lmvpa.loadevents(paths, thisSub)
    dsdict = lmvpa.loadsubdata(paths, thisSub, m=roi, c='trial_type')
    thisDS = dsdict[sub]
    # Savitzky-Golay filtering
    sg.sg_filter(thisDS, filterLen, filterOrd)
    # Gallant group zscores before regression.
    # zscore w.r.t. rest trials
    # zscore(thisDS, param_est=('targets', ['rest']), chunks_attr='chunks')
    # zscore entire set. if done chunk-wise, there is no double-dipping
    # (since we leave a chunk out at a time).
    zscore(thisDS, chunks_attr='chunks')
    # kay method: leave out a model run, use it to fit an HRF for each voxel
    # huth method: essentially use FIR
    # mumford method: deconvolution with canonical HRF
    # refit events and regress...
    # get timing data from timing files
    # rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub])
    rds, events = lmvpa.amendtimings(thisDS.copy(), beta_events[sub],
                                     contrasts)  # adding features
    # we can model out motion and just not use those betas.
    # Ridge
    if isinstance(thisContrast, basestring):
        thisContrast = [thisContrast]
    # instead of binarizing each one, make them parametric
    desX, rds = lmvpa.make_designmat(
        rds, events, time_attr='time_coords', condition_attr=thisContrast,
        design_kwargs={'hrf_model': 'canonical', 'drift_model': 'blank'},
        regr_attrs=None)
    # want to collapse ap and cr, but have anim separate
    desX['motion'] = make_dmtx(rds.sa['time_coords'].value, paradigm=None,
                               add_regs=mc_params[sub], drift_model='blank')
    des = lmvpa.make_parammat(desX, hrf='canonical', zscore=True)
    # set chunklen and nchunks
    # split by language and pictures
    lidx = thisDS.chunks < thisDS.sa['chunks'].unique[
        len(thisDS.sa['chunks'].unique) / 2]
    pidx = thisDS.c  # (excerpt truncated here)