Python logger.info Function Code Examples


This article collects typical usage examples of the Python function mne.utils.logger.info. If you have been wondering what info does, how to call it, or what real-world uses look like, the curated examples below should help.



Twenty code examples of the info function are shown below, ordered by popularity by default.
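
Before the examples, here is a minimal, self-contained sketch of how mne.utils.logger is typically driven (the message text is illustrative; set_log_level and logger are real mne APIs):

import mne
from mne.utils import logger

# mne's logger is a standard Python logging.Logger; set_log_level
# controls which messages actually get emitted.
mne.set_log_level('INFO')
logger.info('Processing %d epochs...' % 10)   # printf-style, pre-formatted
logger.info('Processing %d epochs...', 10)    # lazy logging-style also works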

Example 1: _interpolate_bads_eeg_epochs

def _interpolate_bads_eeg_epochs(epochs, bad_channels_by_epoch=None):
    """Interpolate bad channels per epoch

    Parameters
    ----------
    epochs : mne.Epochs
        The epochs to interpolate. Must be preloaded.
    bad_channels_by_epoch : list of list of str
        Bad channel names specified for each epoch. For example, for an Epochs
        instance containing 3 epochs: ``[['F1'], [], ['F3', 'FZ']]``
    """
    if len(bad_channels_by_epoch) != len(epochs):
        raise ValueError("Unequal length of epochs (%i) and "
                         "bad_channels_by_epoch (%i)"
                         % (len(epochs), len(bad_channels_by_epoch)))

    interp_cache = {}
    for i, bad_channels in enumerate(bad_channels_by_epoch):
        if not bad_channels:
            continue

        # find interpolation matrix
        key = tuple(sorted(bad_channels))
        if key in interp_cache:
            goods_idx, bads_idx, interpolation = interp_cache[key]
        else:
            goods_idx, bads_idx, interpolation = interp_cache[key] \
                                = _make_interpolator(epochs, key)

        # apply interpolation
        logger.info('Interpolating %i sensors on epoch %i', bads_idx.sum(), i)
        epochs._data[i, bads_idx, :] = np.dot(interpolation,
                                              epochs._data[i, goods_idx, :])
Author: phoebegaston | Project: Eelbrain | Lines: 33 | Source: _interpolation.py


Example 2: _raw_to_epochs_array

def _raw_to_epochs_array(x, sfreq, events, tmin, tmax):
    """Aux function to create epochs from a 2D array"""
    if events.ndim != 1:
        raise ValueError('events must be 1D')
    if events.dtype != int:
        raise ValueError('events must be of dtype int')

    # Check that events won't be cut off
    n_times = x.shape[-1]
    min_ix = 0 - sfreq * tmin
    max_ix = n_times - sfreq * tmax
    msk_keep = np.logical_and(events > min_ix, events < max_ix)

    if not all(msk_keep):
        logger.info('Some event windows extend beyond data limits,'
                    ' and will be cut off...')
        events = events[msk_keep]

    # Pull events from the raw data
    epochs = []
    for ix in events:
        ix_min, ix_max = [ix + int(i_tlim * sfreq)
                          for i_tlim in [tmin, tmax]]
        epochs.append(x[np.newaxis, :, ix_min:ix_max])
    epochs = np.concatenate(epochs, axis=0)
    times = np.arange(epochs.shape[-1]) / float(sfreq) + tmin
    return epochs, times, msk_keep
Author: Eric89GXL | Project: mne-sandbox | Lines: 27 | Source: cfc.py
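
A hypothetical call to the helper above with synthetic NumPy data (the shapes, sampling rate, and event samples are made up for illustration):

import numpy as np

sfreq = 100.0                        # sampling rate in Hz (assumed)
x = np.random.randn(4, 1000)         # 4 channels, 1000 samples
events = np.array([150, 500, 990])   # event onsets in samples

# With tmax=0.5 s the last event extends past the data and is dropped.
epochs, times, msk_keep = _raw_to_epochs_array(x, sfreq, events,
                                               tmin=-0.2, tmax=0.5)
print(epochs.shape)   # (2, 4, 70): kept events x channels x samples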


Example 3: chop_raw_data

def chop_raw_data(raw, start_time=60.0, stop_time=360.0, save=True):
    '''
    This function extracts a specified duration of raw data
    and writes it into a fif file.
    Five minutes of data will be extracted by default.

    Parameters
    ----------

    raw: Raw object or raw file name as a string.
    start_time: Time to extract data from in seconds. Default is 60.0 seconds.
    stop_time: Time up to which data is to be extracted. Default is 360.0 seconds.
    save: bool, If True the raw file is written to disk.

    '''
    if isinstance(raw, str):
        print('Raw file name provided, loading raw object...')
        raw = mne.io.Raw(raw, preload=True)
    # Check if data is longer than required chop duration.
    if (raw.n_times / (raw.info['sfreq'])) < (stop_time + start_time):
        logger.info("The data is not long enough for file %s." % raw.info['filename'])
        return
    # Obtain indexes for start and stop times.
    assert start_time < stop_time, "Start time is greater than stop time."
    start_idx = raw.time_as_index(start_time)
    stop_idx = raw.time_as_index(stop_time)
    data, times = raw[:, start_idx:stop_idx]
    raw._data, raw._times = data, times
    dur = int((stop_time - start_time) / 60)
    if save:
        #raw.save(raw.info['filename'].split('/')[-1].split('.')[0] + '_' + str(dur) + 'm-raw.fif')
        raw.save(raw.info['filename'].split('-raw.fif')[0] + ',' + str(dur) + 'm-raw.fif')
    raw.close()
    return
Author: dongqunxi | Project: jumeg | Lines: 34 | Source: jumeg_utils.py
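
A hedged usage sketch (the file name is a placeholder; as the code above shows, either a preloaded Raw object or a path to a -raw.fif file can be passed):

# Pass a Raw object...
raw = mne.io.Raw('subject01-raw.fif', preload=True)
chop_raw_data(raw, start_time=60.0, stop_time=360.0, save=True)

# ...or just the file name, which the function loads itself.
chop_raw_data('subject01-raw.fif')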


Example 4: chop_raw_data

def chop_raw_data(raw, start_time=60.0, stop_time=360.0):
    ''' 
    This function extracts a specified duration of raw data
    and writes it into a fif file.
    Five minutes of data will be extracted by default.

    Parameters
    ----------

    raw: Raw object. 
    start_time: Time to extract data from in seconds. Default is 60.0 seconds. 
    stop_time: Time up to which data is to be extracted. Default is 360.0 seconds.

    '''
    # Check if data is longer than required chop duration.
    if (raw.n_times / (raw.info['sfreq'])) < (stop_time + 60.0):
        logger.info("The data is not long enough.")
        return
    # Obtain indexes for start and stop times.
    assert start_time < stop_time, "Start time is greater than stop time."
    start_idx = raw.time_as_index(start_time)
    stop_idx = raw.time_as_index(stop_time)
    data, times = raw[:, start_idx:stop_idx]
    raw._data, raw._times = data, times
    dur = int((stop_time - start_time) / 60)
    raw.save(raw.info['filename'].split('/')[-1].split('.')[0]+'_'+str(dur)+'m.fif')
    # For the moment, simply warn.
    logger.warning('The file name is not saved in standard form.')
    return
Author: dengemann | Project: jumeg-1 | Lines: 29 | Source: jumeg_utils.py


Example 5: label_svd

def label_svd(sub_leadfield, n_svd_comp, ch_names):
    """ Computes SVD of subleadfield for sensor types separately

    Parameters:
    -----------
    sub_leadfield: numpy array (n_sens x n_vert) with part of the 
                   leadfield matrix
    n_svd_comp: scalar, number of SVD components required
    ch_names: list of channel names

    Returns:
    --------
    this_label_lfd_summary: numpy array, n_svd_comp scaled SVD components
                            of subleadfield

    OH Aug 2015
    """

    logger.info("\nComputing SVD within labels, using %d component(s)"
                        % n_svd_comp)
    

    EEG_idx = [cc for cc in range(len(ch_names)) if ch_names[cc][:3]=='EEG']
    MAG_idx = [cc for cc in range(len(ch_names)) if (ch_names[cc][:3]=='MEG'
                                                and ch_names[cc][-1:]=='1')]
    GRA_idx = [cc for cc in range(len(ch_names)) if (ch_names[cc][:3]=='MEG'
                    and (ch_names[cc][-1:]=='2' or ch_names[cc][-1:]=='3'))]

    list_idx = []
    u_idx = -1  # keep track of which element of u_svd belongs to which sensor type
    if MAG_idx:
        list_idx.append(MAG_idx)
        u_idx += 1
        u_mag = u_idx
    if GRA_idx:
        list_idx.append(GRA_idx)
        u_idx += 1
        u_gra = u_idx
    if EEG_idx:
        list_idx.append(EEG_idx)
        u_idx += 1
        u_eeg = u_idx
    
    # compute SVD of sub-leadfield for individual sensor types
    u_svd = [get_svd_comps(sub_leadfield[ch_idx,:], n_svd_comp) for ch_idx
                                                                  in list_idx]

    # put sensor types back together
    this_label_lfd_summary = np.zeros([len(ch_names),u_svd[0].shape[1]])
    if MAG_idx:
        this_label_lfd_summary[MAG_idx] = u_svd[u_mag]
    if GRA_idx:
        this_label_lfd_summary[GRA_idx] = u_svd[u_gra]
    if EEG_idx:
        this_label_lfd_summary[EEG_idx] = u_svd[u_eeg]    

    return this_label_lfd_summary
Author: olafhauk | Project: mne-python | Lines: 57 | Source: DeFleCT.py


Example 6: _interpolate_bads_eeg

def _interpolate_bads_eeg(inst, picks=None, verbose=None):
    """ Interpolate bad EEG channels.

    Operates in place.

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    picks: np.ndarray, shape(n_channels, ) | list | None
        The channel indices to be used for interpolation.
    """
    from mne.bem import _fit_sphere
    from mne.utils import logger, warn
    from mne.channels.interpolation import _do_interp_dots
    from mne.channels.interpolation import _make_interpolation_matrix
    import numpy as np

    if picks is None:
        picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])

    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)

    inst.info._check_consistency()
    bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]

    if len(picks) == 0 or bads_idx.sum() == 0:
        return

    goods_idx[picks] = True
    goods_idx[bads_idx] = False

    pos = inst._get_channel_positions(picks)

    # Make sure only good EEG are used
    bads_idx_pos = bads_idx[picks]
    goods_idx_pos = goods_idx[picks]
    pos_good = pos[goods_idx_pos]
    pos_bad = pos[bads_idx_pos]

    # test spherical fit
    radius, center = _fit_sphere(pos_good)
    distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
    distance = np.mean(distance / radius)
    if np.abs(1. - distance) > 0.1:
        warn('Your spherical fit is poor, interpolation results are '
             'likely to be inaccurate.')

    logger.info('Computing interpolation matrix from {0} sensor '
                'positions'.format(len(pos_good)))

    interpolation = _make_interpolation_matrix(pos_good, pos_bad)

    logger.info('Interpolating {0} sensors'.format(len(pos_bad)))
    _do_interp_dots(inst, interpolation, goods_idx, bads_idx)
Author: autoreject | Project: autoreject | Lines: 56 | Source: utils.py


Example 7: _check_fname

def _check_fname(fname, overwrite=False, must_exist=False):
    """Check for file existence."""
    _validate_type(fname, 'str', 'fname')
    from mne.utils import logger
    if must_exist and not op.isfile(fname):
        raise IOError('File "%s" does not exist' % fname)
    if op.isfile(fname):
        if not overwrite:
            raise IOError('Destination file exists. Please use option '
                          '"overwrite=True" to force overwriting.')
        elif overwrite != 'read':
            logger.info('Overwriting existing file.')
Author: kambysese | Project: mne-python | Lines: 12 | Source: check.py
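
A short sketch of how this checker behaves (the file name is hypothetical):

# Raises IOError if must_exist=True and 'out-raw.fif' is missing;
# raises IOError if the file exists and overwrite=False;
# logs 'Overwriting existing file.' if it exists and overwrite=True.
_check_fname('out-raw.fif', overwrite=True)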


Example 8: _find_bad_channels

def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the first step of the FASTER algorithm.

    This function attempts to automatically mark bad EEG channels by performing
    outlier detection. It operates on epoched data to ensure that only relevant
    data is analyzed.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
            'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """
    from scipy.stats import kurtosis
    metrics = {
        'variance': lambda x: np.var(x, axis=1),
        'correlation': lambda x: np.mean(
            np.ma.masked_array(np.corrcoef(x),
                               np.identity(len(x), dtype=bool)), axis=0),
        'hurst': lambda x: _hurst(x),
        'kurtosis': lambda x: kurtosis(x, axis=1),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    # Concatenate epochs in time
    data = epochs.get_data()[:, picks]
    data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)

    # Find bad channels
    bads = defaultdict(list)
    info = pick_info(epochs.info, picks, copy=True)
    for ch_type, chs in _picks_by_type(info):
        logger.info('Bad channel detection on %s channels:' % ch_type.upper())
        for metric in use_metrics:
            scores = metrics[metric](data[chs])
            bad_channels = [epochs.ch_names[picks[chs[i]]]
                            for i in find_outliers(scores, thresh, max_iter)]
            logger.info('\tBad by %s: %s' % (metric, bad_channels))
            bads[metric].append(bad_channels)

    bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
    return bads
Author: Qi0116 | Project: deepthought | Lines: 53 | Source: faster.py
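
The core of this step, one metric plus z-score thresholding, can be illustrated with plain NumPy (a simplified, non-iterative stand-in for find_outliers):

import numpy as np

data = np.random.randn(32, 5000)        # 32 channels, epochs concatenated in time
data[7] *= 10.0                         # make channel 7 artificially noisy

scores = np.var(data, axis=1)           # the 'variance' metric
z = (scores - np.mean(scores)) / np.std(scores)
bad_idx = np.where(np.abs(z) > 3.0)[0]  # thresh = 3 standard deviations
print(bad_idx)                          # typically prints [7]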


Example 9: _find_bad_channels_in_epochs

def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the fourth step of the FASTER algorithm.

    This function attempts to automatically mark bad channels in each epoch by
    performing outlier detection.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient'
        Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """

    metrics = {
        'amplitude': lambda x: np.ptp(x, axis=2),
        'deviation': lambda x: _deviation(x),
        'variance': lambda x: np.var(x, axis=2),
        'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
        'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
                                             [50, 60]),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]
    bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
                m in metrics)
    for ch_type, chs in _picks_by_type(info):
        ch_names = [info['ch_names'][k] for k in chs]
        chs = np.array(chs)
        for metric in use_metrics:
            logger.info('Bad channel-in-epoch detection on %s channels:'
                        % ch_type.upper())
            s_epochs = metrics[metric](data[:, chs])
            for i_epochs, epoch in enumerate(s_epochs):
                outliers = find_outliers(epoch, thresh, max_iter)
                if len(outliers) > 0:
                    bad_segment = [ch_names[k] for k in outliers]
                    logger.info('Epoch %d, Bad by %s:\n\t%s' % (
                        i_epochs, metric, bad_segment))
                    bads[metric][i_epochs, chs[outliers]] = True

    return bads
Author: Qi0116 | Project: deepthought | Lines: 52 | Source: faster.py


Example 10: __init__

    def __init__(self, input_fname, montage=None, eog=None,
                 misc=(-4, -3, -2, -1), stim_channel=None, scale=1e-6, sfreq=250,
                 missing_tol=1, preload=True, verbose=None):

        bci_info = {'missing_tol': missing_tol, 'stim_channel': stim_channel}
        openbci_channames = ["FP1", "FP2", "C3", "C4", "P7", "P8", "O1", "O2", "F7", "F8", "F3", "F4", "T7", "T8", "P3", "P4"]
        if not eog:
            eog = list()
        if not misc:
            misc = list()
        nsamps, nchan = self._get_data_dims(input_fname)

        last_samps = [nsamps - 1]
        ch_names = ['EEG %03d' % num for num in range(1, nchan + 1)]
        ch_names[:nchan-4] = openbci_channames[:nchan-4]
        ch_types = ['eeg'] * nchan

        

        if misc:
            misc_names = ['MISC %03d' % ii for ii in range(1, len(misc) + 1)]
            misc_types = ['misc'] * len(misc)
            for ii, mi in enumerate(misc):
                ch_names[mi] = misc_names[ii]
                ch_types[mi] = misc_types[ii]
        if eog:
            eog_names = ['EOG %03d' % ii for ii in range(len(eog))]
            eog_types = ['eog'] * len(eog)
            for ii, ei in enumerate(eog):
                ch_names[ei] = eog_names[ii]
                ch_types[ei] = eog_types[ii]
        if stim_channel:
            ch_names[stim_channel] = 'STI 014'
            ch_types[stim_channel] = 'stim'

        # mark last channel as the timestamp channel
        ch_names[-1] = "Timestamps"
        ch_types[-1] = "misc"

        # fix it for eog and misc marking
        info = create_info(ch_names, sfreq, ch_types, montage)
        info["buffer_size_sec"] = 1.
        super(RawOpenBCI, self).__init__(info, last_samps=last_samps,
                                         raw_extras=[bci_info],
                                         filenames=[input_fname],
                                         preload=False, verbose=verbose)
        # load data
        if preload:
            self.preload = preload
            logger.info('Reading raw data from %s...' % input_fname)
            self._data = self._read_segment()
Author: OpenBCI | Project: OpenBCI_MNE | Lines: 51 | Source: mne_openbci.py
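
A hypothetical construction call (the file name is a placeholder):

raw = RawOpenBCI('openbci_recording.txt', sfreq=250, preload=True)
print(raw.info['ch_names'][:4])   # first four OpenBCI electrode names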


Example 11: read_info

def read_info(subject, data_type, run_index=0, hcp_path=op.curdir):
    """Read info from unprocessed data

    Parameters
    ----------
    subject : str, file_map
        The subject
    data_type : str
        The kind of data to read. The following options are supported:
        'rest'
        'task_motor'
        'task_story_math'
        'task_working_memory'
        'noise_empty_room'
        'noise_subject'
    run_index : int
        The run index. For the first run, use 0, for the second, use 1.
        Also see HCP documentation for the number of runs for a given data
        type.
    hcp_path : str
        The HCP directory, defaults to op.curdir.

    Returns
    -------
    info : instance of mne.io.meas_info.Info
        The MNE channel info object.

    .. note::
        HCP MEG delivers only 3 of the 5 task packages available from the
        HCP MRI release.
    """
    raw, config = get_file_paths(
        subject=subject, data_type=data_type, output='raw',
        run_index=run_index, hcp_path=hcp_path)

    if not op.exists(raw):
        raw = None

    meg_info = _read_bti_info(raw, config)

    if raw is None:
        logger.info('Did not find Raw data. Guessing EMG, ECG and EOG '
                    'channels')
        rename_channels(meg_info, dict(_label_mapping))
    return meg_info
Author: mne-tools | Project: mne-hcp | Lines: 44 | Source: read.py
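
A hedged usage sketch (the subject ID and path are placeholders):

info = read_info(subject='100307', data_type='rest', run_index=0,
                 hcp_path='/path/to/HCP')
print(info['sfreq'], len(info['ch_names']))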


Example 12: _make_interpolator

def _make_interpolator(inst, bad_channels):
    """Find indexes and interpolation matrix to interpolate bad channels

    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    """
    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)

    picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
    bads_idx[picks] = [inst.ch_names[ch] in bad_channels for ch in picks]
    goods_idx[picks] = True
    goods_idx[bads_idx] = False

    if bads_idx.sum() != len(bad_channels):
        logger.warning('Channel interpolation is currently only implemented '
                       'for EEG. The MEG channels marked as bad will remain '
                       'untouched.')

    pos = get_channel_positions(inst, picks)

    # Make sure only EEG are used
    bads_idx_pos = bads_idx[picks]
    goods_idx_pos = goods_idx[picks]

    pos_good = pos[goods_idx_pos]
    pos_bad = pos[bads_idx_pos]

    # test spherical fit
    radius, center = _fit_sphere(pos_good)
    distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
    distance = np.mean(distance / radius)
    if np.abs(1. - distance) > 0.1:
        logger.warning('Your spherical fit is poor, interpolation results are '
                       'likely to be inaccurate.')

    logger.info('Computing interpolation matrix from {0} sensor '
                'positions'.format(len(pos_good)))

    interpolation = _make_interpolation_matrix(pos_good, pos_bad)

    return goods_idx, bads_idx, interpolation
Author: phoebegaston | Project: Eelbrain | Lines: 44 | Source: _interpolation.py


Example 13: _find_bad_epochs

def _find_bad_epochs(epochs, picks, use_metrics, thresh, max_iter):
    """Implements the second step of the FASTER algorithm.

    This function attempts to automatically mark bad epochs by performing
    outlier detection.

    Additional Parameters
    ---------------------
    use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation'. Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. A channel
        crossing this threshold value is marked as bad. Defaults to 3.
    max_iter : int
        The maximum number of iterations performed during outlier detection
        (defaults to 1, as in the original FASTER paper).
    """

    metrics = {
        'amplitude': lambda x: np.mean(np.ptp(x, axis=2), axis=1),
        'deviation': lambda x: np.mean(_deviation(x), axis=1),
        'variance': lambda x: np.mean(np.var(x, axis=2), axis=1),
    }

    if use_metrics is None:
        use_metrics = metrics.keys()

    info = pick_info(epochs.info, picks, copy=True)
    data = epochs.get_data()[:, picks]

    bads = defaultdict(list)
    for ch_type, chs in _picks_by_type(info):
        logger.info('Bad epoch detection on %s channels:' % ch_type.upper())
        for metric in use_metrics:
            scores = metrics[metric](data[:, chs])
            bad_epochs = find_outliers(scores, thresh, max_iter)
            logger.info('\tBad by %s: %s' % (metric, bad_epochs))
            bads[metric].append(bad_epochs)

    bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
    return bads
Author: Qi0116 | Project: deepthought | Lines: 42 | Source: faster.py


Example 14: __init__

    def __init__(self, input_fname, montage=None, eog=None,
                 misc=(-3, -2, -1), stim_channel=None, scale=1e-6, sfreq=250,
                 missing_tol=1, preload=True, verbose=None):

        bci_info = {'missing_tol': missing_tol, 'stim_channel': stim_channel}
        if not eog:
            eog = list()
        if not misc:
            misc = list()
        nsamps, nchan = self._get_data_dims(input_fname)

        last_samps = [nsamps - 1]
        ch_names = ['EEG %03d' % num for num in range(1, nchan + 1)]
        ch_types = ['eeg'] * nchan
        if misc:
            misc_names = ['MISC %03d' % ii for ii in range(1, len(misc) + 1)]
            misc_types = ['misc'] * len(misc)
            for ii, mi in enumerate(misc):
                ch_names[mi] = misc_names[ii]
                ch_types[mi] = misc_types[ii]
        if eog:
            eog_names = ['EOG %03d' % ii for ii in range(len(eog))]
            eog_types = ['eog'] * len(eog)
            for ii, ei in enumerate(eog):
                ch_names[ei] = eog_names[ii]
                ch_types[ei] = eog_types[ii]
        if stim_channel:
            ch_names[stim_channel] = 'STI 014'
            ch_types[stim_channel] = 'stim'

        # fix it for eog and misc marking
        info = create_info(ch_names, sfreq, ch_types, montage)
        super(RawOpenBCI, self).__init__(info, last_samps=last_samps,
                                         raw_extras=[bci_info],
                                         filenames=[input_fname],
                                         preload=False, verbose=verbose)
        # load data
        if preload:
            self.preload = preload
            logger.info('Reading raw data from %s...' % input_fname)
            self._data, _ = self._read_segment()
Author: DominicBreuker | Project: OpenBCI_Python | Lines: 41 | Source: mne_openbci.py


Example 15: get_svd_comps

def get_svd_comps(sub_leadfield, n_svd_comp):
    """ Compute SVD components of sub-leadfield for selected channels 
    (all channels in one SVD)
    Parameters:
    -----------
    sub_leadfield: numpy array (n_sens x n_vert) with part of the leadfield matrix
    n_svd_comp: scalar, number of SVD components required

    Returns:
    --------
    u_svd: numpy array, n_svd_comp SVD components of the subleadfield for the
           selected channels, scaled by their singular values
    """

    u_svd, s_svd, _ = np.linalg.svd(sub_leadfield,
                                    full_matrices=False,
                                    compute_uv=True)

    # get desired first vectors of u_svd
    u_svd = u_svd[:, :n_svd_comp]
   
    # project SVD components on sub-leadfield, take sum over vertices
    u_svd_proj = u_svd.T.dot(sub_leadfield).sum(axis=1)
    # make sure overall projection has positive sign
    u_svd = u_svd.dot(np.sign(np.diag(u_svd_proj)))

    u_svd = u_svd * s_svd[:n_svd_comp][np.newaxis, :]

    logger.info("\nFirst 5 singular values (n=%d): %s" % (u_svd.shape[0], \
                                                         s_svd[0:5]))
    
    # explained variance by chosen components within sub-leadfield
    my_comps = s_svd[0:n_svd_comp]

    comp_var = (100. * np.sum(my_comps * my_comps) /
                np.sum(s_svd * s_svd))
    logger.info("Your %d component(s) explain(s) %.1f%% "
                "variance." % (n_svd_comp, comp_var)) 

    return u_svd
Author: olafhauk | Project: mne-python | Lines: 41 | Source: DeFleCT.py
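
A minimal sketch exercising get_svd_comps on a random sub-leadfield (the dimensions are arbitrary):

import numpy as np

sub_leadfield = np.random.randn(64, 200)   # 64 sensors x 200 vertices
u_svd = get_svd_comps(sub_leadfield, n_svd_comp=3)
print(u_svd.shape)                         # (64, 3): components scaled by singular values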


Example 16: grid_search

def grid_search(epochs, n_interpolates, consensus_percs, prefix, n_folds=3):
    """Grid search to find optimal values of n_interpolate and consensus_perc.

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs object for which bad epochs must be found.
    n_interpolates : array
        The number of sensors to interpolate.
    consensus_percs : array
        The percentage of channels to be interpolated.
    n_folds : int
        Number of folds for cross-validation.
    prefix : str
        Prefix to the log
    """
    cv = KFold(len(epochs), n_folds=n_folds, random_state=42)
    err_cons = np.zeros((len(consensus_percs), len(n_interpolates),
                         n_folds))

    auto_reject = ConsensusAutoReject()
    # The thresholds must be learnt from the entire data
    auto_reject.fit(epochs)

    for fold, (train, test) in enumerate(cv):
        for jdx, n_interp in enumerate(n_interpolates):
            for idx, consensus_perc in enumerate(consensus_percs):
                logger.info('%s[Val fold %d] Trying consensus '
                            'perc %0.2f, n_interp %d' % (
                                prefix, fold + 1, consensus_perc, n_interp))
                # set the params
                auto_reject.consensus_perc = consensus_perc
                auto_reject.n_interpolate = n_interp
                # now do the transform
                auto_reject.transform(epochs[train])
                # score using this param
                X = epochs[test].get_data()
                err_cons[idx, jdx, fold] = -auto_reject.score(X)

    return err_cons
Author: raghavrv | Project: autoreject | Lines: 40 | Source: autoreject.py


Example 17: _interpolate_bads_meg

def _interpolate_bads_meg(epochs, bad_channels_by_epoch, mode='fast'):
    """Interpolate bad MEG channels per epoch

    Parameters
    ----------
    epochs : mne.Epochs
        The epochs to interpolate. Must be preloaded.
    bad_channels_by_epoch : list of list of str
        Bad channel names specified for each epoch. For example, for an Epochs
        instance containing 3 epochs: ``[['F1'], [], ['F3', 'FZ']]``

    Notes
    -----
    Based on mne 0.9.0 MEG channel interpolation.
    """
    if len(bad_channels_by_epoch) != len(epochs):
        raise ValueError("Unequal length of epochs (%i) and "
                         "bad_channels_by_epoch (%i)"
                         % (len(epochs), len(bad_channels_by_epoch)))

    interp_cache = {}
    for i, bad_channels in enumerate(bad_channels_by_epoch):
        if not bad_channels:
            continue

        # find interpolation matrix
        key = tuple(sorted(bad_channels))
        if key in interp_cache:
            picks_good, picks_bad, interpolation = interp_cache[key]
        else:
            picks_good = pick_types(epochs.info, ref_meg=False, exclude=key)
            picks_bad = pick_channels(epochs.ch_names, key)
            interpolation = _map_meg_channels(epochs, picks_good, picks_bad, mode)
            interp_cache[key] = picks_good, picks_bad, interpolation

        # apply interpolation
        logger.info('Interpolating sensors %s on epoch %s', picks_bad, i)
        epochs._data[i, picks_bad, :] = interpolation.dot(epochs._data[i, picks_good, :])
Author: LauraGwilliams | Project: Eelbrain | Lines: 38 | Source: _interpolation.py


Example 18: _run

def _run(subjects_dir, subject, layers, ico, overwrite):
    this_env = copy.copy(os.environ)
    this_env['SUBJECTS_DIR'] = subjects_dir
    this_env['SUBJECT'] = subject

    if 'SUBJECTS_DIR' not in this_env:
        raise RuntimeError('The environment variable SUBJECTS_DIR should '
                           'be set')

    if not op.isdir(subjects_dir):
        raise RuntimeError('subjects directory %s not found, specify using '
                           'the environment variable SUBJECTS_DIR or '
                           'the command line option --subjects-dir'
                           % subjects_dir)

    if 'FREESURFER_HOME' not in this_env:
        raise RuntimeError('The FreeSurfer environment needs to be set up '
                           'for this script')

    subj_path = op.join(subjects_dir, subject)
    if not op.exists(subj_path):
        raise RuntimeError('%s does not exist. Please check your subject '
                           'directory path.' % subj_path)
    
    logger.info('1. Setting up MRI files...')
    if overwrite:
        run_subprocess(['mne_setup_mri', '--mri', 'T1', '--subject', subject, '--overwrite'], env=this_env)
    else:
        run_subprocess(['mne_setup_mri', '--mri', 'T1', '--subject', subject], env=this_env)

    logger.info('2. Setting up %d layer BEM...' % layers)
    if layers == 3:
        flash05 = op.join(subjects_dir, subject, 'nii/FLASH5.nii')
        flash30 = op.join(subjects_dir, subject, 'nii/FLASH30.nii')

        run_subprocess(['mne', 'flash_bem_model', '-s', subject, '-d', subjects_dir,
                        '--flash05', flash05, '--flash30', flash30, '-v'], env=this_env)
        for srf in ('inner_skull', 'outer_skull', 'outer_skin'):
            shutil.copy(op.join(subjects_dir, subject, 'bem/flash/%s.surf' % srf),
                        op.join(subjects_dir, subject, 'bem/%s.surf' % srf))
    else:
        if overwrite:
            run_subprocess(['mne', 'watershed_bem', '-s', subject, '-d', subjects_dir, '--overwrite'], env=this_env)
        else:
            run_subprocess(['mne', 'watershed_bem', '-s', subject, '-d', subjects_dir], env=this_env)

    # Create dense head surface and symbolic link to head.fif file
    logger.info('3. Creating high resolution skin surface for coregistration...')
    run_subprocess(['mne', 'make_scalp_surfaces', '--overwrite', '--subject', subject])
    if op.isfile(op.join(subjects_dir, subject, 'bem/%s-head.fif' % subject)):
        os.rename(op.join(subjects_dir, subject, 'bem/%s-head.fif' % subject),
                  op.join(subjects_dir, subject, 'bem/%s-head-sparse.fif' % subject))
    os.symlink((op.join(subjects_dir, subject, 'bem/%s-head-dense.fif' % subject)),
               (op.join(subjects_dir, subject, 'bem/%s-head.fif' % subject)))

    # Create source space
    run_subprocess(['mne_setup_source_space', '--subject', subject, '--spacing', '%.0f' % 5, '--cps'],
                   env=this_env)
Author: jumpity | Project: mnefun | Lines: 57 | Source: run_mne_bem.py


Example 19: noise_reducer

def noise_reducer(fname_raw, raw=None, signals=[], noiseref=[], detrending=None,
                  tmin=None, tmax=None, reflp=None, refhp=None, refnotch=None,
                  exclude_artifacts=True, checkresults=True, return_raw=False,
                  complementary_signal=False, fnout=None, verbose=False):

    """Apply noise reduction to signal channels using reference channels.

    Parameters
    ----------
    fname_raw : (list of) rawfile names
    raw : mne Raw objects
        Allows passing of raw object as well.
    signals : list of string
              List of channels to compensate using noiseref.
              If empty use the meg signal channels.
    noiseref : list of string | str
              List of channels to use as noise reference.
              If empty use the magnetic reference channels (default).
    signals and noiseref may contain regexp, which are resolved
    using mne.pick_channels_regexp(). All other channels are copied.
    tmin : lower latency bound for weight-calc [start of trace]
    tmax : upper latency bound for weight-calc [ end  of trace]
           Weights are calc'd for (tmin,tmax), but applied to entire data set
    refhp : high-pass frequency for reference signal filter [None]
    reflp :  low-pass frequency for reference signal filter [None]
            reflp < refhp: band-stop filter
            reflp > refhp: band-pass filter
            reflp is not None, refhp is None: low-pass filter
            reflp is None, refhp is not None: high-pass filter
    refnotch : (base) notch frequency for reference signal filter [None]
               use raw(ref)-notched(ref) as reference signal
    exclude_artifacts: filter signal-channels thru _is_good() [True]
                       (parameters are at present hard-coded!)
    return_raw : bool
        If return_raw is true, the raw object is returned and raw file
        is not written to disk. It is suggested that this option be used in cases
        where the noise_reducer is applied multiple times. [False]
    complementary_signal : replaced signal by traces that would be subtracted [False]
                           (can be useful for debugging)
    detrending: boolean to ctrl subtraction of linear trend from all magn. chans [False]
    checkresults : boolean to control internal checks and overall success [True]

    Outputfile
    ----------
    <wawa>,nr-raw.fif for input <wawa>-raw.fif

    Returns
    -------
    If return_raw is True, then mne.io.Raw instance is returned.

    Bugs
    ----
    - artifact checking is incomplete (and with arb. window of tstep=0.2s)
    - no accounting of channels used as signal/reference
    - non existing input file handled ungracefully
    """

    if type(complementary_signal) != bool:
        raise ValueError("Argument complementary_signal must be of type bool")

    # handle error if Raw object passed with file list
    if raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with one Raw object')

    # handle error if return_raw is requested with file list
    if return_raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with return_raw. '
                         'Please pass one file at a time.')

    # handle error if Raw object is passed with detrending option
    #TODO include perform_detrending for Raw objects
    if raw and detrending:
        raise ValueError('Please perform detrending on the raw file directly. '
                         'Cannot perform detrending on the raw object.')

    fnraw = get_files_from_list(fname_raw)

    # loop across all filenames
    for fname in fnraw:

        if verbose:
            print "########## Read raw data:"

        tc0 = time.clock()
        tw0 = time.time()

        if raw is None:
            if detrending:
                raw = perform_detrending(fname, save=False)
            else:
                raw = mne.io.Raw(fname, preload=True)
        else:
            # perform sanity check to make sure Raw object and file are same
            if os.path.basename(fname) != os.path.basename(raw.info['filename']):
                warnings.warn('The file name within the Raw object and provided '
                              'fname are not the same. Please check again.')

        tc1 = time.clock()
        tw1 = time.time()

#......... part of the code omitted here .........
Author: dongqunxi | Project: jumeg | Lines: 101 | Source: jumeg_noise_reducer.py


Example 20: _run

def _run(subjects_dir, subject, force, overwrite, no_decimate, verbose=None):
    this_env = copy.copy(os.environ)
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
    this_env['SUBJECTS_DIR'] = subjects_dir
    this_env['SUBJECT'] = subject
    if 'FREESURFER_HOME' not in this_env:
        raise RuntimeError('The FreeSurfer environment needs to be set up '
                           'for this script')
    incomplete = 'warn' if force else 'raise'
    subj_path = op.join(subjects_dir, subject)
    if not op.exists(subj_path):
        raise RuntimeError('%s does not exist. Please check your subject '
                           'directory path.' % subj_path)

    mri = 'T1.mgz' if op.exists(op.join(subj_path, 'mri', 'T1.mgz')) else 'T1'

    logger.info('1. Creating a dense scalp tessellation with mkheadsurf...')

    def check_seghead(surf_path=op.join(subj_path, 'surf')):
        surf = None
        for k in ['lh.seghead', 'lh.smseghead']:
            this_surf = op.join(surf_path, k)
            if op.exists(this_surf):
                surf = this_surf
                break
        return surf

    my_seghead = check_seghead()
    if my_seghead is None:
        run_subprocess(['mkheadsurf', '-subjid', subject, '-srcvol', mri],
                       env=this_env)

    surf = check_seghead()
    if surf is None:
        raise RuntimeError('mkheadsurf did not produce the standard output '
                           'file.')

    bem_dir = op.join(subjects_dir, subject, 'bem')
    if not op.isdir(bem_dir):
        os.mkdir(bem_dir)  # the excerpt breaks off mid-line here; creating the missing directory is the likely continuation
