
Python base.debug Function Code Examples


This article collects and summarizes typical usage examples of the `debug` function from the Python module `mvpa2.base`. If you have been wondering what exactly `debug` does, how to call it, or what its real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the `debug` function, sorted by popularity by default.
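Before diving in, it helps to see the pattern the examples share. Below is a minimal sketch (not official PyMVPA documentation) of how `debug` is typically used: messages are keyed to a short target string such as 'MAP' or 'CLF_', calls are guarded by Python's built-in `__debug__` flag so they vanish under `python -O`, and a target only produces output if it appears in `debug.active`. The function `process` and the target name 'EXAMPLE' are made up for illustration, and the enabling mechanism (e.g. the MVPA_DEBUG environment variable) is an assumption about PyMVPA's configuration.

# A minimal sketch of the usage pattern shared by the examples below.
# Assumption: an mvpa2 environment where the 'EXAMPLE' debug target has
# been registered and enabled (e.g. via the MVPA_DEBUG environment variable).
import numpy as np
from mvpa2.base import debug

def process(data):                         # hypothetical function
    data = np.asanyarray(data)
    if __debug__:                          # stripped under `python -O`
        if 'EXAMPLE' in debug.active:      # emit only if target is enabled
            debug('EXAMPLE', "processing data of shape %s" % (data.shape,))
    return data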

Example 1: _train

    def _train(self, samples):
        """Train PrototypeMapper
        """

        self._proj = np.hstack([similarity.computed(samples, self.prototypes) for similarity in self.similarities])
        if __debug__:
            debug("MAP", "projected data of shape %s: %s " % (self._proj.shape, self._proj))
Author: robbisg, Project: PyMVPA, Lines: 7, Source file: prototype.py


Example 2: label_voxel

    def label_voxel(self, c, levels = None):

        if self.__referenceLevel is None:
            warning("You did not provide what level to use "
                    "for reference. Assigning 0th level -- '%s'"
                    % (self._levels[0],))
            self.set_reference_level(0)
            # return self.__referenceAtlas.label_voxel(c, levels)

        c = self._check_range(c)

        # obtain coordinates of the closest voxel
        cref = self._data[ self.__referenceLevel.indexes, c[0], c[1], c[2] ]
        dist = norm( (cref - c) * self.voxdim )
        if __debug__:
            debug('ATL__', "Closest referenced point for %r is "
                  "%r at distance %3.2f" % (c, cref, dist))
        if (self.distance - dist) >= 1e-3: # neglect everything smaller
            result = self.__referenceAtlas.label_voxel(cref, levels)
            result['voxel_referenced'] = c
            result['distance'] = dist
        else:
            result = self.__referenceAtlas.label_voxel(c, levels)
            if __debug__:
                debug('ATL__', "Closest referenced point is "
                      "further than desired distance %.2f" % self.distance)
            result['voxel_referenced'] = None
            result['distance'] = 0
        return result
Author: Arthurkorn, Project: PyMVPA, Lines: 29, Source file: base.py


Example 3: _suppress_scipy_warnings

def _suppress_scipy_warnings():
    # Infiltrate warnings if necessary
    numpy_ver = versions['numpy']
    scipy_ver = versions['scipy']
    # There are way too many deprecation warnings spit out at the
    # user. Let's assume that they should be fixed by scipy 0.7.0 time
    if not __debug__ or (__debug__ and 'PY' not in debug.active):
        filter_lines = []
        if "0.6.0" <= scipy_ver and scipy_ver < "0.7.0" \
            and numpy_ver > "1.1.0":
            if __debug__:
                debug('EXT', "Setting up filters for numpy DeprecationWarnings "
                      "regarding scipy < 0.7.0")
            filter_lines += [
                ('NumpyTest will be removed in the next release.*',
                 DeprecationWarning),
                ('PyArray_FromDims: use PyArray_SimpleNew.',
                 DeprecationWarning),
                ('PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.',
                 DeprecationWarning),
                # Trick re.match, since in warnings absent re.DOTALL in re.compile
                ('[\na-z \t0-9]*The original semantics of histogram is scheduled to be.*'
                 '[\na-z \t0-9]*', Warning) ]
        if scipy_ver >= "0.15":
            filter_lines += [("`scipy.weave` is deprecated, use `weave` instead!",
                              DeprecationWarning)]
        if scipy_ver >= "0.16":
            # scipy deprecated it but statsmodels still import it for now
            filter_lines += [("`scipy.linalg.calc_lwork` is deprecated!",
                              DeprecationWarning)]
        for f, w in filter_lines:
            warnings.filterwarnings('ignore', f, w)
Author: VladimirBadalyan, Project: PyMVPA, Lines: 32, Source file: externals.py
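Example 3's core mechanism is the stdlib call `warnings.filterwarnings('ignore', message_regex, category)`, applied in a loop over `filter_lines`. Since that API can look opaque, here is a self-contained sketch of the same idea outside PyMVPA; the deprecation message is invented for illustration.

import warnings

def legacy_api():
    # hypothetical deprecated function, used only for this demo
    warnings.warn("frobnicate is deprecated, use frob instead",
                  DeprecationWarning, stacklevel=2)

# Register an ignore filter keyed on a message regex and a warning
# category -- the same call the filter_lines loop above performs.
warnings.filterwarnings('ignore',
                        'frobnicate is deprecated.*',
                        DeprecationWarning)

legacy_api()                         # silenced: regex and category both match
warnings.warn("unrelated warning")   # still shown (UserWarning, no match)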


Example 4: _level3

    def _level3(self, datasets):
        params = self.params            # for quicker access ;)
        # create a mapper per dataset
        mappers = [deepcopy(params.alignment) for ds in datasets]

        # key difference from level 2: the common space is uniform
        #temp_commonspace = commonspace

        residuals = None
        if self.ca['residual_errors'].enabled:
            residuals = np.zeros((1, len(datasets)))
            self.ca.residual_errors = Dataset(samples=residuals)

        # start from original input datasets again
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 3: ds #%i" % i)

            # retrain mapper on final common space
            ds_new.sa[m.get_space()] = self.commonspace
            m.train(ds_new)
            # remove common space attribute again to save on memory
            del ds_new.sa[m.get_space()]

            if residuals is not None:
                # obtain final projection
                data_mapped = m.forward(ds_new.samples)
                residuals[0, i] = np.linalg.norm(data_mapped - self.commonspace)

        return mappers
Author: adamatus, Project: PyMVPA, Lines: 30, Source file: hyperalignment.py


Example 5: _prepredict

    def _prepredict(self, dataset):
        """Functionality prior prediction
        """
        if not ('notrain2predict' in self.__tags__):
            # check if classifier was trained if that is needed
            if not self.trained:
                raise FailedToPredictError(
                      "Classifier %s wasn't yet trained, therefore can't "
                      "predict" % self)
            nfeatures = dataset.nfeatures #data.shape[1]
            # check if number of features is the same as in the data
            # it was trained on
            if nfeatures != self.__trainednfeatures:
                raise ValueError, \
                      "Classifier %s was trained on data with %d features, " % \
                      (self, self.__trainednfeatures) + \
                      "thus can't predict for %d features" % nfeatures


        if self.params.retrainable:
            if not self.__changedData_isset:
                self.__reset_changed_data()
                _changedData = self._changedData
                data = np.asanyarray(dataset.samples)
                _changedData['testdata'] = \
                                        self.__was_data_changed('testdata', data)
                if __debug__:
                    debug('CLF_', "prepredict: Obtained _changedData is %s",
                          (_changedData,))
Author: adamatus, Project: PyMVPA, Lines: 29, Source file: base.py


Example 6: _untrain

 def _untrain(self):
     if __debug__:
         debug("FS_", "Untraining combined FS: %s" % self)
     for fs in self.__selectors:
         fs.untrain()
     # ask base class to do its untrain
     super(CombinedFeatureSelection, self)._untrain()
Author: arnaudsj, Project: PyMVPA, Lines: 7, Source file: base.py


Example 7: newfunc

        def newfunc(*arg, **kwargs):
            nfailed, i = 0, 0  # define i just in case
            for i in xrange(niter):
                try:
                    ret = func(*arg, **kwargs)
                    if i + 1 - nfailed >= niter - nfailures:
                        # so we know already that we wouldn't go over
                        # nfailures
                        break
                except AssertionError, e:
                    nfailed += 1
                    if __debug__:
                        debug("TEST", "Upon %i-th run, test %s failed with %s", (i, func.__name__, e))

                    if nfailed > nfailures:
                        if __debug__:
                            debug(
                                "TEST",
                                "Ran %s %i times. Got %d failures, "
                                "while was allowed %d "
                                "-- re-throwing the last failure %s",
                                (func.__name__, i + 1, nfailed, nfailures, e),
                            )
                        exc_info = sys.exc_info()
                        raise exc_info[1], None, exc_info[2]
Author: schoeke, Project: PyMVPA, Lines: 25, Source file: tools.py


Example 8: forward

    def forward(self, data):
        """Map data from input to output space.

        Parameters
        ----------
        data : Dataset-like, (at least 2D)-array-like
          Typically this is a `Dataset`, but it might also be a plain data
          array, or even something completely different(TM) that is supported
          by a subclass' implementation. If such an object is Dataset-like it
          is handled by a dedicated method that also transforms dataset
          attributes if necessary. If an array-like is passed, it has to be
          at least two-dimensional, with the first axis separating samples
          or observations. For single samples `forward1()` might be more
          appropriate.
        """
        if is_datasetlike(data):
            if __debug__:
                debug('MAP', "Forward-map %s-shaped dataset through '%s'."
                        % (data.shape, self))
            return self._forward_dataset(data)
        else:
            if hasattr(data, 'ndim') and data.ndim < 2:
                raise ValueError(
                    'Mapper.forward() only supports mapping of data with '
                    'at least two dimensions, where the first axis '
                    'separates samples/observations. Consider using '
                    'Mapper.forward1() instead.')
            if __debug__:
                debug('MAP', "Forward-map data through '%s'." % (self))
            return self._forward_data(data)
Author: jgors, Project: PyMVPA, Lines: 30, Source file: base.py
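The docstring above spells out the dispatch contract of `forward`: Dataset-like input goes through `_forward_dataset`, plain (at least 2D) arrays go through `_forward_data`, and single samples belong in `forward1`. As an illustration only, here is a sketch of a trivial mapper honoring that contract; the class `ScaleMapper` is invented, and the import path for `Mapper` is an assumption based on the source file named above.

import numpy as np
from mvpa2.mappers.base import Mapper    # assumed import path

class ScaleMapper(Mapper):
    """Hypothetical mapper: multiply every feature by a constant."""
    def __init__(self, factor, **kwargs):
        Mapper.__init__(self, **kwargs)
        self._factor = factor

    def _forward_data(self, data):
        # forward() has already verified the input is at least 2D,
        # with the first axis separating samples
        return np.asanyarray(data) * self._factor

sm = ScaleMapper(2.0)
print(sm.forward(np.ones((3, 4))))       # OK: 2D, samples on first axis
# sm.forward(np.ones(4)) would raise ValueError -> use sm.forward1(...)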


Example 9: _forward_dataset

    def _forward_dataset(self, dataset):
        """Forward-map a dataset.

        This is a private method that can be reimplemented in derived
        classes. The default implementation forward-maps the dataset samples
        and returns a new dataset that is a shallow copy of the input with
        the mapped samples.

        Parameters
        ----------
        dataset : Dataset-like
        """
        if __debug__:
            debug('MAP_', "Forward-map %s-shaped samples in dataset with '%s'."
                        % (dataset.samples.shape, self))
        msamples = self._forward_data(dataset.samples)
        if __debug__:
            debug('MAP_', "Make shallow copy of to-be-forward-mapped dataset "
                    "and assigned forward-mapped samples ({sf}a_filters: "
                    "%s, %s, %s)." % (self._sa_filter, self._fa_filter,
                                      self._a_filter))
        mds = dataset.copy(deep=False,
                           sa=self._sa_filter,
                           fa=self._fa_filter,
                           a=self._a_filter)
        mds.samples = msamples
        return mds
Author: jgors, Project: PyMVPA, Lines: 27, Source file: base.py


Example 10: _forward_dataset

    def _forward_dataset(self, dataset):
        # invoke the superclass' _forward_dataset, which in turn calls
        # _forward_data in this class
        mds = super(FlattenMapper, self)._forward_dataset(dataset)
        # attribute collection needs to have a new length check
        mds.fa.set_length_check(mds.nfeatures)
        # we need to duplicate all existing feature attributes, as each original
        # feature is now spread across the new feature axis
        # take all "additional" axes after the actual feature axis and count
        # elements per sample -- if no such axis exists this will be 1
        for k in dataset.fa:
            if __debug__:
                debug("MAP_", "Forward-mapping fa '%s'." % k)
            attr = dataset.fa[k].value
            # the maximum number of axes to flatten in the attr
            if self.__maxdims is not None:
                maxdim = min(len(self.__origshape), self.__maxdims)
            else:
                maxdim = len(self.__origshape)
            multiplier = mds.nfeatures / np.prod(attr.shape[:maxdim])
            if __debug__:
                debug("MAP_", "Broadcasting fa '%s' %s %d times" % (k, attr.shape, multiplier))
            # broadcast as many times as necessary to get 'matching dimensions'
            bced = np.repeat(attr, multiplier, axis=0)
            # now reshape as many dimensions as the mapper knows about
            mds.fa[k] = bced.reshape((-1,) + bced.shape[maxdim:])

        # if there is no inspace return immediately
        if self.get_space() is None:
            return mds
        # otherwise create the coordinates as feature attributes
        else:
            mds.fa[self.get_space()] = list(np.ndindex(dataset.samples[0].shape))
            return mds
Author: reka-daniel, Project: PyMVPA, Lines: 34, Source file: flatten.py


Example 11: __reverse_single_level

    def __reverse_single_level(self, wp):

        # local bindings
        level_paths = self.__level_paths

        # define wavelet packet to use
        WP = pywt.WaveletPacket(
            data=None, wavelet=self._wavelet,
            mode=self._mode, maxlevel=self.__level)

        # prepare storage
        signal_shape = wp.shape[:1] + self._inshape[1:]
        signal = np.zeros(signal_shape)
        Ntime_points = self._intimepoints
        for indexes in _get_indexes(signal_shape,
                                   self._dim):
            if __debug__:
                debug('MAP_', " %s" % (indexes,), lf=False, cr=True)

            for path, level_data in zip(level_paths, wp[indexes]):
                WP[path] = level_data

            signal[indexes] = WP.reconstruct(True)[:Ntime_points]

        return signal
Author: PyMVPA, Project: PyMVPA, Lines: 25, Source file: wavelet.py


Example 12: _call

    def _call(self, dataset):
        analyzers = []
        # create analyzers
        for clf in self.clf.clfs:
            if self.__analyzer is None:
                analyzer = clf.get_sensitivity_analyzer(**(self._slave_kwargs))
                if analyzer is None:
                    raise ValueError, \
                          "Wasn't able to figure basic analyzer for clf %r" % \
                          (clf,)
                if __debug__:
                    debug("SA", "Selected analyzer %r for clf %r" % \
                          (analyzer, clf))
            else:
                # XXX shallow copy should be enough...
                analyzer = copy.copy(self.__analyzer)

            # assign corresponding classifier
            analyzer.clf = clf
            # if clf was trained already - don't train again
            if clf.trained:
                analyzer._force_train = False
            analyzers.append(analyzer)

        self.__combined_analyzer.analyzers = analyzers

        # XXX not sure if we don't want to call directly ._call(dataset) to avoid
        # double application of transformers/combiners, after all we are just
        # 'proxying' here to combined_analyzer...
        # YOH: decided -- lets call ._call
        return self.__combined_analyzer._call(dataset)
Author: PepGardiola, Project: PyMVPA, Lines: 31, Source file: base.py


Example 13: _train

    def _train(self, dataset):
        """Select the most important features

        Parameters
        ----------
        dataset : Dataset
          used to compute sensitivity maps
        """
        # optionally train the analyzer first
        if self.__train_analyzer:
            self.__sensitivity_analyzer.train(dataset)

        sensitivity = self.__sensitivity_analyzer(dataset)
        """Compute the sensitivity map."""

        self.ca.sensitivity = sensitivity

        # Select features to preserve
        selected_ids = self.__feature_selector(sensitivity)

        if __debug__:
            debug("FS_", "Sensitivity: %s Selected ids: %s" %
                  (sensitivity, selected_ids))

        # XXX not sure if it really has to be sorted
        selected_ids.sort()
        # announce desired features to the underlying slice mapper
        self._safe_assign_slicearg(selected_ids)
        # and perform its own training
        super(SensitivityBasedFeatureSelection, self)._train(dataset)
Author: arnaudsj, Project: PyMVPA, Lines: 30, Source file: base.py


Example 14: _get_selected_ids

    def _get_selected_ids(self, dataset):
        """Given a dataset actually select the features

        Returns
        -------
        indexes of the selected features
        """
        # optionally train the analyzer first
        if self.__train_analyzer:
            self.__sensitivity_analyzer.train(dataset)

        sensitivity = self.__sensitivity_analyzer(dataset)
        """Compute the sensitivity map."""
        self.ca.sensitivity = sensitivity

        # Select features to preserve
        selected_ids = self.__feature_selector(sensitivity)

        if __debug__:
            debug("FS_", "Sensitivity: %s Selected ids: %s" %
                  (sensitivity, selected_ids))

        # XXX not sure if it really has to be sorted
        selected_ids.sort()
        return selected_ids
Author: PepGardiola, Project: PyMVPA, Lines: 25, Source file: base.py


Example 15: _call

    def _call(self, dataset=None):
        """Extract weights from SMLR classifier.

        SMLR always has weights available, so nothing has to be computed here.
        """
        clf = self.clf
        # transpose to have the number of features on the second axis
        # (as usual)
        weights = clf.weights.T

        if __debug__:
            debug('SMLR',
                  "Extracting weights for %d-class SMLR. " %
                  (len(weights) + 1) +
                  "Result: min=%f max=%f" %
                  (np.min(weights), np.max(weights)))

        # limit the labels to the number of sensitivity sets, to deal
        # with the case of `fit_all_weights=False`
        ds = Dataset(weights,
                     sa={clf.get_space(): clf._ulabels[:len(weights)]})

        if clf.params.has_bias:
            ds.sa['biases'] = clf.biases
        return ds
Author: PepGardiola, Project: PyMVPA, Lines: 25, Source file: smlr.py


Example 16: _binary_data_bytecount

def _binary_data_bytecount(niml):
    '''helper function that returns how many bytes a NIML binary data
    element should have'''
    niform = niml['ni_form']
    if not 'binary' in niform:
        raise ValueError('Illegal niform %s' % niform)

    tps = niml['vec_typ']
    onetype = types.findonetype(tps)

    if onetype is None:
        debug('NIML', 'Not unique type: %r', tps)
        return None

    # numeric, either int or float
    ncols = niml['vec_num']
    nrows = niml['vec_len']
    tp = types.code2numpy_type(onetype)
    bytes_per_elem = types.numpy_type2bytecount(tp)

    if bytes_per_elem is None:
        raise ValueError("Type not supported: %r" % onetype)

    nb = ncols * nrows * bytes_per_elem

    debug('NIML', 'Number of bytes for %s: %d x %d with %d bytes / element',
                                    (niform, ncols, nrows, bytes_per_elem))

    return nb
Author: andreirusu, Project: PyMVPA, Lines: 29, Source file: afni_niml.py
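The byte count in example 16 is a plain product of the element dimensions and the per-element size. A worked check with invented numbers: a binary NIML element declaring `vec_num = 10` columns and `vec_len = 100` rows of a 4-byte type must carry 4000 bytes of payload.

# Worked example with invented dimensions: 10 columns x 100 rows of a
# 4-byte element type => 10 * 100 * 4 = 4000 bytes expected.
ncols, nrows, bytes_per_elem = 10, 100, 4
assert ncols * nrows * bytes_per_elem == 4000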


Example 17: _train

    def _train(self, samples):
        """Perform network training.

        Parameters
        ----------
        samples : array-like
            Used for unsupervised training of the SOM.
          
        Notes
        -----
        It is assumed that prior to calling this method the _pretrain method 
        was called with the same argument.  
        """

        # ensure that dqd was set properly
        dqd = self._dqd
        if dqd is None:
            raise ValueError("This should not happen - was _pretrain called?")

        # units weight vector deltas for batch training
        # (height x width x #features)
        unit_deltas = np.zeros(self._K.shape, dtype='float')

        # for all iterations
        for it in xrange(1, self.niter + 1):
            # compute the neighborhood impact kernel for this iteration
            # has to be recomputed since kernel shrinks over time
            k = self._compute_influence_kernel(it, dqd)

            # for all training vectors
            for s in samples:
                # determine closest unit (as element coordinate)
                b = self._get_bmu(s)
                # train all units at once by unfolding the kernel (from the
                # single quadrant that is precomputed), cutting it to the
                # right shape and simply multiplying it by the difference
                # between the target and all unit weights....
                infl = np.vstack((
                        np.hstack((
                            # upper left
                            k[b[0]:0:-1, b[1]:0:-1],
                            # upper right
                            k[b[0]:0:-1, :self.kshape[1] - b[1]])),
                        np.hstack((
                            # lower left
                            k[:self.kshape[0] - b[0], b[1]:0:-1],
                            # lower right
                            k[:self.kshape[0] - b[0], :self.kshape[1] - b[1]]))
                               ))
                unit_deltas += infl[:, :, np.newaxis] * (s - self._K)

            # apply cumulative unit deltas
            self._K += unit_deltas

            if __debug__:
                debug("SOM", "Iteration %d/%d done: ||unit_deltas||=%g" %
                      (it, self.niter, np.sqrt(np.sum(unit_deltas ** 2))))

            # reset unit deltas
            unit_deltas.fill(0.)
Author: Arthurkorn, Project: PyMVPA, Lines: 60, Source file: som.py


Example 18: __init__

    def __init__(self, index=None, *args, **kwargs):
        """
        Parameters
        ----------
        index : int, optional
          Index to assign to this collectable; if None, the next value
          of the class-level instance counter is used automatically.
        **kwargs
          Passed to `Collectable`
        """
        if index is None:
            IndexedCollectable._instance_index += 1
            index = IndexedCollectable._instance_index
        else:
            # TODO: there can be collision between custom provided indexes
            #       and the ones automagically assigned.
            #       Check might be due
            pass
        self._instance_index = index

        self._isset = False
        self.reset()

        Collectable.__init__(self, *args, **kwargs)

        if __debug__ and 'COL' in debug.active:
            debug("COL", "Initialized new IndexedCollectable #%d:%s %r",
                  (index, self.name, self))
Author: Anhmike, Project: PyMVPA, Lines: 27, Source file: attributes.py


Example 19: __init__

    def __init__(self, value=None, name=None, doc=None):
        """
        Parameters
        ----------
        value : arbitrary (see derived implementations)
          The actual value of this attribute.
        name : str
          Name of the collectable under which it should be available in its
          respective collection.
        doc : str
          Documentation about the purpose of this collectable.
        """
        if doc is not None:
            # to prevent newlines in the docstring
            try:
                doc = re.sub('[\n ]+', ' ', doc)
            except TypeError:
                # catch some old datasets stored in HDF5
                doc = re.sub('[\n ]+', ' ', np.asscalar(doc))

        self.__doc__ = doc
        self.__name = name
        self._value = None
        if value is not None:
            self._set(value)
        if __debug__ and __mvpadebug__:
            debug("COL", "Initialized %r", (self,))
Author: Anhmike, Project: PyMVPA, Lines: 27, Source file: collections.py


Example 20: _set

 def _set(self, val):
     if __debug__ and __mvpadebug__:
         # Since this call happens quite often, don't convert
         # values to strings here; rely on passing them
         # within msgargs
         debug("COL", "Setting %s to %s ", (self, val))
     self._value = val
Author: roshan-srin, Project: nidata, Lines: 7, Source file: collections.py
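The comment in example 20 hints at a lazy-formatting idiom worth making explicit: interpolating `%` arguments eagerly pays the string-conversion cost on every call, even when the 'COL' target is disabled, whereas passing the arguments separately lets the debug machinery format only when the message is actually emitted (our reading of the comment, not verified against PyMVPA internals). The standard library's `logging` module uses the same idiom:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

# Eager: the string is built before the call, even if it gets dropped.
log.debug("Setting %s to %s" % ("attr", 42))

# Deferred (preferred): args travel separately and are interpolated
# only when a handler actually emits the record.
log.debug("Setting %s to %s", "attr", 42)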



Note: the mvpa2.base.debug function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.

