
Python numdiff.approx_fprime_cs Function Code Examples


This article collects typical usage examples of the Python function statsmodels.tools.numdiff.approx_fprime_cs. If you have been struggling with questions like what exactly approx_fprime_cs does, how to call it, or what real uses look like, then congratulations: the curated code examples below may be just the help you need.



The sections below present 16 code examples of the approx_fprime_cs function, sorted by popularity by default.
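Before the examples, a minimal usage sketch may help orient readers. The quadratic test function below is our own illustration, not taken from statsmodels; only the import path and the approx_fprime_cs(x, f) call pattern come from the examples that follow.

    import numpy as np
    from statsmodels.tools.numdiff import approx_fprime_cs

    # f(x) = x'Ax has the analytic gradient (A + A')x.
    A = np.array([[2.0, 0.5],
                  [0.5, 1.0]])

    def f(x):
        # Written without abs() or branching so that complex inputs
        # propagate -- a requirement of complex-step differentiation.
        return x.dot(A).dot(x)

    x0 = np.array([1.0, -2.0])
    grad_cs = approx_fprime_cs(x0, f)      # complex-step gradient
    grad_true = (A + A.T).dot(x0)          # analytic gradient
    print(np.max(np.abs(grad_cs - grad_true)))  # ~1e-16, machine precision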

Example 1: test_hess

    def test_hess(self):
        #NOTE: I had to overwrite this to lessen the tolerance
        for test_params in self.params:
            he = self.mod.hessian(test_params)
            hefd = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hefd, decimal=DEC8)

            #NOTE: notice the accuracy below and the epsilon changes
            # this doesn't work well for score -> hessian with non-cs step
            # it's a little better around the optimum
            assert_almost_equal(he, hefd, decimal=7)
            hefd = numdiff.approx_fprime(test_params, self.mod.score,
                                         centered=True)
            assert_almost_equal(he, hefd, decimal=4)
            hefd = numdiff.approx_fprime(test_params, self.mod.score, 1e-9,
                                         centered=False)
            assert_almost_equal(he, hefd, decimal=2)

            hescs = numdiff.approx_fprime_cs(test_params, self.mod.score)
            assert_almost_equal(he, hescs, decimal=DEC8)

            hecs = numdiff.approx_hess_cs(test_params, self.mod.loglike)
            assert_almost_equal(he, hecs, decimal=5)
            #NOTE: these just don't work well
            #hecs = numdiff.approx_hess1(test_params, self.mod.loglike, 1e-3)
            #assert_almost_equal(he, hecs, decimal=1)
            #hecs = numdiff.approx_hess2(test_params, self.mod.loglike, 1e-4)
            #assert_almost_equal(he, hecs, decimal=0)
            hecs = numdiff.approx_hess3(test_params, self.mod.loglike, 1e-4)
            assert_almost_equal(he, hecs, decimal=0)
Developer: ChadFulton, Project: statsmodels, Lines: 30, Source: test_numdiff.py
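The different tolerances in this test (decimal=8 for the complex-step results versus decimal=4 and decimal=2 for the finite-difference ones) reflect the key property of the complex-step method: the derivative is read off the imaginary part as Im f(x+ih)/h, so there is no subtraction of nearly equal numbers, no cancellation error, and the step h can be made extremely small. A standalone illustration (ours, not part of the test suite):

    import numpy as np

    x = 1.5
    # Complex step: no cancellation, so the step can be tiny.
    d_cs = np.imag(np.exp(x + 1j * 1e-20)) / 1e-20
    # Forward difference: cancellation limits accuracy near h ~ 1e-8.
    d_fd = (np.exp(x + 1e-8) - np.exp(x)) / 1e-8
    print(abs(d_cs - np.exp(x)))  # ~0 (machine precision)
    print(abs(d_fd - np.exp(x)))  # ~1e-8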


Example 2: deriv

    def deriv(self, mu):
        """
        Derivative of the variance function v'(mu)
        """
        from statsmodels.tools.numdiff import approx_fprime_cs
        # TODO: diag is a workaround for a problem with numdiff for 1d
        return np.diag(approx_fprime_cs(mu, self))
Developer: 0ceangypsy, Project: statsmodels, Lines: 7, Source: varfuncs.py
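The np.diag call works around the fact that, for an elementwise function of a 1-d array, approx_fprime_cs returns the full (essentially diagonal) Jacobian rather than a 1-d array of derivatives. A sketch with a made-up variance function v(mu) = mu**2:

    import numpy as np
    from statsmodels.tools.numdiff import approx_fprime_cs

    v = lambda mu: mu**2                 # elementwise variance function
    mu = np.array([0.5, 1.0, 2.0])
    jac = approx_fprime_cs(mu, v)        # (3, 3) Jacobian, off-diagonals ~0
    print(np.diag(jac))                  # [1. 2. 4.] == v'(mu) = 2*mu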


Example 3: score

    def score(self, params, *args, **kwargs):
        """
        Compute the score function at params.

        Parameters
        ----------
        params : array_like
            Array of parameters at which to evaluate the score.
        *args, **kwargs
            Additional arguments to the `loglike` method.

        Returns
        -------
        score : array
            Score, evaluated at `params`.

        Notes
        -----
        This is a numerical approximation, calculated using first-order complex
        step differentiation on the `loglike` method.

        Both \*args and \*\*kwargs are necessary because the optimizer from
        `fit` must call this function and only supports passing arguments via
        \*args (for example `scipy.optimize.fmin_l_bfgs_b`).
        """
        transformed = (
            args[0] if len(args) > 0 else kwargs.get('transformed', False)
        )

        score = approx_fprime_cs(params, self.loglike, kwargs={
            'transformed': transformed
        })

        return score
Developer: andreas-koukorinis, Project: statsmodels, Lines: 34, Source: mlemodel.py
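The same pattern works outside a model class. As a hedged sketch, here is the numerical score of an i.i.d. normal log-likelihood; the loglike function is our own toy, not statsmodels' MLEModel.loglike:

    import numpy as np
    from statsmodels.tools.numdiff import approx_fprime_cs

    y = np.array([0.3, -1.2, 0.5, 2.0])

    def loglike(params):
        mu, log_sigma = params            # log-parametrized so sigma > 0
        sigma = np.exp(log_sigma)
        return np.sum(-0.5 * np.log(2 * np.pi) - log_sigma
                      - 0.5 * ((y - mu) / sigma)**2)

    score = approx_fprime_cs(np.array([0.0, 0.0]), loglike)
    print(score)  # gradient w.r.t. (mu, log_sigma)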


Example 4: score

    def score(self, params, *args, **kwargs):
        """
        Compute the score function at params.

        Parameters
        ----------
        params : array_like
            Array of parameters at which to evaluate the score.
        *args, **kwargs
            Additional arguments to the `loglike` method.

        Returns
        -------
        score : array
            Score, evaluated at `params`.

        Notes
        -----
        This is a numerical approximation, calculated using first-order complex
        step differentiation on the `loglike` method.

        Both \*args and \*\*kwargs are necessary because the optimizer from
        `fit` must call this function and only supports passing arguments via
        \*args (for example `scipy.optimize.fmin_l_bfgs_b`).
        """
        nargs = len(args)
        if nargs < 1:
            kwargs.setdefault('average_loglike', True)
        if nargs < 2:
            kwargs.setdefault('transformed', False)
        if nargs < 3:
            kwargs.setdefault('set_params', False)

        return approx_fprime_cs(params, self.loglike, args=args, kwargs=kwargs)
Developer: soumyadsanyal, Project: statsmodels, Lines: 34, Source: mlemodel.py


Example 5: test_grad_fun1_cs

    def test_grad_fun1_cs(self):
        for test_params in self.params:
            #gtrue = self.x.sum(0)
            gtrue = self.gradtrue(test_params)
            fun = self.fun()

            gcs = numdiff.approx_fprime_cs(test_params, fun, args=self.args)
            assert_almost_equal(gtrue, gcs, decimal=DEC13)
Developer: ChadFulton, Project: statsmodels, Lines: 8, Source: test_numdiff.py


Example 6: deriv2

    def deriv2(self, p):
        """Second derivative of the link function g''(p)

        implemented through numerical differentiation
        """
        from statsmodels.tools.numdiff import approx_fprime_cs
        # TODO: workaround for a problem with numdiff for 1d
        return np.diag(approx_fprime_cs(p, self.deriv))
Developer: Cassin123, Project: statsmodels, Lines: 8, Source: links.py
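Because deriv2 differentiates self.deriv numerically, the chain works whenever the first derivative is itself complex-step safe. A sketch for the logit link, whose derivatives are standard textbook formulas (not copied from links.py):

    import numpy as np
    from statsmodels.tools.numdiff import approx_fprime_cs

    deriv = lambda p: 1.0 / (p * (1 - p))      # g'(p) for the logit link
    p = np.array([0.2, 0.5, 0.8])
    d2 = np.diag(approx_fprime_cs(p, deriv))   # numerical g''(p)
    d2_true = (2*p - 1) / (p**2 * (1 - p)**2)  # analytic g''(p)
    print(np.max(np.abs(d2 - d2_true)))        # ~machine precision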


Example 7: test_score

    def test_score(self):
        for test_params in self.params:
            sc = self.mod.score(test_params)
            scfd = numdiff.approx_fprime(test_params.ravel(),
                                         self.mod.loglike)
            assert_almost_equal(sc, scfd, decimal=1)

            sccs = numdiff.approx_fprime_cs(test_params.ravel(),
                                            self.mod.loglike)
            assert_almost_equal(sc, sccs, decimal=11)
Developer: ChadFulton, Project: statsmodels, Lines: 10, Source: test_numdiff.py


Example 8: score

    def score(self, params, *args, **kwargs):
        """
        Compute the score function at params.

        Parameters
        ----------
        params : array_like
            Array of parameters at which to evaluate the score.
        *args, **kwargs
            Additional arguments to the `loglike` method.

        Returns
        -------
        score : array
            Score, evaluated at `params`.

        Notes
        -----
        This is a numerical approximation.

        Both \*args and \*\*kwargs are necessary because the optimizer from
        `fit` must call this function and only supports passing arguments via
        \*args (for example `scipy.optimize.fmin_l_bfgs_b`).
        """
        nargs = len(args)
        if nargs < 1:
            kwargs.setdefault('average_loglike', True)
        if nargs < 2:
            kwargs.setdefault('transformed', False)
        if nargs < 3:
            kwargs.setdefault('set_params', False)

        initial_state = kwargs.pop('initial_state', None)
        initial_state_cov = kwargs.pop('initial_state_cov', None)
        if initial_state is not None and initial_state_cov is not None:
            # If initialization is stationary, we don't want to recalculate the
            # initial_state_cov for each new set of parameters here
            initialization = self.initialization
            _initial_state = self._initial_state
            _initial_state_cov = self._initial_state_cov
            _initial_variance = self._initial_variance

            self.initialize_known(initial_state, initial_state_cov)

        score = approx_fprime_cs(params, self.loglike, epsilon=1e-9, args=args,
                                 kwargs=kwargs)

        if initial_state is not None and initial_state_cov is not None:
            # Reset the initialization
            self.initialization = initialization
            self._initial_state = _initial_state
            self._initial_state_cov = _initial_state_cov
            self._initial_variance = _initial_variance

        return score
Developer: Wombatpm, Project: statsmodels, Lines: 55, Source: mlemodel.py
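The save-and-restore bookkeeping around the approx_fprime_cs call exists because the complex-step loop evaluates loglike once per parameter; re-deriving a stationary initialization on every evaluation would be wasted work. A hedged sketch of the same idea written with try/finally, so the state is restored even if loglike raises; this is our restructuring of the method above, assuming a class with the same attributes, not the statsmodels code:

    from statsmodels.tools.numdiff import approx_fprime_cs

    def score_fixed_init(self, params, initial_state, initial_state_cov,
                         **kwargs):
        # Method sketch for a model with the attributes used above.
        # Save the model's current initialization...
        saved = (self.initialization, self._initial_state,
                 self._initial_state_cov, self._initial_variance)
        self.initialize_known(initial_state, initial_state_cov)
        try:
            return approx_fprime_cs(params, self.loglike, epsilon=1e-9,
                                    kwargs=kwargs)
        finally:
            # ...and restore it no matter what happens.
            (self.initialization, self._initial_state,
             self._initial_state_cov, self._initial_variance) = saved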


Example 9: jac_predict

    def jac_predict(self, params):
        '''Jacobian of the prediction function, using complex-step derivatives.

        This assumes that the predict function does not itself use complex
        variables but can be evaluated with complex inputs, as required by
        complex-step differentiation.
        '''
        from statsmodels.tools.numdiff import approx_fprime_cs

        jaccs_err = approx_fprime_cs(params, self._predict)
        return jaccs_err
Developer: 0ceangypsy, Project: statsmodels, Lines: 11, Source: nonlinls.py


Example 10: score_numdiff

    def score_numdiff(self, params, pen_weight=None, method='fd', **kwds):
        """score based on finite difference derivative

        """
        if pen_weight is None:
            pen_weight = self.pen_weight

        loglike = lambda p: self.loglike(p, pen_weight=pen_weight, **kwds)

        if method == 'cs':
            return approx_fprime_cs(params, loglike)
        elif method == 'fd':
            return approx_fprime(params, loglike, centered=True)
        else:
            raise ValueError('method not recognized, should be "fd" or "cs"')
Developer: haribharadwaj, Project: statsmodels, Lines: 15, Source: _penalized.py
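The 'fd' fallback matters because complex-step differentiation silently fails on functions that are not complex-safe. A small illustration of our own; np.abs discards the imaginary perturbation, so 'cs' returns zeros while centered finite differences stay correct:

    import numpy as np
    from statsmodels.tools.numdiff import approx_fprime, approx_fprime_cs

    f = lambda x: np.sum(np.abs(x)**3)          # not complex-step safe

    x0 = np.array([1.0, -2.0])
    print(approx_fprime(x0, f, centered=True))  # ~[3., -12.], correct
    print(approx_fprime_cs(x0, f))              # [0., 0.] -- wrong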


Example 11: arma_scoreobs

def arma_scoreobs(endog, ar_params=None, ma_params=None, sigma2=1,
                  prefix=None):
    """
    Compute the score per observation (gradient of the loglikelihood function)

    Parameters
    ----------
    endog : ndarray
        The observed time-series process.
    ar_params : ndarray, optional
        Autoregressive coefficients, not including the zero lag.
    ma_params : ndarray, optional
        Moving average coefficients, not including the zero lag. The sign
        convention assumes the coefficients are part of the lag polynomial
        on the right-hand side of the ARMA definition, i.e. they have the
        same sign as in the usual econometrics convention.
    sigma2 : ndarray, optional
        The ARMA innovation variance. Default is 1.
    prefix : str, optional
        The BLAS prefix associated with the datatype. Default is to find the
        best datatype based on given input. This argument is typically only
        used internally.

    Returns
    -------
    scoreobs : array
        Score per observation, evaluated at the given parameters.

    Notes
    -----
    This is a numerical approximation, calculated using first-order complex
    step differentiation on the `arma_loglikeobs` function.
    """
    ar_params = [] if ar_params is None else ar_params
    ma_params = [] if ma_params is None else ma_params

    p = len(ar_params)
    q = len(ma_params)

    def func(params):
        return arma_loglikeobs(endog, params[:p], params[p:p + q],
                               params[p + q:])

    params0 = np.r_[ar_params, ma_params, sigma2]
    epsilon = _get_epsilon(params0, 2., None, len(params0))
    return approx_fprime_cs(params0, func, epsilon)
Developer: ChadFulton, Project: statsmodels, Lines: 47, Source: arma_innovations.py
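A hedged usage sketch, assuming arma_scoreobs is importable from statsmodels.tsa.innovations.arma_innovations (the source file named above); the simulated series is our own:

    import numpy as np
    from statsmodels.tsa.innovations.arma_innovations import arma_scoreobs

    np.random.seed(0)
    y = np.zeros(100)                  # simulate a rough AR(1)
    for t in range(1, 100):
        y[t] = 0.5 * y[t - 1] + np.random.randn()

    scoreobs = arma_scoreobs(y, ar_params=[0.5], sigma2=1.0)
    print(scoreobs.shape)              # (100, 2): rows are observations,
                                       # columns are (ar, sigma2)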


Example 12: transform_jacobian

    def transform_jacobian(self, unconstrained):
        """
        Jacobian matrix for the parameter transformation function

        Parameters
        ----------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer.

        Returns
        -------
        jacobian : array
            Jacobian matrix of the transformation, evaluated at `unconstrained`.

        Notes
        -----
        This is a numerical approximation.

        See Also
        --------
        transform_params
        """
        return approx_fprime_cs(unconstrained, self.transform_params)
Developer: andreas-koukorinis, Project: statsmodels, Lines: 23, Source: mlemodel.py
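A sketch with a concrete transformation of our own choosing (identity on the mean, exp on the variance parameter to keep it positive):

    import numpy as np
    from statsmodels.tools.numdiff import approx_fprime_cs

    def transform_params(unconstrained):
        return np.array([unconstrained[0], np.exp(unconstrained[1])])

    u = np.array([0.3, -1.0])
    print(approx_fprime_cs(u, transform_params))
    # [[1.      0.     ]
    #  [0.      0.36788]]  -- diagonal Jacobian, d exp(u1)/du1 = exp(-1)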


Example 13: score_obs

    def score_obs(self, params, **kwargs):
        """
        Compute the score per observation, evaluated at params.

        Parameters
        ----------
        params : array_like
            Array of parameters at which to evaluate the score.
        **kwargs
            Additional keyword arguments to the `loglikeobs` method.

        Returns
        -------
        score : array (nobs, k_vars)
            Score per observation, evaluated at `params`.

        Notes
        -----
        This is a numerical approximation, calculated using first-order complex
        step differentiation on the `loglikeobs` method.
        """
        self.update(params)
        return approx_fprime_cs(params, self.loglikeobs, kwargs=kwargs)
Developer: andreas-koukorinis, Project: statsmodels, Lines: 23, Source: mlemodel.py


Example 14: print
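(This fragment comes from the module's __main__ demo section; fun, fun1, fun2 and maxabs are defined earlier in test_numdiff.py and are not shown here.)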

    epsilon = 1e-6
    nobs = 200
    x = np.arange(nobs*3).reshape(nobs,-1)
    x = np.random.randn(nobs,3)

    xk = np.array([1,2,3])
    xk = np.array([1.,1.,1.])
    #xk = np.zeros(3)
    beta = xk
    y = np.dot(x, beta) + 0.1*np.random.randn(nobs)
    xkols = np.dot(np.linalg.pinv(x),y)

    print(approx_fprime((1,2,3), fun, epsilon, (x,)))
    gradtrue = x.sum(0)
    print(x.sum(0))
    gradcs = approx_fprime_cs((1,2,3), fun, (x,), h=1.0e-20)
    print(gradcs, maxabs(gradcs, gradtrue))
    print(approx_hess_cs((1,2,3), fun, (x,), h=1.0e-20))  #this is correctly zero

    print(approx_hess_cs((1,2,3), fun2, (y,x), h=1.0e-20)-2*np.dot(x.T, x))
    print(numdiff.approx_hess(xk,fun2,1e-3, (y,x))[0] - 2*np.dot(x.T, x))

    gt = (-x*2*(y-np.dot(x, [1,2,3]))[:,None])
    g = approx_fprime_cs((1,2,3), fun1, (y,x), h=1.0e-20)#.T   #this shouldn't be transposed
    gd = numdiff.approx_fprime((1,2,3),fun1,epsilon,(y,x))
    print(maxabs(g, gt))
    print(maxabs(gd, gt))


    import statsmodels.api as sm
Developer: ChadFulton, Project: statsmodels, Lines: 30, Source: test_numdiff.py


Example 15: test_partials_logistic

def test_partials_logistic():
    # Here we compare to analytic derivatives and to finite-difference
    # approximations
    logistic = markov_switching._logistic
    partials_logistic = markov_switching._partials_logistic

    # For a number, logistic(x) = np.exp(x) / (1 + np.exp(x))
    # Then d/dx = logistic(x) - logistic(x)**2
    cases = [0, 10., -4]
    for x in cases:
        assert_allclose(partials_logistic(x), logistic(x) - logistic(x)**2)
        assert_allclose(partials_logistic(x), approx_fprime_cs([x], logistic))

    # For a vector, logistic(x) returns
    # np.exp(x[i]) / (1 + np.sum(np.exp(x[:]))) for each i
    # Then d logistic(x[i]) / dx[i] = (logistic(x) - logistic(x)**2)[i]
    # And d logistic(x[i]) / dx[j] = -(logistic(x[i]) * logistic(x[j]))
    cases = [[1.], [0, 1.], [-2, 3., 1.2, -30.]]
    for x in cases:
        evaluated = np.atleast_1d(logistic(x))
        partials = np.diag(evaluated - evaluated**2)
        for i in range(len(x)):
            for j in range(i):
                partials[i, j] = partials[j, i] = -evaluated[i] * evaluated[j]
        assert_allclose(partials_logistic(x), partials)
        assert_allclose(partials_logistic(x), approx_fprime_cs(x, logistic))

    # For a 2-dim, logistic(x) returns
    # np.exp(x[i, t]) / (1 + np.sum(np.exp(x[:, t]))) for each i, each t
    # but squeezed
    case = [[1.]]
    evaluated = logistic(case)
    partial = [evaluated - evaluated**2]
    assert_allclose(partials_logistic(case), partial)
    assert_allclose(partials_logistic(case), approx_fprime_cs(case, logistic))

    # Here, np.array(case) is 2x1, so it is interpreted as i=0, 1 and t=0
    case = [[0], [1.]]
    evaluated = logistic(case)[:, 0]
    partials = np.diag(evaluated - evaluated**2)
    partials[0, 1] = partials[1, 0] = -np.multiply(*evaluated)
    assert_allclose(partials_logistic(case)[:, :, 0], partials)
    assert_allclose(partials_logistic(case),
                    approx_fprime_cs(np.squeeze(case), logistic)[..., None])

    # Here, np.array(case) is 1x2, so it is interpreted as i=0 and t=0, 1
    case = [[0, 1.]]
    evaluated = logistic(case)
    partials = (evaluated - evaluated**2)[None, ...]
    assert_allclose(partials_logistic(case), partials)
    assert_allclose(partials_logistic(case),
                    approx_fprime_cs(case, logistic).T)

    # For a 3-dim, logistic(x) returns
    # np.exp(x[i, j, t]) / (1 + np.sum(np.exp(x[:, j, t])))
    # for each i, each j, each t
    case = np.arange(2*3*4).reshape(2, 3, 4)
    evaluated = logistic(case)
    partials = partials_logistic(case)
    for t in range(4):
        for j in range(3):
            desired = np.diag(evaluated[:, j, t] - evaluated[:, j, t]**2)
            desired[0, 1] = desired[1, 0] = -np.multiply(*evaluated[:, j, t])
            assert_allclose(partials[..., j, t], desired)
Developer: dieterv77, Project: statsmodels, Lines: 64, Source: test_markov_switching.py


Example 16: margeff_cov_params

def margeff_cov_params(model, params, exog, cov_params, at, derivative,
                       dummy_ind, count_ind, method, J):
    """
    Computes the variance-covariance of marginal effects by the delta method.

    Parameters
    ----------
    model : model instance
        The model that returned the fitted results. Its pdf method is used
        for computing the Jacobian of discrete variables in dummy_ind and
        count_ind
    params : array-like
        estimated model parameters
    exog : array-like
        exogenous variables at which to calculate the derivative
    cov_params : array-like
        The variance-covariance of the parameters
    at : str
       Options are:

        - 'overall', The average of the marginal effects at each
          observation.
        - 'mean', The marginal effects at the mean of each regressor.
        - 'median', The marginal effects at the median of each regressor.
        - 'zero', The marginal effects at zero for each regressor.
        - 'all', The marginal effects at each observation.

        Only 'overall' has any effect here.

    derivative : function or array-like
        If a function, it returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog. Expected to be
        called derivative(params, exog). This will be numerically
        differentiated. Otherwise, it can be the Jacobian of the marginal
        effects with respect to the parameters.
    dummy_ind : array-like
        Indices of the columns of exog that contain dummy variables
    count_ind : array-like
        Indices of the columns of exog that contain count variables

    Notes
    -----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.

    The outer Jacobians are computed via numerical differentiation if
    derivative is a function.
    """
    if callable(derivative):
        from statsmodels.tools.numdiff import approx_fprime_cs
        params = params.ravel('F') # for Multinomial
        try:
            jacobian_mat = approx_fprime_cs(params, derivative,
                                            args=(exog,method))
        except TypeError:  # norm.cdf doesn't take complex values
            from statsmodels.tools.numdiff import approx_fprime1
            jacobian_mat = approx_fprime1(params, derivative,
                                            args=(exog,method))
        if at == 'overall':
            jacobian_mat = np.mean(jacobian_mat, axis=1)
        else:
            jacobian_mat = jacobian_mat.squeeze() # exog was 2d row vector
        if dummy_ind is not None:
            jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
                                params, exog, dummy_ind, method, J)
        if count_ind is not None:
            jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
                                params, exog, count_ind, method, J)
    else:
        jacobian_mat = derivative

    # Delta method (see Notes): Asy. Var[MargEff] = J V J'
    return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
Developer: QuocTran, Project: statsmodels, Lines: 71, Source: discrete_margins.py
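The delta-method formula in the Notes reduces to a single sandwich product; a toy numpy sketch with made-up numbers, not the statsmodels internals:

    import numpy as np

    jacobian_mat = np.array([[0.2, -0.1],        # d margeff / d params
                             [0.05, 0.3]])
    cov_params = np.array([[0.04, 0.01],         # V, parameter covariance
                           [0.01, 0.09]])
    print(jacobian_mat @ cov_params @ jacobian_mat.T)  # Asy. Var[MargEff]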



Note: the statsmodels.tools.numdiff.approx_fprime_cs function examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

