Python api.simple_problem Function Code Examples


This article collects typical usage examples of the Python function regreg.api.simple_problem. If you have been wondering what exactly simple_problem does, how to call it, or what real uses of it look like, the curated code examples below should help.



Twenty code examples of the simple_problem function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system surface better Python code samples.
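Before the examples, here is a minimal sketch of the pattern they all share: pair a smooth loss with a nonsmooth atom via simple_problem, then call solve(). The data and the lagrange value are illustrative, not taken from any particular example below.

import numpy as np
import regreg.api as rr

# smooth squared-error loss plus a nonsmooth l1 penalty
n, p = 100, 20
X = np.random.standard_normal((n, p))
Y = np.random.standard_normal(n)

loss = rr.squared_error(X, Y)          # smooth part
penalty = rr.l1norm(p, lagrange=1.)    # nonsmooth part
problem = rr.simple_problem(loss, penalty)
coefs = problem.solve(tol=1.e-10)      # proximal-gradient solve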

Example 1: test_lasso_dual

import numpy as np
import regreg.api as R

def test_lasso_dual():

    """
    Check that the solution of the lasso signal approximator dual composite is soft-thresholding
    """

    l1 = .1
    sparsity = R.l1norm(10, lagrange=l1)
    x = np.arange(10) - 5
    loss = R.quadratic.shift(-x, coef=0.5)

    pen = R.simple_problem(loss, sparsity)
    solver = R.FISTA(pen)
    pen.lipschitz = 1
    solver.fit(backtrack=False)
    soln = solver.composite.coefs
    st = np.maximum(np.fabs(x)-l1,0) * np.sign(x) 

    np.testing.assert_almost_equal(soln,st, decimal=3)

    pen = R.simple_problem(loss, sparsity)
    solver = R.FISTA(pen)
    solver.fit(monotonicity_restart=False)
    soln = solver.composite.coefs
    st = np.maximum(np.fabs(x)-l1,0) * np.sign(x) 

    np.testing.assert_almost_equal(soln,st, decimal=3)


    pen = R.container(loss, sparsity)
    solver = R.FISTA(pen)
    solver.fit()
    soln = solver.composite.coefs

    np.testing.assert_almost_equal(soln,st, decimal=3)
Developer: amitibo | Project: regreg | Lines: 35 | Source: more_tests.py
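The closed form checked above is the soft-thresholding map, i.e. the proximal operator of the l1 norm at lagrange level λ = l1:

$$
\mathrm{prox}_{\lambda \|\cdot\|_1}(x)_i = \mathrm{sign}(x_i)\,\max(|x_i| - \lambda,\ 0),
$$

which is exactly what the line st = np.maximum(np.fabs(x)-l1, 0) * np.sign(x) computes.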


Example 2: test_simple_problem

    def test_simple_problem(self):
        tests = []
        atom, q, prox_center, L = self.atom, self.q, self.prox_center, self.L
        loss = self.loss

        problem = rr.simple_problem(loss, atom)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, FISTA=self.FISTA, coef_stop=self.coef_stop, min_its=100)

        tests.append((atom.proximal(q), solver.composite.coefs, 'solving prox with simple_problem with monotonicity\n %s' % str(self)))

        # write the loss in terms of a quadratic for the smooth loss and a smooth function...

        q = rr.identity_quadratic(L, prox_center, 0, 0)
        lossq = rr.quadratic.shift(prox_center.copy(), coef=0.6*L)
        lossq.quadratic = rr.identity_quadratic(0.4*L, prox_center.copy(), 0, 0)
        problem = rr.simple_problem(lossq, atom)

        tests.append((atom.proximal(q), 
              problem.solve(coef_stop=self.coef_stop, 
                            FISTA=self.FISTA, 
                            tol=1.0e-12), 
               'solving prox with simple_problem ' +
               'with monotonicity  but loss has identity_quadratic %s\n ' % str(self)))

        problem = rr.simple_problem(loss, atom)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, monotonicity_restart=False,
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)

        tests.append((atom.proximal(q), solver.composite.coefs, 'solving prox with simple_problem no monotonicity_restart\n %s' % str(self)))

        d = atom.conjugate
        problem = rr.simple_problem(loss, d)
        solver = rr.FISTA(problem)
        solver.fit(tol=1.0e-12, monotonicity_restart=False, 
                   coef_stop=self.coef_stop, FISTA=self.FISTA, min_its=100)
        tests.append((d.proximal(q), problem.solve(tol=1.e-12,
                                                FISTA=self.FISTA,
                                                coef_stop=self.coef_stop,
                                                monotonicity_restart=False), 
               'solving dual prox with simple_problem no monotonocity\n %s ' % str(self)))

        if not self.interactive:
            for test in tests:
                yield (all_close,) + test + (self,)
        else:
            for test in tests:
                yield all_close(*((test + (self,))))
Developer: matthew-brett | Project: regreg | Lines: 49 | Source: test_seminorms.py
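The 0.6/0.4 split above works because a quadratic of total coefficient L can be divided between the smooth term and its attached identity_quadratic without changing the objective. With c the common center (prox_center, up to regreg's sign convention for quadratic.shift),

$$
\frac{0.6 L}{2}\|x - c\|_2^2 + \frac{0.4 L}{2}\|x - c\|_2^2 = \frac{L}{2}\|x - c\|_2^2,
$$

so the solution should again equal atom.proximal(q) for q = identity_quadratic(L, prox_center, 0, 0).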


Example 3: test_using_SLOPE_weights

def test_using_SLOPE_weights():

    n, p = 500, 50

    X = np.random.standard_normal((n, p))
    X -= X.mean(0)[None, :]
    X /= (X.std(0)[None, :] * np.sqrt(n))
    beta = np.zeros(p)
    beta[:5] = 5.

    Y = X.dot(beta) + np.random.standard_normal(n)

    output_R = fit_slope_R(X, Y, W = None, normalize = True, choice_weights = "bhq")
    r_beta = output_R[0]
    r_lambda_seq = output_R[2]

    W = r_lambda_seq
    pen = slope(W, lagrange=1.)

    loss = rr.squared_error(X, Y)
    problem = rr.simple_problem(loss, pen)
    soln = problem.solve(tol=1.e-14, min_its=500)

    # we get a better objective value
    nt.assert_true(problem.objective(soln) < problem.objective(np.asarray(r_beta)))
    nt.assert_true(np.linalg.norm(soln - r_beta) < 1.e-6 * np.linalg.norm(soln))
Developer: jonathan-taylor | Project: regreg | Lines: 27 | Source: test_slope_compareR.py
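For reference, the SLOPE penalty fit here is the sorted-l1 norm: given a non-increasing weight sequence λ_1 ≥ ... ≥ λ_p (here the BHq sequence returned by the R fit),

$$
P_\lambda(\beta) = \sum_{j=1}^{p} \lambda_j\, |\beta|_{(j)},
$$

where |β|_{(1)} ≥ ... ≥ |β|_{(p)} are the entries of β sorted by absolute value.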


Example 4: __init__

    def __init__(self, loss, 
                 linear_randomization,
                 quadratic_coef,
                 randomization, 
                 penalty,
                 solve_args={'tol':1.e-10, 'min_its':100, 'max_its':500}):

        (self.loss,
         self.linear_randomization,
         self.randomization,
         self.quadratic_coef) = (loss,
                                 linear_randomization,
                                 randomization,
                                 quadratic_coef)

        # initialize optimization problem

        self.penalty = penalty
        self.problem = rr.simple_problem(loss, penalty)

        random_term = rr.identity_quadratic(
                                quadratic_coef, 0, 
                                self.linear_randomization, 0)

        self.initial_soln = self.problem.solve(random_term,
                                               **solve_args)
        self.initial_grad = self.loss.smooth_objective(self.initial_soln, 
                                                       mode='grad')
        self.opt_vars = self.penalty.setup_sampling( \
            self.initial_grad,
            self.initial_soln,
            self.linear_randomization,
            self.quadratic_coef)
Developer: selective-inference | Project: merged | Lines: 33 | Source: sampler.py


Example 5: test_changepoint_scaled

def test_changepoint_scaled():

    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(np.sqrt(M.sizes) * X.adjoint_map(Y) / (1 + np.sqrt(np.log(M.sizes)))).max()

    penalty = rr.weighted_l1norm((1 + np.sqrt(np.log(M.sizes))) / np.sqrt(M.sizes), lagrange=0.5*lammax)
    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    if INTERACTIVE:
        plt.scatter(np.arange(p), Y)
        plt.plot(np.arange(p), Yhat)
        plt.show()
Developer: jonathan-taylor | Project: regreg | Lines: 25 | Source: test_multiscale.py


Example 6: test_nesta_lasso

def test_nesta_lasso():

    n, p = 1000, 20
    X = np.random.standard_normal((n, p))
    beta = np.zeros(p)
    beta[:4] = 30
    Y = np.random.standard_normal(n) + np.dot(X, beta)

    loss = rr.squared_error(X,Y)
    penalty = rr.l1norm(p, lagrange=2.)

    # using nesta
    z = rr.zero(p)
    primal, dual = rr.nesta(loss, z, penalty, tol=1.e-10,
                            epsilon=2.**(-np.arange(30)),
                            initial_dual=np.zeros(p))

    # using simple problem

    problem = rr.simple_problem(loss, penalty)
    problem.solve()
    nt.assert_true(np.linalg.norm(primal - problem.coefs) / np.linalg.norm(problem.coefs) < 1.e-3)

    # test None as smooth_atom

    rr.nesta(None, z, penalty, tol=1.e-10,
             epsilon=2.**(-np.arange(30)),
             initial_dual=np.zeros(p))

    # using coefficients to stop

    rr.nesta(loss, z, penalty, tol=1.e-10,
             epsilon=2.**(-np.arange(30)),
             initial_dual=np.zeros(p),
             coef_stop=True)
Developer: matthew-brett | Project: regreg | Lines: 35 | Source: test_nesta.py


Example 7: test_simple

def test_simple():
    Z = np.random.standard_normal(100) * 4
    p = rr.l1norm(100, lagrange=0.13)
    L = 0.14

    loss = rr.quadratic.shift(-Z, coef=L)
    problem = rr.simple_problem(loss, p)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10, debug=True)

    simple_coef = solver.composite.coefs
    prox_coef = p.proximal(rr.identity_quadratic(L, Z, 0, 0))

    p2 = copy(p)
    p2.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p2)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-14, debug=True)
    simple_nonsmooth_coef = solver.composite.coefs

    p = rr.l1norm(100, lagrange=0.13)
    p.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p)
    simple_nonsmooth_gengrad = gengrad(problem, L, tol=1.0e-10)

    p = rr.l1norm(100, lagrange=0.13)
    problem = rr.separable_problem.singleton(p, loss)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10)
    separable_coef = solver.composite.coefs

    loss2 = rr.quadratic.shift(-Z, coef=0.6*L)
    loss2.quadratic = rr.identity_quadratic(0.4*L, Z, 0, 0)
    p.coefs *= 0
    problem2 = rr.simple_problem(loss2, p)
    loss2_coefs = problem2.solve(coef_stop=True)
    solver2 = rr.FISTA(problem2)
    solver2.fit(tol=1.0e-10, debug=True, coef_stop=True)

    yield ac, prox_coef, simple_nonsmooth_gengrad, 'prox to nonsmooth gengrad'
    yield ac, prox_coef, separable_coef, 'prox to separable'
    yield ac, prox_coef, simple_nonsmooth_coef, 'prox to simple_nonsmooth'
    yield ac, prox_coef, simple_coef, 'prox to simple'
    yield ac, prox_coef, loss2_coefs, 'simple where loss has quadratic 1'
    yield ac, prox_coef, solver2.composite.coefs, 'simple where loss has quadratic 2'
Developer: gmelikian | Project: regreg | Lines: 46 | Source: test_simple.py


Example 8: test_path_group_lasso

def test_path_group_lasso():
    """
    this test looks at the paths of three different parameterizations
    of the same problem

    """
    n = 100
    X = np.random.standard_normal((n, 10))
    U = np.random.standard_normal((n, 2))
    Y = np.random.standard_normal(100)
    betaX = np.array([3, 4, 5, 0, 0] + [0] * 5)
    betaU = np.array([10, -5])
    Y += (np.dot(X, betaX) + np.dot(U, betaU)) * 5

    Xn = rr.normalize(
        np.hstack([np.ones((100, 1)), X]), inplace=True, center=True, scale=True, intercept_column=0
    ).normalized_array()
    lasso = rr.lasso.squared_error(Xn[:, 1:], Y, penalty_structure=[0] * 7 + [1] * 3, nstep=10)

    sol = lasso.main(inner_tol=1.0e-12, verbose=True)
    beta = np.array(sol["beta"].todense())

    sols = []
    sols_sep = []
    for l in sol["lagrange"]:
        loss = rr.squared_error(Xn, Y, coef=1.0 / n)
        penalty = rr.mixed_lasso([rr.UNPENALIZED] + [0] * 7 + [1] * 3, lagrange=l)  # matrix contains an intercept...
        problem = rr.simple_problem(loss, penalty)
        sols.append(problem.solve(tol=1.0e-12).copy())

        sep = rr.separable(
            (11,),
            [rr.l2norm((7,), np.sqrt(7) * l), rr.l2norm((3,), np.sqrt(3) * l)],
            [np.arange(1, 8), np.arange(8, 11)],
        )
        sep_problem = rr.simple_problem(loss, sep)
        sols_sep.append(sep_problem.solve(tol=1.0e-12).copy())

    sols = np.array(sols).T
    sols_sep = np.array(sols_sep).T

    nt.assert_true(np.linalg.norm(beta - sols) / (1 + np.linalg.norm(beta)) <= 1.0e-4)
    nt.assert_true(np.linalg.norm(beta - sols_sep) / (1 + np.linalg.norm(beta)) <= 1.0e-4)
Developer: regreg | Project: regreg | Lines: 43 | Source: test_path.py
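The separable reformulation makes the group weighting explicit: each group g is penalized proportionally to the square root of its size, so the nonsmooth term is

$$
\lambda \sum_{g} \sqrt{|g|}\; \|\beta_g\|_2,
$$

which is why the two l2norm atoms above carry the factors √7 and √3 for the groups of sizes 7 and 3.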


Example 9: solve_sqrt_lasso_skinny

def solve_sqrt_lasso_skinny(X, Y, weights=None, initial=None, quadratic=None, solve_args={}):
    """

    Solve the square-root LASSO optimization problem:

    $$
    \text{minimize}_{\beta} \|y-X\beta\|_2 + \|D\beta\|_1,
    $$
    where $D$ is the diagonal matrix with weights on its diagonal.

    Parameters
    ----------

    y : np.float((n,))
        The target, in the model $y = X\beta$

    X : np.float((n, p))
        The data, in the model $y = X\beta$

    weights : np.float
        Coefficients of the l1 penalty in the
        optimization problem; note that different
        coordinates can have different weights.

    initial : np.float(p)
        Initial point for optimization.

    solve_args : dict
        Arguments passed to regreg solver.

    quadratic : `regreg.identity_quadratic`
        A quadratic term added to objective function.

    """
    n, p = X.shape
    if weights is None:
        lam = choose_lambda(X)
        weights = lam * np.ones((p,))
    weight_dict = dict(zip(np.arange(p),
                           2 * weights))
    penalty = rr.mixed_lasso(list(range(p)) + [rr.NONNEGATIVE], lagrange=1.,
                             weights=weight_dict)

    loss = sqlasso_objective_skinny(X, Y)
    problem = rr.simple_problem(loss, penalty)
    problem.coefs[-1] = np.linalg.norm(Y)
    if initial is not None:
        problem.coefs[:-1] = initial
    soln = problem.solve(quadratic, **solve_args)
    _loss = sqlasso_objective(X, Y)
    return soln[:-1], _loss
Developer: selective-inference | Project: merged | Lines: 51 | Source: sqrt_lasso.py
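A usage sketch for the function above; the data are synthetic, and solve_sqrt_lasso_skinny together with its helpers (choose_lambda, the sqlasso objectives) is assumed importable from the module this snippet was taken from:

import numpy as np

n, p = 200, 30
X = np.random.standard_normal((n, p))
Y = np.random.standard_normal(n)

# weights default to choose_lambda(X); the trailing nonnegative
# auxiliary variable of the skinny formulation is stripped off,
# so soln has shape (p,)
soln, loss = solve_sqrt_lasso_skinny(X, Y)
active = np.nonzero(soln != 0)[0]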


Example 10: test_class

def test_class():
    """
    runs several class methods on generic instance
    """

    n, p = 100, 20
    X = np.random.standard_normal((n, p))
    Y = np.random.standard_normal(n)
    loss = rr.squared_error(X, Y)
    pen = rr.l1norm(p, lagrange=1.0)
    problem = rr.simple_problem(loss, pen)

    problem.latexify()

    for debug, coef_stop, max_its in product([True, False], [True, False], [5, 100]):
        rr.gengrad(problem, rr.power_L(X) ** 2, max_its=max_its, debug=debug, coef_stop=coef_stop)
Developer: jonathan-taylor | Project: regreg | Lines: 16 | Source: test_simple.py


Example 11: test_admm

def test_admm(n=100, p=10):

    X = np.random.standard_normal((n, p))
    Y = np.random.standard_normal(n)
    loss = rr.squared_error(X, Y)
    D = np.identity(p)
    pen = rr.l1norm(p, lagrange=1.5)

    ADMM = admm_problem(loss, pen, ra.astransform(D), 0.5)
    ADMM.solve(niter=1000)

    coef1 = ADMM.atom_coefs
    problem2 = rr.simple_problem(loss, pen)
    coef2 = problem2.solve(tol=1.e-12, min_its=500)

    np.testing.assert_allclose(coef1, coef2, rtol=1.e-3, atol=1.e-4)
Developer: jonathan-taylor | Project: regreg | Lines: 16 | Source: test_admm.py


Example 12: solve_sqrt_lasso_fat

def solve_sqrt_lasso_fat(X, Y, weights=None, initial=None, quadratic=None, solve_args={}):
    """

    Solve the square-root LASSO optimization problem:

    $$
    \text{minimize}_{\beta} \|y-X\beta\|_2 + \|D\beta\|_1,
    $$
    where $D$ is the diagonal matrix with weights on its diagonal.

    Parameters
    ----------

    y : np.float((n,))
        The target, in the model $y = X\beta$

    X : np.float((n, p))
        The data, in the model $y = X\beta$

    weights : np.float
        Coefficients of the l1 penalty in the
        optimization problem; note that different
        coordinates can have different weights.

    initial : np.float(p)
        Initial point for optimization.

    solve_args : dict
        Arguments passed to regreg solver.

    quadratic : `regreg.identity_quadratic`
        A quadratic term added to objective function.

    """
    X = rr.astransform(X)
    n, p = X.output_shape[0], X.input_shape[0]
    if weights is None:
        lam = choose_lambda(X)
        weights = lam * np.ones((p,))

    loss = sqlasso_objective(X, Y)
    penalty = rr.weighted_l1norm(weights, lagrange=1.)
    problem = rr.simple_problem(loss, penalty)
    if initial is not None:
        problem.coefs[:] = initial
    soln = problem.solve(quadratic, **solve_args)
    return soln, loss
Developer: selective-inference | Project: merged | Lines: 47 | Source: sqrt_lasso.py


Example 13: test_lasso_dual_with_monotonicity

def test_lasso_dual_with_monotonicity():

    """
    restarting is funny for this simple problem
    """

    l1 = .1
    sparsity = R.l1norm(10, lagrange=l1)
    x = np.arange(10) - 5
    loss = R.quadratic.shift(-x, coef=0.5)


    pen = R.simple_problem(loss, sparsity)
    solver = R.FISTA(pen)
    solver.fit()
    soln = solver.composite.coefs
    st = np.maximum(np.fabs(x)-l1,0) * np.sign(x) 

    np.testing.assert_almost_equal(soln,st, decimal=3)
Developer: amitibo | Project: regreg | Lines: 19 | Source: more_tests.py


Example 14: test_equivalence_sqrtlasso

def test_equivalence_sqrtlasso(n=200, p=400, s=10, sigma=3.):

    """
    Check equivalent LASSO and sqrtLASSO solutions.
    """

    Y = np.random.standard_normal(n) * sigma
    beta = np.zeros(p)
    beta[:s] = 8 * (2 * np.random.binomial(1, 0.5, size=(s,)) - 1)
    X = np.random.standard_normal((n,p)) + 0.3 * np.random.standard_normal(n)[:,None]
    X /= (X.std(0)[None,:] * np.sqrt(n))
    Y += np.dot(X, beta) * sigma
    lam_theor = choose_lambda(X, quantile=0.9)

    weights = lam_theor*np.ones(p)
    weights[:3] = 0.
    soln1, loss1 = solve_sqrt_lasso(X, Y, weights=weights, quadratic=None, solve_args={'min_its':500, 'tol':1.e-10})

    G1 = loss1.smooth_objective(soln1, 'grad') 

    # find active set, and estimate of sigma

    active = (soln1 != 0)
    nactive = active.sum()
    subgrad = np.sign(soln1[active]) * weights[active]
    X_E = X[:,active]
    X_Ei = np.linalg.pinv(X_E)
    sigma_E= np.linalg.norm(Y - X_E.dot(X_Ei.dot(Y))) / np.sqrt(n - nactive)

    multiplier = sigma_E * np.sqrt((n - nactive) / (1 - np.linalg.norm(X_Ei.T.dot(subgrad))**2))

    # XXX how should quadratic be changed?
    # multiply everything by sigma_E?

    loss2 = rr.glm.gaussian(X, Y)
    penalty = rr.weighted_l1norm(weights, lagrange=multiplier)
    problem = rr.simple_problem(loss2, penalty)

    soln2 = problem.solve(tol=1.e-12, min_its=200)
    G2 = loss2.smooth_objective(soln2, 'grad') / multiplier

    np.testing.assert_allclose(G1[3:], G2[3:])
    np.testing.assert_allclose(soln1, soln2)
Developer: selective-inference | Project: merged | Lines: 43 | Source: test_lasso.py
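Reading the multiplier off the code: with active set E, pseudo-inverse X_E^+ = np.linalg.pinv(X_E), plugin noise estimate σ̂_E, and active subgradient u = sign(β̂_E) * w_E, the rescaling that maps the sqrt-LASSO weights to equivalent LASSO weights is

$$
\text{multiplier} = \hat{\sigma}_E \sqrt{\frac{n - |E|}{1 - \|(X_E^{+})^{\top} u\|_2^2}},
$$

exactly the expression sigma_E * np.sqrt((n - nactive) / (1 - np.linalg.norm(X_Ei.T.dot(subgrad))**2)) above.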


Example 15: test_choose_parameter

def test_choose_parameter(delta=2, p=60):

    signal = np.zeros(p)
    signal[(p//2):] += delta
    Z = np.random.standard_normal(p) + signal
    p = Z.shape[0]
    M = multiscale(p)
    M.scaling = np.sqrt(M.sizes)
    lam = choose_tuning_parameter(M)
    weights = (lam + np.sqrt(2 * np.log(p / M.sizes))) / np.sqrt(p)

    Z0 = Z - Z.mean()
    loss = rr.squared_error(ra.adjoint(M), Z0)
    penalty = rr.weighted_l1norm(weights, lagrange=1.)
    problem = rr.simple_problem(loss, penalty)
    coef = problem.solve()
    active = coef != 0

    if active.sum():
        X = M.form_matrix(M.slices[active])[0]
Developer: jonathan-taylor | Project: regreg | Lines: 20 | Source: test_multiscale.py


Example 16: fit

    def fit(self, **solve_args):
        """
        Fit the lasso using `regreg`.
        This sets the attributes `soln`, `onestep` and
        forms the constraints necessary for post-selection inference
        by calling `form_constraints()`.

        Parameters
        ----------

        solve_args : keyword args
             Passed to `regreg.problems.simple_problem.solve`.

        Returns
        -------

        soln : np.float
             Solution to lasso.
             
        """

        penalty = weighted_l1norm(self.feature_weights, lagrange=1.)
        problem = simple_problem(self.loglike, penalty)
        _soln = problem.solve(**solve_args)
        self._soln = _soln
        if not np.all(_soln == 0):
            self.active = np.nonzero(_soln != 0)[0]
            self.active_signs = np.sign(_soln[self.active])
            self._active_soln = _soln[self.active]
            H = self.loglike.hessian(self._soln)[self.active][:,self.active]
            Hinv = np.linalg.inv(H)
            G = self.loglike.gradient(self._soln)[self.active]
            delta = Hinv.dot(G)
            self._onestep = self._active_soln - delta
            self.active_penalized = self.feature_weights[self.active] != 0
            self._constraints = constraints(-np.diag(self.active_signs)[self.active_penalized],
                                             (self.active_signs * delta)[self.active_penalized],
                                             covariance=Hinv)
        else:
            self.active = []
        return self._soln
Developer: Xiaoying-Tian | Project: selective-inference | Lines: 41 | Source: lasso.py
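In formula form, the one-step estimator assembled above is a single Newton correction of the active coefficients: with E the active set, H_EE the hessian of the log-likelihood restricted to E, and G_E the corresponding gradient block,

$$
\hat{\beta}^{\,\text{onestep}}_E = \hat{\beta}_E - H_{EE}^{-1} G_E,
$$

which matches delta = Hinv.dot(G) and self._onestep = self._active_soln - delta.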


Example 17: test_changepoint

def test_changepoint():

    p = 150
    M = multiscale(p)
    M.minsize = 10
    X = ra.adjoint(M)

    Y = np.random.standard_normal(p)
    Y[20:50] += 8
    Y += 2
    meanY = Y.mean()

    lammax = np.fabs(X.adjoint_map(Y)).max()

    penalty = rr.l1norm(X.input_shape, lagrange=0.5*lammax)
    loss = rr.squared_error(X, Y - meanY)
    problem = rr.simple_problem(loss, penalty)
    soln = problem.solve()
    Yhat = X.linear_map(soln)
    Yhat += meanY

    plt.scatter(np.arange(p), Y)
    plt.plot(np.arange(p), Yhat)
Developer: jasondlee88 | Project: regreg | Lines: 23 | Source: test_multiscale.py


Example 18: test_simple

def test_simple():
    Z = np.random.standard_normal((10,10)) * 4
    p = rr.l1_l2((10,10), lagrange=0.13)
    dual = p.conjugate
    L = 0.23

    loss = rr.quadratic.shift(-Z, coef=L)
    problem = rr.simple_problem(loss, p)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10, debug=True)

    simple_coef = solver.composite.coefs
    q = rr.identity_quadratic(L, Z, 0, 0)
    prox_coef = p.proximal(q)

    p2 = copy(p)
    p2.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p2)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-14, debug=True)
    simple_nonsmooth_coef = solver.composite.coefs

    p = rr.l1_l2((10,10), lagrange=0.13)
    p.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p)
    simple_nonsmooth_gengrad = gengrad(problem, L, tol=1.0e-10)

    p = rr.l1_l2((10,10), lagrange=0.13)
    problem = rr.separable_problem.singleton(p, loss)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10)
    separable_coef = solver.composite.coefs

    ac(prox_coef, Z-simple_coef, 'prox to simple')
    ac(prox_coef, simple_nonsmooth_gengrad, 'prox to nonsmooth gengrad')
    ac(prox_coef, separable_coef, 'prox to separable')
    ac(prox_coef, simple_nonsmooth_coef, 'prox to simple_nonsmooth')
Developer: amitibo | Project: regreg | Lines: 37 | Source: test_simple_block.py


Example 19: test_gengrad_blocknorms

def test_gengrad_blocknorms():
    Z = np.random.standard_normal((10, 10)) * 4
    p = rr.l1_l2((10, 10), lagrange=0.13)
    dual = p.conjugate
    L = 0.23

    loss = rr.quadratic_loss.shift(Z, coef=L)
    problem = rr.simple_problem(loss, p)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10, debug=True)
    simple_coef = solver.composite.coefs

    q = rr.identity_quadratic(L, Z, 0, 0)
    prox_coef = p.proximal(q)

    p2 = copy(p)
    p2.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p2)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-14, debug=True)
    simple_nonsmooth_coef = solver.composite.coefs

    p = rr.l1_l2((10, 10), lagrange=0.13)
    p.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p)
    simple_nonsmooth_gengrad = rr.gengrad(problem, L, tol=1.0e-10)

    p = rr.l1_l2((10, 10), lagrange=0.13)
    problem = rr.separable_problem.singleton(p, loss)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10)
    separable_coef = solver.composite.coefs

    yield (all_close, prox_coef, simple_coef, "prox to simple", None)
    yield (all_close, prox_coef, simple_nonsmooth_gengrad, "prox to nonsmooth gengrad", None)
    yield (all_close, prox_coef, separable_coef, "prox to separable", None)
    yield (all_close, prox_coef, simple_nonsmooth_coef, "prox to simple_nonsmooth", None)
Developer: jonathan-taylor | Project: regreg | Lines: 37 | Source: test_simple.py


Example 20: fit

    def fit(self, tol=1.e-12, min_its=50, **solve_args):

        lasso.fit(self, tol=tol, min_its=min_its, **solve_args)

        n1 = self.loglike.get_data()[0].shape[0]
        n = self.loglike_full.get_data()[0].shape[0]

        _feature_weights = self.feature_weights.copy()
        _feature_weights[self.active] = 0.
        _feature_weights[self.inactive] = np.inf
        
        _unpenalized_problem = simple_problem(self.loglike_full, 
                                              weighted_l1norm(_feature_weights, lagrange=1.))
        _unpenalized = _unpenalized_problem.solve(**solve_args)
        _unpenalized_active = _unpenalized[self.active]

        s = len(self.active)
        H = self.loglike_full.hessian(_unpenalized)
        H_AA = H[self.active][:,self.active]

        _cov_block = np.linalg.inv(H_AA)
        _subsample_block = (n * 1. / n1) * _cov_block
        _carve_cov = np.zeros((2*s,2*s))
        _carve_cov[:s][:,:s] = _cov_block
        _carve_cov[s:][:,:s] = _subsample_block
        _carve_cov[:s][:,s:] = _subsample_block
        _carve_cov[s:][:,s:] = _subsample_block

        _carve_linear_part = self._constraints.linear_part.dot(np.identity(2*s)[s:])
        _carve_offset = self._constraints.offset
        self._carve_constraints = constraints(_carve_linear_part,
                                              _carve_offset,
                                              covariance=_carve_cov)
        self._carve_feasible = np.hstack([_unpenalized_active, self.onestep_estimator])
        self._unpenalized_active = _unpenalized_active
        self._carve_invcov = H_AA
Developer: allenzhuaz | Project: Python-software | Lines: 36 | Source: lasso.py



Note: the regreg.api.simple_problem examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from community-contributed open-source projects; copyright remains with the original authors. Consult each project's License before distributing or reusing the code, and do not republish without permission.

