Python optimize.check_grad Function Code Examples


This article collects and summarizes typical usage examples of the scipy.optimize.check_grad function in Python. If you have been wondering exactly what check_grad does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.



The following presents 20 code examples of the check_grad function, ordered by popularity by default.
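Before turning to the project examples, here is a minimal, self-contained sketch of the basic call pattern. check_grad evaluates a user-supplied analytic gradient at a point, compares it against a finite-difference approximation of the cost function, and returns the 2-norm of the difference, so a value near zero (roughly the square root of machine epsilon, i.e. on the order of 1e-8 to 1e-6) indicates a consistent gradient implementation. The quadratic cost below is purely illustrative and is not taken from any of the projects listed.

import numpy as np
from scipy.optimize import check_grad

# Illustrative quadratic cost f(x) = 0.5 * x^T A x - b^T x with analytic gradient A x - b.
A = np.array([[3.0, 1.0],
              [1.0, 2.0]])   # symmetric, so grad(0.5 * x^T A x) = A x
b = np.array([1.0, -1.0])

def cost(x):
    return 0.5 * x.dot(A).dot(x) - b.dot(x)

def grad(x):
    return A.dot(x) - b

x0 = np.random.randn(2)

# check_grad returns the 2-norm of the difference between grad(x0) and a
# finite-difference approximation of cost around x0.
err = check_grad(cost, grad, x0)
print("gradient error:", err)

Any extra positional arguments passed after x0 (as in several examples below, e.g. check_grad(self.cost, self.grad, init, X, y)) are forwarded unchanged to both the cost function and the gradient function.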

Example 1: test_back_prop_with_diff_grad_checks

    def test_back_prop_with_diff_grad_checks(self, iter=200):
        eps = math.sqrt(np.finfo(float).eps)
        init_val = self.packTheta(self.W1, self.b1, self.W2, self.b2)

        # update init_val to the optimizer's result before each gradient check,
        # so the printed iteration count matches the point being checked
        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 0 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)
        init_val = res.x

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 200 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)
        init_val = res.x

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 400 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)
        init_val = res.x

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 600 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
        res = optimize.minimize(fun=self.cost, x0=init_val, args=(self.X,), jac=self.cost_prime, method='L-BFGS-B', options={'maxiter':iter})
        self.W1, self.b1, self.W2, self.b2 = self.unpackTheta(res.x)
        init_val = res.x

        err = optimize.check_grad(self.cost, self.cost_prime, init_val, self.X)
        print ("Error after 800 iterations: %f, Error per Param: %f" % (err, err/init_val.size))
Developer: thushv89 | Project: AutoEncorder_Simple | Lines of code: 29 | Source file: autoencoder2_l_bfgs_sparcity.py


Example 2: fit

 def fit(self,X,y,initParams = None):
     self.params = np.zeros([1,X.shape[1]+1])
     self.labels = np.unique(y)
     X_nopad = X
     X = np.pad(X,((0,0),(1,0)),mode='constant',constant_values=1)
     
     #print self.cost(self.params,X, y)
     
     if initParams is None:
         init = np.random.random(self.params.size)
         #init = np.zeros(self.params.size)
     else:
         init = initParams
     
     if DEBUG:
         _epsilon = np.sqrt(np.finfo(float).eps)
         #print approx_fprime(self.params[0], self.cost, _epsilon, X,y)
         print check_grad(self.cost, self.grad, init,X,y)
     
     if self.optimizeOrder == 0:
         self.params = self.optimize(self.cost,init,args=(X,y),disp=False)
     if self.optimizeOrder == 1:
         self.params = self.optimize(self.cost,init,self.grad,args=(X,y),disp=False)
         
     return self
Developer: lukastencer | Project: RNNpy | Lines of code: 25 | Source file: logreg.py


Example 3: test

def test():
    data = np.loadtxt("data.txt")
    X = data[:,0:-1] # everything except the last column
    y = data[:,-1]   # just the last column

    args = (X,y)

    #theta = np.array([ 1.7657065779589087, -1.3841332550882446, -10.162222605402242])
    #theta = np.array([ 1.7999382115210827, -14.001391904643032 , -5.577578503745549])
    theta = np.zeros(3)
    theta[0] = np.random.normal(0,5)
    theta[1] = np.random.normal(0,5)
    theta[2] = np.random.normal(0,5)
    print theta
    print np.exp(theta)
    print logPosterior(theta,args)
    print gradLogPosterior(theta,args)
    print so.check_grad(logPosterior, gradLogPosterior, theta, args)

    newTheta = so.fmin_cg(logPosterior, theta, fprime=gradLogPosterior, args=[args], gtol=1e-4,maxiter=100,disp=1)
    print newTheta, logPosterior(newTheta,args)

    K = kernel2(X,X,newTheta,wantderiv=False)
    L = np.linalg.cholesky(K)
    beta = np.linalg.solve(L.transpose(), np.linalg.solve(L,y))
    test = X
    #pred = [predict(i,input,K,target,newTheta,L,beta) for i in input]
    #pred = np.squeeze([predict(i,input,K,target,newTheta,L,beta) for i in input])
    demoplot(theta,args)
    demoplot(newTheta,args)
Developer: bigaidream | Project: subsets_ml_cookbook | Lines of code: 30 | Source file: gp.py


Example 4: test_dldtheta

 def test_dldtheta(self):
     self.ECG.primary = ['q']
     def f(X):
         self.ECG.array2primary(X)
         lv = self.ECG.loglik(self.data);
         slv = sum(lv)
         return slv
     def df(X):
         self.ECG.array2primary(X)
         gv = self.ECG.dldtheta(self.data)
         sgv = sum(gv, axis=1);
         return sgv
     theta0 = self.ECG.primary2array()
     theta0 = abs(randn(len(theta0)))+1
     err = check_grad(f,df,theta0)
     print "error in gradient: ", err
     self.ECG.primary = ['W']
     def f2(X):
         self.ECG.array2primary(X)
         lv = self.ECG.loglik(self.data);
         slv = sum(lv)
         return slv
     def df2(X):
         self.ECG.array2primary(X)
         gv = self.ECG.dldtheta(self.data)
         sgv = sum(gv, axis=1);
         return sgv
     theta0 = self.ECG.primary2array()
     theta0 = abs(randn(len(theta0)))+1
     err = check_grad(f2,df2,theta0)
     print "error in gradient: ", err
     self.assertTrue(err < 1e-02)
Developer: fabiansinz | Project: natter | Lines of code: 32 | Source file: TestEllipticallyContourGamma.py


Example 5: test_gradients

def test_gradients():
    K = 1
    B = 3
    T = 100
    dt = 1.0
    true_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, B=B, dt=dt)
    S,R = true_model.generate(T=T)

    # Test with a standard Hawkes model
    test_model = DiscreteTimeStandardHawkesModel(K=K, B=B, dt=dt)
    test_model.add_data(S)

    # Check gradients with the initial parameters
    def objective(x):
        test_model.weights[0,:] = np.exp(x)
        return test_model.log_likelihood()

    def gradient(x):
        test_model.weights[0,:] = np.exp(x)
        return test_model.compute_gradient(0)

    print("Checking initial gradient: ")
    print(gradient(np.log(test_model.weights[0,:])))
    check_grad(objective, gradient,
               np.log(test_model.weights[0,:]))

    print("Checking gradient at true model parameters: ")
    test_model.initialize_with_gibbs_model(true_model)

    print(gradient(np.log(test_model.weights[0,:])))
    check_grad(objective, gradient,
               np.log(test_model.weights[0,:]))
Developer: slinderman | Project: pyhawkes | Lines of code: 32 | Source file: test_standard_grads.py


Example 6: check_gradient

 def check_gradient(self):
     def cost(ws):
         return   self.cost_function(ws,self._training_data[0:100,:],self._training_labels[0:100])
                                     
     def gradcost(ws):
         return self._back_prop(ws,self._training_data[0:100,:],self._training_labels[0:100])
     
     print check_grad(cost, gradcost,self._betas)
Developer: mfcabrera | Project: deeplearning-praktikum-ss2013 | Lines of code: 8 | Source file: neuralnet.py


Example 7: test_logistic_loss_derivative

def test_logistic_loss_derivative(n_samples=4, n_features=10, decimal=5):
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    n_features = X.shape[1]
    w = rng.randn(n_features + 1)
    np.testing.assert_almost_equal(
        check_grad(lambda w: _logistic(X, y, w), lambda w: _logistic_loss_grad(X, y, w), w), 0.0, decimal=decimal
    )

    np.testing.assert_almost_equal(
        check_grad(lambda w: _logistic(X, y, w), lambda w: _logistic_loss_grad(X, y, w), w), 0.0, decimal=decimal
    )
Developer: LisaLeroi | Project: nilearn | Lines of code: 13 | Source file: test_objective_functions.py


Example 8: fit

    def fit(self,X,y,initParams = None):
        
        X = np.pad(X,((0,0),(1,0)),mode='constant',constant_values=1)
        
        inDim = X.shape[1]
        
#         if DEBUG:
# #             self.layersSize.append(1)
# #             self.layersSize.insert(0, int(inDim))
# #             self.yindi = np.asarray(np.logical_not(y),dtype=np.int32)         
#         else:            
        self.layersSize.append(len(np.unique(y)))
        self.layersSize.insert(0, int(inDim))
        self.setIndi(y)
        
#         self.layersSize[-1]=1
#         self.yindi = np.expand_dims(self.yindi[:,0].T,1)
        
        paramSum = 0
        for i,layer in enumerate(self.layers):
            if not( i == len(self.layers)-1):
                layer.initParams([self.layersSize[i+1],self.layersSize[i]])
                split = self.layersSize[i+1] * self.layersSize[i]
                paramSum += split
                self.paramSplits.append(paramSum)
            else:
                layer.setParams(None)

        if initParams is None:
            init = self.getParams()
        else:
            init = initParams

        
        if DEBUG:
            _epsilon = np.sqrt(np.finfo(float).eps)
            #print approx_fprime(self.params[0], self.cost, _epsilon, X,y)
            print check_grad(self.cost, self.grad, np.zeros(init.shape),X,self.yindi)
            print check_grad(self.cost, self.grad, init,X,self.yindi)
        
        if self.optimizeOrder == 0:
            newParams = self.optimize(self.cost,init,args=(X,self.yindi),disp=False)
        if self.optimizeOrder == 1:
            newParams = self.optimize(self.cost,init,args=(X,self.yindi),disp=False)
        
        #newParams = self.optimize(self.cost, self.getParams(), args = (X,y))
        
        self.setParams(newParams)
Developer: lukastencer | Project: RNNpy | Lines of code: 48 | Source file: mlp_network.py


Example 9: test_pairwise_gradient

def test_pairwise_gradient():
    fcts = PairwiseFcts(PAIRWISE_DATA, 0.2)
    for sigma in np.linspace(1, 20, num=10):
        xs = sigma * RND.randn(8)
        val = approx_fprime(xs, fcts.objective, EPS)
        err = check_grad(fcts.objective, fcts.gradient, xs, epsilon=EPS)
        assert abs(err / np.linalg.norm(val)) < 1e-5
Developer: lucasmaystre | Project: choix | Lines of code: 7 | Source file: test_opt.py


Example 10: test_nonlinear_mean_return_model

    def test_nonlinear_mean_return_model(self):
        model = Nonlinear(delta=0.1, lmb=1.0, hidden=7)

        for i in range(10):
            diff = check_grad(model.cost, model.grad, model.weights(self.trX, i), self.trX, self.trY)

            self.assertTrue(diff < 1.0e-5, diff)
Developer: rreas | Project: drl | Lines of code: 7 | Source file: test_gradients.py


Example 11: test_checkgrad

def test_checkgrad():
    from scipy.optimize import check_grad
    import numpy as np

    for x in range(100):
        x = x * np.ones((1)) / 10
        print "check_grad @ %.2f: %.6f" % (x, check_grad(f, fgrad, x))
Developer: jgera | Project: Segmentation-Code | Lines of code: 7 | Source file: test.py


Example 12: test_01_6_unitary_hadamard_grad

    def test_01_6_unitary_hadamard_grad(self):
        """
        control.pulseoptim: Hadamard gate gradient check
        assert that gradient approx and exact gradient match in tolerance
        """
        # Hadamard
        H_d = sigmaz()
        H_c = [sigmax()]
        U_0 = identity(2)
        U_targ = hadamard_transform(1)

        n_ts = 10
        evo_time = 10

        # Create the optim objects
        optim = cpo.create_pulse_optimizer(H_d, H_c, U_0, U_targ,
                        n_ts, evo_time,
                        fid_err_targ=1e-10,
                        dyn_type='UNIT',
                        init_pulse_type='LIN',
                        gen_stats=True)
        dyn = optim.dynamics

        init_amps = optim.pulse_generator.gen_pulse().reshape([-1, 1])
        dyn.initialize_controls(init_amps)

        # Check the exact gradient
        func = optim.fid_err_func_wrapper
        grad = optim.fid_err_grad_wrapper
        x0 = dyn.ctrl_amps.flatten()
        grad_diff = check_grad(func, grad, x0)
        assert_almost_equal(grad_diff, 0.0, decimal=6,
                            err_msg="Unitary gradient outside tolerance")
Developer: NunoEdgarGub1 | Project: qutip | Lines of code: 33 | Source file: test_control_pulseoptim.py


Example 13: learnGPparamsWithPrior

def learnGPparamsWithPrior(oldParams, infRes, experiment, tauOptimMethod, regularizer_stepsize_tau):
    xdim, T = np.shape(infRes['post_mean'][0])
    binSize = experiment.binSize
    oldTau = oldParams['tau']*1000/binSize
    
    precomp = makePrecomp(infRes)
    
    tempTau = np.zeros(xdim)

    pOptimizeDetails = [[]]*xdim
    for xd in range(xdim): 
        initp = np.log(1/oldTau[xd]**2)

        if False: # gradient check and stuff
            gradcheck = op.check_grad(
                MStepGPtimescaleCostWithPrior,
                MStepGPtimescaleCostWithPrior_grad,
                initp,precomp[0],0.001,binSize, oldParams['tau'][xd], regularizer_stepsize_tau)
            print('tau learning grad check = ' + str(gradcheck))
            pdb.set_trace()
            apprxGrad = op.approx_fprime(
                initp,MStepGPtimescaleCostWithPrior,1e-8,
                precomp[xd],0.001,binSize,oldParams['tau'][xd],regularizer_stepsize_tau)
            calcdGrad = MStepGPtimescaleCostWithPrior_grad(
                initp,precomp[xd],0.001,binSize,oldParams['tau'][xd],regularizer_stepsize_tau)
            plt.plot(apprxGrad,linewidth = 10, color = 'k', alpha = 0.4)
            plt.plot(calcdGrad,linewidth = 2, color = 'k', alpha = 0.4)
            plt.legend(['approximated','calculated'])
            plt.title('Approx. vs. calculated Grad of Tau learning cost')
            plt.tight_layout()
            plt.show()
            def cost(p): 
                cost = MStepGPtimescaleCostWithPrior(
                    p, precomp[xd], 0.001, binSize, oldParams['tau'][xd], regularizer_stepsize_tau)
                return cost
            def cost_grad(p): 
                grad = MStepGPtimescaleCostWithPrior_grad(
                    p, precomp[xd], 0.001, binSize, oldParams['tau'][xd], regularizer_stepsize_tau)
                return grad
            pdb.set_trace()

        if False: # bench for setting hessian as inverse variance
            hessTau = op.approx_fprime([initp], MStepGPtimescaleCost_grad, 1e-14, 
                precomp[xd], 0.001)
            priorVar = -1/hessTau
            regularizer_stepsize_tau = np.sqrt(np.abs(priorVar))
            # pdb.set_trace()

        res = op.minimize(
            fun = MStepGPtimescaleCostWithPrior,
            x0 = initp,
            args = (precomp[xd], 0.001, binSize, oldParams['tau'][xd], regularizer_stepsize_tau),
            jac = MStepGPtimescaleCostWithPrior_grad,
            options = {'disp': False,'gtol':1e-10},
            method = tauOptimMethod)
        pOptimizeDetails[xd] = res
        tempTau[xd] = (1/np.exp(res.x))**(0.5)

    newTau = tempTau*binSize/1000
    return newTau, pOptimizeDetails
Developer: mackelab | Project: poisson-gpfa | Lines of code: 60 | Source file: learning.py


Example 14: gradient_check

def gradient_check(theta, x, y, l2_regularization):
    print 'check_grad:', check_grad(calculate_cost, calculate_gradient, theta, x, y, l2_regularization)
    spatial_alpha_vec, spatial_mean_vec, spatial_sigma_vec, temporal_mean, temporal_sigma = span_params(theta)
    cost1 = calculate_cost(theta, x, y, l2_regularization)
    num_of_params = len(spatial_alpha_vec) + 2*len(spatial_mean_vec) + len(spatial_sigma_vec) + 2
    direction = np.random.randint(2, size=num_of_params)*2-1
    eps = 1e-7
    gradient = eps * direction
    total = 0
    spatial_alpha_vec2 = spatial_alpha_vec + gradient[0:len(spatial_alpha_vec)]
    total += len(spatial_alpha_vec)
    spatial_mean_vec2 = spatial_mean_vec + gradient[total:total+2*len(spatial_mean_vec)].reshape(-1,2)
    total += 2*len(spatial_mean_vec)
    spatial_sigma_vec2 = spatial_sigma_vec + gradient[total:total+len(spatial_sigma_vec)]
    total += len(spatial_sigma_vec)
    temporal_mean2 = np.array(temporal_mean + gradient[-2])
    temporal_sigma2 = np.array(temporal_sigma + gradient[-1])

    theta2 = compress_params(spatial_alpha_vec2, spatial_mean_vec2, spatial_sigma_vec2, temporal_mean2, temporal_sigma2)
    cost2 = calculate_cost(theta2, x, y, l2_regularization)
    delta = (cost2-cost1)
    print 'Gradient check:'
    print 'Empiric:', delta
    print 'Analytic:', gradient.dot(calculate_gradient(theta, x, y, l2_regularization))
    diff = abs(delta - gradient.dot(calculate_gradient(theta, x, y, l2_regularization)))
    print 'Difference:', diff
    if diff < 1e-3:
        print 'Gradient is O.K'
    else:
        print 'Gradient check FAILED'
Developer: drorsimon | Project: Kaggle-West-Nile-Virus | Lines of code: 30 | Source file: mosquitoes_regression.py


Example 15: test_gradient

def test_gradient():
    # Test gradient of Kullback-Leibler divergence.
    random_state = check_random_state(0)

    n_samples = 50
    n_features = 2
    n_components = 2
    alpha = 1.0

    distances = random_state.randn(n_samples, n_features).astype(np.float32)
    distances = np.abs(distances.dot(distances.T))
    np.fill_diagonal(distances, 0.0)
    X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)

    P = _joint_probabilities(distances, desired_perplexity=25.0,
                             verbose=0)

    def fun(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[0]

    def grad(params):
        return _kl_divergence(params, P, alpha, n_samples, n_components)[1]

    assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
                        decimal=5)
Developer: BasilBeirouti | Project: scikit-learn | Lines of code: 25 | Source file: test_t_sne.py


Example 16: self_test1

def self_test1():
    D = 100
    K = 2
    N = 10
    L = 1e-6

    # check parsing    
    W01 = np.random.randn(D,nh)
    b1 = np.random.randn(1,nh)
    W12 = np.random.randn(nh,nh)
    b2 = np.random.randn(1,nh)
    W23 = np.random.randn(nh,K)
    b3 = np.random.randn(1,K)

    w = np.concatenate((W01.flatten(), b1.flatten(), W12.flatten(), b2.flatten(), W23.flatten(), b3.flatten()), axis=0)
    W01_,b1_,W12_,b2_,W23_,b3_ = parseParams(w,D,K)
    print ((W01-W01_)**2).sum()/(W01**2).sum()
    print ((b1-b1_)**2).sum()/(b1**2).sum()
    print ((W12-W12_)**2).sum()/(W12**2).sum()
    print ((b2-b2_)**2).sum()/(b2**2).sum()
    print ((W23-W23_)**2).sum()/(W23**2).sum()
    print ((b3-b3_)**2).sum()/(b3**2).sum()

    w = init(D, K)
    w = 1e-0*np.random.normal(size=w.size)
    X = np.random.normal(size=(N,D))
    y = np.random.randint(K,size=(N,))
    err = check_grad(loss, grad, w, X, y, L, K)
    print err
Developer: jihunhamm | Project: Crowd-ML | Lines of code: 29 | Source file: loss_nndemo1.py


Example 17: test_grad

def test_grad(x1,y1,x2,y2,alpha1,alpha2):

    
    import numpy as np
    # initial guess
    xc0 = 0.5*(x1+x2)
    yc0 = y1 + 0.5*(y1 - y2)
    r1 = np.sqrt((x1 - xc0)**2+(y1 - yc0)**2)
    r2 = np.sqrt((x2 - xc0)**2+(y2 - yc0)**2)
    a0 = 0.5*(r1+r2)
    b0 = 0.1
    theta1 = np.pi
    theta2 = 1.5*np.pi

    x0 = np.ones(4)
    x0[0] = xc0
    x0[1] = yc0
    x0[2] = a0
    x0[3] = b0
    #x0[4] = theta1
    #x0[5] = theta2

    #args={'x1':x1,'y1':y1,'x2':y2,'alpha1':alpha1,'alpha2':alpha2}
    xargs=(x1,y1,x2,y2,alpha1,alpha2)
    err=scio.check_grad(objectf_4x4,objectfprime_4x4,x0,x1,y1,x2,y2,alpha1,alpha2)

    print err
Developer: dlgeorge | Project: digclaw-4.x | Lines of code: 27 | Source file: findlogspiral.py


Example 18: check_gradient

    def check_gradient(self, input, expected_output):
        """
        Check whether cost properly calculates gradients.
        Result should be close to zero.
        Input and expected_output must be lists,
        even if they only contain a single item.
        """
        array, shapes = NeuralNet.unroll(self.weights)

        def fun(x):
            """
            Wrapper around cost which allows it to interact
            with scipy.optimize.check_grad.
            """
            return NeuralNet.cost(NeuralNet.roll(x, shapes),
                                  self.lambda_,
                                  input,
                                  expected_output,
                                  self.is_analog)[0]

        def grad(x):
            """
            Wrapper around cost which serves as the derivative
            function for scipy.optimize.check_grad.
            """
            return NeuralNet.unroll(
                NeuralNet.cost(NeuralNet.roll(x, shapes),
                               self.lambda_,
                               input,
                               expected_output,
                               self.is_analog)[1])[0]

        return check_grad(fun, grad, array)
Developer: tomgarcia | Project: Deep-Red | Lines of code: 33 | Source file: neuralnet.py


Example 19: test_grad

 def test_grad(self):
     from scipy import optimize
     f = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=False)
     g = lambda z: crps_gaussian(self.obs[0, 0], z[0], z[1], grad=True)[1]
     x0 = np.array([self.mu.reshape(-1),
                    self.sig.reshape(-1)]).T
     for x in x0:
         self.assertLessEqual(optimize.check_grad(f, g, x), 1e-6)
Developer: pombredanne | Project: properscoring | Lines of code: 8 | Source file: test_crps.py


Example 20: test_ridge_grad_cov

def test_ridge_grad_cov():
    """Test ovk.OVKRidgeRisk gradient with finite differences."""
    K = ovk.DecomposableKernel(A=eye(2))
    risk = ovk.OVKRidgeRisk(0.01)
    assert check_grad(lambda *args: risk.functional_grad_val(*args)[0],
                      lambda *args: risk.functional_grad_val(*args)[1],
                      randn(X.shape[0] * y.shape[1]),
                      y.ravel(), K(X, X)) < 1e-3
Developer: operalib | Project: operalib | Lines of code: 8 | Source file: test_risk.py



Note: The scipy.optimize.check_grad examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and copyright remains with the original authors; when redistributing or reusing the code, please follow the corresponding project's license. Do not reproduce without permission.

