Python nlopt.opt Function Code Examples


This article collects typical usage examples of the nlopt.opt function in Python. If you have been wondering what nlopt.opt does, how to call it, or what real code that uses it looks like, the curated examples below should help.

Twenty code examples of the opt function are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
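
To set the stage, here is a minimal, self-contained sketch of the workflow that all twenty examples below follow: construct an optimizer from an algorithm and a problem dimension, register an objective with the (x, grad) signature, set bounds and stopping criteria, then call optimize. The quadratic objective and the choice of LN_BOBYQA here are illustrative assumptions, not taken from any of the quoted projects.

import numpy as np
import nlopt

def objective(x, grad):
    # NLopt passes an array to be filled with the gradient in place;
    # derivative-free algorithms such as BOBYQA pass an empty array
    # (grad.size == 0), so the branch below is skipped for them.
    if grad.size > 0:
        grad[:] = 2.0 * (x - 1.0)
    return float(np.sum((x - 1.0) ** 2))  # minimum at x = [1, 1]

opt = nlopt.opt(nlopt.LN_BOBYQA, 2)  # algorithm, problem dimension
opt.set_min_objective(objective)
opt.set_lower_bounds([-5.0, -5.0])
opt.set_upper_bounds([5.0, 5.0])
opt.set_xtol_rel(1e-6)               # stopping criterion
opt.set_maxeval(1000)                # evaluation budget

x_opt = opt.optimize([0.0, 0.0])     # returns the best point found
print('optimum at', x_opt)
print('minimum value =', opt.last_optimum_value())
print('result code =', opt.last_optimize_result())

Every example that follows is a variation on this pattern: a different algorithm (global GN_* versus local LN_*/LD_* methods), different bounds, constraints added via add_equality_constraint/add_inequality_constraint, or a local optimizer attached with set_local_optimizer for MLSL-type algorithms.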

Example 1: minimizeCustom

    def minimizeCustom(self, p, q, **kwargs):
        # Assumes numpy, nlopt, numpyTransform and `from math import pi`
        # are imported at module level (this is a method of an ICP class).
        S = numpy.matrix(numpy.identity(4))

        def objectiveFunc(params, grad):
            d = p
            m = q
            if grad.size > 0:  # only requested by gradient-based algorithms
                grad[:] = numpy.array([pi / 100, pi / 100, pi / 100, 0.01, 0.01, 0.01])  # placeholder gradient

            translate = numpyTransform.translation(params[3:6])
            rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
            roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
            rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
            transform = translate * rotx * roty * rotz

            Dicp = numpyTransform.transformPoints(transform, d)

            # mean point-to-point distance between model and transformed data
            err = numpy.mean(numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1)))
            return err

        x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        if 'optAlg' in kwargs:
            opt = nlopt.opt(kwargs['optAlg'], 6)
        else:
            opt = nlopt.opt(nlopt.GN_CRS2_LM, 6)

        opt.set_min_objective(objectiveFunc)
        opt.set_lower_bounds([-pi, -pi, -pi, -3.0, -3.0, -3.0])
        opt.set_upper_bounds([pi, pi, pi, 3.0, 3.0, 3.0])
        opt.set_maxeval(1500)
        params = opt.optimize(x0)

        # (several scipy.optimize alternatives -- leastsq, fmin, fmin_slsqp,
        # fmin_l_bfgs_b and fmin_tnc -- were tried here and left disabled)

        rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
        roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
        rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
        # NOTE: only the rotation part of the fitted transform is returned;
        # the fitted translation (params[3:6]) is discarded here.
        return rotx * roty * rotz, S
Developer: vanossj | Project: pyAtlasBoneSegmentation | Lines: 60 | Source: ICP.py


Example 2: optimise_hypers

def optimise_hypers(criterion, optParams):
    objective = lambda theta, grad: criterion(*unpack(theta, unpackinfo))
    theta_low, _ = pack(optParams.sigma.lowerBound, optParams.noise.lowerBound)
    theta_0, unpackinfo = pack(optParams.sigma.initialVal, optParams.noise.initialVal)
    theta_high, _ = pack(optParams.sigma.upperBound, optParams.noise.upperBound)

    nParams = theta_0.shape[0]
    if optParams.global_opt is True:
        # MLSL ignores the objective and bounds of the local optimizer;
        # only its algorithm and stopping criteria are used.
        opt = nl.opt(nl.G_MLSL_LDS, nParams)
        local_opt = nl.opt(nl.LN_BOBYQA, nParams)
        local_opt.set_ftol_rel(1e-4)
        opt.set_local_optimizer(local_opt)
    else:
        opt = nl.opt(nl.LN_BOBYQA, nParams)
        opt.set_ftol_rel(1e-6)

    # Objective, bounds and time limit must be set on the optimizer
    # that is actually run, whichever branch created it.
    opt.set_lower_bounds(theta_low)
    opt.set_upper_bounds(theta_high)
    opt.set_min_objective(objective)
    opt.set_maxtime(optParams.walltime)

    assert (theta_low <= theta_0).all()
    assert (theta_high >= theta_0).all()

    theta_opt = opt.optimize(theta_0)
    sigma, noise_sigma = unpack(theta_opt, unpackinfo)
    opt_val = opt.last_optimum_value()
    return sigma, noise_sigma, opt_val
Developer: KelvyHsu | Project: ocean-exploration | Lines: 27 | Source: train.py


Example 3: _optimize_CRS2_LM

    def _optimize_CRS2_LM(self, vector):
        """
        Controlled random search with local mutations
        """

        # Create a global optimizer
        opt = nlopt.opt(nlopt.GN_CRS2_LM, vector.size)
        opt.set_min_objective(self._objective)
        lower_bounds, upper_bounds = self._getBounds()
        opt.set_lower_bounds(lower_bounds)
        opt.set_upper_bounds(upper_bounds)
        neval = 10000 * opt.get_dimension()  # TODO allow to tune this parameter
        opt.set_maxeval(neval)

        # Optimize parameters
        vector = opt.optimize(vector)  # TODO check optimizer status
        self.loss = opt.last_optimum_value()
        assert self._objective(vector, None) == self.loss

        # Create a local optimizer
        opt = nlopt.opt(nlopt.LN_BOBYQA, opt.get_dimension())
        opt.set_min_objective(self._objective)
        opt.set_lower_bounds(lower_bounds)
        opt.set_upper_bounds(upper_bounds)
        opt.set_xtol_rel(1e-3)
        opt.set_maxeval(neval)
        opt.set_initial_step(1e-3 * (upper_bounds-lower_bounds))

        # Optimize parameters
        vector = opt.optimize(vector)  # TODO check optimizer status
        self.loss = opt.last_optimum_value()
        assert self._objective(vector, None) == self.loss

        return vector
Developer: alejandrovr | Project: htmd | Lines: 34 | Source: dihedral.py


Example 4: nlopt_test

def nlopt_test():
    '''This is taken from the NLopt tutorial.'''
    raise SkipTest  # test disabled; assumes SkipTest is imported (e.g. from unittest)

    def myfunc(x, grad):
        if grad.size > 0:
            grad[0] = 0.0
            grad[1] = 0.5 / math.sqrt(x[1])
        return math.sqrt(x[1])

    def myconstraint(x, grad, a, b):
        if grad.size > 0:
            grad[0] = 3 * a * (a * x[0] + b) ** 2
            grad[1] = -1.0
        return (a * x[0] + b) ** 3 - x[1]

    opt = nlopt.opt(nlopt.LD_MMA, 2)
    opt.set_lower_bounds([-float('inf'), 0])
    opt.set_min_objective(myfunc)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, 2, 0), 1e-8)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, -1, 1), 1e-8)
    opt.set_xtol_rel(1e-4)
    x = opt.optimize([1.234, 5.678])
    minf = opt.last_optimum_value()
    print("optimum at ", x[0], x[1])
    print("minimum value = ", minf)
    print("result code = ", opt.last_optimize_result())
Developer: E-LLP | Project: RtRetrievalFramework | Lines: 26 | Source: nlopt_constrained_test.py


Example 5: __init__

        def __init__(self, function, parameters, ftol=1e-5, verbosity=1):
            super(BOBYQAMinimizer, self).__init__(function, parameters, ftol, verbosity)

            # setup the BOBYQA minimizer; use lists rather than map objects
            # so the values survive repeated use under Python 3
            self.x0 = [x.value for x in self.parameters.values()]
            self.lowerBounds = [x.minValue for x in self.parameters.values()]
            self.upperBounds = [x.maxValue for x in self.parameters.values()]
            self.steps = [x.delta for x in self.parameters.values()]
            self.objectiveFunction = function

            def wrapper(x, grad):
                if grad.size > 0:
                    print("This won't ever happen, since BOBYQA does not use derivatives")
                return self.objectiveFunction(x)

            self.wrapper = wrapper

            self.bob = nlopt.opt(nlopt.LN_BOBYQA, self.Npar)
            self.bob.set_min_objective(self.wrapper)
            self.bob.set_ftol_abs(ftol)
            # Stop if the value of every parameter changes by less than 0.1%
            self.bob.set_xtol_rel(0.001)
            self.bob.set_initial_step(self.steps)

            self.bob.set_lower_bounds(self.lowerBounds)
            self.bob.set_upper_bounds(self.upperBounds)
Developer: sybenzvi | Project: 3ML | Lines: 27 | Source: minimization.py


Example 6: start_training

def start_training(f):
    """Define the training parameters and optimize the output values."""
    opt = nlopt.opt(nlopt.GN_DIRECT_L, f.get_len_output())
    # build the boundaries
    minout = []
    maxout = []
    startout = []
    for i in range(f.get_len_output() - 1):
        minout.append(f.get_output(i))
    for i in range(1, f.get_len_output()):
        maxout.append(f.get_output(i))
    for i in range(f.get_len_output()):
        startout.append(f.get_output(i))
    minout.insert(0, minout[0] - (minout[1] - minout[0]))
    maxout.append(maxout[-1] + (maxout[-1] - maxout[-2]))
    print('minout:', minout)
    print('maxout:', maxout)
    print('start:', startout)
    opt.set_lower_bounds(np.array(minout))
    opt.set_upper_bounds(np.array(maxout))
    opt.set_initial_step((f.get_output(1) - f.get_output(0)) / 500.)
    opt.set_min_objective(f.myfunc)
    opt.set_ftol_rel((f.get_output(1) - f.get_output(0)) / 100000.)
    opt.set_maxtime(60)  # 60 s
    xopt = opt.optimize(np.array(startout))
    opt_val = opt.last_optimum_value()
    result = opt.last_optimize_result()
    print(' *************Result of Optimization*****************')
    print('max:', opt_val)
    print('parameter:', xopt)
    # set the best values
    for i in range(f.get_len_output()):
        f.set_output(i, xopt[i])
Developer: yanfeng1022 | Project: samt2 | Lines: 34 | Source: Pyfuzzy.py


Example 7: direct

    def direct(self, alpha):
        import nlopt

        fn = lambda x, grad: self.objective_func(x, grad, alpha)

        # Using DIRECT as the (global, derivative-free) optimization scheme
        opt = nlopt.opt(nlopt.GN_DIRECT, self.dim)

        # Set the objective (maximization, not minimization)
        opt.set_max_objective(fn)

        # Set the maximum number of function evaluations
        opt.set_maxeval(self.maxeval)

        # Set lower and upper bounds
        opt.set_lower_bounds(self.lb)
        opt.set_upper_bounds(self.ub)

        # Optimize with starting point
        x = opt.optimize(self.start_point)
        # minf = opt.last_optimum_value()
        # print("optimum at ", x[0])
        # print("minimum value = ", minf)
        # print("result code = ", opt.last_optimize_result())

        return x
Developer: ziyuw | Project: AHMC | Lines: 26 | Source: optimize.py


Example 8: run

    def run(self):

        ff = FFEvaluate(self.molecule)

        results = []
        for iframe in range(self.molecule.numFrames):
            self.molecule.frame = iframe

            directory = os.path.join(self.directory, '%05d' % iframe)
            os.makedirs(directory, exist_ok=True)
            pickleFile = os.path.join(directory, 'data.pkl')

            if self._completed(directory):
                with open(pickleFile, 'rb') as fd:
                    result = pickle.load(fd)
                logger.info('Loading QM data from %s' % pickleFile)

            else:
                result = QMResult()
                result.errored = False
                result.coords = self.molecule.coords[:, :, iframe:iframe + 1].copy()

                if self.optimize:
                    opt = nlopt.opt(nlopt.LN_COBYLA, result.coords.size)
                    opt.set_min_objective(lambda x, _: ff.run(x.reshape((-1, 3)))['total'])
                    if self.restrained_dihedrals is not None:
                        for dihedral in self.restrained_dihedrals:
                            indices = dihedral.copy()
                            ref_angle = np.deg2rad(dihedralAngle(self.molecule.coords[indices, :, iframe]))
                            # Bind the loop variables as default arguments so each
                            # constraint keeps its own dihedral (otherwise the closure
                            # would late-bind to the last loop iteration).
                            def constraint(x, _, indices=indices, ref_angle=ref_angle):
                                coords = x.reshape((-1, 3))
                                angle = np.deg2rad(dihedralAngle(coords[indices]))
                                return np.sin(.5 * (angle - ref_angle))
                            opt.add_equality_constraint(constraint)
                    opt.set_xtol_abs(1e-3) # Similar to Psi4 default
                    opt.set_maxeval(1000*opt.get_dimension())
                    opt.set_initial_step(1e-3)
                    result.coords = opt.optimize(result.coords.ravel()).reshape((-1, 3, 1))
                    logger.info('Optimization status: %d' % opt.last_optimize_result())

                result.energy = ff.run(result.coords[:, :, 0])['total']
                result.dipole = self.molecule.getDipole()

                if self.optimize:
                    assert opt.last_optimum_value() == result.energy # A self-consistency test

                # Compute ESP values
                if self.esp_points is not None:
                    assert self.molecule.numFrames == 1
                    result.esp_points = self.esp_points
                    distances = cdist(result.esp_points, result.coords[:, :, 0])  # Angstrom
                    distances *= const.physical_constants['Bohr radius'][0] / const.angstrom  # Angstrom --> Bohr
                    result.esp_values = np.dot(np.reciprocal(distances), self.molecule.charge)  # Hartree/Bohr

                with open(pickleFile, 'wb') as fd:
                    pickle.dump(result, fd)

            results.append(result)

        return results
Developer: jeiros | Project: htmd | Lines: 60 | Source: fake.py


Example 9: run

    def run(self):
        """
        Run ESP charge fitting

        Return
        ------
        results : dict
            Dictionary with the fitted charges and fitting loss value
        """

        # Get charge bounds
        lower_bounds, upper_bounds = self._get_bounds()

        # Set up NLopt
        opt = nlopt.opt(nlopt.LN_COBYLA, self.ngroups)
        opt.set_min_objective(self._compute_objective)
        opt.set_lower_bounds(lower_bounds)
        opt.set_upper_bounds(upper_bounds)
        opt.add_equality_constraint(self._compute_constraint)
        opt.set_xtol_rel(1.e-6)
        opt.set_maxeval(1000*self.ngroups)
        opt.set_initial_step(0.001)

        # Optimize the charges
        group_charges = opt.optimize(np.zeros(self.ngroups) + 0.001) # TODO: a more elegant way to set initial charges
        # TODO: check optimizer status
        charges = self._map_groups_to_atoms(group_charges)
        loss = self._compute_objective(group_charges, None)

        return {'charges': charges, 'loss': loss}
Developer: jeiros | Project: htmd | Lines: 30 | Source: esp.py


Example 10: test_make_nlopt_fun_neldermead

def test_make_nlopt_fun_neldermead(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Developer: pombredanne | Project: revrand | Lines: 7 | Source: test_nlopt_wrap.py


Example 11: test_make_nlopt_fun_grad1

def test_make_nlopt_fun_grad1(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=rosen_der)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Developer: pombredanne | Project: revrand | Lines: 7 | Source: test_nlopt_wrap.py


Example 12: test_make_nlopt_fun_bobyqa

def test_make_nlopt_fun_bobyqa(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_BOBYQA, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    opt.set_ftol_abs(1e-11)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Developer: pombredanne | Project: revrand | Lines: 8 | Source: test_nlopt_wrap.py


Example 13: test_make_nlopt_fun_grad_free1

def test_make_nlopt_fun_grad_free1(start_point):
    # When using derivative-free optimization methods, gradient information
    # supplied in any form is disregarded without warning.
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=True)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Developer: pombredanne | Project: revrand | Lines: 9 | Source: test_nlopt_wrap.py


Example 14: mle

    def mle(self, params, maxiter=100):
        opt = nlopt.opt(nlopt.LN_COBYLA, params.size)
        opt.set_min_objective(self.likelihood)
        opt.set_maxeval(maxiter)
        opt.set_lower_bounds(np.zeros(params.size))
        opt.set_initial_step(np.linalg.norm(params))
        opt.set_ftol_rel(1e-3)
        params = opt.optimize(params)
        return params
Developer: chicagohawk | Project: twinmodel | Lines: 9 | Source: final.py


Example 15: test_make_nlopt_fun_grad5

def test_make_nlopt_fun_grad5(start_point):
    # Of course, you can use gradient-based optimization and not supply
    # any gradient information at your own discretion.
    # No warnings are raised.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), x0)
Developer: pombredanne | Project: revrand | Lines: 9 | Source: test_nlopt_wrap.py


Example 16: amoroso_binned_max_log_likelihood

def amoroso_binned_max_log_likelihood(samples, initial_guess=None, nbins=50):
    if initial_guess is None:
        initial_guess = np.array([1.005 * samples.max(), samples.std(), 1.1, 1])
        # for negative skew, use negative scale parameter
        if scipy.stats.skew(samples) < 0:
            initial_guess[1] *= -1

    # initial_guess = np.array([ 1.44631991, -0.02302599,  1.370993,    1.00922993])
    # initial_guess = np.array([ 1.44506214, -0.02157434,  1.28101393,  0.90385331])

    bounds = [(None, None),
              (0.0, None) if initial_guess[1] > 0 else (None, 0.0),
              (0, None),
              (0.0, None)
              ]

    # bin the data with Bayesian blocks
    # from astroML.plotting import hist
    # bin_counts, bin_edges, _ = hist(samples, bins='blocks')

    from matplotlib.pyplot import hist
    bin_counts, bin_edges, _ = hist(samples, bins=nbins)

    print()
    print("initial guess", initial_guess, "f", amoroso_binned_log_likelihood(initial_guess, bin_edges, bin_counts))

    # scipy.optimize
    # kwargs = dict(bounds=bounds, options=dict(disp=True, maxiter=100),
    #               method='Powell',
    #               args=(bin_edges, bin_counts))
    # return scipy.optimize.minimize(amoroso_binned_log_likelihood, initial_guess, **kwargs)

    # nlopt
    import nlopt

    # best results with LN_COBYLA, LN_SBPLX, GN_CRS2_LM
    # not good: LN_BOBYQA, LN_PRAXIS, GN_DIRECT_L, GN_ISRES, GN_ESCH
    # opt = nlopt.opt(nlopt.GN_CRS2_LM, 4)
    # opt = nlopt.opt(nlopt.LN_SBPLX, 4)
    opt = nlopt.opt(nlopt.LN_COBYLA, 4)
    opt.set_min_objective(lambda x, grad: amoroso_binned_log_likelihood(x, bin_edges, bin_counts))

    opt.set_lower_bounds([0.95 * bin_edges[0], 0.0 if initial_guess[1] > 0.0 else -20.0, 0.0, 0.0])
    opt.set_upper_bounds([1.05 * bin_edges[-1], 50.0 if initial_guess[1] > 0.0 else 0.0, 10, 10.0])

    tol = 1e-12
    opt.set_ftol_abs(tol)
    opt.set_xtol_rel(math.sqrt(tol))
    opt.set_maxeval(1500)

    xopt = opt.optimize(initial_guess)
    fmin = opt.last_optimum_value()

    print("Mode", repr(xopt), ", min. f =", fmin)
    return xopt
Developer: tardis-sn | Project: statistics-notes | Lines: 55 | Source: likelihood.py


Example 17: test_make_nlopt_fun_grad4

def test_make_nlopt_fun_grad4(start_point):
    # Likewise, if you *do* supply gradient information, but set `jac=False`
    # you will be reminded of the fact that the gradient information is
    # being ignored through a `RuntimeWarning`.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=False)
    opt.set_min_objective(obj_fun)
    with pytest.warns(RuntimeWarning):
        x_opt = opt.optimize(x0)
    assert np.allclose(x_opt, x0)
Developer: pombredanne | Project: revrand | Lines: 11 | Source: test_nlopt_wrap.py


Example 18: test_make_nlopt_fun_grad3

def test_make_nlopt_fun_grad3(start_point):
    # If you use a gradient-based optimization method with `jac=True` but
    # fail to supply any gradient information, you will receive a
    # `RuntimeWarning` and poor results.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=True)
    opt.set_min_objective(obj_fun)
    with pytest.warns(RuntimeWarning):
        x_opt = opt.optimize(x0)
    assert np.allclose(x_opt, x0)
Developer: pombredanne | Project: revrand | Lines: 11 | Source: test_nlopt_wrap.py


Example 19: test_make_nlopt_fun_grad2

def test_make_nlopt_fun_grad2(start_point):
    # If a callable jacobian `jac` is specified, it will take precedence
    # over the gradient given by a function that returns a tuple with the
    # gradient as its second value.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    # We give some function that is clearly not the correct derivative.
    obj_fun = make_nlopt_fun(couple(rosen, lambda x: 2 * x), jac=rosen_der)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Developer: pombredanne | Project: revrand | Lines: 11 | Source: test_nlopt_wrap.py


Example 20: min_nlopt

def min_nlopt(pdfs, guess, p0=1.e-5, regulator=1000., maxtime=25., maxeval=10000, algorithm='CRS', Delta_Ar_neighbor=None, weight_neighbor=None):
    N_regions = guess.size - 1

    opt = None
    if algorithm == 'CRS':
        opt = nlopt.opt(nlopt.GN_CRS2_LM, N_regions + 1)
    elif algorithm == 'MLSL':
        opt = nlopt.opt(nlopt.G_MLSL_LDS, N_regions + 1)

    # Set lower and upper bounds on Delta_Ar
    lower = np.empty(N_regions + 1, dtype=np.float64)
    upper = np.empty(N_regions + 1, dtype=np.float64)
    lower.fill(1.e-10)
    upper.fill(max(float(pdfs.shape[2]), 1.2 * np.max(guess)))
    opt.set_lower_bounds(lower)
    opt.set_upper_bounds(upper)

    # Set local optimizer (required for MLSL)
    if algorithm == 'MLSL':
        local_opt = nlopt.opt(nlopt.LN_COBYLA, N_regions + 1)
        local_opt.set_lower_bounds(lower)
        local_opt.set_upper_bounds(upper)
        local_opt.set_initial_step(15.)
        opt.set_local_optimizer(local_opt)

    opt.set_initial_step(15.)

    # Set stopping conditions
    opt.set_maxtime(maxtime)
    opt.set_maxeval(maxeval)
    # opt.set_xtol_abs(0.1)

    # Set the objective function
    opt.set_min_objective(lambda x, grad: nlopt_measure(x, grad, pdfs, p0, regulator, Delta_Ar_neighbor, weight_neighbor))

    # Run optimization algorithm
    x = opt.optimize(guess)
    measure = opt.last_optimum_value()
    success = opt.last_optimize_result()

    return x, success, measure
Developer: schlafly | Project: galstar | Lines: 41 | Source: fit_pdfs.py



Note: The nlopt.opt examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult the corresponding project's license before distributing or reusing the code, and do not repost without permission.

