Python learning_rule.Momentum Class Code Examples


This article collects and summarizes typical usage examples of the Python class pylearn2.training_algorithms.learning_rule.Momentum. If you are struggling with questions such as: How exactly is the Momentum class used? How do I call Momentum? What does Momentum look like in real code? Then the curated class code examples below may help.



A total of 5 code examples of the Momentum class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
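
Before the excerpts, here is a minimal sketch of how Momentum is typically wired into SGD through the `learning_rule` parameter. This is assumed usage inferred from the examples below, not code taken from them; `my_model` and `my_dataset` are hypothetical placeholders.

    # Minimal usage sketch (assumed, not from the excerpts below).
    # `my_model` and `my_dataset` are hypothetical placeholders.
    from pylearn2.training_algorithms.sgd import SGD
    from pylearn2.training_algorithms.learning_rule import Momentum

    algorithm = SGD(
        learning_rate=0.01,
        batch_size=100,
        learning_rule=Momentum(0.5),  # initial momentum coefficient
        monitoring_dataset=my_dataset,
    )
    algorithm.setup(my_model, my_dataset)  # setup() appears in Examples 2 and 5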

Example 1: __init__

    # Imports this excerpt relies on (assumed standard pylearn2 module paths):
    import warnings

    from pylearn2.training_algorithms.learning_rule import Momentum
    from pylearn2.utils import sharedX
    from pylearn2.utils.rng import make_np_rng

    def __init__(
        self,
        learning_rate,
        cost=None,
        batch_size=None,
        monitoring_batches=None,
        monitoring_dataset=None,
        monitor_iteration_mode="sequential",
        termination_criterion=None,
        update_callbacks=None,
        learning_rule=None,
        init_momentum=None,
        set_batch_size=False,
        train_iteration_mode=None,
        batches_per_iter=None,
        theano_function_mode=None,
        monitoring_costs=None,
        seed=[2012, 10, 5],
    ):

        if isinstance(cost, (list, tuple, set)):
            raise TypeError(
                "SGD no longer supports using collections of "
                + "Costs to represent a sum of Costs. Use "
                + "pylearn2.costs.cost.SumOfCosts instead."
            )

        if init_momentum:
            warnings.warn(
                "init_momentum interface is deprecated and will "
                "become officially unsupported as of May 9, 2014. Please use "
                "the `learning_rule` parameter instead, providing an object of "
                "type `pylearn2.training_algorithms.learning_rule.Momentum`."
            )
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, "learning_rate")
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches " + "but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = "shuffled_sequential"
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = make_np_rng(seed, which_method=["randn", "randint"])
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs
Developer: pangyuteng, Project: chalearn2014, Lines of code: 59, Source file: sgd.py
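
As the deprecation shim in this constructor shows, a bare `init_momentum` value is converted internally into a `Momentum` learning rule, so the two spellings below should behave identically (a sketch reusing the imports from the earlier snippet):

    # Deprecated spelling: triggers the warning, then wraps the value in
    # Momentum(0.9) under the hood (see the shim above).
    old_style = SGD(learning_rate=0.01, init_momentum=0.9)

    # Preferred spelling: the same learning rule, stated explicitly.
    new_style = SGD(learning_rate=0.01, learning_rule=Momentum(0.9))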


Example 2: SGD


#......... (part of the code omitted here) .........
        things like check for NaNs at every step, or record md5 digests
        of all computations performed by the update function to help
        isolate problems with nondeterminism.
    monitoring_costs : list, optional
        a list of Cost instances. The Monitor will also include all
        channels defined by these Costs, even though we don't train
        using them.
    seed : valid argument to np.random.RandomState, optional
        The seed used for the random number generator to be passed to the
        training dataset iterator (if any)
    """
    def __init__(self, learning_rate, cost=None, batch_size=None,
                 monitoring_batch_size=None, monitoring_batches=None,
                 monitoring_dataset=None, monitor_iteration_mode='sequential',
                 termination_criterion=None, update_callbacks=None,
                 learning_rule = None, init_momentum = None,
                 set_batch_size = False,
                 train_iteration_mode = None, batches_per_iter=None,
                 theano_function_mode = None, monitoring_costs=None,
                 seed=[2012, 10, 5], discriminator_steps=1):

        self.discriminator_steps = discriminator_steps
        self.train_generator = 0

        if isinstance(cost, (list, tuple, set)):
            raise TypeError("SGD no longer supports using collections of " +
                            "Costs to represent a sum of Costs. Use " +
                            "pylearn2.costs.cost.SumOfCosts instead.")

        if init_momentum:
            warnings.warn("init_momentum interface is deprecated and will "
            "become officially unsuported as of May 9, 2014. Please use the "
            "`learning_rule` parameter instead, providing an object of type "
            "`pylearn2.training_algorithms.learning_rule.Momentum` instead")
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, 'learning_rate')
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batch_size = monitoring_batch_size
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batch_size is not None:
                raise ValueError("Specified a monitoring batch size " +
                                 "but not a monitoring dataset.")
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches " +
                                 "but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = 'shuffled_sequential'
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = make_np_rng(seed, which_method=["randn","randint"])
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs

    def setup(self, model, dataset):
Developer: AdityoSanjaya, Project: adversarial, Lines of code: 67, Source file: sgd_alt.py
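
This variant extends the stock signature with a `discriminator_steps` argument and a `train_generator` counter, which suggests alternating discriminator and generator updates in an adversarial setup. A hedged instantiation sketch; the import path and the exact semantics of `discriminator_steps` are assumptions based only on the signature above:

    # Hedged sketch of constructing the adversarial SGD variant shown above.
    # Assumes the class is importable from the project's sgd_alt module.
    from sgd_alt import SGD as AdversarialSGD

    gan_algorithm = AdversarialSGD(
        learning_rate=0.01,
        learning_rule=Momentum(0.5),
        monitoring_dataset=my_dataset,  # hypothetical placeholder
        discriminator_steps=5,          # discriminator updates per generator update (assumed)
    )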


Example 3: __init__


#......... (part of the code omitted here) .........
            channels will be computed for all monitoring Datasets and will
            have the dataset name and an underscore prepended to them.
        monitor_iteration_mode : optional, str
            The iteration mode used to iterate over the examples in all
            monitoring datasets. If not specified, defaults to 'sequential'.
            TODO: make it possible to specify different modes for different
            datasets.
        termination_criterion : optional, instance of
            pylearn2.termination_criteria.TerminationCriterion
            Used to determine when the algorithm should stop running.
            If not specified, runs forever--or more realistically, until
            external factors halt the python process (Kansas 1977).
        update_callbacks : optional, list
            If specified, each member of the list should be a callable that
            accepts an SGD instance as its only argument.
            All callbacks will be called with this SGD instance after each
            SGD step.
        learning_rule : training_algorithms.learning_rule.LearningRule
            A learning rule computes the new parameter values given old \
            parameters and first-order gradients. If learning_rule is None, \
            sgd.SGD will update parameters according to the standard SGD \
            learning rule:
                param := param - learning_rate * d cost / d param
            This argument allows more sophisticated learning rules, such
            as SGD with momentum.
        init_momentum : **DEPRECATED** option, float
            Use learning_rule instead.
            If None, does not use momentum otherwise, use momentum and \
            initialize the momentum coefficient to init_momentum. Callbacks \
            can change this over time just like the learning rate. If the \
            gradient is the same on every step, then the update taken by the \
            SGD algorithm is scaled by a factor of 1/(1-momentum). See \
            section 9 of Geoffrey Hinton's "A Practical Guide to Training \
            Restricted Boltzmann Machines" for details.
        set_batch_size : optional, bool
            Defaults to False.
            If True, and batch_size conflicts with model.force_batch_size, \
            will call model.set_batch_size(batch_size) in an attempt to \
            change model.force_batch_size
        train_iteration_mode : optional, str
            Defaults to 'shuffled_sequential'.
            The iteration mode to use for iterating through training examples.
        batches_per_iter : optional, int
            The number of batches to draw from the iterator over training
            examples.
            If iteration mode is 'sequential' or 'shuffled_sequential', this
            is unnecessary; when unspecified we will iterate over all examples.
        theano_function_mode : optional, a valid argument to theano.function's
            'mode' parameter.
            The theano mode to compile the updates function with. Note that \
            pylearn2 includes some wraplinker modes that are not bundled with \
            theano. See pylearn2.devtools. These extra modes let you do \
            things like check for NaNs at every step, or record md5 digests \
            of all computations performed by the update function to help \
            isolate problems with nondeterminism.
        monitoring_costs : optional, list
            a list of Cost instances. The Monitor will also include all
            channels defined by these Costs, even though we don't train
            using them.
        seed : optional, valid argument to np.random.RandomState
            The seed used for the random number generator to be passed to the
            training dataset iterator (if any)
        """

        if isinstance(cost, (list, tuple, set)):
            raise TypeError("SGD no longer supports using collections of " +
                            "Costs to represent a sum of Costs. Use " +
                            "pylearn2.costs.cost.SumOfCosts instead.")

        if init_momentum:
            warnings.warn("init_momentum interface is deprecated and will "
            "become officially unsuported as of May 9, 2014. Please use the "
            "`learning_rule` parameter instead, providing an object of type "
            "`pylearn2.training_algorithms.learning_rule.Momentum` instead")
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, 'learning_rate')
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches " +
                                 "but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = 'shuffled_sequential'
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = np.random.RandomState(seed)
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs
Developer: yosinski, Project: pylearn2, Lines of code: 101, Source file: sgd.py
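
The docstring's claim that a constant gradient scales the effective SGD step by a factor of 1/(1 - momentum) follows from the velocity update converging to a geometric series, and it is easy to check numerically. A small self-contained verification:

    # Verify the 1/(1 - momentum) steady-state scaling from the docstring:
    # with a constant gradient, inc converges to -learning_rate * grad / (1 - momentum).
    momentum, learning_rate, grad = 0.9, 0.01, 1.0
    inc = 0.0
    for _ in range(500):  # plenty of steps to reach steady state
        inc = momentum * inc - learning_rate * grad
    print(inc)                                     # approximately -0.1
    print(-learning_rate * grad / (1 - momentum))  # -0.1 exactly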


Example 4: __init__

    def __init__(self, learning_rate, cost=None, batch_size=None,
                 monitoring_batches=None, monitoring_dataset=None,
                 monitor_iteration_mode='sequential',
                 termination_criterion=None, update_callbacks=None,
                 learning_rule = None, init_momentum = None, set_batch_size = False,
                 train_iteration_mode = None, batches_per_iter=None,
                 theano_function_mode = None, monitoring_costs=None,
                 seed=[2012, 10, 5]):
        """
            WRITEME

            learning_rate: The learning rate to use.
                            Train object callbacks can change the learning
                            rate after each epoch. SGD update_callbacks
                            can change it after each minibatch.
            cost: a pylearn2.costs.cost.Cost object specifying the objective
                  function to be minimized.
                  Optionally, may be None. In this case, SGD will call the model's
                  get_default_cost method to obtain the objective function.
            init_momentum: **DEPRECATED** if None, does not use momentum
                            otherwise, use momentum and initialize the
                            momentum coefficient to init_momentum.
                            Callbacks can change this over time just like
                            the learning rate.

                            If the gradient is the same on every step, then
                            the update taken by the SGD algorithm is scaled
                            by a factor of 1/(1-momentum).

                            See section 9 of Geoffrey Hinton's "A Practical
                            Guide to Training Restricted Boltzmann Machines"
                            for details.
            learning_rule: training_algorithms.learning_rule.LearningRule,
                           a learning rule computes the new parameter values given
                           old parameters and first-order gradients. If learning_rule
                           is None, sgd.SGD will update parameters according to
                           the standard SGD learning rule.
            set_batch_size: if True, and batch_size conflicts with
                            model.force_batch_size, will call
                            model.set_batch_size(batch_size) in an attempt
                            to change model.force_batch_size
            theano_function_mode: The theano mode to compile the updates function with.
                            Note that pylearn2 includes some wraplinker modes that are
                            not bundled with theano. See pylearn2.devtools. These
                            extra modes let you do things like check for NaNs at every
                            step, or record md5 digests of all computations performed
                            by the update function to help isolate problems with nondeterminism.

            Parameters are updated by the formula:

            inc := momentum * inc - learning_rate * d cost / d param
            param := param + inc
        """

        if isinstance(cost, (list, tuple, set)):
            raise TypeError("SGD no longer supports using collections of Costs to represent "
                    " a sum of Costs. Use pylearn2.costs.cost.SumOfCosts instead.")

        if init_momentum:
            warnings.warn("init_momentum interface is deprecated and will "
            "become officially unsuported as of May 9, 2014. Please use the "
            "`learning_rule` parameter instead, providing an object of type "
            "`pylearn2.training_algorithms.learning_rule.Momentum` instead")
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, 'learning_rate')
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = 'shuffled_sequential'
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = np.random.RandomState(seed)
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs
Developer: antran89, Project: pylearn2, Lines of code: 88, Source file: sgd.py
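
The two-line update formula at the end of the docstring translates directly into NumPy. A minimal, pylearn2-independent sketch of a single momentum step; the parameter and gradient values are made up for illustration:

    import numpy as np

    # One SGD-with-momentum step, following the docstring's formula:
    #   inc   := momentum * inc - learning_rate * d cost / d param
    #   param := param + inc
    def momentum_step(param, grad, inc, learning_rate=0.01, momentum=0.9):
        inc = momentum * inc - learning_rate * grad
        return param + inc, inc

    param = np.array([1.0, -2.0])  # made-up parameter vector
    inc = np.zeros_like(param)     # velocity starts at zero
    grad = np.array([0.5, 0.5])    # made-up gradient
    param, inc = momentum_step(param, grad, inc)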


Example 5: SGD

class SGD(TrainingAlgorithm):
    """
    Stochastic Gradient Descent

    WRITEME: what is a good reference to read about this algorithm?

    A TrainingAlgorithm that does gradient descent on minibatches.

    """
    def __init__(self, learning_rate, cost=None, batch_size=None,
                 monitoring_batches=None, monitoring_dataset=None,
                 monitor_iteration_mode='sequential',
                 termination_criterion=None, update_callbacks=None,
                 learning_rule = None, init_momentum = None, set_batch_size = False,
                 train_iteration_mode = None, batches_per_iter=None,
                 theano_function_mode = None, monitoring_costs=None,
                 seed=[2012, 10, 5]):
        """
            WRITEME

            learning_rate: The learning rate to use.
                            Train object callbacks can change the learning
                            rate after each epoch. SGD update_callbacks
                            can change it after each minibatch.
            cost: a pylearn2.costs.cost.Cost object specifying the objective
                  function to be minimized.
                  Optionally, may be None. In this case, SGD will call the model's
                  get_default_cost method to obtain the objective function.
            init_momentum: **DEPRECATED** if None, does not use momentum
                            otherwise, use momentum and initialize the
                            momentum coefficient to init_momentum.
                            Callbacks can change this over time just like
                            the learning rate.

                            If the gradient is the same on every step, then
                            the update taken by the SGD algorithm is scaled
                            by a factor of 1/(1-momentum).

                            See section 9 of Geoffrey Hinton's "A Practical
                            Guide to Training Restricted Boltzmann Machines"
                            for details.
            learning_rule: training_algorithms.learning_rule.LearningRule,
                           a learning rule computes the new parameter values given
                           old parameters and first-order gradients. If learning_rule
                           is None, sgd.SGD will update parameters according to
                           the standard SGD learning rule.
            set_batch_size: if True, and batch_size conflicts with
                            model.force_batch_size, will call
                            model.set_batch_size(batch_size) in an attempt
                            to change model.force_batch_size
            theano_function_mode: The theano mode to compile the updates function with.
                            Note that pylearn2 includes some wraplinker modes that are
                            not bundled with theano. See pylearn2.devtools. These
                            extra modes let you do things like check for NaNs at every
                            step, or record md5 digests of all computations performed
                            by the update function to help isolate problems with nondeterminism.

            Parameters are updated by the formula:

            inc := momentum * inc - learning_rate * d cost / d param
            param := param + inc
        """

        if isinstance(cost, (list, tuple, set)):
            raise TypeError("SGD no longer supports using collections of Costs to represent "
                    " a sum of Costs. Use pylearn2.costs.cost.SumOfCosts instead.")

        if init_momentum:
            warnings.warn("init_momentum interface is deprecated and will "
            "become officially unsuported as of May 9, 2014. Please use the "
            "`learning_rule` parameter instead, providing an object of type "
            "`pylearn2.training_algorithms.learning_rule.Momentum` instead")
            # Convert to new interface under the hood.
            self.learning_rule = Momentum(init_momentum)
        else:
            self.learning_rule = learning_rule

        self.learning_rate = sharedX(learning_rate, 'learning_rate')
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = 'shuffled_sequential'
        self.train_iteration_mode = train_iteration_mode
        self.first = True
        self.rng = np.random.RandomState(seed)
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs

    def setup(self, model, dataset):
        if self.cost is None:
#......... (part of the code omitted here) .........
Developer: antran89, Project: pylearn2, Lines of code: 101, Source file: sgd.py
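
The excerpt cuts off just after `setup` tests for a missing cost. Based on the docstring above ("SGD will call the model's get_default_cost method to obtain the objective function"), the elided branch plausibly begins as follows; this is a hedged reconstruction, not the verbatim continuation:

    # Hedged reconstruction of the elided branch, inferred from the docstring;
    # the real sgd.py continues with monitoring and compilation steps omitted here.
    def setup(self, model, dataset):
        if self.cost is None:
            self.cost = model.get_default_cost()
        # ... remaining setup omitted ...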



Note: The pylearn2.training_algorithms.learning_rule.Momentum class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.

