Python data_specs.DataSpecsMapping class code examples


This article collects typical usage examples of the Python class pylearn2.utils.data_specs.DataSpecsMapping. If you are unsure what DataSpecsMapping does, how to use it, or what it looks like in practice, the curated class code examples below may help.



The following presents 18 code examples of the DataSpecsMapping class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
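
Before diving into the examples, here is a minimal, self-contained sketch of the flatten/nest round trip that most of the examples below rely on. It is illustrative only: the spaces, the dimensions, and the 'demo_' variable naming are assumptions made for this sketch, not taken from any single example.

# A minimal sketch (assumptions noted above): flatten nested
# (space, source) specs, build one Theano variable per flat component,
# then nest the variables back into the structure a cost or model expects.
from pylearn2.space import CompositeSpace, VectorSpace
from pylearn2.utils import safe_zip
from pylearn2.utils.data_specs import DataSpecsMapping

# Hypothetical nested specs: features plus (features, targets).
space = CompositeSpace([VectorSpace(dim=5),
                        CompositeSpace([VectorSpace(dim=9),
                                        VectorSpace(dim=12)])])
source = ('features', ('features', 'targets'))

mapping = DataSpecsMapping((space, source))
space_tuple = mapping.flatten(space, return_tuple=True)
source_tuple = mapping.flatten(source, return_tuple=True)
# The tuples are now flat; identical (space, source) pairs would be
# merged into a single entry (see Example 13).

# One Theano variable per flat component, named after its source.
ipt = tuple(sp.make_theano_batch(name='demo_%s' % src)
            for sp, src in safe_zip(space_tuple, source_tuple))

# Restore the original nesting, so the variables can be passed to
# methods (such as cost.expr) that expect data_specs-formatted input.
nested_ipt = mapping.nest(ipt)

This flatten, build variables, nest pattern is exactly what Example 1 and Example 14 below do before compiling their Theano functions.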

Example 1: setup

    def setup(self, model, dataset):
        """
        Allows the training algorithm to do some preliminary configuration
        *before* we actually start training the model. The dataset is provided
        in case other derived training algorithms need to modify model based on
        the dataset.

        Parameters
        ----------
        model: a Python object representing the model to train loosely
        implementing the interface of models.model.Model.

        dataset: a pylearn2.datasets.dataset.Dataset object used to draw
        training data
        """
        self.model = model

        self.monitor = Monitor.get_monitor(model)

        if self.monitoring_dataset is not None:
            # Get the data specifications needed by the model
            space, source = model.get_monitoring_data_specs()

            # Create Theano variables for each of the individual components
            # of that data. Usually, it will be X for inputs and Y for targets.
            # First, we need to find these components, and put them in a tuple
            mapping = DataSpecsMapping((space, source))
            space_tuple = mapping.flatten(space, return_tuple=True)
            source_tuple = mapping.flatten(source, return_tuple=True)
            # Then, build a flat tuple of these Theano variables
            ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
                    for (sp, src) in safe_zip(space_tuple, source_tuple))
            # Finally, organize them back into a structure expected by the
            # monitoring channels of the model
            nested_ipt = mapping.nest(ipt)

            self.monitor.add_dataset(dataset=self.monitoring_dataset,
                                mode="sequential",
                                batch_size=self.batch_size,
                                num_batches=self.monitoring_batches)

            channels = model.get_monitoring_channels(nested_ipt)
            if not isinstance(channels, dict):
                raise TypeError("model.get_monitoring_channels must return a "
                                "dictionary, but it returned " + str(channels))
            for name in channels:
                J = channels[name]
                if isinstance(J, tuple):
                    assert len(J) == 2
                    J, prereqs = J
                else:
                    prereqs = None

                self.monitor.add_channel(name=name,
                                         ipt=nested_ipt,
                                         val=J,
                                         prereqs=prereqs,
                                         data_specs=(space, source))
        self.first = True
        self.bSetup = True
Developer ID: Alienfeel, Project: pylearn2, Lines: 60, Source file: default.py


Example 2: test_variational_cd

def test_variational_cd():

    # Verifies that VariationalCD works well with make_layer_to_symbolic_state
    visible_layer = BinaryVector(nvis=100)
    hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
                                       pool_size=1,
                                       layer_name='h',
                                       irange=0.05,
                                       init_bias=-2.0)
    model = DBM(visible_layer=visible_layer,
                hidden_layers=[hidden_layer],
                batch_size=100,
                niter=1)

    cost = VariationalCD(num_chains=100, num_gibbs_steps=2)

    data_specs = cost.get_data_specs(model)
    mapping = DataSpecsMapping(data_specs)
    space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
    source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

    theano_args = []
    for space, source in safe_zip(space_tuple, source_tuple):
        name = '%s' % (source)
        arg = space.make_theano_batch(name=name)
        theano_args.append(arg)
    theano_args = tuple(theano_args)
    nested_args = mapping.nest(theano_args)

    grads, updates = cost.get_gradients(model, nested_args)
Developer ID: BloodNg, Project: pylearn2, Lines: 30, Source file: test_dbm.py


Example 3: train

    def train(self, dataset):
        if not hasattr(self, 'sgd_update'):
            raise Exception("train called without first calling setup")

        # Make sure none of the parameters have bad values
        for param in self.params:
            value = param.get_value(borrow=True)
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                raise Exception("NaN in " + param.name)

        self.first = False
        rng = self.rng
        if not is_stochastic(self.train_iteration_mode):
            rng = None

        data_specs = self.cost.get_data_specs(self.model)

        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundant tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError("Unable to train with SGD, because "
                    "the cost does not actually use data from the data set. "
                    "data_specs: %s" % str(data_specs))
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

        iterator = dataset.iterator(mode=self.train_iteration_mode,
                batch_size=self.batch_size,
                data_specs=flat_data_specs, return_tuple=True,
                rng = rng, num_batches = self.batches_per_iter)

        on_load_batch = self.on_load_batch
        for batch in iterator:
            for callback in on_load_batch:
                callback(mapping.nest(batch))
            self.sgd_update(*batch)
            # iterator might return a smaller batch if dataset size
            # isn't divisible by batch_size
            # Note: if data_specs[0] is a NullSpace, there is no way to know
            # how many examples would actually have been in the batch,
            # since it was empty, so actual_batch_size would be reported as 0.
            actual_batch_size = flat_data_specs[0].np_batch_size(batch)
            self.monitor.report_batch(actual_batch_size)
            for callback in self.update_callbacks:
                callback(self)

        # Make sure none of the parameters have bad values
        for param in self.params:
            value = param.get_value(borrow=True)
            if np.any(np.isnan(value)) or np.any(np.isinf(value)):
                raise Exception("NaN in " + param.name)
Developer ID: ahmed26, Project: pylearn2, Lines: 56, Source file: sgd.py


Example 4: train

    def train(self, dataset):
        """
        .. todo::

            WRITEME
        """
        assert self.bSetup
        model = self.model

        rng = self.rng
        train_iteration_mode = "shuffled_sequential"
        if not is_stochastic(train_iteration_mode):
            rng = None

        data_specs = self.cost.get_data_specs(self.model)
        # The iterator should be built from flat data specs, so it returns
        # flat, non-redundant tuples of data.
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
        if len(space_tuple) == 0:
            # No data will be returned by the iterator, and it is impossible
            # to know the size of the actual batch.
            # It is not decided yet what the right thing to do should be.
            raise NotImplementedError(
                "Unable to train with BGD, because "
                "the cost does not actually use data from the data set. "
                "data_specs: %s" % str(data_specs)
            )
        flat_data_specs = (CompositeSpace(space_tuple), source_tuple)

        iterator = dataset.iterator(
            mode=train_iteration_mode,
            batch_size=self.batch_size,
            num_batches=self.batches_per_iter,
            data_specs=flat_data_specs,
            return_tuple=True,
            rng=rng,
        )

        mode = self.theano_function_mode
        for data in iterator:
            if "targets" in source_tuple and mode is not None and hasattr(mode, "record"):
                Y = data[source_tuple.index("targets")]
                stry = str(Y).replace("\n", " ")
                mode.record.handle_line("data Y " + stry + "\n")

            for on_load_batch in self.on_load_batch:
                on_load_batch(mapping.nest(data))

            self.before_step(model)
            self.optimizer.minimize(*data)
            self.after_step(model)
            actual_batch_size = flat_data_specs[0].np_batch_size(data)
            model.monitor.report_batch(actual_batch_size)
Developer ID: pangyuteng, Project: chalearn2014, Lines: 55, Source file: bgd.py


Example 5: setup

    def setup(self):
        self.X = T.matrix('X')
        self.Y = T.matrix('Y')

        # Taken from pylearn2/training_algorithms/sgd.py


        data_specs = self.cost.get_data_specs(self.model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name, batch_size = self.batch_size)
            theano_args.append(arg)
        print 'BATCH SIZE=',self.batch_size
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        print self.cost
        fixed_var_descr = self.cost.get_fixed_var_descr(self.model, nested_args)
        print self.cost
        self.on_load_batch = fixed_var_descr.on_load_batch
        params = list(self.model.get_params())
        self.X = nested_args[0]
        self.Y = nested_args[1]
        init_grads, updates = self.cost.get_gradients(self.model, nested_args)

        params = self.model.get_params()
        # We need to replace parameters with purely symbolic variables in case some are shared
        # Create gradient and cost functions
        self.params = params
        symbolic_params = [self._convert_variable(param) for param in params]
        givens = dict(zip(params, symbolic_params))
        costfn = self.model.cost_from_X((self.X, self.Y))
        gradfns = [init_grads[param] for param in params]
        #self.symbolic_params = symbolic_params
        #self._loss = theano.function(symbolic_para[self.X, self.Y], self.model.cost_from_X((self.X, self.Y)))#, givens=givens)
        #1/0
        print 'Compiling function...'
        self.theano_f_df = theano.function(inputs=symbolic_params + [self.X, self.Y], outputs=[costfn] + gradfns, givens=givens)
        print 'done'
Developer ID: NuelASRB, Project: Sum-of-Functions-Optimizer, Lines: 51, Source file: model_gradient.py


Example 6: CallbackCost

class CallbackCost(Cost):
    """
    A Cost that runs callbacks on the data.
    Returns the sum of the data multiplied by the
    sum of all model parameters as the cost.
    The callback is run via the CallbackOp
    so the cost must be used to compute one
    of the outputs of your theano graph if you
    want the callback to get called.
    This cost is designed so that the SGD algorithm
    will result in the CallbackOp getting
    evaluated.
    """

    def __init__(self, data_callbacks, data_specs):
        """
        data_callbacks: optional, callbacks to run on data.
            It is either a Python callable, or a tuple (possibly nested),
            in the same format as data_specs.
        data_specs: (space, source) pair specifying the format
            and label associated to the data.
        """
        self.data_callbacks = data_callbacks
        self.data_specs = data_specs
        self._mapping = DataSpecsMapping(data_specs)

    def get_data_specs(self, model):
        return self.data_specs

    def expr(self, model, data):
        self.get_data_specs(model)[0].validate(data)
        callbacks = self.data_callbacks

        cb_tuple = self._mapping.flatten(callbacks, return_tuple=True)
        data_tuple = self._mapping.flatten(data, return_tuple=True)

        costs = []
        for (callback, data_var) in safe_zip(cb_tuple, data_tuple):
            orig_var = data_var
            data_var = CallbackOp(callback)(data_var)
            assert len(data_var.owner.inputs) == 1
            assert orig_var is data_var.owner.inputs[0]

            costs.append(data_var.sum())

        # sum() will call theano.add on the symbolic variables
        cost = sum(costs)
        model_terms = sum([param.sum() for param in model.get_params()])
        cost = cost * model_terms
        return cost
Developer ID: sonu5623, Project: pylearn2, Lines: 50, Source file: cost.py


Example 7: get_fixed_var_descr

        def get_fixed_var_descr(self, model, data, **kwargs):
            data_specs = self.get_data_specs(model)
            data_specs[0].validate(data)
            rval = FixedVarDescr()
            rval.fixed_vars = {'unsup_aux_var': unsup_counter}

            # The input to the function should be a flat, non-redundant tuple
            mapping = DataSpecsMapping(data_specs)
            data_tuple = mapping.flatten(data, return_tuple=True)
            theano_func = function([],
                    updates=[(unsup_counter, unsup_counter + 1)])
            def on_load(batch, mapping=mapping, theano_func=theano_func):
                return theano_func()
            rval.on_load_batch = [on_load]

            return rval
Developer ID: 123fengye741, Project: pylearn2, Lines: 16, Source file: test_bgd.py


Example 8: _build_data_specs

    def _build_data_specs(self):
        """
        Computes a nested data_specs for input and all channels

        Also computes the mapping to flatten it. This function is
        called from redo_theano.
        """
        # Ask the model what it needs
        m_space, m_source = self.model.get_monitoring_data_specs()
        input_spaces = [m_space]
        input_sources = [m_source]
        for channel in self.channels.values():
            space = channel.data_specs[0]
            assert isinstance(space, Space)
            input_spaces.append(space)
            input_sources.append(channel.data_specs[1])

        nested_space = CompositeSpace(input_spaces)
        nested_source = tuple(input_sources)

        self._nested_data_specs = (nested_space, nested_source)
        self._data_specs_mapping = DataSpecsMapping(self._nested_data_specs)

        flat_space = self._data_specs_mapping.flatten(nested_space,
                                                      return_tuple=True)
        flat_source = self._data_specs_mapping.flatten(nested_source,
                                                       return_tuple=True)
        self._flat_data_specs = (CompositeSpace(flat_space), flat_source)
Developer ID: goller, Project: pylearn2, Lines: 28, Source file: monitor.py


Example 9: test_nest_specs

def test_nest_specs():
    x1 = TT.matrix("x1")
    x2 = TT.matrix("x2")
    x3 = TT.matrix("x3")
    x4 = TT.matrix("x4")

    for nested_space, nested_source, nested_data in [
        (VectorSpace(dim=10), "target", x2),
        (CompositeSpace([VectorSpace(dim=3), VectorSpace(dim=9)]), ("features", "features"), (x1, x4)),
        (
            CompositeSpace([VectorSpace(dim=3), CompositeSpace([VectorSpace(dim=10), VectorSpace(dim=7)])]),
            ("features", ("target", "features")),
            (x1, (x2, x3)),
        ),
    ]:

        mapping = DataSpecsMapping((nested_space, nested_source))
        flat_space = mapping.flatten(nested_space)
        flat_source = mapping.flatten(nested_source)
        flat_data = mapping.flatten(nested_data)

        renested_space = mapping.nest(flat_space)
        renested_source = mapping.nest(flat_source)
        renested_data = mapping.nest(flat_data)

        assert_equal(renested_space, nested_space)
        assert_equal(renested_source, nested_source)
        assert_equal(renested_data, nested_data)
Developer ID: Bowen-C, Project: pylearn2, Lines: 28, Source file: test_data_specs.py


Example 10: test_nest_specs

def test_nest_specs():
    x1 = TT.matrix('x1')
    x2 = TT.matrix('x2')
    x3 = TT.matrix('x3')
    x4 = TT.matrix('x4')

    for nested_space, nested_source, nested_data in [
            (VectorSpace(dim=10), 'target', x2),
            (CompositeSpace([VectorSpace(dim=3), VectorSpace(dim=9)]),
                ('features', 'features'),
                (x1, x4)),
            (CompositeSpace([VectorSpace(dim=3),
                             CompositeSpace([VectorSpace(dim=10),
                                             VectorSpace(dim=7)])]),
                ('features', ('target', 'features')),
                (x1, (x2, x3))),
            ]:

        mapping = DataSpecsMapping((nested_space, nested_source))
        flat_space = mapping.flatten(nested_space)
        flat_source = mapping.flatten(nested_source)
        flat_data = mapping.flatten(nested_data)

        renested_space = mapping.nest(flat_space)
        renested_source = mapping.nest(flat_source)
        renested_data = mapping.nest(flat_data)

        assert_equal(renested_space, nested_space)
        assert_equal(renested_source, nested_source)
        assert_equal(renested_data, nested_data)
Developer ID: 123fengye741, Project: pylearn2, Lines: 30, Source file: test_data_specs.py


Example 11: __init__

 def __init__(self, data_callbacks, data_specs):
     """
     data_callbacks: optional, callbacks to run on data.
         It is either a Python callable, or a tuple (possibly nested),
         in the same format as data_specs.
     data_specs: (space, source) pair specifying the format
         and label associated to the data.
     """
     self.data_callbacks = data_callbacks
     self.data_specs = data_specs
     self._mapping = DataSpecsMapping(data_specs)
Developer ID: sonu5623, Project: pylearn2, Lines: 11, Source file: cost.py


Example 12: get_fixed_var_descr

        def get_fixed_var_descr(self, model, data):
            data_specs = self.get_data_specs(model)
            data_specs[0].validate(data)
            rval = FixedVarDescr()
            rval.fixed_vars = {'sup_aux_var': sup_counter}
            rval.data_specs = data_specs

            # data has to be flattened into a tuple before being passed
            # to `function`.
            mapping = DataSpecsMapping(data_specs)
            flat_data = mapping.flatten(data, return_tuple=True)
            theano_func = function(flat_data,
                                 updates=[(sup_counter, sup_counter + 1)])
            # the on_load_batch function will take numerical data formatted
            # as rval.data_specs, so we have to flatten it inside the
            # returned function too.
            # Using default argument binds the variables used in the lambda
            # function to the value they have when the lambda is defined.
            on_load = (lambda batch, mapping=mapping, theano_func=theano_func:
                    theano_func(*mapping.flatten(batch, return_tuple=True)))
            rval.on_load_batch = [on_load]
            return rval
Developer ID: Alienfeel, Project: pylearn2, Lines: 22, Source file: test_bgd.py


Example 13: test_flatten_specs

def test_flatten_specs():
    for space, source, flat_space, flat_source in [
        # (None, None),
        (VectorSpace(dim=5), "features", VectorSpace(dim=5), "features"),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=2)]),
            ("features", "features"),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=2)]),
            ("features", "features"),
        ),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=5)]),
            ("features", "targets"),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=5)]),
            ("features", "targets"),
        ),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=5)]),
            ("features", "features"),
            VectorSpace(dim=5),
            "features",
        ),
        (
            CompositeSpace([VectorSpace(dim=5), CompositeSpace([VectorSpace(dim=9), VectorSpace(dim=12)])]),
            ("features", ("features", "targets")),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=9), VectorSpace(dim=12)]),
            ("features", "features", "targets"),
        ),
        (
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=9), VectorSpace(dim=12)]),
            ("features", "features", "targets"),
            CompositeSpace([VectorSpace(dim=5), VectorSpace(dim=9), VectorSpace(dim=12)]),
            ("features", "features", "targets"),
        ),
    ]:

        mapping = DataSpecsMapping((space, source))
        rval = (mapping.flatten(space), mapping.flatten(source))
        assert_equal((flat_space, flat_source), rval)
Developer ID: Bowen-C, Project: pylearn2, Lines: 39, Source file: test_data_specs.py


Example 14: setup

    def setup(self, model, dataset, algorithm):
        self.origin = model.get_param_vector()

        cost = algorithm.cost
        # Cargo cult all the Pascal bullshit needed to evaluate the fucking cost function now
        # =======================================
        data_specs = cost.get_data_specs(model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name,
                                          batch_size=self.batch_size)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)
        # End cargo culting
        # ======================

        print "Compiling cost function..."
        cost_fn = function(theano_args, cost_value)
        self.cost_fn = cost_fn
Developer ID: cc13ny, Project: galatea, Lines: 38, Source file: __init__.py


Example 15: setup

    def setup(self, model, dataset):
        """
        Allows the training algorithm to do some preliminary configuration
        *before* we actually start training the model. The dataset is provided
        in case other derived training algorithms need to modify model based on
        the dataset.

        Parameters
        ----------
        model : object
            A Python object representing the model to train loosely \
            implementing the interface of models.model.Model.
        dataset : pylearn2.datasets.dataset.Dataset
            Dataset object used to draw training data
        """
        self.model = model

        if self.cost is None:
            self.cost = model.get_default_cost()

        if self.batch_size is None:
            self.batch_size = model.force_batch_size
        else:
            batch_size = self.batch_size
            if self.set_batch_size:
                model.set_batch_size(batch_size)
            elif hasattr(model, 'force_batch_size'):
                if not (model.force_batch_size <= 0 or batch_size ==
                        model.force_batch_size):
                    raise ValueError("batch_size is %d but " +
                                     "model.force_batch_size is %d" %
                                     (batch_size, model.force_batch_size))

        self.monitor = Monitor.get_monitor(model)
        self.monitor.set_theano_function_mode(self.theano_function_mode)

        data_specs = self.cost.get_data_specs(model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space,
        # named according to the sources.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = 'BGD_[%s]' % source
            arg = space.make_theano_batch(name=name)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with their data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = self.cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)
        grads, grad_updates = self.cost.get_gradients(
                model, nested_args, ** fixed_var_descr.fixed_vars)

        assert isinstance(grads, OrderedDict)
        assert isinstance(grad_updates, OrderedDict)

        if cost_value is None:
            raise ValueError("BGD is incompatible with " + str(self.cost) +
                             " because it is intractable, but BGD uses the " +
                             "cost function value to do line searches.")

        # obj_prereqs has to be a list of function f called with f(*data),
        # where data is a data tuple coming from the iterator.
        # this function enables capturing "mapping" and "f", while
        # enabling the "*data" syntax
        def capture(f, mapping=mapping):
            new_f = lambda *args: f(mapping.flatten(args, return_tuple=True))
            return new_f

        obj_prereqs = [capture(f) for f in fixed_var_descr.on_load_batch]

        if self.monitoring_dataset is not None:
            self.monitor.setup(
                    dataset=self.monitoring_dataset,
                    cost=self.cost,
                    batch_size=self.batch_size,
                    num_batches=self.monitoring_batches,
                    obj_prereqs=obj_prereqs,
                    cost_monitoring_args=fixed_var_descr.fixed_vars)

            # TODO : Why is this commented?
            '''
            channels = model.get_monitoring_channels(theano_args)
            if not isinstance(channels, dict):
                raise TypeError("model.get_monitoring_channels must return a "
                                "dictionary, but it returned " + str(channels))
            channels.update(self.cost.get_monitoring_channels(model, theano_args, ** fixed_var_descr.fixed_vars))

            for dataset_name in self.monitoring_dataset:
                if dataset_name == '':
                    prefix = ''
                else:
#......... part of the code omitted here .........
Developer ID: alouisos, Project: pylearn2, Lines: 101, Source file: bgd.py


Example 16: Monitor

class Monitor(object):
    """
    A class for monitoring Models while they are being trained.

    A monitor object records the number of minibatches and number of
    examples the model has trained, as well as any number of "channels"
    that track quantities of interest (examples: the objective
    function, measures of hidden unit activity, reconstruction error,
    sum of squared second derivatives, average norm of the weight
    vectors, etc.)

    Parameters
    ----------
    model : `pylearn2.models.model.Model`
    """

    def __init__(self, model):
        self.training_succeeded = False
        self.model = model
        self.channels = OrderedDict()
        self._num_batches_seen = 0
        self._examples_seen = 0
        self._epochs_seen = 0
        self._datasets = []
        self._iteration_mode = []
        self._batch_size = []
        self._num_batches = []
        self._dirty = True
        self._rng_seed = []
        self.names_to_del = ['theano_function_mode']
        self.t0 = time.time()
        self.theano_function_mode = None

        # Initialize self._nested_data_specs, self._data_specs_mapping,
        # and self._flat_data_specs
        self._build_data_specs()

    def _build_data_specs(self):
        """
        Computes a nested data_specs for input and all channels

        Also computes the mapping to flatten it. This function is
        called from redo_theano.
        """
        # Ask the model what it needs
        m_space, m_source = self.model.get_monitoring_data_specs()
        input_spaces = [m_space]
        input_sources = [m_source]
        for channel in self.channels.values():
            space = channel.data_specs[0]
            assert isinstance(space, Space)
            input_spaces.append(space)
            input_sources.append(channel.data_specs[1])

        nested_space = CompositeSpace(input_spaces)
        nested_source = tuple(input_sources)

        self._nested_data_specs = (nested_space, nested_source)
        self._data_specs_mapping = DataSpecsMapping(self._nested_data_specs)

        flat_space = self._data_specs_mapping.flatten(nested_space,
                                                      return_tuple=True)
        flat_source = self._data_specs_mapping.flatten(nested_source,
                                                       return_tuple=True)
        self._flat_data_specs = (CompositeSpace(flat_space), flat_source)

    def set_theano_function_mode(self, mode):
        """
        .. todo::

            WRITEME

        Parameters
        ----------
        mode : theano.compile.Mode
            Theano functions for the monitoring channels will be
            compiled and run using this mode.
        """
        if self.theano_function_mode != mode:
            self._dirty = True
            self.theano_function_mode = mode

    def add_dataset(self, dataset, mode='sequential', batch_size=None,
                    num_batches=None, seed=None):
        """
        Determines the data used to calculate the values of each channel.

        Parameters
        ----------
        dataset : object
            A `pylearn2.datasets.Dataset` object.
        mode : str or object, optional
            Iteration mode; see the docstring of the `iterator` method
            on `pylearn2.datasets.Dataset` for details.
        batch_size : int, optional
            The size of an individual batch. Optional if `mode` is
            'sequential' and `num_batches` is specified (batch size
            will be calculated based on full dataset size).
        num_batches : int, optional
            The total number of batches. Unnecessary if `mode` is
#......... part of the code omitted here .........
Developer ID: goller, Project: pylearn2, Lines: 101, Source file: monitor.py


Example 17: agent_train

    def agent_train(self, terminal):
        """
        Training function.

        terminal: boolean
            Whether current state is a terminal state.
        """
        # Wait until we have enough data to train
        if self.action_count >= ((self.train.algorithm.batch_size+1)*self.k+1):
            tic = time()
            if self.train_setup == 0:
                self.train.main_loop()

                data_specs = self.train.algorithm.cost.get_data_specs(
                    self.model)

                # The iterator should be built from flat data specs, so it
                # returns flat, non-redundant tuples of data.
                mapping = DataSpecsMapping(data_specs)
                space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
                source_tuple = mapping.flatten(
                    data_specs[1],
                    return_tuple=True
                )
                if len(space_tuple) == 0:
                    # No data will be returned by the iterator, and it is
                    # impossible to know the size of the actual batch. It
                    # is not decided yet what the right thing to do should be.
                    raise NotImplementedError(
                        "Unable to train with SGD, because the cost does not"
                        " actually use data from the data set. "
                        "data_specs: %s" % str(data_specs)
                    )
                flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
                self.flat_data_specs = flat_data_specs
                self.train_setup = 1

            else:
                tic_iter = time()
                temp_iter = self.train.dataset.iterator(
                    mode=self.train.algorithm.train_iteration_mode,
                    batch_size=self.train.algorithm.batch_size,
                    data_specs=self.flat_data_specs,
                    return_tuple=True,
                    rng=self.train.algorithm.rng,
                    num_batches=self.train.algorithm.batches_per_iter
                )
                toc_iter = time()
                log.debug('Iter creation time: %0.2f' % (toc_iter - tic_iter))

                tic_next = time()
                batch = temp_iter.next()
                toc_next = time()
                log.debug('Iter next time: %0.2f' % (toc_next - tic_next))

                tic_sgd = time()
                self.train.algorithm.sgd_update(*batch)
                toc_sgd = time()
                log.debug('SGD time: %0.2f' % (toc_sgd - tic_sgd))

                log.info('Frames seen: %d' % self.all_time_total_frames)
                log.info('Epsilon: %0.10f' % self.epsilon)

            toc = time()
            self.episode_training_time += toc-tic
            log.debug('Real train time: %0.2f' % (toc-tic))
Developer ID: UncleYu, Project: hedgehog, Lines: 66, Source file: basic.py


Example 18: setup

    def setup(self, model, dataset):
        """
        Compiles the theano functions needed for the train method.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        """
        if self.cost is None:
            self.cost = model.get_default_cost()

        inf_params = [param for param in model.get_params()
                      if np.any(np.isinf(param.get_value()))]
        if len(inf_params) > 0:
            raise ValueError("These params are Inf: "+str(inf_params))
        if any([np.any(np.isnan(param.get_value()))
                for param in model.get_params()]):
            nan_params = [param for param in model.get_params()
                          if np.any(np.isnan(param.get_value()))]
            raise ValueError("These params are NaN: "+str(nan_params))
        self.model = model

        self._synchronize_batch_size(model)
        model._test_batch_size = self.batch_size
        self.monitor = Monitor.get_monitor(model)
        self.monitor._sanity_check()

        # test if force batch size and batch size
        if getattr(model, "force_batch_size", False) and \
           any(dataset.get_design_matrix().shape[0] % self.batch_size != 0 for
               dataset in self.monitoring_dataset.values()) and \
           not has_uniform_batch_size(self.monitor_iteration_mode):

            raise ValueError("Dataset size is not a multiple of batch size."
                             "You should set monitor_iteration_mode to "
                             "even_sequential, even_shuffled_sequential or "
                             "even_batchwise_shuffled_sequential")

        data_specs = self.cost.get_data_specs(self.model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name,
                                          batch_size=self.batch_size)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = self.cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)

        if cost_value is not None and cost_value.name is None:
            # Concatenate the name of all tensors in theano_args !?
            cost_value.name = 'objective'

        # Set up monitor to model the objective value, learning rate,
        # momentum (if applicable), and extra channels defined by
        # the cost
        learning_rate = self.learning_rate
        if self.monitoring_dataset is not None:
            if (self.monitoring_batch_size is None and
                    self.monitoring_batches is None):
                self.monitoring_batch_size = self.batch_size
                self.monitoring_batches = self.batches_per_iter
            self.monitor.setup(dataset=self.monitoring_dataset,
                               cost=self.cost,
                               batch_size=self.monitoring_batch_size,
                               num_batches=self.monitoring_batches,
                               extra_costs=self.monitoring_costs,
                               mode=self.monitor_iteration_mode)
            dataset_name = self.monitoring_dataset.keys()[0]
            monitoring_dataset = self.monitoring_dataset[dataset_name]
            #TODO: have Monitor support non-data-dependent channels
            self.monitor.add_channel(name='learning_rate',
                                     ipt=None,
                                     val=learning_rate,
                                     data_specs=(NullSpace(), ''),
                                     dataset=monitoring_dataset)

            if self.learning_rule:
                self.learning_rule.add_channels_to_monitor(
                        self.monitor,
                        monitoring_dataset)

        params = list(model.get_params())
#......... part of the code omitted here .........
Developer ID: AdityoSanjaya, Project: adversarial, Lines: 101, Source file: sgd_alt.py
