
Python dbm.flatten Function Code Examples


This article collects typical usage examples of the Python function pylearn2.models.dbm.flatten. If you are wondering what flatten does, or how and where to use it, the curated code examples below should help.



The following presents 13 code examples of the flatten function, sorted by popularity by default.
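Before diving into the examples, it helps to know what flatten itself does. Its source is not reproduced on this page, so the following is a minimal reimplementation sketch, assuming (as every example below does) that flatten recursively expands nested lists and tuples into a flat list of leaf objects such as Theano variables:

    def flatten(obj):
        """Recursively flatten nested lists/tuples into a flat list of leaves."""
        if isinstance(obj, (list, tuple)):
            result = []
            for elem in obj:
                result.extend(flatten(elem))
            return result
        # Non-sequence leaf (tensor, shared variable, ...): wrap it in a list.
        return [obj]

    # Mixed nesting collapses into one flat list:
    print(flatten([1, (2, [3, 4]), 5]))  # -> [1, 2, 3, 4, 5]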

Example 1: get_fixed_var_descr

    def get_fixed_var_descr(self, model, X, Y):
        """
        .. todo::

            WRITEME
        """

        assert Y is not None

        batch_size = model.batch_size

        drop_mask_X = sharedX(model.get_input_space().get_origin_batch(batch_size))
        drop_mask_X.name = 'drop_mask'

        X_space = model.get_input_space()

        updates = OrderedDict()
        rval = FixedVarDescr()
        inputs = [X, Y]

        if not self.supervised:
            update_X = self.mask_gen(X, X_space = X_space)
        else:
            drop_mask_Y = sharedX(np.ones(batch_size,))
            drop_mask_Y.name = 'drop_mask_Y'
            update_X, update_Y = self.mask_gen(X, Y, X_space)
            updates[drop_mask_Y] = update_Y
            rval.fixed_vars['drop_mask_Y'] = drop_mask_Y
        if self.mask_gen.sync_channels:
            n = update_X.ndim
            assert n == drop_mask_X.ndim - 1
            update_X.name = 'raw_update_X'
            zeros_like_X = T.zeros_like(X)
            zeros_like_X.name = 'zeros_like_X'
            update_X = zeros_like_X + update_X.dimshuffle(0,1,2,'x')
            update_X.name = 'update_X'
        updates[drop_mask_X] = update_X

        rval.fixed_vars['drop_mask'] = drop_mask_X

        if hasattr(model.inference_procedure, 'V_dropout'):
            include_prob = model.inference_procedure.include_prob
            include_prob_V = model.inference_procedure.include_prob_V
            include_prob_Y = model.inference_procedure.include_prob_Y

            theano_rng = MRG_RandomStreams(2012+11+20)
            for elem in flatten([model.inference_procedure.V_dropout]):
                updates[elem] = theano_rng.binomial(p=include_prob_V, size=elem.shape, dtype=elem.dtype, n=1) / include_prob_V
            if "Softmax" in str(type(model.hidden_layers[-1])):
                hid = model.inference_procedure.H_dropout[:-1]
                y = model.inference_procedure.H_dropout[-1]
                updates[y] = theano_rng.binomial(p=include_prob_Y, size=y.shape, dtype=y.dtype, n=1) / include_prob_Y
            else:
                hid = model.inference_procedure.H_dropout
            for elem in flatten(hid):
                updates[elem] = theano_rng.binomial(p=include_prob, size=elem.shape, dtype=elem.dtype, n=1) / include_prob

        rval.on_load_batch = [utils.function(inputs, updates=updates)]

        return rval
Author: amishtal, Project: pylearn2, Lines: 60, Source: dbm.py
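Note the division by the include probability in each update above: the sampled binomial mask is rescaled so that every unit keeps its expected value, the usual "inverted dropout" trick. A standalone numpy sketch of the scaling (hypothetical values; this is not pylearn2 API):

    import numpy as np

    rng = np.random.RandomState(0)
    include_prob = 0.5
    activations = np.ones((4, 3))

    # Sample a binary keep-mask and rescale by 1/include_prob so that
    # E[mask / include_prob] == 1 and expected activations are unchanged.
    mask = rng.binomial(n=1, p=include_prob, size=activations.shape) / include_prob
    dropped = activations * mask
    print(dropped.mean())  # ~1.0 on average over many draws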


Example 2: get_gradients

    def get_gradients(self, model, data, **kwargs):
        self.get_data_specs(model)[0].validate(data)
        obj, scratch = self.base_cost(model, data, return_locals=True, **kwargs)
        if self.supervised:
            assert isinstance(data, (list, tuple))
            assert len(data) == 2
            (X, Y) = data
        else:
            X, = data
        interm_grads = OrderedDict()


        H_hat = scratch['H_hat']
        terms = scratch['terms']
        hidden_layers = scratch['hidden_layers']

        grads = OrderedDict()

        assert len(H_hat) == len(terms)
        assert len(terms) == len(hidden_layers)
        num_layers = len(hidden_layers)
        for i in xrange(num_layers):
            state = H_hat[i]
            layer = model.hidden_layers[i]
            term = terms[i]

            if term == 0.:
                continue
            else:
                print 'term is ', term

            if i == 0:
                state_below = X
                layer_below = model.visible_layer
            else:
                layer_below = model.hidden_layers[i-1]
                state_below = H_hat[i-1]
            state_below = layer_below.upward_state(state_below)

            components = flatten(state)

            real_grads = T.grad(term, components)

            fake_state = layer.linear_feed_forward_approximation(state_below)

            fake_components = flatten(fake_state)
            real_grads = OrderedDict(safe_zip(fake_components, real_grads))

            params = list(layer.get_params())
            fake_grads = T.grad(cost=None, consider_constant=flatten(state_below),
                    wrt=params, known_grads = real_grads)

            for param, grad in safe_zip(params, fake_grads):
                if param in grads:
                    grads[param] = grads[param] + grad
                else:
                    grads[param] = grad

        return grads, OrderedDict()
Author: tempbottle, Project: pylearn2, Lines: 59, Source: dbm.py


Example 3: get_monitoring_channels

    def get_monitoring_channels(self, data):
        """
        .. todo::

            WRITEME
        """
        space, source = self.get_monitoring_data_specs()
        space.validate(data)
        X = data
        history = self.mf(X, return_history=True)
        q = history[-1]

        rval = OrderedDict()

        ch = self.visible_layer.get_monitoring_channels()
        for key in ch:
            rval['vis_' + key] = ch[key]

        for state, layer in safe_zip(q, self.hidden_layers):
            ch = layer.get_monitoring_channels()
            for key in ch:
                rval[layer.layer_name + '_' + key] = ch[key]
            ch = layer.get_monitoring_channels_from_state(state)
            for key in ch:
                rval['mf_' + layer.layer_name + '_' + key] = ch[key]

        if len(history) > 1:
            prev_q = history[-2]

            flat_q = flatten(q)
            flat_prev_q = flatten(prev_q)

            mx = None
            for new, old in safe_zip(flat_q, flat_prev_q):
                cur_mx = abs(new - old).max()
                if new is old:
                    logger.error('{0} is {1}'.format(new, old))
                    assert False
                if mx is None:
                    mx = cur_mx
                else:
                    mx = T.maximum(mx, cur_mx)

            rval['max_var_param_diff'] = mx

            for layer, new, old in safe_zip(self.hidden_layers,
                                            q, prev_q):
                sum_diff = 0.
                for sub_new, sub_old in safe_zip(flatten(new), flatten(old)):
                    sum_diff += abs(sub_new - sub_old).sum()
                denom = self.batch_size * \
                    layer.get_total_state_space().get_total_dimension()
                denom = np.cast[config.floatX](denom)
                rval['mean_'+layer.layer_name+'_var_param_diff'] = \
                    sum_diff / denom

        return rval
Author: HALLAB-Halifax, Project: pylearn2, Lines: 57, Source: dbm.py


Example 4: nan_check

 def nan_check(i, node, fn):
     inputs = fn.inputs
     # TODO: figure out why individual inputs are themselves lists sometimes
     for x in flatten(inputs):
         do_check_on(x, node, fn, True)
     fn()
     outputs = fn.outputs
     for j, x in enumerate(flatten(outputs)):
         do_check_on(x, node, fn, False)
Author: JakeMick, Project: pylearn2, Lines: 9, Source: nan_guard.py
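The TODO in this example notes that individual thunk inputs can themselves be lists; flatten is what makes the NaN/Inf check robust to that nesting. A self-contained numpy sketch of the same pattern (is_bad is a hypothetical stand-in for do_check_on, and flatten is re-sketched inline):

    import numpy as np

    def flatten(obj):
        # Recursively expand nested lists/tuples into a flat list of leaves.
        if isinstance(obj, (list, tuple)):
            return [leaf for elem in obj for leaf in flatten(elem)]
        return [obj]

    def is_bad(x):
        # Hypothetical stand-in for do_check_on: flag NaNs / Infs in an array.
        arr = np.asarray(x)
        return np.isnan(arr).any() or np.isinf(arr).any()

    # Inputs may arrive arbitrarily nested, e.g. [array, [array, array]]:
    inputs = [np.array([1.0, 2.0]), [np.array([np.nan]), np.array([3.0])]]
    for x in flatten(inputs):
        if is_bad(x):
            print('NaN/Inf detected in', x)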


Example 5: _get_standard_neg

    def _get_standard_neg(self, model, layer_to_chains):
        params = list(model.get_params())

        warnings.warn("""TODO: reduce variance of negative phase by
                         integrating out the even-numbered layers. The
                         Rao-Blackwellize method can do this for you when
                         expected gradient = gradient of expectation, but
                         doing this in general is trickier.""")
        #layer_to_chains = model.rao_blackwellize(layer_to_chains)
        expected_energy_p = model.energy(
            layer_to_chains[model.visible_layer],
            [layer_to_chains[layer] for layer in model.hidden_layers]
        ).mean()

        samples = flatten(layer_to_chains.values())
        for i, sample in enumerate(samples):
            if sample.name is None:
                sample.name = 'sample_'+str(i)

        neg_phase_grads = OrderedDict(
            safe_zip(params, T.grad(-expected_energy_p, params,
                                    consider_constant=samples,
                                    disconnected_inputs='ignore'))
        )
        return neg_phase_grads
Author: abhiggarwal, Project: pylearn2, Lines: 25, Source: dbm.py


Example 6: nan_check

        def nan_check(i, node, fn):
            """
            Runs `fn` while checking its inputs and outputs for NaNs / Infs

            Parameters
            ----------
            i : currently ignored (TODO: determine why it is here or remove)
            node : theano.gof.Apply
                The Apply node currently being executed
            fn : callable
                The thunk to execute for this Apply node
            """
            inputs = fn.inputs
            # TODO: figure out why individual inputs are themselves lists sometimes
            for x in flatten(inputs):
                do_check_on(x, node, fn, True)
            fn()
            outputs = fn.outputs
            for j, x in enumerate(flatten(outputs)):
                do_check_on(x, node, fn, False)
Author: BloodD, Project: pylearn2, Lines: 20, Source: nan_guard.py


Example 7: _get_sampling_pos

    def _get_sampling_pos(self, model, X, Y):
        """
        .. todo::

            WRITEME
        """
        layer_to_clamp = OrderedDict([(model.visible_layer, True)])
        layer_to_pos_samples = OrderedDict([(model.visible_layer, X)])
        if self.supervised:
            # note: if the Y layer changes to something without linear energy,
            #       we'll need to make the expected energy clamp Y in the
            #       positive phase
            assert isinstance(model.hidden_layers[-1], Softmax)
            layer_to_clamp[model.hidden_layers[-1]] = True
            layer_to_pos_samples[model.hidden_layers[-1]] = Y
            hid = model.hidden_layers[:-1]
        else:
            assert Y is None
            hid = model.hidden_layers

        for layer in hid:
            mf_state = layer.init_mf_state()

            def recurse_zeros(x):
                if isinstance(x, tuple):
                    return tuple([recurse_zeros(e) for e in x])
                return x.zeros_like()

            layer_to_pos_samples[layer] = recurse_zeros(mf_state)

        layer_to_pos_samples = model.mcmc_steps(
            layer_to_state=layer_to_pos_samples,
            layer_to_clamp=layer_to_clamp,
            num_steps=self.num_gibbs_steps,
            theano_rng=self.theano_rng,
        )

        q = [layer_to_pos_samples[layer] for layer in model.hidden_layers]

        pos_samples = flatten(q)

        # The gradients of the expected energy under q are easy, we can just
        # do that in theano
        expected_energy_q = model.energy(X, q).mean()
        params = list(model.get_params())
        gradients = OrderedDict(
            safe_zip(
                params, T.grad(expected_energy_q, params, consider_constant=pos_samples, disconnected_inputs="ignore")
            )
        )
        return gradients
Author: jpompe, Project: pylearn2, Lines: 51, Source: dbm.py


Example 8: _get_variational_pos

    def _get_variational_pos(self, model, X, Y):
        """
        .. todo::

            WRITEME
        """
        if self.supervised:
            assert Y is not None
            # note: if the Y layer changes to something without linear energy,
            # we'll need to make the expected energy clamp Y in the positive
            # phase
            assert isinstance(model.hidden_layers[-1], Softmax)

        q = model.mf(X, Y)

        """
            Use the non-negativity of the KL divergence to construct a lower
            bound on the log likelihood. We can drop all terms that are
            constant with respect to the model parameters:

            log P(v) = L(v, q) + KL(q || P(h|v))
            L(v, q) = log P(v) - KL(q || P(h|v))
            L(v, q) = log P(v) - sum_h q(h) log q(h) + q(h) log P(h | v)
            L(v, q) = log P(v) + sum_h q(h) log P(h | v) + const
            L(v, q) = log P(v) + sum_h q(h) log P(h, v)
                               - sum_h q(h) log P(v) + const
            L(v, q) = sum_h q(h) log P(h, v) + const
            L(v, q) = sum_h q(h) -E(h, v) - log Z + const

            so the cost we want to minimize is
            expected_energy + log Z + const


            Note: for the RBM, this bound is exact, since the KL divergence
                  goes to 0.
        """

        variational_params = flatten(q)

        # The gradients of the expected energy under q are easy, we can just
        # do that in theano
        expected_energy_q = model.expected_energy(X, q).mean()
        params = list(model.get_params())
        gradients = OrderedDict(
            safe_zip(params, T.grad(expected_energy_q,
                                    params,
                                    consider_constant=variational_params,
                                    disconnected_inputs='ignore'))
        )
        return gradients
Author: jbornschein, Project: pylearn2, Lines: 50, Source: dbm.py
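The plain-text derivation in the docstring above can be written compactly in LaTeX, where const collects every term that does not depend on the model parameters:

    \begin{aligned}
    \log P(v) &= \mathcal{L}(v, q) + \mathrm{KL}\big(q \,\|\, P(h \mid v)\big) \\
    \mathcal{L}(v, q) &= \sum_h q(h) \log P(h, v) - \sum_h q(h) \log q(h) \\
                      &= -\mathbb{E}_{h \sim q}\big[E(v, h)\big] - \log Z + \mathrm{const}
    \end{aligned}

Maximizing the bound therefore amounts to minimizing the expected energy plus log Z; the code above differentiates the first term (expected_energy_q), while the log Z term is handled by the negative-phase methods shown in Examples 5 and 9.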


Example 9: _get_toronto_neg

    def _get_toronto_neg(self, model, layer_to_chains):
        """
        .. todo::

            WRITEME
        """
        # Ruslan Salakhutdinov's undocumented negative phase from
        # http://www.mit.edu/~rsalakhu/code_DBM/dbm_mf.m
        # IG copied it here without fully understanding it, so it
        # only applies to exactly the same model structure as
        # in that code.

        assert isinstance(model.visible_layer, BinaryVector)
        assert isinstance(model.hidden_layers[0], BinaryVectorMaxPool)
        assert model.hidden_layers[0].pool_size == 1
        assert isinstance(model.hidden_layers[1], BinaryVectorMaxPool)
        assert model.hidden_layers[1].pool_size == 1
        assert isinstance(model.hidden_layers[2], Softmax)
        assert len(model.hidden_layers) == 3

        params = list(model.get_params())

        V_samples = layer_to_chains[model.visible_layer]
        H1_samples, H2_samples, Y_samples = [layer_to_chains[layer] for
                                             layer in model.hidden_layers]

        H1_mf = model.hidden_layers[0].mf_update(
            state_below=model.visible_layer.upward_state(V_samples),
            state_above=model.hidden_layers[1].downward_state(H2_samples),
            layer_above=model.hidden_layers[1])
        Y_mf = model.hidden_layers[2].mf_update(
            state_below=model.hidden_layers[1].upward_state(H2_samples))
        H2_mf = model.hidden_layers[1].mf_update(
            state_below=model.hidden_layers[0].upward_state(H1_mf),
            state_above=model.hidden_layers[2].downward_state(Y_mf),
            layer_above=model.hidden_layers[2])

        expected_energy_p = model.energy(
            V_samples, [H1_mf, H2_mf, Y_samples]
        ).mean()

        constants = flatten([V_samples, H1_mf, H2_mf, Y_samples])

        neg_phase_grads = OrderedDict(
            safe_zip(params, T.grad(-expected_energy_p, params,
                                    consider_constant=constants)))
        return neg_phase_grads
Author: jbornschein, Project: pylearn2, Lines: 47, Source: dbm.py


Example 10: get_gradients

    def get_gradients(self, model, X, Y=None):
        """
        PCD approximation to the gradient.
        Keep in mind this is a cost, so we use
        the negative log likelihood.
        """

        layer_to_clamp = OrderedDict([(model.visible_layer, True )])
        layer_to_pos_samples = OrderedDict([(model.visible_layer, X)])
        if self.supervised:
            assert Y is not None
            # note: if the Y layer changes to something without linear energy,
            # we'll need to make the expected energy clamp Y in the positive phase
            assert isinstance(model.hidden_layers[-1], dbm.Softmax)
            layer_to_clamp[model.hidden_layers[-1]] = True
            layer_to_pos_samples[model.hidden_layers[-1]] = Y
            hid = model.hidden_layers[:-1]
        else:
            assert Y is None
            hid = model.hidden_layers

        for layer in hid:
            mf_state = layer.init_mf_state()
            def recurse_zeros(x):
                if isinstance(x, tuple):
                    return tuple([recurse_zeros(e) for e in x])
                return x.zeros_like()
            layer_to_pos_samples[layer] = recurse_zeros(mf_state)

        layer_to_pos_samples = model.mcmc_steps(
                layer_to_state=layer_to_pos_samples,
                layer_to_clamp=layer_to_clamp,
                num_steps=self.num_gibbs_steps,
                theano_rng = self.theano_rng)

        q = [layer_to_pos_samples[layer] for layer in model.hidden_layers]

        pos_samples = flatten(q)

        # The gradients of the expected energy under q are easy, we can just do that in theano
        expected_energy_q = model.energy(X, q).mean()
        params = list(model.get_params())
        gradients = OrderedDict(safe_zip(params, T.grad(expected_energy_q, params,
            consider_constant = pos_samples,
            disconnected_inputs = 'ignore')))

        """
        d/d theta log Z = (d/d theta Z) / Z
                        = (d/d theta sum_h sum_v exp(-E(v,h)) ) / Z
                        = (sum_h sum_v - exp(-E(v,h)) d/d theta E(v,h) ) / Z
                        = - sum_h sum_v P(v,h)  d/d theta E(v,h)
        """

        layer_to_chains = model.make_layer_to_state(self.num_chains)

        def recurse_check(l):
            if isinstance(l, (list, tuple)):
                for elem in l:
                    recurse_check(elem)
            else:
                assert l.get_value().shape[0] == self.num_chains

        recurse_check(layer_to_chains.values())

        model.layer_to_chains = layer_to_chains

        # Note that we replace layer_to_chains with a dict mapping to the new
        # state of the chains
        updates, layer_to_chains = model.get_sampling_updates(layer_to_chains,
                self.theano_rng, num_steps=self.num_gibbs_steps,
                return_layer_to_updated = True)


        if self.toronto_neg:
            # Ruslan Salakhutdinov's undocumented negative phase from
            # http://www.mit.edu/~rsalakhu/code_DBM/dbm_mf.m
            # IG copied it here without fully understanding it, so it
            # only applies to exactly the same model structure as
            # in that code.

            assert isinstance(model.visible_layer, dbm.BinaryVector)
            assert isinstance(model.hidden_layers[0], dbm.BinaryVectorMaxPool)
            assert model.hidden_layers[0].pool_size == 1
            assert isinstance(model.hidden_layers[1], dbm.BinaryVectorMaxPool)
            assert model.hidden_layers[1].pool_size == 1
            assert isinstance(model.hidden_layers[2], dbm.Softmax)
            assert len(model.hidden_layers) == 3

            V_samples = layer_to_chains[model.visible_layer]
            H1_samples, H2_samples, Y_samples = [layer_to_chains[layer] for layer in model.hidden_layers]

            H1_mf = model.hidden_layers[0].mf_update(state_below=model.visible_layer.upward_state(V_samples),
                                                    state_above=model.hidden_layers[1].downward_state(H2_samples),
                                                    layer_above=model.hidden_layers[1])
            Y_mf = model.hidden_layers[2].mf_update(state_below=model.hidden_layers[1].upward_state(H2_samples))
            H2_mf = model.hidden_layers[1].mf_update(state_below=model.hidden_layers[0].upward_state(H1_mf),
                                                    state_above=model.hidden_layers[2].downward_state(Y_mf),
                                                    layer_above=model.hidden_layers[2])

            expected_energy_p = model.energy(V_samples, [H1_mf, H2_mf, Y_samples]).mean()
#......... remainder of this code omitted .........
Author: dnouri, Project: pylearn2, Lines: 101, Source: dbm.py
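In LaTeX, the identity stated in the inline string of this example is:

    \frac{\partial}{\partial \theta} \log Z
      = \frac{1}{Z}\,\frac{\partial Z}{\partial \theta}
      = \frac{1}{Z} \sum_{h} \sum_{v} -e^{-E(v,h)}\,\frac{\partial E(v,h)}{\partial \theta}
      = -\sum_{h} \sum_{v} P(v,h)\,\frac{\partial E(v,h)}{\partial \theta}

That is, the gradient of the log partition function is the negative expected energy gradient under the model distribution, which is why the negative phase estimates it from the persistent sampling chains.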


Example 11: keep_func

    keep = keep_func(filter_me.X, filter_me.y)

    keep = keep.astype('bool')

    filter_me.X = filter_me.X[keep, :]
    filter_me.y = filter_me.y[keep, :]



dropout = hasattr(model.inference_procedure, 'V_dropout')
if dropout:
    include_prob = model.inference_procedure.include_prob
    theano_rng = MRG_RandomStreams(2012+11+20)
    updates = {}
    for elem in flatten([model.inference_procedure.V_dropout, model.inference_procedure.H_dropout]):
        updates[elem] = theano_rng.binomial(p=include_prob, size=elem.shape, dtype=elem.dtype, n=1) / include_prob
    do_dropout = function([], updates=updates)


while True:
    if dropout:
        do_dropout()

    if cost.supervised:
        X, Y = dataset.get_batch_design(m, include_labels = True)
    else:
        X = dataset.get_batch_design(m)
    if topo:
        X = dataset.get_topological_view(X)
Author: cc13ny, Project: galatea, Lines: 29, Source: animate_multi_inference.py


Example 12: expr

    def expr(self, model, data, drop_mask = None, drop_mask_Y = None,
            return_locals = False, include_toronto = True, ** kwargs):
        """
        .. todo::

            WRITEME
        """

        if self.supervised:
            X, Y = data
        else:
            X = data
            Y = None

        if not self.supervised:
            assert drop_mask_Y is None
            # ignore Y if some other cost is supervised and has made it get
            # passed in (can this still happen after the (space, source)
            # interface change?)
            Y = None
        if self.supervised:
            assert Y is not None
            if drop_mask is not None:
                assert drop_mask_Y is not None

        if not hasattr(model,'cost'):
            model.cost = self
        if not hasattr(model,'mask_gen'):
            model.mask_gen = self.mask_gen

        dbm = model

        X_space = model.get_input_space()

        if drop_mask is None:
            if self.supervised:
                drop_mask, drop_mask_Y = self.mask_gen(X, Y, X_space=X_space)
            else:
                drop_mask = self.mask_gen(X, X_space=X_space)

        if drop_mask_Y is not None:
            assert drop_mask_Y.ndim == 1

        if drop_mask.ndim < X.ndim:
            if self.mask_gen is not None:
                assert self.mask_gen.sync_channels
            if X.ndim != 4:
                raise NotImplementedError()
            drop_mask = drop_mask.dimshuffle(0,1,2,'x')

        if not hasattr(self,'noise'):
            self.noise = False

        history = dbm.do_inpainting(X, Y = Y, drop_mask = drop_mask,
                drop_mask_Y = drop_mask_Y, return_history = True,
                noise = self.noise,
                niter = self.niter, block_grad = self.block_grad)
        final_state = history[-1]

        new_drop_mask = None
        new_drop_mask_Y = None
        new_history = [ None for state in history ]

        if not hasattr(self, 'both_directions'):
            self.both_directions = False
        if self.both_directions:
            new_drop_mask = 1. - drop_mask
            if self.supervised:
                new_drop_mask_Y = 1. - drop_mask_Y
            new_history = dbm.do_inpainting(X, Y = Y,
                    drop_mask=new_drop_mask,
                    drop_mask_Y=new_drop_mask_Y, return_history=True,
                    noise = self.noise,
                    niter = self.niter, block_grad = self.block_grad)

        new_final_state = new_history[-1]

        total_cost, sublocals = self.cost_from_states(final_state,
                new_final_state, dbm, X, Y, drop_mask, drop_mask_Y,
                new_drop_mask, new_drop_mask_Y,
                return_locals=True)
        l1_act_cost = sublocals['l1_act_cost']
        inpaint_cost = sublocals['inpaint_cost']
        reweighted_act_cost = sublocals['reweighted_act_cost']

        if not hasattr(self, 'robustness'):
            self.robustness = None
        if self.robustness is not None:
            inpainting_H_hat = history[-1]['H_hat']
            mf_H_hat = dbm.mf(X, Y=Y)
            if self.supervised:
                inpainting_H_hat = inpainting_H_hat[:-1]
                mf_H_hat = mf_H_hat[:-1]
                for ihh, mhh in safe_izip(flatten(inpainting_H_hat),
                        flatten(mf_H_hat)):
                    total_cost += self.robustness * T.sqr(mhh-ihh).sum()

        if not hasattr(self, 'toronto_act_targets'):
            self.toronto_act_targets = None
        toronto_act_cost = None
#......... remainder of this code omitted .........
Author: jbornschein, Project: pylearn2, Lines: 101, Source: dbm.py


Example 13: get_fixed_var_descr

    def get_fixed_var_descr(self, model, data):
        """
        .. todo::

            WRITEME
        """

        X, Y = data

        assert Y is not None

        batch_size = model.batch_size

        drop_mask_X = sharedX(model.get_input_space().get_origin_batch(batch_size))
        drop_mask_X.name = "drop_mask"

        X_space = model.get_input_space()

        updates = OrderedDict()
        rval = FixedVarDescr()
        inputs = [X, Y]

        if not self.supervised:
            update_X = self.mask_gen(X, X_space=X_space)
        else:
            drop_mask_Y = sharedX(np.ones(batch_size))
            drop_mask_Y.name = "drop_mask_Y"
            update_X, update_Y = self.mask_gen(X, Y, X_space)
            updates[drop_mask_Y] = update_Y
            rval.fixed_vars["drop_mask_Y"] = drop_mask_Y
        if self.mask_gen.sync_channels:
            n = update_X.ndim
            assert n == drop_mask_X.ndim - 1
            update_X.name = "raw_update_X"
            zeros_like_X = T.zeros_like(X)
            zeros_like_X.name = "zeros_like_X"
            update_X = zeros_like_X + update_X.dimshuffle(0, 1, 2, "x")
            update_X.name = "update_X"
        updates[drop_mask_X] = update_X

        rval.fixed_vars["drop_mask"] = drop_mask_X

        if hasattr(model.inference_procedure, "V_dropout"):
            include_prob = model.inference_procedure.include_prob
            include_prob_V = model.inference_procedure.include_prob_V
            include_prob_Y = model.inference_procedure.include_prob_Y

            theano_rng = make_theano_rng(None, 2012 + 10 + 20, which_method="binomial")
            for elem in flatten([model.inference_procedure.V_dropout]):
                updates[elem] = (
                    theano_rng.binomial(p=include_prob_V, size=elem.shape, dtype=elem.dtype, n=1) / include_prob_V
                )
            if "Softmax" in str(type(model.hidden_layers[-1])):
                hid = model.inference_procedure.H_dropout[:-1]
                y = model.inference_procedure.H_dropout[-1]
                updates[y] = theano_rng.binomial(p=include_prob_Y, size=y.shape, dtype=y.dtype, n=1) / include_prob_Y
            else:
                hid = model.inference_procedure.H_dropout
            for elem in flatten(hid):
                updates[elem] = (
                    theano_rng.binomial(p=include_prob, size=elem.shape, dtype=elem.dtype, n=1) / include_prob
                )

        rval.on_load_batch = [utils.function(inputs, updates=updates)]

        return rval
Author: jpompe, Project: pylearn2, Lines: 66, Source: dbm.py



Note: The pylearn2.models.dbm.flatten examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects; copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not republish without permission.

