• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python rng.make_np_rng函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中pylearn2.utils.rng.make_np_rng函数的典型用法代码示例。如果您正苦于以下问题:Python make_np_rng函数的具体用法?Python make_np_rng怎么用?Python make_np_rng使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了make_np_rng函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: __init__

    def __init__(self,
                 window_shape,
                 randomize=None,
                 randomize_once=None,
                 center=None,
                 rng=(2013, 2, 20),
                 pad_randomized=0,
                 flip=True):
        """Configure window cropping/flipping and seed the numpy RNG."""
        self._window_shape = tuple(window_shape)

        # Populated later by setup(): maps each Dataset listed in
        # self._randomize / self._randomize_once to a zero-padded copy of
        # its topological view.
        self._original = None

        # Empty list stands in for "no datasets" in each category.
        self._randomize = randomize or []
        self._randomize_once = randomize_once or []
        self._center = center or []
        self._pad_randomized = pad_randomized
        self._flip = flip

        got_no_datasets = (randomize is None and randomize_once is None
                           and center is None)
        if got_no_datasets:
            warnings.warn(self.__class__.__name__ + " instantiated without "
                          "any dataset arguments, and therefore does nothing",
                          stacklevel=2)

        # Seed may be a tuple (default) or an existing RandomState.
        self._rng = make_np_rng(rng, which_method="random_integers")
开发者ID:DevSinghSachan,项目名称:pylearn2,代码行数:27,代码来源:window_flip.py


示例2: make_sparse_random_conv2D

def make_sparse_random_conv2D(num_nonzero, input_space, output_space,
                              kernel_shape, pad=0, kernel_stride=(1, 1),
                              border_mode='valid', message="", rng=None,
                              partial_sum=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse
    """
    rng = make_np_rng(rng, default_sparse_seed,
                      which_method=['randn', 'randint'])

    # Kernel tensor laid out as (in_channels, rows, cols, out_channels).
    weights = np.zeros((input_space.num_channels, kernel_shape[0],
                        kernel_shape[1], output_space.num_channels))

    def draw_coord():
        # Random (channel, row, col) position within one output's kernel.
        return [rng.randint(dim) for dim in weights.shape[0:3]]

    # Place exactly num_nonzero gaussian entries per output channel,
    # re-drawing coordinates until an untouched (zero) slot is found.
    for out_ch in xrange(output_space.num_channels):
        for _ in xrange(num_nonzero):
            ch, row, col = draw_coord()
            while weights[ch, row, col, out_ch] != 0:
                ch, row, col = draw_coord()
            weights[ch, row, col, out_ch] = rng.randn()

    W = sharedX(weights)

    return Conv2D(filters=W, input_axes=input_space.axes,
                  output_axes=output_space.axes, kernel_stride=kernel_stride,
                  pad=pad, message=message, partial_sum=partial_sum)
开发者ID:CURG,项目名称:pylearn2,代码行数:34,代码来源:conv2d_c01b.py


示例3: make_weights

    def make_weights(input_space, output_space, kernel_shape, **kwargs):
        """Return a shared weight tensor initialized uniformly in [-irange, irange]."""
        # NOTE: `rng`, `default_seed` and `irange` are free variables from
        # the enclosing scope, not parameters of this function.
        weight_rng = make_np_rng(rng, default_seed, which_method='uniform')

        weight_shape = (output_space.num_channels, input_space.num_channels,
                        kernel_shape[0], kernel_shape[1])

        return sharedX(weight_rng.uniform(-irange, irange, weight_shape))
开发者ID:Refefer,项目名称:pylearn2,代码行数:7,代码来源:conv2d.py


示例4: __init__

    def __init__(self, which_set='debug', start=None, end=None, shuffle=True,
                 lazy_load=False, rng=_default_seed):
        """
        Build the dataset for the requested split.

        Parameters
        ----------
        which_set : str
            One of 'debug', 'train', 'test'.
        rng : seed or RandomState
            Seed specification forwarded to make_np_rng.
        """
        assert which_set in ['debug', 'train', 'test']
        if which_set == 'debug':
            maxlen, n_samples, n_annotations, n_features = 10, 12, 13, 14
            X = N.zeros(shape=(n_samples, maxlen))
            X_mask = X  # same with X
            Z = N.zeros(shape=(n_annotations, n_samples, n_features))
        elif which_set == 'train':
            # TODO: 'train' split not implemented; X/X_mask/Z stay
            # undefined and the assignment below will raise NameError.
            pass
        else:
            # TODO: 'test' split not implemented (same caveat as 'train').
            pass

        self.X, self.X_mask, self.Z = (X, X_mask, Z)
        self.sources = ('features', 'target')

        self.spaces = CompositeSpace([
            SequenceSpace(space=VectorSpace(dim=self.X.shape[1])),
            SequenceDataSpace(space=VectorSpace(dim=self.Z.shape[-1]))
        ])
        self.data_spces = (self.spaces, self.sources)
        # self.X_space, self.X_mask_space, self.Z_space
        # Default iterator
        self._iter_mode = resolve_iterator_class('sequential')
        self._iter_topo = False
        self._iter_target = False
        self._iter_data_specs = self.data_spces
        # BUG FIX: was 'random_intergers' (typo) — numpy.random.RandomState
        # exposes random_integers, so the misspelling made make_np_rng fail.
        self.rng = make_np_rng(rng, which_method='random_integers')
开发者ID:EugenePY,项目名称:tensor-work,代码行数:29,代码来源:im2latex.py


示例5: __init__

    def __init__(self, X, y):
        """
        Preprocess, optionally shuffle, and register MRI data with labels.

        Parameters
        ----------
        X : ndarray
            Design matrix; rows are examples.
        y : ndarray
            Label array aligned with the rows of X.
        """
        if (self.dataset_name in dataset_info.aod_datasets
            and self.which_set == "full"):
            self.targets, self.novels = self.load_aod_gts()
            assert self.targets.shape == self.novels.shape
            if X.shape[0] % self.targets.shape[0] != 0:
                raise ValueError("AOD data and targets seems to have "
                                 "incompatible shapes: %r vs %r"
                                 % (X.shape, self.targets.shape))

        X = self.preprocess(X)

        if self.shuffle:
            logger.info("Shuffling data")
            self.shuffle_rng = make_np_rng(None, [1, 2, 3],
                                           which_method="shuffle")
            # BUG FIX: `m` was never defined, so this loop raised NameError.
            # It is the number of examples (rows of X).
            m = X.shape[0]
            # In-place Fisher-Yates-style shuffle applied identically to X
            # and y so example/label pairs stay aligned.
            for i in xrange(m):
                j = self.shuffle_rng.randint(m)
                tmp = X[i].copy()
                X[i] = X[j]
                X[j] = tmp
                tmp = y[i:i+1].copy()
                y[i] = y[j]
                y[j] = tmp

        max_labels = np.amax(y) + 1
        logger.info("%d labels found." % max_labels)

        super(MRI, self).__init__(X=X,
                                  y=y,
                                  view_converter=self.view_converter,
                                  y_labels=max_labels)

        assert not np.any(np.isnan(self.X))
开发者ID:ecastrow,项目名称:pl2mind,代码行数:34,代码来源:MRI.py


示例6: __init__

    def __init__(self, data=None, data_specs=None, rng=_default_seed,
                 preprocessor=None, fit_preprocessor=False):
        """Store `data` with its specs, seed the RNG, set iteration defaults."""
        # data_specs must be flat and free of duplicate sources, because
        # only one copy of each source is kept.
        assert is_flat_specs(data_specs)
        if isinstance(data_specs[1], tuple):
            assert sorted(set(data_specs[1])) == sorted(data_specs[1])
        data_space, data_source = data_specs
        data_space.np_validate(data)
        # TODO: assume that data[0] is num example => error if channel in c01b
        # assert len(set(elem.shape[0] for elem in list(data))) <= 1
        self.data = data
        self.data_specs = data_specs
        # TODO: assume that data[0] is num example => error if channel in c01b
        self.num_examples = list(data)[-1].shape[0] # TODO: list(data)[0].shape[0]

        self.compress = False
        self.design_loc = None
        self.rng = make_np_rng(rng, which_method='random_integers')
        # Defaults for iterators
        self._iter_mode = resolve_iterator_class('sequential')

        # Fit-and-apply the preprocessor up front when one is supplied.
        if preprocessor:
            preprocessor.apply(self, can_fit=fit_preprocessor)
        self.preprocessor = preprocessor
开发者ID:Dining-Engineers,项目名称:Multi-Column-Deep-Neural-Network,代码行数:25,代码来源:vector_spaces_dataset_c01b.py


示例7: __init__

    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        """Derive batch_size/num_batches from each other and shuffle batch order."""
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        if batch_size is None:
            # Must be able to derive batch_size from num_batches.
            if num_batches is None:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
            # NOTE(review): `/` here is integer division under Python 2
            # unless true division is imported elsewhere — confirm.
            batch_size = int(np.ceil(self._dataset_size / num_batches))
        else:
            max_num_batches = np.ceil(self._dataset_size / batch_size)
            if num_batches is None:
                num_batches = max_num_batches
            elif num_batches > max_num_batches:
                raise ValueError("dataset of %d examples can only provide "
                                 "%d batches with batch_size %d, but %d "
                                 "batches were requested" %
                                 (self._dataset_size, max_num_batches,
                                  batch_size, num_batches))

        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        # Visit batches in a random order.
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)
开发者ID:dwf,项目名称:pylearn2,代码行数:29,代码来源:iteration.py


示例8: setup_rng

    def setup_rng(self):
        """
        Create this model's numpy RNG with a fixed seed for reproducibility.
        """
        self.rng = make_np_rng(None, [2012, 10, 17], which_method="uniform")
开发者ID:HALLAB-Halifax,项目名称:pylearn2,代码行数:7,代码来源:dbm.py


示例9: make_random_conv2D

def make_random_conv2D(irange, input_space, output_space,
                       kernel_shape, batch_size=None,
                       subsample=(1, 1), border_mode='valid',
                       message="", rng=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels
    """
    rng = make_np_rng(rng, default_seed, which_method='uniform')

    # Kernels drawn uniformly from [-irange, irange], laid out as
    # (out_channels, in_channels, rows, cols).
    kernel_dims = (output_space.num_channels, input_space.num_channels,
                   kernel_shape[0], kernel_shape[1])
    W = sharedX(rng.uniform(-irange, irange, kernel_dims))

    return Conv2D(
        filters=W,
        batch_size=batch_size,
        input_space=input_space,
        output_axes=output_space.axes,
        subsample=subsample, border_mode=border_mode,
        filters_shape=W.get_value(borrow=True).shape, message=message
    )
开发者ID:AlexArgus,项目名称:pylearn2,代码行数:28,代码来源:conv2d.py


示例10: _create_subset_iterator

 def _create_subset_iterator(self, mode, batch_size=None, num_batches=None,
                             rng=None):
     """Instantiate a subset iterator of the given mode over this dataset."""
     iterator_cls = resolve_iterator_class(mode)
     # Stochastic iterators require an RNG; make a default one if missing.
     if rng is None and iterator_cls.stochastic:
         rng = make_np_rng()
     return iterator_cls(self.get_num_examples(), batch_size,
                         num_batches, rng)
开发者ID:123fengye741,项目名称:pylearn2,代码行数:7,代码来源:penntree.py


示例11: make_random_conv2D

def make_random_conv2D(irange, input_channels, input_axes, output_axes,
                       output_channels,
                       kernel_shape,
                       kernel_stride=(1, 1), pad=0, message="", rng=None,
                       partial_sum=None, sparse_init=None):
    """
    .. todo::

        WRITEME properly

    Creates a Conv2D with random kernels.
    Should be functionally equivalent to
    pylearn2.linear.conv2d.make_random_conv2D
    """
    rng = make_np_rng(rng, default_seed, which_method='uniform')

    # c01b layout: (in_channels, rows, cols, out_channels).
    filter_dims = (input_channels, kernel_shape[0], kernel_shape[1],
                   output_channels)
    W = sharedX(rng.uniform(-irange, irange, filter_dims))

    return Conv2D(filters=W,
                  input_axes=input_axes,
                  output_axes=output_axes,
                  kernel_stride=kernel_stride, pad=pad,
                  message=message, partial_sum=partial_sum)
开发者ID:SinaHonari,项目名称:pylearn2,代码行数:25,代码来源:conv2d_c01b.py


示例12: __init__

    def __init__(self, nvis, nhid,
                 init_lambda,
                 init_p, init_alpha, learning_rate):
        """
        Store hyperparameters, seed the RNG, and initialize all state.
        """
        # Architecture sizes.
        self.nvis = int(nvis)
        self.nhid = int(nhid)
        # Initial values for the model's hyperparameters.
        self.init_lambda = float(init_lambda)
        self.init_p = float(init_p)
        self.init_alpha = N.cast[config.floatX](init_alpha)
        # Numerical tolerances / dynamics constants.
        self.tol = 1e-6
        self.time_constant = 1e-2
        self.learning_rate = N.cast[config.floatX](learning_rate)

        # Predictor shares the main learning rate.
        self.predictor_learning_rate = self.learning_rate

        # Fixed seed for reproducible gaussian initialization.
        self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")

        # Error tracking (monitoring mode only, for now).
        self.error_record = []
        self.ERROR_RECORD_MODE_MONITORING = 0
        self.error_record_mode = self.ERROR_RECORD_MODE_MONITORING

        self.instrumented = False

        self.redo_everything()
开发者ID:bobchennan,项目名称:pylearn2,代码行数:28,代码来源:differentiable_sparse_coding.py


示例13: make_sparse_random_local

def make_sparse_random_local(num_nonzero, input_space, output_space,
        kernel_shape, batch_size, \
        kernel_stride = (1,1), border_mode = 'valid', message = "", rng=None):
    """
    Intended to create a locally-connected layer with sparse random kernels.

    Currently a stub: it raises NotImplementedError immediately, so all
    code below the raise is unreachable, and the function never returns
    the constructed weights.
    """
    raise NotImplementedError("Not yet modified after copy-paste from "
            "pylearn2.linear.conv2d_c01b")
    # --- Everything below is dead code (unreachable after the raise). ---
    """ Creates a Conv2D with random kernels, where the randomly initialized
    values are sparse"""

    rng = make_np_rng(rng, default_sparse_seed, which_method=['randn','randint'])

    # Kernel tensor: (out_channels, in_channels, rows, cols).
    W = np.zeros(( output_space.num_channels, input_space.num_channels, \
            kernel_shape[0], kernel_shape[1]))

    def random_coord():
        # Random index into each of W's four dimensions.
        return [ rng.randint(dim) for dim in W.shape ]

    # Scatter num_nonzero gaussian entries, re-drawing on collisions.
    for i in xrange(num_nonzero):
        o, ch, r, c = random_coord()
        while W[o, ch, r, c] != 0:
            o, ch, r, c = random_coord()
        W[o, ch, r, c] = rng.randn()

    # NOTE(review): result is assigned but never returned — the copy-paste
    # was never finished (see the raise above).
    W = sharedX( W)
开发者ID:EderSantana,项目名称:pylearn2,代码行数:29,代码来源:local_c01b.py


示例14: split_patients

def split_patients(patients, valid_percent, test_percent, rng=(2014, 10, 22)):
    """
    Split a {patient_id: label} dict into stratified train/valid/test dicts.

    `rng` may be a seed tuple/list (converted via make_np_rng) or an
    existing RandomState.
    """
    if isinstance(rng, (list, tuple)):
        rng = make_np_rng(None, rng, which_method='uniform')

    vals = np.asarray(patients.values())
    keys = np.asarray(patients.keys())

    # First split off the test set, stratified on the labels.
    sss = StratifiedShuffleSplit(
        vals, n_iter=1, test_size=test_percent, random_state=rng)
    remaining_idx, test_idx = sss.__iter__().next()

    if valid_percent > 0:
        # Fraction of the remaining samples needed for validation.
        valid_rate = valid_percent / (1 - test_percent)

        sss = StratifiedShuffleSplit(
            vals[remaining_idx], n_iter=1, test_size=valid_rate, random_state=rng)
        tr_idx, val_idx = sss.__iter__().next()
        # Map the sub-split indices back into the original index space.
        valid_idx = remaining_idx[val_idx]
        train_idx = remaining_idx[tr_idx]
    else:
        train_idx = remaining_idx
        valid_idx = []

    make_subset = lambda idx: dict(zip(keys[idx], vals[idx]))
    return (make_subset(train_idx), make_subset(valid_idx),
            make_subset(test_idx))
开发者ID:johnarevalo,项目名称:cnn-bcdr,代码行数:27,代码来源:utils.py


示例15: make_random_conv3D

def make_random_conv3D(irange, input_axes, output_axes,
                       signal_shape,
                       filter_shape,
                       kernel_stride=(1, 1), pad=0, message="", rng=None,
                       partial_sum=None):
    """Create a Conv3DBCT01 with kernels drawn uniformly from [-irange, irange]."""
    if rng is None:
        rng = make_np_rng(rng, default_seed, which_method='uniform')

    # Expand the 5-D filter shape explicitly.
    filter_dims = (filter_shape[0],
                   filter_shape[1],
                   filter_shape[2],
                   filter_shape[3],
                   filter_shape[4])

    # Initialize weights.
    W = sharedX(rng.uniform(-irange, irange, filter_dims))

    return Conv3DBCT01(filters=W,
                       input_axes=input_axes,
                       output_axes=output_axes,
                       signal_shape=signal_shape,
                       filter_shape=filter_shape,
                       kernel_stride=kernel_stride, pad=pad,
                       message=message, partial_sum=partial_sum)
开发者ID:AtousaTorabi,项目名称:HumanActivityRecognition,代码行数:26,代码来源:conv3d_bct01.py


示例16: test_convolutional_compatible

def test_convolutional_compatible():
    """
    VAE allows convolutional encoding networks
    """
    # Encoder: reshape the flat 16-dim input to a 4x4 image, then convolve.
    converter = SpaceConverter(layer_name="conv2d_converter",
                               output_space=Conv2DSpace(shape=[4, 4],
                                                        num_channels=1))
    conv = ConvRectifiedLinear(
        layer_name="h",
        output_channels=2,
        kernel_shape=[2, 2],
        kernel_stride=[1, 1],
        pool_shape=[1, 1],
        pool_stride=[1, 1],
        pool_type="max",
        irange=0.01,
    )
    encoding_model = MLP(layers=[converter, conv])

    # Decoder: a single linear layer back to 16 dims.
    decoding_model = MLP(layers=[Linear(layer_name="h", dim=16, irange=0.01)])

    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name="conditional")
    posterior = DiagonalGaussian(mlp=encoding_model, name="posterior")
    vae = VAE(nvis=16, prior=prior, conditional=conditional,
              posterior=posterior, nhid=16)

    # Compile the lower bound and run it on random data to check it works.
    X = T.matrix("X")
    lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
    f = theano.function(inputs=[X], outputs=lower_bound)
    rng = make_np_rng(default_seed=11223)
    f(as_floatX(rng.uniform(size=(10, 16))))
开发者ID:JesseLivezey,项目名称:pylearn2,代码行数:29,代码来源:test_vae.py


示例17: __init__

    def __init__(self, nvis, nhid, coeff):
        """Store sizes and coefficient, seed the RNG, initialize parameters."""
        self.nvis = nvis
        self.nhid = nhid
        self.coeff = float(coeff)
        # Fixed seed so parameter initialization is reproducible.
        self.rng = make_np_rng(None, [1, 2, 3], which_method="randn")
        self.redo_everything()
开发者ID:EderSantana,项目名称:pylearn2,代码行数:7,代码来源:local_coordinate_coding.py


示例18: reset_rng

    def reset_rng(self):
        """
        Reset this model's numpy RNG to its fixed default seed.
        """
        self.rng = make_np_rng([1,2,3], which_method=['randn','uniform'])
开发者ID:fancyspeed,项目名称:pylearn2,代码行数:7,代码来源:dense_binary_dbm.py


示例19: __init__

    def __init__(self, min_x=-6.28, max_x=6.28, std=.05, rng=_default_seed):
        """
        Constructor.
        """
        super(CosDataset, self).__init__()

        #: lower limit for x as in cos(x)
        self.min_x = min_x
        #: higher limit for x as in cos(x)
        self.max_x = max_x
        #: standard deviation for the noise added to the values we generate
        self.std = std

        # resolve_iterator_class() accepts either a SubsetIterator subclass
        # or one of the mode strings: sequential, shuffled_sequential,
        # random_slice, random_uniform, batchwise_shuffled_sequential,
        # even_sequential, even_shuffled_sequential,
        # even_batchwise_shuffled_sequential, even_sequences.

        #: default iterator implementation (a class to be instantiated)
        self._iter_subset_class = resolve_iterator_class('sequential')
        #: default data specifications for iterator
        self._iter_data_specs = (VectorSpace(2), 'features')
        #: default batch size for the iterator
        self._iter_batch_size = 100
        #: default number of batches for the iterator
        self._iter_num_batches = 10
        #: random number generator
        self.rng = make_np_rng(rng, which_method=['uniform', 'randn'])
开发者ID:TNick,项目名称:pyl2extra,代码行数:35,代码来源:cos_dataset.py


示例20: __init__

    def __init__(
        self,
        cost=None,
        batch_size=None,
        batches_per_iter=None,
        updates_per_batch=10,
        monitoring_batches=None,
        monitoring_dataset=None,
        termination_criterion=None,
        set_batch_size=False,
        reset_alpha=True,
        conjugate=False,
        min_init_alpha=0.001,
        reset_conjugate=True,
        line_search_mode=None,
        verbose_optimization=False,
        scale_step=1.0,
        theano_function_mode=None,
        init_alpha=None,
        seed=None,
    ):
        """
        Store all constructor arguments as attributes and set up monitoring
        and the training RNG.
        """
        # Bulk-assign every argument as an instance attribute, then drop
        # the spurious self-reference created by locals().
        self.__dict__.update(locals())
        del self.self

        if monitoring_dataset is None:
            # FIX: use identity comparison with None (`is None`), not `==`.
            # Without a monitoring dataset, monitoring_batches is meaningless.
            assert monitoring_batches is None

        self._set_monitoring_dataset(monitoring_dataset)

        self.bSetup = False
        self.termination_criterion = termination_criterion
        # Fixed fallback seed keeps runs reproducible when seed is None.
        self.rng = make_np_rng(seed, [2012, 10, 16],
                               which_method=["randn", "randint"])
开发者ID:pangyuteng,项目名称:chalearn2014,代码行数:33,代码来源:bgd.py



注:本文中的pylearn2.utils.rng.make_np_rng函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python rng.make_theano_rng函数代码示例发布时间:2022-05-25
下一篇:
Python iteration.resolve_iterator_class函数代码示例发布时间:2022-05-25
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap