Python yaml_parse.load_path Function Code Examples


This article collects typical usage examples of the load_path function from pylearn2.config.yaml_parse. If you are wondering what load_path does, how to call it, or what real uses of it look like, the hand-picked code examples below should help.



A total of 20 code examples of the load_path function are shown below, sorted by popularity by default.
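Before diving into the examples, here is a minimal sketch of a basic load_path call, modeled on Example 5 below. It assumes pylearn2 is installed and importable; the temporary file and its YAML contents are made up for illustration:

import os
import tempfile

from pylearn2.config import yaml_parse

# Write a tiny YAML document to a temporary file (hypothetical content).
fd, fname = tempfile.mkstemp(suffix='.yaml')
with os.fdopen(fd, 'w') as f:
    f.write("a: 23")

# load_path reads the YAML file from disk and returns the parsed object.
loaded = yaml_parse.load_path(fname)
assert loaded['a'] == 23

os.remove(fname)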

Example 1: load_train_file

def load_train_file(config_file_path, environ=None):
    """
    Loads and parses a yaml file for a Train object.
    Publishes the relevant training environment variables

    Parameters
    ----------
    config_file_path : WRITEME

    Returns
    -------
    WRITEME
    """
    from pylearn2.config import yaml_parse

    suffix_to_strip = '.yaml'

    # publish environment variables related to file name
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    for varname in ["PYLEARN2_TRAIN_FILE_FULL_STEM"]:
        os.environ[varname] = config_file_full_stem

    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    os.environ["PYLEARN2_TRAIN_DIR"] = directory
    os.environ["PYLEARN2_TRAIN_BASE_NAME"] = config_file_path.split('/')[-1]
    os.environ["PYLEARN2_TRAIN_FILE_STEM"] = config_file_full_stem.split('/')[-1]

    return yaml_parse.load_path(config_file_path, environ=environ)
Developer: amishtal | Project: pylearn2 | Lines of code: 35 | Source file: serial.py


Example 2: test_IS_cost

def test_IS_cost():
    """
    VAE trains properly with the importance sampling cost
    """
    yaml_src_path = os.path.join(os.path.dirname(__file__), "test_vae_cost_is_criterion.yaml")
    train_object = yaml_parse.load_path(yaml_src_path)
    train_object.main_loop()
Developer: JesseLivezey | Project: pylearn2 | Lines of code: 7 | Source file: test_vae.py


Example 3: test_load_from_yaml

 def test_load_from_yaml(self):
     """
     Load dataset from an yaml file.
     """
     imdset = yaml_parse.load_path(self.yaml_file)
     imdset = imdset['dataset']
     self.assertEqual(len(imdset.adjusters), 6)
Developer: TNick | Project: pyl2extra | Lines of code: 7 | Source file: test_dataset.py


Example 4: load_path

def load_path(path, environ=None, **kwargs):
    """
    Convenience function for loading a YAML configuration from a file
    into a `PartialPlus` graph.

    Parameters
    ----------
    path : str
        The path to the file to load on disk.
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables. If a key appears both in `os.environ`
        and this dictionary, the value in this dictionary is used.

    Returns
    -------
    graph : Node
        A `PartialPlus` or `Literal` node representing the root
        node of the YAML hierarchy.

    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    return proxy_to_partialplus(yaml_parse.load_path(path, instantiate=False,
                                                     **kwargs),
                                environ=environ)
Developer: Qwlouse | Project: searchspaces | Lines of code: 27 | Source file: pylearn2_yaml.py


Example 5: test_load_path

def test_load_path():
    fd, fname = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        f.write("a: 23")
    loaded = load_path(fname)
    assert_(loaded['a'] == 23)
    os.remove(fname)
Developer: JakeMick | Project: pylearn2 | Lines of code: 7 | Source file: test_yaml_parse.py


Example 6: yaml_file_execution

def yaml_file_execution(file_path):
    try:
        train = yaml_parse.load_path(file_path)
        train.algorithm.termination_criterion = EpochCounter(max_epochs=2)
        train.main_loop()
    except NoDataPathError:
        raise SkipTest("PYLEARN2_DATA_PATH environment variable not defined")
Developer: BloodNg | Project: pylearn2 | Lines of code: 7 | Source file: yaml_testing.py


Example 7: load_train_file

def load_train_file(config_file_path):
    """Loads and parses a yaml file for a Train object.
    Publishes the relevant training environment variables"""
    from pylearn2.config import yaml_parse

    suffix_to_strip = '.yaml'

    # publish environment variables related to file name
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    for varname in ["PYLEARN2_TRAIN_FILE_NAME", #this one is deprecated
            "PYLEARN2_TRAIN_FILE_FULL_STEM"]: #this is the new, accepted name
        environ.putenv(varname, config_file_full_stem)

    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    environ.putenv("PYLEARN2_TRAIN_DIR", directory)
    environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1] )
    environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1] )

    return yaml_parse.load_path(config_file_path)
Developer: casperkaae | Project: pylearn2 | Lines of code: 26 | Source file: serial.py


Example 8: construct_model

 def construct_model(self):
     filedir = os.path.join(os.path.dirname(__file__), 'mlps.yaml')
     layer_args = yaml_parse.load_path(filedir)[self.modelname]
     layers = []
     
     # adapt in case of 2d layer
     if (self.conv_class == ConvElemwise):
         self.adapt_for_2d_conv(layer_args)
     else:
         self.adapt_for_time_dim(layer_args)
     print layer_args
         
     for i, layer_arg in enumerate(layer_args):
         layer = self.construct_layer(layer_arg, i)
         layers.append(layer)
     input_space = self.create_input_space()
     mlp = MLP(input_space=input_space, layers=layers)
     return mlp
Developer: robintibor | Project: pylearn3dconv | Lines of code: 18 | Source file: perf_mlp.py


Example 9: load_train_file

def load_train_file(config_file_path, environ=None):
    """
    Loads and parses a yaml file for a Train object.
    Publishes the relevant training environment variables

    Parameters
    ----------
    config_file_path : str
        Path to a config file containing a YAML string describing a
        pylearn2.train.Train object
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables when parsing the YAML file. If a key appears
        both in `os.environ` and this dictionary, the value in this
        dictionary is used.


    Returns
    -------
    Object described by the YAML string stored in the config file
    """
    from pylearn2.config import yaml_parse

    suffix_to_strip = '.yaml'

    # Publish environment variables related to file name
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    os.environ["PYLEARN2_TRAIN_FILE_FULL_STEM"] = config_file_full_stem

    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    os.environ["PYLEARN2_TRAIN_DIR"] = directory
    os.environ["PYLEARN2_TRAIN_BASE_NAME"] = config_file_path.split('/')[-1]
    os.environ[
        "PYLEARN2_TRAIN_FILE_STEM"] = config_file_full_stem.split('/')[-1]

    return yaml_parse.load_path(config_file_path, environ=environ)
Developer: 123fengye741 | Project: pylearn2 | Lines of code: 43 | Source file: serial.py


Example 10: load_yaml

    def load_yaml(self, fname):
        """
        Slot that loads a YAML file.
        """
        if not fname:
            return
        try:
            # publish environment variables relevant to this file
            serial.prepare_train_file(fname)

            # load the tree of Proxy objects
            environ = {}
            yaml_tree = yaml_parse.load_path(fname,
                                             instantiate=False,
                                             environ=environ)
            yaml_tree = yaml_parse._instantiate(yaml_tree)
            self.show_object_tree(yaml_tree)
        except Exception, exc:
            logger.error('Loading yaml file failed', exc_info=True)
            QtGui.QMessageBox.warning(self, 'Exception', str(exc))
Developer: TNick | Project: pyl2extra | Lines of code: 20 | Source file: main_window.py


Example 11: main

def main(options, positional_args):
    """
    .. todo::

        WRITEME
    """
    assert len(positional_args) == 1

    path ,= positional_args

    out = options.out
    rescale = options.rescale

    if rescale == 'none':
        global_rescale = False
        patch_rescale = False
    elif rescale == 'global':
        global_rescale = True
        patch_rescale = False
    elif rescale == 'individual':
        global_rescale = False
        patch_rescale = True
    else:
        assert False

    if path.endswith('.pkl'):
        from pylearn2.utils import serial
        obj = serial.load(path)
    elif path.endswith('.yaml'):
        print 'Building dataset from yaml...'
        obj = yaml_parse.load_path(path)
        print '...done'
    else:
        obj = yaml_parse.load(path)

    rows = options.rows
    cols = options.cols

    if hasattr(obj,'get_batch_topo'):
        # obj is a Dataset
        dataset = obj

        examples = dataset.get_batch_topo(rows*cols)
    else:
        # obj is a Model
        model = obj
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        theano_rng = RandomStreams(42)
        design_examples_var = model.random_design_matrix(batch_size =
                rows * cols, theano_rng = theano_rng)
        from theano import function
        print 'compiling sampling function'
        f = function([],design_examples_var)
        print 'sampling'
        design_examples = f()
        print 'loading dataset'
        dataset = yaml_parse.load(model.dataset_yaml_src)
        examples = dataset.get_topological_view(design_examples)

    norms = N.asarray( [
            N.sqrt(N.sum(N.square(examples[i,:])))
                        for i in xrange(examples.shape[0])
                        ])
    print 'norms of examples: '
    print '\tmin: ',norms.min()
    print '\tmean: ',norms.mean()
    print '\tmax: ',norms.max()

    print 'range of elements of examples', \
            (examples.min(),examples.max())
    print 'dtype: ', examples.dtype

    examples = dataset.adjust_for_viewer(examples)

    if global_rescale:
        examples /= N.abs(examples).max()

    if len(examples.shape) != 4:
        print 'sorry, view_examples.py only supports image examples ' + \
                'for now.'
        print 'this dataset has ' + \
                str(len(examples.shape)-2)+' topological dimensions'
        quit(-1)

    if examples.shape[3] == 1:
        is_color = False
    elif examples.shape[3] == 3:
        is_color = True
    else:
        print 'got unknown image format with ' + str(examples.shape[3]) + \
                ' channels'
        print 'supported formats are 1 channel greyscale or three channel RGB'
        quit(-1)

    print examples.shape[1:3]

    pv = patch_viewer.PatchViewer((rows, cols), examples.shape[1:3],
            is_color = is_color)

    for i in xrange(rows*cols):
#......... remaining code omitted .........
Developer: dzeno | Project: pylearn2 | Lines of code: 101 | Source file: show_examples.py


Example 12: main

def main(options, positional_args):
    assert len(positional_args) == 1

    path, = positional_args

    out = options.out
    rescale = options.rescale

    if rescale == "none":
        global_rescale = False
        patch_rescale = False
    elif rescale == "global":
        global_rescale = True
        patch_rescale = False
    elif rescale == "individual":
        global_rescale = False
        patch_rescale = True
    else:
        assert False

    if path.endswith(".pkl"):
        from pylearn2.utils import serial

        obj = serial.load(path)
    elif path.endswith(".yaml"):
        print "Building dataset from yaml..."
        obj = yaml_parse.load_path(path)
        print "...done"
    else:
        obj = yaml_parse.load(path)

    rows = options.rows
    cols = options.cols

    if hasattr(obj, "get_batch_topo"):
        # obj is a Dataset
        dataset = obj

        examples = dataset.get_batch_topo(rows * cols)
    else:
        # obj is a Model
        model = obj
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams

        theano_rng = RandomStreams(42)
        design_examples_var = model.random_design_matrix(batch_size=rows * cols, theano_rng=theano_rng)
        from theano import function

        print "compiling sampling function"
        f = function([], design_examples_var)
        print "sampling"
        design_examples = f()
        print "loading dataset"
        dataset = yaml_parse.load(model.dataset_yaml_src)
        examples = dataset.get_topological_view(design_examples)

    norms = N.asarray([N.sqrt(N.sum(N.square(examples[i, :]))) for i in xrange(examples.shape[0])])
    print "norms of examples: "
    print "\tmin: ", norms.min()
    print "\tmean: ", norms.mean()
    print "\tmax: ", norms.max()

    print "range of elements of examples", (examples.min(), examples.max())
    print "dtype: ", examples.dtype

    examples = dataset.adjust_for_viewer(examples)

    if global_rescale:
        examples /= N.abs(examples).max()

    if len(examples.shape) != 4:
        print "sorry, view_examples.py only supports image examples for now."
        print "this dataset has " + str(len(examples.shape) - 2) + " topological dimensions"
        quit(-1)
    #

    if examples.shape[3] == 1:
        is_color = False
    elif examples.shape[3] == 3:
        is_color = True
    else:
        print "got unknown image format with " + str(examples.shape[3]) + " channels"
        print "supported formats are 1 channel greyscale or three channel RGB"
        quit(-1)
    #

    print examples.shape[1:3]

    pv = patch_viewer.PatchViewer((rows, cols), examples.shape[1:3], is_color=is_color)

    for i in xrange(rows * cols):
        pv.add_patch(examples[i, :, :, :], activation=0.0, rescale=patch_rescale)
    #

    if out is None:
        pv.show()
    else:
        pv.save(out)
Developer: jpompe | Project: pylearn2 | Lines of code: 98 | Source file: show_examples.py


Example 13: __init__

    def __init__(self,
                 path='train.csv',
                 task='classification',
                 expect_labels=True,
                 expect_headers=True,
                 delimiter=',',
                 start=None,
                 stop=None,
                 start_fraction=None,
                 end_fraction=None,
                 yaml_src=None,
                 one_hot=True,
                 num_classes=4,
                 which_set=None):
        """
        .. todo:: ..

            WRITEME
        """
        self.path = path
        self.task = task
        self.expect_labels = expect_labels
        self.expect_headers = expect_headers
        self.delimiter = delimiter
        if which_set is not None:
            self.start = start
            self.stop = stop
        self.start_fraction = start_fraction
        self.end_fraction = end_fraction

        self.view_converter = None

        if yaml_src is not None:
            self.yaml_src = yaml_parse.load_path(yaml_src)
        # self.yaml_src=yaml_parse.load_path("mlp.yaml")
        # eventually; triple-quoted yaml...
        self.one_hot = one_hot
        self.num_classes = num_classes

        if which_set is not None and which_set not in[
                                                     'train', 'test', 'valid']:
            raise ValueError(
                'Unrecognized which_set value "%s".' % (which_set,) +
                '". Valid values are ["train","test","valid"].')
        else:
            self.which_set = which_set
            if self.start is not None or self.stop is not None:
                raise ValueError("Use start/stop or which_set,"
                    " just not together.")

        if task not in ['classification', 'regression']:
            raise ValueError('task must be either "classification" or '
                             '"regression"; got ' + str(task))

        if start_fraction is not None:
            if end_fraction is not None:
                raise ValueError("Use start_fraction or end_fraction, "
                                 " not both.")
            if start_fraction <= 0:
                raise ValueError("start_fraction should be > 0")

            if start_fraction >= 1:
                raise ValueError("start_fraction should be < 1")

        if end_fraction is not None:
            if end_fraction <= 0:
                raise ValueError("end_fraction should be > 0")

            if end_fraction >= 1:
                raise ValueError("end_fraction should be < 1")

        if start is not None:
            if start_fraction is not None or end_fraction is not None:
                raise ValueError("Use start, start_fraction, or end_fraction,"
                                 " just not together.")

        if stop is not None:
            if start_fraction is not None or end_fraction is not None:
                raise ValueError("Use stop, start_fraction, or end_fraction,"
                                 " just not together.")

        # and go
        self.path = preprocess(self.path)
        X, y = self._load_data()

        # y=y.astype(int)
        # y=map(int, np.rint(y).astype(int))

        if self.task == 'regression':
            super(CSVDatasetPlus, self).__init__(X=X, y=y)
        else:
            # , y_labels=4 # y_labels=np.max(y)+1
            super(CSVDatasetPlus, self).__init__(
                X=X, y=y.astype(int), y_labels=self.num_classes)
Developer: eivind88 | Project: master_code | Lines of code: 94 | Source file: adni_eivind.py


Example 14: hasattr

    patch_rescale = False
elif rescale == 'global':
    global_rescale = True
    patch_rescale = False
elif rescale == 'individual':
    global_rescale = False
    patch_rescale = True
else:
    assert False

if path.endswith('.pkl'):
    from pylearn2.utils import serial
    obj = serial.load(path)
elif path.endswith('.yaml'):
    print 'Building dataset from yaml...'
    obj = yaml_parse.load_path(path)
    print '...done'
else:
    obj = yaml_parse.load(path)

rows = options.rows
cols = options.cols

if hasattr(obj,'get_batch_topo'):
    #obj is a Dataset
    dataset = obj

    examples = dataset.get_batch_topo(rows*cols)
else:
    #obj is a Model
    model = obj
Developer: casperkaae | Project: pylearn2 | Lines of code: 31 | Source file: show_examples.py


Example 15:

__author__ = "Ian Goodfellow"

from pylearn2.config import yaml_parse
import sys

_, path = sys.argv

simulator = yaml_parse.load_path(path)

simulator.main_loop()
Developer: 123fengye741 | Project: pylearn2 | Lines of code: 10 | Source file: simulate.py


Example 16: print

            print((t6-t1, t2-t1, t3-t2, t4-t3, t5-t4, t6-t5))

        if self.chunk_size is not None:
            assert save_path.endswith('.npy')
            save_path_pieces = save_path.split('.npy')
            assert len(save_path_pieces) == 2
            assert save_path_pieces[1] == ''
            save_path = save_path_pieces[0] + '_' + chr(ord('A')+self.chunk_id)+'.npy'
        np.save(save_path,output)


        if nan > 0:
            warnings.warn(str(nan)+' features were nan')

if __name__ == '__main__':
    assert len(sys.argv) == 2
    yaml_path = sys.argv[1]

    assert yaml_path.endswith('.yaml')
    val = yaml_path[0:-len('.yaml')]
    os.environ['FEATURE_EXTRACTOR_YAML_PATH'] = val
    os.putenv('FEATURE_EXTRACTOR_YAML_PATH',val)
    val = val.split('/')[-1]
    os.environ['FEATURE_EXTRACTOR_YAML_NAME'] = val
    os.putenv('FEATURE_EXTRACTOR_YAML_NAME', val)


    extractor = yaml_parse.load_path(yaml_path)

    extractor()
Developer: 123fengye741 | Project: pylearn2 | Lines of code: 30 | Source file: extract_features.py


Example 17: open

import os
import shutil

from pylearn2.config import yaml_parse
from pylearn2.utils import serial
from pylearn2.utils import shell


status, rc = shell.run_shell_command("qstat -u goodfell -t @hades")
assert rc == 0

results = open("results.dat", "r")
lines = results.readlines()
results.close()

params = yaml_parse.load_path('params.yaml')
added = 0
print 'Experiment numbers reported by this script start at 0.'
print 'Keep in mind that vim will refer to the first line of results.dat as line 1'
for expnum, line in enumerate(lines):
    elems = line.split(' ')
    assert elems[-1] == '\n'
    obj = elems[0]
    if obj == 'P':
        # print expnum, 'pending according to results.dat'
        expdir = '/RQexec/goodfell/experiment_7/%d' % expnum
        if not os.path.exists(expdir):
            print 'Experiment not yet configured for experiment',expnum
            continue
        cluster_info = expdir + '/cluster_info.txt'
        if not os.path.exists(cluster_info):
Developer: cc13ny | Project: galatea | Lines of code: 31 | Source file: 01D_report_results.py


Example 18: main

def main(options, positional_args):
    assert len(positional_args) == 1

    path ,= positional_args

    out = options.out
    rescale = options.rescale

    if rescale == 'none':
        global_rescale = False
        patch_rescale = False
    elif rescale == 'global':
        global_rescale = True
        patch_rescale = False
    elif rescale == 'individual':
        global_rescale = False
        patch_rescale = True
    else:
        assert False

    if path.endswith('.pkl'):
        from pylearn2.utils import serial
        obj = serial.load(path)
    elif path.endswith('.yaml'):
        print 'Building dataset from yaml...'
        obj = yaml_parse.load_path(path)
        print '...done'
    else:
        obj = yaml_parse.load(path)

    rows = options.rows
    cols = options.cols

    if hasattr(obj,'get_batch_topo'):
        #obj is a Dataset
        dataset = obj

        examples = dataset.get_batch_topo(rows*cols)
    else:
        #obj is a Model
        model = obj
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        theano_rng = RandomStreams(42)
        design_examples_var = model.random_design_matrix(batch_size = rows * cols, theano_rng = theano_rng)
        from theano import function
        print 'compiling sampling function'
        f = function([],design_examples_var)
        print 'sampling'
        design_examples = f()
        print 'loading dataset'
        dataset = yaml_parse.load(model.dataset_yaml_src)
        examples = dataset.get_topological_view(design_examples)

    norms = N.asarray( [
            N.sqrt(N.sum(N.square(examples[i,:])))
                        for i in xrange(examples.shape[0])
                        ])
    print 'norms of examples: '
    print '\tmin: ',norms.min()
    print '\tmean: ',norms.mean()
    print '\tmax: ',norms.max()

    print 'range of elements of examples',(examples.min(),examples.max())
    print 'dtype: ', examples.dtype

    examples = dataset.adjust_for_viewer(examples)

    if global_rescale:
        examples /= N.abs(examples).max()

    if len(examples.shape) != 4:
        print 'sorry, view_examples.py only supports image examples for now.'
        print 'this dataset has '+str(len(examples.shape)-2)+' topological dimensions'
        quit(-1)

    is_color = False
    assert examples.shape[3] == 2

    print examples.shape[1:3]

    pv = patch_viewer.PatchViewer( (rows, cols * 2), examples.shape[1:3], is_color = is_color)

    for i in xrange(rows*cols):
        # Load patches in backwards order for easier cross-eyed viewing
        # (Ian can't do the magic eye thing where you focus your eyes past the screen, must
        # focus eyes in front of screen)
        pv.add_patch(examples[i,:,:,1], activation = 0.0, rescale = patch_rescale)
        pv.add_patch(examples[i,:,:,0], activation = 0.0, rescale = patch_rescale)

    if out is None:
        pv.show()
    else:
        pv.save(out)
Developer: EderSantana | Project: pylearn2 | Lines of code: 93 | Source file: show_binocular_greyscale_examples.py


Example 19: len

#!/bin/env python
import numpy as N
import sys
from pylearn2.gui import patch_viewer
from pylearn2.config import yaml_parse

assert len(sys.argv) == 2
path = sys.argv[1]

if path.endswith('.pkl'):
    from pylearn2.utils import serial
    dataset = serial.load(path)
elif path.endswith('.yaml'):
    dataset = yaml_parse.load_path(path)
else:
    dataset = yaml_parse.load(path)

rows = 20
cols = 20

examples = dataset.get_batch_topo(rows*cols)

norms = N.asarray( [
        N.sqrt(N.sum(N.square(examples[i,:])))
                    for i in xrange(examples.shape[0])
                    ])
print 'norms of examples: '
print '\tmin: ',norms.min()
print '\tmean: ',norms.mean()
print '\tmax: ',norms.max()
Developer: LeeEdel | Project: pylearn | Lines of code: 30 | Source file: show_examples.py


Example 20:

import gc
import numpy as np
import sys

from pylearn2.config import yaml_parse
from pylearn2.utils import serial

_, config_path = sys.argv
model = yaml_parse.load_path(config_path)

f = model.dump_func()

model.strip_down()
stripped_model_path = config_path.replace('.yaml', '_stripped.pkl')
serial.save(stripped_model_path, model)

srcs = {
        'train' : """!obj:pylearn2.datasets.norb_small.FoveatedNORB {
        which_set: "train",
        scale: 1,
        one_hot: 1
    }""",
        'test' : """!obj:pylearn2.datasets.norb_small.FoveatedNORB {
        which_set: "test",
        scale: 1,
        one_hot: 1
    }"""
        }

for which_set in srcs:
    gc.collect()
Developer: cc13ny | Project: galatea | Lines of code: 31 | Source file: norb_retrain_dumper.py



Note: The pylearn2.config.yaml_parse.load_path examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.

