
Python argparser.NeonArgparser Class Code Examples


This article collects typical usage examples of the Python class neon.util.argparser.NeonArgparser. If you have been wondering what NeonArgparser is for, how to use it, and what working code looks like, the curated examples below should help.



Twenty NeonArgparser code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python samples. A minimal usage sketch distilled from these examples follows.
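Before the individual examples, here is a minimal sketch of the pattern nearly all of them share. It is assembled from the calls that recur below rather than taken from any single project; the --subset_pct flag is borrowed from Examples 13 and 19, and the attributes listed in the comments are the ones the examples actually read from args.

"""Minimal NeonArgparser usage sketch (the script docstring becomes the --help text)."""
from neon.backends import gen_backend
from neon.util.argparser import NeonArgparser, extract_valid_args

parser = NeonArgparser(__doc__)
# extra flags are added exactly as with a plain argparse.ArgumentParser
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')

# gen_be=False defers backend creation so it can be configured by hand
args = parser.parse_args(gen_be=False)
be = gen_backend(**extract_valid_args(args, gen_backend))

# standard attributes populated by NeonArgparser include args.data_dir,
# args.epochs, args.batch_size, args.model_file, args.log_thresh and
# args.callback_args (the keyword arguments passed on to Callbacks)
print(args.data_dir, args.epochs)

With the default parse_args() (gen_be=True), the parser generates the backend itself, which is why most of the examples below skip the gen_backend call entirely.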

Example 1: test_iterator

def test_iterator():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
    train = DataIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
    test = DataIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
    return run(args, train, test)
Developer: ferenckulcsar, Project: neon, Lines: 7, Source: compare.py


Example 2: test_iterator

def test_iterator():
    print('Testing iterator based data loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()
    (X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
    train = ArrayIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
    test = ArrayIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
    return run(args, train, test)
Developer: JediKoder, Project: neon, Lines: 8, Source: compare.py


Example 3: get_data

def get_data():
    """
    Download bilingual text dataset for Machine translation example.
    """

    # vocab_size and time_steps are hard coded here
    vocab_size = 16384
    time_steps = 20

    # download dataset
    url = 'http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/'
    filename = 'bitexts.tgz'
    size = 1313280000

    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)
    data_dir = os.path.join(args.data_dir, 'nmt')

    _, filepath = Dataset._valid_path_append(data_dir, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    # extract selected datasets
    datafiles = dict()
    datafiles['un2000'] = ('un2000_pc34.en.gz', 'un2000_pc34.fr.gz')
    datafiles['europarl7'] = ('ep7_pc45.en.gz', 'ep7_pc45.fr.gz')

    extractpath = os.path.join(data_dir, 'bitexts.selected')
    with tarfile.open(filepath, 'r') as tar_ref:
        for dset, files in datafiles.items():
            datasetpath = os.path.join(data_dir, dset)
            # extract the files for dataset, if not already there
            for zipped in files:
                fname = '.'.join(zipped.split('.')[:-1])
                fpath = os.path.join(datasetpath, fname)
                if not os.path.exists(fpath):
                    gzpath = os.path.join(extractpath, zipped)
                    if not os.path.exists(gzpath):
                        select = [ti for ti in tar_ref if os.path.split(ti.name)[1] == zipped]
                        tar_ref.extractall(path=data_dir, members=select)
                    # get contents of gz files
                    if not os.path.exists(datasetpath):
                        os.makedirs(datasetpath)
                    with gzip.open(gzpath, 'rb') as fin, open(fpath, 'wb') as fout:
                        fout.write(fin.read())
                    os.remove(gzpath)

    if os.path.exists(extractpath):
        os.rmdir(extractpath)

    # process data and save to h5 file
    # loop through all datasets and get train and valid splits
    for dataset in datafiles.keys():

        s_vocab, t_vocab = create_h5py(data_dir, dataset, 'train',
                                       vocab_size=vocab_size, time_steps=time_steps)
        create_h5py(data_dir, dataset, 'valid', s_vocab=s_vocab, t_vocab=t_vocab,
                    time_steps=time_steps)
Developer: rlugojr, Project: neon, Lines: 58, Source: data.py


Example 4: test_loader

def test_loader():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_dir = os.path.join(args.data_dir, 'macrotrain')
    test_dir = os.path.join(args.data_dir, 'macrotest')
    write_batches(args, train_dir, trainimgs, 0)
    write_batches(args, test_dir, testimgs, 1)
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        repo_dir=train_dir)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       repo_dir=test_dir)
    err = run(args, train, test)
    return err
Developer: pitseli, Project: deepwhales, Lines: 14, Source: compare.py


Example 5: main

def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training, validation and test images
    print("Setting up data batch loaders...")
    train = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp', set_name='train', inner_size=120, subset_pct=100, do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp', set_name='train', inner_size=120, subset_pct=100, do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    # Create AlexNet architecture
    model = constuct_network()

    # model.load_weights(args.model_file)

    # drop weights LR by 1/250**(1/3) at epochs (23, 45, 66), drop bias LR by 1/10 at epoch 45
    weight_sched = Schedule([22, 44, 65, 90, 97], (1 / 250.) ** (1 / 3.))
    opt_gdm = GradientDescentMomentum(0.01, 0.9, wdecay=0.005, schedule=weight_sched)
    opt_biases = GradientDescentMomentum(0.04, 1.0, schedule=Schedule([130], .1))
    opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (mets[0],
                                                                         (1.0 - mets[1]) * 100,
                                                                         (1.0 - mets[2]) * 100))
    test.exit_batch_provider()
    val.exit_batch_provider()
    train.exit_batch_provider()
Developer: mwoodson1, Project: PatternRec-Project, Lines: 49, Source: cropped_CNN.py


Example 6: main

def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterator for training images
    train = ImgMaster(repo_dir="spectroDataTmp", set_name="train", inner_size=400, subset_pct=100)
    val = ImgMaster(
        repo_dir="spectroDataTmp", set_name="validation", inner_size=400, subset_pct=100, do_transforms=False
    )
    test = ImgMaster(
        repo_dir="spectroTestDataTmp", set_name="validation", inner_size=400, subset_pct=100, do_transforms=False
    )

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print "Constructing network..."
    model = constuct_network()

    model.load_weights(args.model_file)

    # Optimizer
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=val, metric=valmetric, **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print "Training network..."
    print args.epochs
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print "Validation set metrics:"
    print "LogLoss: %.2f, Accuracy: %.1f %%0 (Top-1), %.1f %% (Top-5)" % (
        mets[0],
        (1.0 - mets[1]) * 100,
        (1.0 - mets[2]) * 100,
    )
    test.exit_batch_provider()
    train.exit_batch_provider()
Developer: mwoodson1, Project: PatternRec-Project, Lines: 49, Source: spectrogram_CNN.py


Example 7: test_loader

def test_loader():
    print('Testing image loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_archive = os.path.join(args.data_dir, traindir + '-ingested')
    test_archive = os.path.join(args.data_dir, testdir + '-ingested')
    write_batches(args, train_archive, traindir, 0)
    write_batches(args, test_archive, testdir, 1)
    train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
                        scale_range=0, repo_dir=train_archive)
    test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
                       scale_range=0, repo_dir=test_archive)
    err = run(args, train, test)
    return err
Developer: AnnaZhou, Project: neon, Lines: 15, Source: compare.py


Example 8: test_loader

def test_loader():
    print('Testing generic data loader')
    parser = NeonArgparser(__doc__)
    args = parser.parse_args()

    train_path = os.path.join(args.data_dir, traindir + '-ingested')
    test_path = os.path.join(args.data_dir, testdir + '-ingested')

    params = ImageParams(channel_count=3, height=32, width=32)
    common = dict(media_params=params, target_size=1, nclasses=10)
    train = DataLoader('train', repo_dir=os.path.join(args.data_dir, 'train'),
                       **common)
    test = DataLoader('test', repo_dir=os.path.join(args.data_dir, 'test'),
                      **common)
    err = run(args, train, test)
    return err
Developer: JediKoder, Project: neon, Lines: 16, Source: compare.py


Example 9: get_args_and_hyperparameters

def get_args_and_hyperparameters():
    parser = NeonArgparser(__doc__)
    args = parser.parse_args(gen_be=False)
    
    # Default the save path if none was given
    if args.save_path is None:
        args.save_path = 'frcn_alexnet.pickle'
    
    if args.callback_args['save_path'] is None:
        args.callback_args['save_path'] = args.save_path
    
    if args.callback_args['serialize'] is None:
        args.callback_args['serialize'] = min(args.epochs, 10)
    
    
    # hyperparameters
    args.batch_size = 64
    hyper_params = lambda: None  # bare lambda object used as a simple attribute container
    hyper_params.use_pre_trained_weights = True # If true, load pre-trained weights to the model
    hyper_params.max_train_imgs = 5000 # Make this smaller in small trial runs to save time
    hyper_params.max_test_imgs = 5000 # Make this smaller in small trial runs to save time
    hyper_params.num_epochs = args.epochs
    hyper_params.samples_per_batch = args.batch_size # The mini-batch size
    # The number of multi-scale samples to make for each input image. These
    # samples are then fed into the network in multiple minibatches.
    hyper_params.samples_per_img = hyper_params.samples_per_batch*7 
    hyper_params.frcn_fine_tune = False
    hyper_params.shuffle = True
    if hyper_params.use_pre_trained_weights:
        # This will typically train in 10-15 epochs. Use a small learning rate
        # and quickly reduce every 5-10 epochs. Use a high momentum since we
        # are close to the minima.
        s = 1e-4
        hyper_params.learning_rate_scale = s
        hyper_params.learning_rate_sched = Schedule(step_config=[15, 20], 
                                        change=[0.1*s, 0.01*s])
        hyper_params.momentum = 0.9
    else: # need to be less aggressive with reducing learning rate if the model is not pre-trained
        s = 1e-2
        hyper_params.learning_rate_scale = 1e-2
        hyper_params.learning_rate_sched = Schedule(step_config=[8, 14, 18, 20], 
                                        change=[0.5*s, 0.1*s, 0.05*s, 0.01*s])
        hyper_params.momentum = 0.1
    hyper_params.class_score_threshold = 0.000001
    hyper_params.score_exponent = 5
    hyper_params.shuffle = True
    return args, hyper_params
Developer: 623401157, Project: ModelZoo, Lines: 47, Source: transfer_learning.py


Example 10: run_once

def run_once(web_input):
    """
    Run forward pass for a single input. Receives input vector from the web form.
    """

    parser = NeonArgparser(__doc__)
    
    args = parser.parse_args()
    
    num_feat = 4
    
    npzfile = np.load('./model/homeapp_preproc.npz')
    mean = npzfile['mean']
    std = npzfile['std']
    mean = np.reshape(mean, (1,mean.shape[0]))
    std = np.reshape(std, (1,std.shape[0]))
    
    # Reloading saved model
    mlp = Model("./model/homeapp_model.prm")
    
    # Horrible terrible hack that should never be needed :-(
    NervanaObject.be.bsz = 1
    
    # Actual: 275,000 Predicted: 362,177 
    #web_input = np.array([51.2246169879,-1.48577399748,223.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
    # Actual 185,000 Predicted: 244,526
    #web_input = np.array([51.4395375168,-1.07174234072,5.0,0.0,0.0,1.0,0.0,0.0,0.0,1.0,0.0,1.0])
    # Actual 231,500 Predicted 281,053
    # note: the web form input is overridden here with a hard-coded test vector
    web_input = np.array([52.2010084131,-2.18181259148,218.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,1.0])
    web_input = np.reshape(web_input, (1,web_input.shape[0]))
    
    web_input[:,:num_feat-1] -= mean[:,1:num_feat]
    web_input[:,:num_feat-1] /= std[:,1:num_feat]
    
    web_test_set = ArrayIterator(X=web_input, make_onehot=False)
    
    web_output = mlp.get_outputs(web_test_set)
    
    #Rescale the output
    web_output *= std[:,0]
    web_output += mean[:,0]
    
    return web_output[0]
Developer: ankitvb, Project: homeprice, Lines: 43, Source: run_mlp.py


Example 11: main

def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # Set up batch iterators for training, validation and test images
    train = ImgMaster(repo_dir='dataTmp_optFlow_BW', set_name='train', inner_size=240, subset_pct=100)
    val = ImgMaster(repo_dir='dataTmp_optFlow_BW', set_name='train', inner_size=240, subset_pct=100, do_transforms=False)
    test = ImgMaster(repo_dir='dataTestTmp_optFlow_BW', set_name='train', inner_size=240, subset_pct=100, do_transforms=False)

    train.init_batch_provider()
    val.init_batch_provider()
    test.init_batch_provider()

    print("Constructing network...")
    # Create AlexNet architecture
    model = constuct_network()

    # Optimizer for model
    opt = Adadelta()

    # configure callbacks
    valmetric = TopKMisclassification(k=5)
    callbacks = Callbacks(model, train, eval_set=test, metric=valmetric, **args.callback_args)

    cost = GeneralizedCost(costfunc=CrossEntropyMulti())

    # flag = input("Press Enter if you want to begin training process.")
    print("Training network...")
    model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
    mets = model.eval(test, metric=valmetric)

    print('Validation set metrics:')
    print('LogLoss: %.2f, Accuracy: %.1f %% (Top-1), %.1f %% (Top-5)' % (mets[0],
                                                                         (1.0 - mets[1]) * 100,
                                                                         (1.0 - mets[2]) * 100))
    return
Developer: mwoodson1, Project: PatternRec-Project, Lines: 41, Source: optFlow_CRNN.py


Example 12: caption_video

# ... (earlier lines of this excerpt omitted) ...
    return manifest_file


def caption_video(infile, caption, outfile):
    cmd = '''ffmpeg  -i {0} -an \
    -vf drawtext="textfile={1}: fontcolor=white: fontsize=16: box=1: boxcolor=black@0.5" \
    -y {2}'''
    proc = subprocess.Popen(cmd.format(infile, caption, outfile), shell=True)
    proc.communicate()


# parse the command line arguments
demo_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test.cfg')
config_files = [demo_config] if os.path.exists(demo_config) else []

parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--input_video', help='video file')
parser.add_argument('--output_video', help='Video file with overlayed inference hypotheses')
args = parser.parse_args()

assert args.model_file is not None, "need a model file for testing"
model = Model(args.model_file)

assert 'categories' in args.manifest, "Missing categories file"
category_map = {t[0]: t[1] for t in np.genfromtxt(args.manifest['categories'],
                                                  dtype=None, delimiter=',')}

# Make a temporary directory and clean up afterwards
outdir = mkdtemp()
atexit.register(shutil.rmtree, outdir)
caption_file = os.path.join(outdir, 'caption.txt')
Developer: rlugojr, Project: neon, Lines: 31, Source: demo.py


Example 13: NeonArgparser

from neon.util.argparser import NeonArgparser, extract_valid_args
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, StepSchedule
from neon.callbacks.callbacks import Callbacks
from neon.util.persist import save_obj, get_data_cache_dir
from objectlocalization import PASCALVOC
from neon.transforms import CrossEntropyMulti, SmoothL1Loss
from neon.layers import Multicost, GeneralizedCostMask
import util
import faster_rcnn
import os

train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'pascalvoc.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

# parse the command line arguments
parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--width', type=int, default=1000, help='Width of input image')
parser.add_argument('--height', type=int, default=1000, help='Height of input image')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args(gen_be=False)

# hyperparameters
assert args.batch_size == 1, "Faster-RCNN only supports batch size 1"
assert 'train' in args.manifest

rpn_rois_per_img = 256  # number of rois to sample to train rpn
frcn_rois_per_img = 128  # number of rois to sample to train frcn

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
Developer: StevenLOL, Project: neon, Lines: 31, Source: train.py


Example 14: train_mlp

def train_mlp():
    """
    Train data and save scaling and network weights and biases to file
    to be used by forward prop phase on test data
    """
    parser = NeonArgparser(__doc__)

    args = parser.parse_args()

    logger = logging.getLogger()
    logger.setLevel(args.log_thresh)

    # hyperparameters
    num_epochs = args.epochs

    # preprocessor
    std_scale = preprocessing.StandardScaler(with_mean=True, with_std=True)
    # std_scale = feature_scaler(type='Standardizer', with_mean=True, with_std=True)

    # number of non one-hot encoded features, including ground truth
    num_feat = 4

    # load data from csv files and rescale
    # training
    traindf = pd.read_csv('data/train.csv', index_col=0)
    ncols = traindf.shape[1]

    # tmpmat = std_scale.fit_transform(traindf.to_numpy())
    # print(std_scale.scale_)
    # print(std_scale.mean_)

    tmpmat = traindf.to_numpy()
    # print(tmpmat[:, 1:num_feat])

    tmpmat[:, :num_feat] = std_scale.fit_transform(tmpmat[:, :num_feat])
    X_train = tmpmat[:, 1:]
    y_train = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # validation
    validdf = pd.read_csv('data/validate.csv', index_col=0)
    ncols = validdf.shape[1]
    tmpmat = validdf.to_numpy()
    tmpmat[:, :num_feat] = std_scale.transform(tmpmat[:, :num_feat])
    X_valid = tmpmat[:, 1:]
    y_valid = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # test
    testdf = pd.read_csv('data/test.csv', index_col=0)
    ncols = testdf.shape[1]
    tmpmat = testdf.to_numpy()
    tmpmat[:, :num_feat] = std_scale.transform(tmpmat[:, :num_feat])
    X_test = tmpmat[:, 1:]
    y_test = np.reshape(tmpmat[:, 0], (tmpmat[:, 0].shape[0], 1))

    # setup a training set iterator
    train_set = CustomDataIterator(X_train, lshape=(X_train.shape[1]), y_c=y_train)
    # setup a validation data set iterator
    valid_set = CustomDataIterator(X_valid, lshape=(X_valid.shape[1]), y_c=y_valid)
    # setup a test data set iterator
    test_set = CustomDataIterator(X_test, lshape=(X_test.shape[1]), y_c=y_test)

    # setup weight initialization function
    init_norm = Xavier()

    # setup model layers
    layers = [Affine(nout=X_train.shape[1], init=init_norm, activation=Rectlin()),
              Dropout(keep=0.5),
              Affine(nout=X_train.shape[1] // 2, init=init_norm, activation=Rectlin()),
              Linear(nout=1, init=init_norm)]

    # setup cost function as smooth L1 loss
    cost = GeneralizedCost(costfunc=SmoothL1Loss())

    # setup optimizer
    # schedule = ExpSchedule(decay=0.3)
    # optimizer = GradientDescentMomentum(0.0001, momentum_coef=0.9, stochastic_round=args.rounding, schedule=schedule)
    optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1.e-8)

    # initialize model object
    mlp = Model(layers=layers)

    # configure callbacks
    if args.callback_args['eval_freq'] is None:
        args.callback_args['eval_freq'] = 1

    callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)

    callbacks.add_early_stop_callback(stop_func)
    callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))

    # run fit
    mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

    # evaluate model
    print('Evaluation Error = %.4f' % (mlp.eval(valid_set, metric=SmoothL1Metric())))
    print('Test set error = %.4f' % (mlp.eval(test_set, metric=SmoothL1Metric())))
# ... (remainder of this function omitted) ...
Developer: ankitvb, Project: homeprice, Lines: 101, Source: train_mlp.py


Example 15: NeonArgparser

Reference:
https://github.com/karpathy/neuraltalk

"""
from neon.backends import gen_backend
from neon.data import load_flickr8k, ImageCaption, ImageCaptionTest
from neon.initializers import Uniform, Constant
from neon.layers import GeneralizedCostMask, LSTM, Affine, Dropout, Sequential, MergeMultistream
from neon.models import Model
from neon.optimizers import RMSProp
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args

# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args(gen_be=False)

# hyperparameters
hidden_size = 512
num_epochs = args.epochs

# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# download dataset
data_path = load_flickr8k(path=args.data_dir)  # Other setnames are flickr30k and coco

# load data
train_set = ImageCaption(path=data_path, max_images=-1)
Developer: bin2000, Project: neon, Lines: 30, Source: image_caption.py


Example 16: NeonArgparser

from neon.data.pascal_voc import PASCAL_VOC_CLASSES
from neon.data import PASCALVOCInference
from neon.util.argparser import NeonArgparser
from util import create_frcn_model

do_plots = True
try:
    import matplotlib.pyplot as plt
    plt.switch_backend('agg')
except ImportError:
    neon_logger.display('matplotlib needs to be installed manually to generate plots needed '
                        'for this example.  Skipping plot generation')
    do_plots = False

# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--img_prefix', type=str,
                    help='prefix for the saved image file names. If None, use '
                         'the model file name')
args = parser.parse_args(gen_be=True)
assert args.model_file is not None, "need a model file to do Fast R-CNN testing"

if args.img_prefix is None:
    args.img_prefix = os.path.splitext(os.path.basename(args.model_file))[0]

output_dir = os.path.join(args.data_dir, 'frcn_output')
if not os.path.isdir(output_dir):
    os.mkdir(output_dir)

# hyperparameters
args.batch_size = 1
Developer: JediKoder, Project: neon, Lines: 31, Source: demo.py


Example 17: NeonArgparser

            x = x.reshape(x.size)
            y = y.reshape(y.size)
            dd = x - y
            worst_case = np.max(np.abs(dd))
            print "worst case abs diff = %e" % worst_case
            ind = np.where((x != 0) | (y != 0))
            rel_err = np.abs(np.divide(dd[ind], np.abs(x[ind]) + np.abs(y[ind])))
            worst_case = np.max(rel_err)
            print "worst case rel diff = %e" % worst_case
            assert False
    else:
        assert x == y


# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()

# hyperparameters
batch_size = 128
num_epochs = args.epochs


def gen_model(backend_type):
    # setup backend
    gen_backend(
        backend=backend_type, batch_size=batch_size, rng_seed=2, device_id=args.device_id, default_dtype=args.datatype
    )

    init_uni = Uniform(low=-0.1, high=0.1)
Developer: GerritKlaschke, Project: neon, Lines: 30, Source: serialization_check.py


Example 18: dict

except ImportError as err:
    neon_logger.display("Running this example requires scipy packages.")
    neon_logger.display("try activating your virtualenv then: pip install scipy")
    sys.exit(1)

from neon.models import Model
from neon.layers import Activation
from neon.data.datasets import Dataset
from neon.layers import GeneralizedCost
from neon.transforms.cost import Cost
from neon.util.argparser import NeonArgparser

# force use of CPU backend since we require a batch size of 1
# (GPU needs a multiple of 32)
default_overrides = dict(backend='cpu', batch_size=1)
parser = NeonArgparser(__doc__, default_overrides=default_overrides)
parser.add_argument("image", help="Base image to create dream on.")
parser.add_argument("--dream_file", default='dream_out.png',
                    help="Save dream to named file.")
args = parser.parse_args()


# redirect the dream file to the path of output_file
if args.output_file is None:
    output_dir = parser.work_dir
elif osp.isdir(args.output_file):
    output_dir = args.output_file
else:
    output_dir = osp.dirname(args.output_file)

args.dream_file = osp.expanduser(
Developer: StevenLOL, Project: neon, Lines: 31, Source: deep_dream.py


Example 19: NeonArgparser

Googlenet V1 implementation
"""

import os

from neon.util.argparser import NeonArgparser
from neon.layers import Conv, Pooling, MergeBroadcast, BranchNode, Affine, Tree, Dropout
from neon.layers import GeneralizedCost, Multicost
from neon.initializers import Constant, Xavier
from neon.backends import gen_backend
from neon.optimizers import GradientDescentMomentum, MultiOptimizer
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ImageLoader

parser = NeonArgparser(__doc__)
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
parser.add_argument('--test_only', action='store_true',
                    help='skip fitting - evaluate metrics on trained model weights')
args = parser.parse_args()

# setup data provider
img_set_options = dict(repo_dir=args.data_dir, inner_size=224,
                       dtype=args.datatype, subset_pct=args.subset_pct)
test = ImageLoader(set_name='validation', scale_range=(256, 256),
                   do_transforms=False, **img_set_options)

init1 = Xavier(local=False)
initx = Xavier(local=True)
bias = Constant(val=0.20)
Developer: BwRy, Project: NervanaModelZoo, Lines: 31, Source: googlenet_neon.py


Example 20: NeonArgparser

import sys
from neon.util.argparser import NeonArgparser
from neon.backends import gen_backend
from neon.initializers import Constant, Gaussian
from neon.layers import Conv, Dropout, Pooling, GeneralizedCost, Affine
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti, TopKMisclassification
from neon.models import Model
from neon.data import ImgMaster
from neon.callbacks.callbacks import Callbacks, Callback

# For running complete alexnet
# alexnet.py -e 90 -val 1 -s <save-path> -w <path-to-saved-batches>
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--model_file', help='load model from pkl file')
args = parser.parse_args()

# hyperparameters
batch_size = 128

# setup backend
be = gen_backend(backend=args.backend, rng_seed=args.rng_seed, device_id=args.device_id,
                 batch_size=batch_size, default_dtype=args.datatype)

try:
    train = ImgMaster(repo_dir=args.data_dir, inner_size=224, set_name='train')
    test = ImgMaster(repo_dir=args.data_dir, inner_size=224, set_name='validation',
                      do_transforms=False)
except (OSError, IOError, ValueError) as err:
Developer: sunclx, Project: neon, Lines: 30, Source: alexnet.py



Note: the neon.util.argparser.NeonArgparser class examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not reproduce this compilation without permission.

