This page collects typical usage examples of the Dataset class from the Python module simplelearn.data.dataset. If you are wondering what the Dataset class does, how to use it, or want to see it in working code, the hand-picked class examples below should help.
Below, 13 code examples of the Dataset class are shown, sorted by popularity by default.
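Before working through the examples, here is a minimal sketch of the pattern they all share: wrap one or more numpy tensors in a Dataset (with parallel names and formats lists), ask the dataset for an iterator, and build symbolic input nodes from that iterator. The import path of DenseFormat is an assumption (the examples below do not show their import lines), so treat this as an illustrative sketch rather than canonical API documentation.

import numpy
from simplelearn.data.dataset import Dataset
from simplelearn.formats import DenseFormat  # assumed import path; not shown in the examples below

# 100 ten-dimensional feature vectors, stored along the 'b' (batch) axis.
vectors = numpy.random.uniform(-1.0, 1.0, size=(100, 10))
fmt = DenseFormat(axes=('b', 'f'), shape=(-1, 10), dtype=vectors.dtype)

# names, formats, and tensors are parallel lists describing each stored tensor.
dataset = Dataset(names=['vectors'], formats=[fmt], tensors=[vectors])

# Sequential iterator over mini-batches, plus the symbolic input node(s)
# that the training loops in the examples below feed into their models.
iterator = dataset.iterator(iterator_type='sequential', batch_size=10)
input_node, = iterator.make_input_nodes()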
Example 1: test_mean_over_epoch
def test_mean_over_epoch():
    rng = numpy.random.RandomState(3851)
    vectors = rng.uniform(-1.0, 1.0, size=(12, 10))
    fmt = DenseFormat(axes=('b', 'f'), shape=(-1, 10), dtype=vectors.dtype)
    dataset = Dataset(names=['vectors'], formats=[fmt], tensors=[vectors])
    iterator = dataset.iterator('sequential',
                                batch_size=2,
                                loop_style="divisible")
    input_node = iterator.make_input_nodes()[0]
    l2_norm_node = L2Norm(input_node)
    num_averages_compared = [0]

    def compare_with_expected_average(values, _):  # ignore format argument
        assert_equal(len(values), 1)
        average = values[0]
        assert_is_instance(fmt, DenseFormat)
        l2_norms = numpy.sqrt((vectors ** 2.0).sum(fmt.axes.index('f')))
        expected_average = l2_norms.sum() / l2_norms.size
        assert_allclose(average, expected_average)
        num_averages_compared[0] += 1

    average_monitor = MeanOverEpoch(l2_norm_node,
                                    [compare_with_expected_average])

    class DatasetRandomizer(EpochCallback):
        '''
        Fills the dataset with a fresh set of random values after each epoch.
        '''

        def on_start_training(self):
            pass

        def on_epoch(self):
            vectors[...] = rng.uniform(-1.0, 1.0, size=vectors.shape)

    trainer = Sgd([input_node],
                  iterator,
                  callbacks=[average_monitor,
                             LimitsNumEpochs(3),
                             DatasetRandomizer()])
    trainer.train()

    assert_equal(num_averages_compared[0], 3)
Developer: paulfun92, Project: simplelearn, Lines of code: 50, Source file: test_training.py
Example 2: main
def main():
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/multilayer_perceptron/:
    #   multilayer_perceptron.ipynb
    #   mlp_tutorial_part_3.yaml
    sizes = [500, 500, 10]
    sparse_init_counts = [15, 15]
    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_equal(sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    # split training set into training and validation sets
    tensors = mnist_training.tensors
    training_tensors = [t[:-args.validation_size, ...] for t in tensors]
    validation_tensors = [t[-args.validation_size:, ...] for t in tensors]

    if args.no_shuffle_dataset == False:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
            training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
            validation_tensors[0], validation_tensors[1])

    all_images_shared = theano.shared(numpy.vstack([training_tensors[0], validation_tensors[0]]))
    all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1], validation_tensors[1]]))

    length_training = training_tensors[0].shape[0]
    length_validation = validation_tensors[0].shape[0]
    indices_training = numpy.asarray(range(length_training))
    indices_validation = numpy.asarray(range(length_training, length_training + length_validation))

    indices_training_dataset = Dataset(
        tensors=[indices_training], names=["indices"],
        formats=[DenseFormat(axes=["b"], shape=[-1], dtype="int64")])
    indices_validation_dataset = Dataset(
        tensors=[indices_validation], names=["indices"],
        formats=[DenseFormat(axes=["b"], shape=[-1], dtype="int64")])

    indices_training_iterator = indices_training_dataset.iterator(
        iterator_type="sequential", batch_size=args.batch_size)
    indices_validation_iterator = indices_validation_dataset.iterator(
        iterator_type="sequential", batch_size=10000)

    mnist_validation_iterator = indices_validation_iterator
    mnist_training_iterator = indices_training_iterator

    input_indices_symbolic, = indices_training_iterator.make_input_nodes()
    image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
    label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)

    image_node = CastNode(image_lookup_node, "floatX")
    # image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(34523)
    theano_rng = RandomStreams(23845)

    (affine_nodes, output_node) = build_fc_classifier(
        image_node, sizes, sparse_init_counts, args.dropout_include_rates, rng, theano_rng)

    loss_node = CrossEntropy(output_node, label_lookup_node)
    loss_sum = loss_node.output_symbol.mean()
    max_epochs = 200

    #
    # Makes parameter updaters
    #
    parameters = []
    parameter_updaters = []
    momentum_updaters = []

    for affine_node in affine_nodes:
        for params in (affine_node.linear_node.params, affine_node.bias_node.params):
            parameters.append(params)
            gradients = theano.gradient.grad(loss_sum, params)
            parameter_updater = SgdParameterUpdater(
                params, gradients, args.learning_rate, args.initial_momentum, args.nesterov)
            parameter_updaters.append(parameter_updater)
            momentum_updaters.append(LinearlyInterpolatesOverEpochs(
                parameter_updater.momentum, args.final_momentum, args.epochs_to_momentum_saturation))

    #
    # Makes batch and epoch callbacks
    #
    """
    def make_output_basename(args):
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: SGD_nesterov.py
Example 3: main
def main():
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/multilayer_perceptron/:
    #   multilayer_perceptron.ipynb
    #   mlp_tutorial_part_3.yaml
    sizes = [500, 500, 10]
    sparse_init_counts = [15, 15]
    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_equal(sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    if args.validation_size == 0:
        # use testing set as validation set
        mnist_validation = mnist_testing
    else:
        # split training set into training and validation sets
        tensors = mnist_training.tensors
        training_tensors = [t[:-args.validation_size, ...] for t in tensors]
        validation_tensors = [t[-args.validation_size:, ...] for t in tensors]
        mnist_training = Dataset(tensors=training_tensors,
                                 names=mnist_training.names,
                                 formats=mnist_training.formats)
        mnist_validation = Dataset(tensors=validation_tensors,
                                   names=mnist_training.names,
                                   formats=mnist_training.formats)

    mnist_validation_iterator = mnist_validation.iterator(
        iterator_type='sequential',
        batch_size=args.batch_size)
    image_uint8_node, label_node = mnist_validation_iterator.make_input_nodes()
    image_node = CastNode(image_uint8_node, 'floatX')
    # image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(34523)
    theano_rng = RandomStreams(23845)

    (affine_nodes,
     output_node) = build_fc_classifier(image_node,
                                        sizes,
                                        sparse_init_counts,
                                        args.dropout_include_rates,
                                        rng,
                                        theano_rng)

    loss_node = CrossEntropy(output_node, label_node)
    loss_sum = loss_node.output_symbol.mean()
    max_epochs = 10000

    #
    # Makes parameter updaters
    #
    parameters = []
    parameter_updaters = []
    momentum_updaters = []

    for affine_node in affine_nodes:
        for params in (affine_node.linear_node.params,
                       affine_node.bias_node.params):
            parameters.append(params)
            gradients = theano.gradient.grad(loss_sum, params)
            parameter_updater = SgdParameterUpdater(params,
                                                    gradients,
                                                    args.learning_rate,
                                                    args.initial_momentum,
                                                    args.nesterov)
            parameter_updaters.append(parameter_updater)
            momentum_updaters.append(LinearlyInterpolatesOverEpochs(
                parameter_updater.momentum,
                args.final_momentum,
                args.epochs_to_momentum_saturation))

    #
    # Makes batch and epoch callbacks
    #
    def make_output_basename(args):
        assert_equal(os.path.splitext(args.output_prefix)[1], "")
        if os.path.isdir(args.output_prefix) and \
           not args.output_prefix.endswith('/'):
            args.output_prefix += '/'

        output_dir, output_prefix = os.path.split(args.output_prefix)
        if output_prefix != "":
            output_prefix = output_prefix + "_"

        output_prefix = os.path.join(output_dir, output_prefix)

        return "{}lr-{}_mom-{}_nesterov-{}_bs-{}".format(output_prefix,
                                                         args.learning_rate,
                                                         args.initial_momentum,
                                                         args.nesterov,
                                                         args.batch_size)
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: simplelearn, Lines of code: 101, Source file: mnist_fully_connected.py
Example 4: main
def main():
    '''
    Entry point of this script.
    '''
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/convolutional_network/:
    #   convolutional_network.ipynb
    filter_counts = [96, 192, 192]
    filter_init_uniform_ranges = [0.005] * len(filter_counts)
    filter_shapes = [(8, 8), (8, 8), (5, 5)]
    pool_shapes = [(4, 4), (4, 4), (2, 2)]
    pool_strides = [(2, 2), (2, 2), (2, 2)]
    pool_pads = [(2, 2), (2, 2), (2, 2)]
    affine_output_sizes = [10]
    affine_init_stddevs = [.05] * len(affine_output_sizes)
    dropout_include_rates = [0.8, 0.5, 0.5, 0.5]
    # dropout_include_rates = ([.8 if args.dropout else 1.0] *
    #                          (len(filter_counts) + len(affine_output_sizes)))
    conv_pads = [(4, 4), (3, 3), (3, 3)]

    assert_equal(affine_output_sizes[-1], 10)

    def unpickle(file):
        import cPickle
        fo = open(file, 'rb')
        dict = cPickle.load(fo)
        fo.close()
        return dict

    batch1 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_1')
    batch2 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_2')
    batch3 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_3')
    batch4 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_4')
    batch5 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_5')

    training_tensors = [numpy.concatenate((batch1['data'].reshape(10000, 3, 32, 32),
                                           batch2['data'].reshape(10000, 3, 32, 32),
                                           batch3['data'].reshape(10000, 3, 32, 32),
                                           batch4['data'].reshape(10000, 3, 32, 32))),
                        numpy.concatenate((batch1['labels'], batch2['labels'],
                                           batch3['labels'], batch4['labels']))]
    validation_tensors = [batch5['data'].reshape(10000, 3, 32, 32), numpy.asarray(batch5['labels'])]

    if args.no_shuffle_dataset == False:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
            training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
            validation_tensors[0], validation_tensors[1])

    all_images_shared = theano.shared(numpy.vstack([training_tensors[0], validation_tensors[0]]))
    all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1], validation_tensors[1]]))

    length_training = training_tensors[0].shape[0]
    length_validation = validation_tensors[0].shape[0]
    indices_training = numpy.asarray(range(length_training))
    indices_validation = numpy.asarray(range(length_training, length_training + length_validation))

    indices_training_dataset = Dataset(tensors=[indices_training], names=['indices'],
                                       formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])
    indices_validation_dataset = Dataset(tensors=[indices_validation], names=['indices'],
                                         formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])

    indices_training_iterator = indices_training_dataset.iterator(iterator_type='sequential',
                                                                  batch_size=args.batch_size)
    indices_validation_iterator = indices_validation_dataset.iterator(iterator_type='sequential',
                                                                      batch_size=args.batch_size)

    mnist_validation_iterator = indices_validation_iterator
    mnist_training_iterator = indices_training_iterator

    input_indices_symbolic, = indices_training_iterator.make_input_nodes()
    image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
    label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)

    image_node = RescaleImage(image_lookup_node)
    image_node = Lcn(image_node)

    rng = numpy.random.RandomState(129734)
    theano_rng = RandomStreams(2387845)

    (conv_layers,
     affine_layers,
     output_node) = build_conv_classifier(image_node,
                                          filter_shapes,
                                          filter_counts,
                                          filter_init_uniform_ranges,
                                          pool_shapes,
                                          pool_strides,
                                          affine_output_sizes,
                                          affine_init_stddevs,
                                          dropout_include_rates,
                                          conv_pads,
                                          rng,
                                          theano_rng)

    loss_node = CrossEntropy(output_node, label_lookup_node)
    scalar_loss = loss_node.output_symbol.mean()

    if args.weight_decay != 0.0:
        for conv_layer in conv_layers:
            filters = conv_layer.conv2d_node.filters
            filter_loss = args.weight_decay * theano.tensor.sqr(filters).sum()
            scalar_loss = scalar_loss + filter_loss
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: cifar10_conv3.py
Example 5: main
def main():
    '''
    Entry point of this script.
    '''
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/convolutional_network/:
    #   convolutional_network.ipynb
    filter_counts = [64, 64]
    filter_init_uniform_ranges = [.05] * len(filter_counts)
    filter_shapes = [(5, 5), (5, 5)]
    pool_shapes = [(4, 4), (4, 4)]
    pool_strides = [(2, 2), (2, 2)]
    affine_output_sizes = [10]
    affine_init_stddevs = [.05] * len(affine_output_sizes)
    dropout_include_rates = ([.5 if args.dropout else 1.0] *
                             (len(filter_counts) + len(affine_output_sizes)))

    assert_equal(affine_output_sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    # split training set into training and validation sets
    tensors = mnist_training.tensors
    training_tensors = [t[:-args.validation_size, ...] for t in tensors]
    validation_tensors = [t[-args.validation_size:, ...] for t in tensors]

    if args.no_shuffle_dataset == False:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
            training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
            validation_tensors[0], validation_tensors[1])

    all_images_shared = theano.shared(numpy.vstack([training_tensors[0], validation_tensors[0]]))
    all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1], validation_tensors[1]]))

    length_training = training_tensors[0].shape[0]
    length_validation = validation_tensors[0].shape[0]
    indices_training = numpy.asarray(range(length_training))
    indices_validation = numpy.asarray(range(length_training, length_training + length_validation))

    indices_training_dataset = Dataset(tensors=[indices_training], names=['indices'],
                                       formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])
    indices_validation_dataset = Dataset(tensors=[indices_validation], names=['indices'],
                                         formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])

    indices_training_iterator = indices_training_dataset.iterator(iterator_type='sequential',
                                                                  batch_size=args.batch_size)
    indices_validation_iterator = indices_validation_dataset.iterator(iterator_type='sequential',
                                                                      batch_size=10000)

    mnist_validation_iterator = indices_validation_iterator
    mnist_training_iterator = indices_training_iterator

    input_indices_symbolic, = indices_training_iterator.make_input_nodes()
    image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
    label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)

    image_node = RescaleImage(image_lookup_node)

    rng = numpy.random.RandomState(129734)
    theano_rng = RandomStreams(2387845)

    (conv_layers,
     affine_layers,
     output_node,
     params_flat,
     params_old_flat,
     shapes) = build_conv_classifier(image_node,
                                     filter_shapes,
                                     filter_counts,
                                     filter_init_uniform_ranges,
                                     pool_shapes,
                                     pool_strides,
                                     affine_output_sizes,
                                     affine_init_stddevs,
                                     dropout_include_rates,
                                     rng,
                                     theano_rng)

    loss_node = CrossEntropy(output_node, label_lookup_node)
    scalar_loss = loss_node.output_symbol.mean()
    # scalar_loss2 = theano.clone(scalar_loss, replace = {params_flat: params_old_flat})

    if args.weight_decay != 0.0:
        for conv_layer in conv_layers:
            filters = conv_layer.conv2d_node.filters
            filter_loss = args.weight_decay * theano.tensor.sqr(filters).sum()
            scalar_loss = scalar_loss + filter_loss

        for affine_layer in affine_layers:
            weights = affine_layer.affine_node.linear_node.params
            weight_loss = args.weight_decay * theano.tensor.sqr(weights).sum()
            scalar_loss = scalar_loss + weight_loss

    max_epochs = 500

    #
    # Makes parameter updater
    #
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: LBFGS_mnist_conv3.py
Example 6: main
def main():
    '''
    Entry point of this script.
    '''
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/convolutional_network/:
    #   convolutional_network.ipynb
    filter_counts = [64, 64]
    filter_init_uniform_ranges = [.05] * len(filter_counts)
    filter_shapes = [(5, 5), (5, 5)]
    pool_shapes = [(4, 4), (4, 4)]
    pool_strides = [(2, 2), (2, 2)]
    affine_output_sizes = [10]
    affine_init_stddevs = [.05] * len(affine_output_sizes)
    dropout_include_rates = ([.5 if args.dropout else 1.0] *
                             (len(filter_counts) + len(affine_output_sizes)))

    assert_equal(affine_output_sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    # split training set into training and validation sets
    tensors = mnist_training.tensors
    training_tensors = [t[:-args.validation_size, ...] for t in tensors]
    validation_tensors = [t[-args.validation_size:, ...] for t in tensors]

    if args.no_shuffle_dataset == False:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
            training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
            validation_tensors[0], validation_tensors[1])

    all_images_shared = theano.shared(numpy.vstack([training_tensors[0], validation_tensors[0]]))
    all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1], validation_tensors[1]]))

    length_training = training_tensors[0].shape[0]
    length_validation = validation_tensors[0].shape[0]
    indices_training = numpy.asarray(range(length_training))
    indices_validation = numpy.asarray(range(length_training, length_training + length_validation))

    indices_training_dataset = Dataset(tensors=[indices_training], names=['indices'],
                                       formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])
    indices_validation_dataset = Dataset(tensors=[indices_validation], names=['indices'],
                                         formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])

    indices_training_iterator = indices_training_dataset.iterator(iterator_type='sequential',
                                                                  batch_size=args.batch_size)
    indices_validation_iterator = indices_validation_dataset.iterator(iterator_type='sequential',
                                                                      batch_size=10000)
    training_iterator_full = indices_training_dataset.iterator(iterator_type='sequential',
                                                               batch_size=args.batch_size_full)

    mnist_validation_iterator = indices_validation_iterator
    mnist_training_iterator = indices_training_iterator

    input_indices_symbolic, = indices_training_iterator.make_input_nodes()
    image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
    label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)

    image_node = RescaleImage(image_lookup_node)

    rng = numpy.random.RandomState(129734)
    theano_rng = RandomStreams(2387845)

    (conv_layers,
     affine_layers,
     output_node) = build_conv_classifier(image_node,
                                          filter_shapes,
                                          filter_counts,
                                          filter_init_uniform_ranges,
                                          pool_shapes,
                                          pool_strides,
                                          affine_output_sizes,
                                          affine_init_stddevs,
                                          dropout_include_rates,
                                          rng,
                                          theano_rng)

    loss_node = CrossEntropy(output_node, label_lookup_node)
    scalar_loss = loss_node.output_symbol.mean()

    if args.weight_decay != 0.0:
        for conv_layer in conv_layers:
            filters = conv_layer.conv2d_node.filters
            filter_loss = args.weight_decay * theano.tensor.sqr(filters).sum()
            scalar_loss = scalar_loss + filter_loss

        for affine_layer in affine_layers:
            weights = affine_layer.affine_node.linear_node.params
            weight_loss = args.weight_decay * theano.tensor.sqr(weights).sum()
            scalar_loss = scalar_loss + weight_loss

    max_epochs = 200

    #
    # Extract variables
    #
    parameters = []
    old_parameters = []
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: S2GD_plus.py
Example 7: main
def main():
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/multilayer_perceptron/:
    #   multilayer_perceptron.ipynb
    #   mlp_tutorial_part_3.yaml
    sizes = [500, 500, 10]
    sparse_init_counts = [15, 15]
    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_equal(sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    if args.validation_size == 0:
        # use testing set as validation set
        mnist_validation = mnist_testing
    else:
        # split training set into training and validation sets
        tensors = mnist_training.tensors
        size_tensors = tensors[0].shape[0]
        training_tensors = [t[:-args.validation_size, ...] for t in tensors]
        validation_tensors = [t[size_tensors - args.validation_size:, ...] for t in tensors]

        shuffle_dataset = True
        if shuffle_dataset == True:
            def shuffle_in_unison_inplace(a, b):
                assert len(a) == len(b)
                p = numpy.random.permutation(len(a))
                return a[p], b[p]

            [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
                training_tensors[0], training_tensors[1])
            [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
                validation_tensors[0], validation_tensors[1])

        mnist_training = Dataset(tensors=training_tensors,
                                 names=mnist_training.names,
                                 formats=mnist_training.formats)
        mnist_validation = Dataset(tensors=validation_tensors,
                                   names=mnist_training.names,
                                   formats=mnist_training.formats)

    mnist_validation_iterator = mnist_validation.iterator(
        iterator_type='sequential',
        batch_size=args.batch_size)
    image_uint8_node, label_node = mnist_validation_iterator.make_input_nodes()
    image_node = CastNode(image_uint8_node, 'floatX')
    # image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(34523)
    theano_rng = RandomStreams(23845)

    (affine_nodes,
     output_node) = build_fc_classifier(image_node,
                                        sizes,
                                        sparse_init_counts,
                                        args.dropout_include_rates,
                                        rng,
                                        theano_rng)

    loss_node = CrossEntropy(output_node, label_node)
    loss_sum = loss_node.output_symbol.mean()
    max_epochs = 10000

    #
    # Makes parameter updaters
    #
    parameters = []
    parameters_peek_ahead = []

    for affine_node in affine_nodes:
        for params in (affine_node.linear_node.params,
                       affine_node.bias_node.params):
            parameters.append(params)
            parameter_peek_ahead = theano.shared(
                numpy.zeros(params.get_value().shape, dtype=params.dtype))
            parameters_peek_ahead.append(parameter_peek_ahead)

    loss_sum2 = theano.clone(
        loss_sum,
        replace={parameter: parameter_peek_ahead
                 for parameter, parameter_peek_ahead
                 in safe_izip(parameters, parameters_peek_ahead)})

    #
    # Makes parameter updaters
    #
    training_iterator = mnist_training.iterator(iterator_type='sequential',
                                                batch_size=args.batch_size)

    parameter_updaters = []
    momentum_updaters = []

    for params, params_peek_ahead in safe_izip(parameters, parameters_peek_ahead):
        gradient_peek_ahead = theano.gradient.grad(loss_sum2, params_peek_ahead)
        parameter_updater = RMSpropSgdParameterUpdater(params,
                                                       params_peek_ahead,
                                                       gradient_peek_ahead,
                                                       args.learning_rate,
                                                       args.initial_momentum,
                                                       args.nesterov)
        parameter_updaters.append(parameter_updater)
        momentum_updaters.append(LinearlyInterpolatesOverEpochs(
            parameter_updater.momentum,
            args.final_momentum,
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: RMSprop_nesterov2_mnist_fully_connected.py
Example 8: main
def main():
    '''
    Entry point of this script.
    '''
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/convolutional_network/:
    #   convolutional_network.ipynb
    filter_counts = [96, 192, 192]
    filter_init_uniform_ranges = [0.005] * len(filter_counts)
    filter_shapes = [(8, 8), (8, 8), (5, 5)]
    pool_shapes = [(4, 4), (4, 4), (2, 2)]
    pool_strides = [(2, 2), (2, 2), (2, 2)]
    pool_pads = [(2, 2), (2, 2), (2, 2)]
    affine_output_sizes = [10]
    affine_init_stddevs = [.005] * len(affine_output_sizes)
    dropout_include_rates = [0.8, 0.5, 0.5, 0.5]
    # dropout_include_rates = ([.8 if args.dropout else 1.0] *
    #                          (len(filter_counts) + len(affine_output_sizes)))
    conv_pads = [(4, 4), (3, 3), (3, 3)]

    assert_equal(affine_output_sizes[-1], 10)

    def unpickle(file):
        import cPickle
        fo = open(file, 'rb')
        dict = cPickle.load(fo)
        fo.close()
        return dict

    batch1 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_1')
    batch2 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_2')
    batch3 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_3')
    batch4 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_4')
    batch5 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_5')

    training_tensors = [numpy.concatenate((batch1['data'].reshape(10000, 3, 32, 32),
                                           batch2['data'].reshape(10000, 3, 32, 32),
                                           batch3['data'].reshape(10000, 3, 32, 32),
                                           batch4['data'].reshape(10000, 3, 32, 32))),
                        numpy.concatenate((batch1['labels'], batch2['labels'],
                                           batch3['labels'], batch4['labels']))]
    validation_tensors = [batch5['data'].reshape(10000, 3, 32, 32), numpy.asarray(batch5['labels'])]

    shuffle_dataset = True
    if shuffle_dataset == True:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
            training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
            validation_tensors[0], validation_tensors[1])

    cifar10_training = Dataset(tensors=training_tensors,
                               names=('images', 'labels'),
                               formats=(DenseFormat(axes=('b', 'c', '0', '1'),
                                                    shape=(-1, 3, 32, 32),
                                                    dtype='uint8'),
                                        DenseFormat(axes=('b',),
                                                    shape=(-1,),
                                                    dtype='int64')))
    cifar10_validation = Dataset(tensors=validation_tensors,
                                 names=('images', 'labels'),
                                 formats=(DenseFormat(axes=('b', 'c', '0', '1'),
                                                      shape=(-1, 3, 32, 32),
                                                      dtype='uint8'),
                                          DenseFormat(axes=('b',),
                                                      shape=(-1,),
                                                      dtype='int64')))

    cifar10_validation_iterator = cifar10_validation.iterator(
        iterator_type='sequential',
        loop_style='divisible',
        batch_size=args.batch_size)
    image_uint8_node, label_node = cifar10_validation_iterator.make_input_nodes()
    image_node = RescaleImage(image_uint8_node)
    image_node_lcn = Lcn(image_node)
    # image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(3447523)
    theano_rng = RandomStreams(2387345)

    (conv_layers,
     affine_layers,
     output_node) = build_conv_classifier(image_node_lcn,
                                          filter_shapes,
                                          filter_counts,
                                          filter_init_uniform_ranges,
                                          pool_shapes,
                                          pool_strides,
                                          pool_pads,
                                          affine_output_sizes,
                                          affine_init_stddevs,
                                          dropout_include_rates,
                                          conv_pads,
                                          rng,
                                          theano_rng)

    loss_node = CrossEntropy(output_node, label_node)
    scalar_loss = loss_node.output_symbol.mean()
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: cifar10_conv.py
Example 9: load_mnist
mnist_training, mnist_testing = load_mnist()

# split training set into training and validation sets
tensors = mnist_training.tensors
training_tensors = [t[:-args.validation_size, ...] for t in tensors]
validation_tensors = [t[-args.validation_size:, ...] for t in tensors]

if args.no_shuffle_dataset == False:
    def shuffle_in_unison_inplace(a, b):
        assert len(a) == len(b)
        p = numpy.random.permutation(len(a))
        return a[p], b[p]

    [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
        training_tensors[0], training_tensors[1])
    [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
        validation_tensors[0], validation_tensors[1])

mnist_training = Dataset(tensors=training_tensors,
                         names=mnist_training.names,
                         formats=mnist_training.formats)
mnist_validation = Dataset(tensors=validation_tensors,
                           names=mnist_training.names,
                           formats=mnist_training.formats)

mnist_validation_iterator = mnist_validation.iterator(
    iterator_type='sequential',
    batch_size=args.batch_size)

image_uint8_node, label_node = mnist_validation_iterator.make_input_nodes()
image_node = CastNode(image_uint8_node, 'floatX')
# image_node = RescaleImage(image_uint8_node)
Developer: paulfun92, Project: project_code, Lines of code: 29, Source file: Dataset_test.py
Example 10: main
def main():
    '''
    Entry point of this script.
    '''
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/convolutional_network/:
    #   convolutional_network.ipynb
    filter_counts = [64, 64]
    filter_init_uniform_ranges = [.05] * len(filter_counts)
    filter_shapes = [(5, 5), (5, 5)]
    pool_shapes = [(4, 4), (4, 4)]
    pool_strides = [(2, 2), (2, 2)]
    affine_output_sizes = [10]
    affine_init_stddevs = [.05] * len(affine_output_sizes)
    dropout_include_rates = ([.5 if args.dropout else 1.0] *
                             (len(filter_counts) + len(affine_output_sizes)))

    assert_equal(affine_output_sizes[-1], 10)

    mnist_training, mnist_testing = load_mnist()

    if args.validation_size == 0:
        # use testing set as validation set
        mnist_validation = mnist_testing
    else:
        # split training set into training and validation sets
        tensors = mnist_training.tensors
        training_tensors = [t[:-args.validation_size, ...] for t in tensors]
        validation_tensors = [t[-args.validation_size:, ...] for t in tensors]
        mnist_training = Dataset(tensors=training_tensors,
                                 names=mnist_training.names,
                                 formats=mnist_training.formats)
        mnist_validation = Dataset(tensors=validation_tensors,
                                   names=mnist_training.names,
                                   formats=mnist_training.formats)

    mnist_validation_iterator = mnist_validation.iterator(
        iterator_type='sequential',
        loop_style='divisible',
        batch_size=args.batch_size)
    image_uint8_node, label_node = mnist_validation_iterator.make_input_nodes()
    image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(1234)
    theano_rng = RandomStreams(23845)

    (conv_layers,
     affine_layers,
     output_node) = build_conv_classifier(image_node,
                                          filter_shapes,
                                          filter_counts,
                                          filter_init_uniform_ranges,
                                          pool_shapes,
                                          pool_strides,
                                          affine_output_sizes,
                                          affine_init_stddevs,
                                          dropout_include_rates,
                                          rng,
                                          theano_rng)

    loss_node = CrossEntropy(output_node, label_node)
    scalar_loss = loss_node.output_symbol.mean()

    if args.weight_decay != 0.0:
        for conv_layer in conv_layers:
            filters = conv_layer.conv2d_node.filters
            filter_loss = args.weight_decay * theano.tensor.sqr(filters).sum()
            scalar_loss = scalar_loss + filter_loss

        for affine_layer in affine_layers:
            weights = affine_layer.affine_node.linear_node.params
            weight_loss = args.weight_decay * theano.tensor.sqr(weights).sum()
            scalar_loss = scalar_loss + weight_loss

    max_epochs = 500

    #
    # Makes parameter updaters
    #
    parameters = []
    parameter_updaters = []
    momentum_updaters = []

    def add_updaters(parameter,
                     scalar_loss,
                     parameter_updaters,
                     momentum_updaters):
        '''
        Adds a ParameterUpdater to parameter_updaters, and a
        LinearlyInterpolatesOverEpochs to momentum_updaters.
        '''
        gradient = theano.gradient.grad(scalar_loss, parameter)
        parameter_updaters.append(SgdParameterUpdater(parameter,
                                                      gradient,
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: simplelearn, Lines of code: 101, Source file: mnist_conv.py
Example 11: main
def main():
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/multilayer_perceptron/:
    #   multilayer_perceptron.ipynb
    #   mlp_tutorial_part_3.yaml
    sizes = [500, 500, 10]
    sparse_init_counts = [15, 15]
    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_equal(sizes[-1], 10)

    '''
    mnist_training, mnist_testing = load_mnist()

    if args.validation_size == 0:
        # use testing set as validation set
        mnist_validation = mnist_testing
    else:
        # split training set into training and validation sets
        tensors = mnist_training.tensors
        size_tensors = tensors[0].shape[0]
        training_tensors = [t[:-args.validation_size, ...] for t in tensors]
        validation_tensors = [t[size_tensors - args.validation_size:, ...] for t in tensors]

    shuffle_dataset = True
    if shuffle_dataset == True:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(validation_tensors[0], validation_tensors[1])
    '''

    def unpickle(file):
        import cPickle
        fo = open(file, 'rb')
        dict = cPickle.load(fo)
        fo.close()
        return dict

    batch1 = unpickle('/home/paul/cifar-10-batches-py/data_batch_1')
    batch2 = unpickle('/home/paul/cifar-10-batches-py/data_batch_2')
    batch3 = unpickle('/home/paul/cifar-10-batches-py/data_batch_3')
    batch4 = unpickle('/home/paul/cifar-10-batches-py/data_batch_4')
    batch5 = unpickle('/home/paul/cifar-10-batches-py/data_batch_5')

    training_tensors = [numpy.concatenate((batch1['data'].reshape(10000, 3, 32, 32),
                                           batch2['data'].reshape(10000, 3, 32, 32),
                                           batch3['data'].reshape(10000, 3, 32, 32),
                                           batch4['data'].reshape(10000, 3, 32, 32))),
                        numpy.concatenate((batch1['labels'], batch2['labels'],
                                           batch3['labels'], batch4['labels']))]
    validation_tensors = [batch5['data'].reshape(10000, 3, 32, 32), numpy.asarray(batch5['labels'])]

    shuffle_dataset = True
    if shuffle_dataset == True:
        def shuffle_in_unison_inplace(a, b):
            assert len(a) == len(b)
            p = numpy.random.permutation(len(a))
            return a[p], b[p]

        [training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(
            training_tensors[0], training_tensors[1])
        [validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(
            validation_tensors[0], validation_tensors[1])

    cifar10_training = Dataset(tensors=training_tensors,
                               names=('images', 'labels'),
                               formats=(DenseFormat(axes=('b', 'c', '0', '1'),
                                                    shape=(-1, 3, 32, 32),
                                                    dtype='uint8'),
                                        DenseFormat(axes=('b',),
                                                    shape=(-1,),
                                                    dtype='int64')))
    cifar10_validation = Dataset(tensors=validation_tensors,
                                 names=('images', 'labels'),
                                 formats=(DenseFormat(axes=('b', 'c', '0', '1'),
                                                      shape=(-1, 3, 32, 32),
                                                      dtype='uint8'),
                                          DenseFormat(axes=('b',),
                                                      shape=(-1,),
                                                      dtype='int64')))

    cifar10_validation_iterator = cifar10_validation.iterator(
        iterator_type='sequential',
        batch_size=args.batch_size)
    image_uint8_node, label_node = cifar10_validation_iterator.make_input_nodes()
    image_node = CastNode(image_uint8_node, 'floatX')
    image_node_lcn = Lcn(image_node)
    # image_node = RescaleImage(image_uint8_node)

    rng = numpy.random.RandomState(3447523)
    theano_rng = RandomStreams(2387345)

    (affine_nodes,
     output_node) = build_fc_classifier(image_node_lcn,
                                        sizes,
                                        sparse_init_counts,
                                        args.dropout_include_rates,
                                        rng,
                                        theano_rng)
# ......... the rest of the code is omitted here .........
Developer: paulfun92, Project: project_code, Lines of code: 101, Source file: SGD_mnist_fully_connected.py
Example 12: shuffle_in_unison_inplace
def shuffle_in_unison_inplace(a, b):
    assert len(a) == len(b)
    p = numpy.random.permutation(len(a))
    return a[p], b[p]

[training_tensors[0], training_tensors[1]] = shuffle_in_unison_inplace(training_tensors[0], training_tensors[1])
[validation_tensors[0], validation_tensors[1]] = shuffle_in_unison_inplace(validation_tensors[0], validation_tensors[1])

all_images_shared = theano.shared(numpy.vstack([training_tensors[0], validation_tensors[0]]))
all_labels_shared = theano.shared(numpy.concatenate([training_tensors[1], validation_tensors[1]]))

length_training = training_tensors[0].shape[0]
length_validation = validation_tensors[0].shape[0]
indices_training = numpy.asarray(range(length_training))
indices_validation = numpy.asarray(range(length_training, length_training + length_validation))

indices_training_dataset = Dataset(tensors=[indices_training], names=['indices'],
                                   formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])
indices_validation_dataset = Dataset(tensors=[indices_validation], names=['indices'],
                                     formats=[DenseFormat(axes=['b'], shape=[-1], dtype='int64')])

indices_training_iterator = indices_training_dataset.iterator(iterator_type='sequential', batch_size=50000)
indices_validation_iterator = indices_validation_dataset.iterator(iterator_type='sequential', batch_size=10000)

mnist_validation_iterator = indices_validation_iterator
mnist_training_iterator = indices_training_iterator

input_indices_symbolic, = indices_training_iterator.make_input_nodes()
image_lookup_node = ImageLookeupNode(input_indices_symbolic, all_images_shared)
label_lookup_node = LabelLookeupNode(input_indices_symbolic, all_labels_shared)

input_size = training_tensors[0].shape[1] * training_tensors[0].shape[2]
sizes = [500, 500, 10]

float_image_node = RescaleImage(image_lookup_node)
Developer: paulfun92, Project: project_code, Lines of code: 31, Source file: LBGFS_mnist_fully_connected4.py
Example 13: main
def main():
    args = parse_args()

    # Hyperparameter values taken from Pylearn2:
    # In pylearn2/scripts/tutorials/multilayer_perceptron/:
    #   multilayer_perceptron.ipynb
    #   mlp_tutorial_part_3.yaml
    sizes = [1000, 1000, 1000, 10]
    sparse_init_counts = [15, 15, 15]
    assert_equal(len(sparse_init_counts), len(sizes) - 1)
    assert_equal(sizes[-1], 10)

    def unpickle(file):
        import cPickle
        fo = open(file, 'rb')
        dict = cPickle.load(fo)
        fo.close()
        return dict

    batch1 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_1')
    batch2 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_2')
    batch3 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_3')
    batch4 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_4')
    batch5 = unpickle('/home/s1422538/datasets/simplelearn/cifar10/original_files/cifar-10-batches-py/data_batch_5')

    training_tensors = [numpy.concatenate((batch1['data'].reshape(10000, 3, 32, 32),
                                           batch2['data'].reshape(10000, 3, 32, 32),
                                           batch3['data'].reshape(10000, 3, 32, 32),
                                           batch4['data'].reshape(10000, 3, 32, 32))),
                        numpy.concatenate((batch1['labels'], batch2['labels'],
                                           batch3['labels'], batch4['labels']))]
    validation_tensors = [batch5['data'].reshape(10000, 3, 32, 32), numpy.asarray(batch5['labels'])]
# ......... the rest of the code is omitted here .........