This page collects typical usage examples of the Python function neon.util.compat.range. If you have been wondering what range does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
It presents 20 code examples of the range function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
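For context, neon.util.compat.range is neon's Python 2/3 compatibility alias: a lazy range that behaves like Python 3's built-in range (i.e. xrange on Python 2). Below is a minimal sketch of the kind of shim such a compat module provides; the exact implementation inside neon may be organized differently.

    import sys

    # Hedged sketch of a py2/py3 range shim; neon's actual compat
    # module may differ in detail.
    if sys.version_info[0] >= 3:
        range = range       # Python 3: range is already lazy
    else:
        range = xrange      # noqa: F821 -- Python 2 only

    # Because the result is lazy, callers wrap it in list() whenever an
    # indexable sequence is needed, as several examples below do:
    idcs = list(range(5))   # [0, 1, 2, 3, 4]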
Example 1: get_ranks
def get_ranks(self, values):
    """
    Computes the rank of the list of values passed, from lowest to highest.
    Note that ties are given an equal ranking value (the average of their
    positions).

    Arguments:
        values (list): The list of numeric values to be ranked.

    Returns:
        list: Same length as values, with the positional rank of each
              original value (1-based).
    """
    num_vals = len(values)
    srt_vals = sorted(zip(values, list(range(num_vals))))
    ranks = [0 for i in values]
    val = srt_vals[0][0]
    high_rank = 0
    for i in range(num_vals):
        if val != srt_vals[i][0]:
            val = srt_vals[i][0]
            for j in range(high_rank, i):
                ranks[srt_vals[j][1]] = float(high_rank + i + 1) / 2.0
            high_rank = i
        if i == (num_vals - 1):
            for j in range(high_rank, i + 1):
                ranks[srt_vals[j][1]] = float(high_rank + i + 2) / 2.0
    return ranks
Developer: AI-Cdrone, project: neon, lines: 28, source file: roc.py
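The tie-handling convention above (ties share the average of the 1-based positions they span) matches SciPy's default ranking method. A quick cross-check, purely illustrative and not something neon uses internally:

    from scipy.stats import rankdata

    # Ties at positions 2 and 3 both receive rank (2 + 3) / 2 = 2.5.
    print(rankdata([10, 20, 20, 30]))  # [1.  2.5 2.5 4. ]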
Example 2: trunc_bprop_tt
def trunc_bprop_tt(self, debug, numgrad=None):
    """
    TODO: move the loop over t into the layer class.
    """
    if numgrad is None:
        min_unroll = 1
    else:
        logger.debug("MLP.bprop single unrolling for numgrad")
        min_unroll = self.unrolls

    for tau in range(min_unroll-0, self.unrolls+1):
        self.cost_layer.cost.set_outputbuf(
            self.class_layer.output_list[tau-1])
        self.cost_layer.bprop(None, tau-1)
        if debug:
            tmp = self.cost_layer.targets[tau-1].asnumpyarray()
            tmp = tmp.argmax(0)[0]
            logger.debug("in RNNB.bprop, tau %d target %d" % (tau-1, tmp))
        error = self.cost_layer.deltas
        self.class_layer.bprop(error, tau, numgrad=numgrad)
        error = self.class_layer.deltas
        for t in list(range(0, tau))[::-1]:
            if 'c_t' in self.rec_layer.__dict__:
                cerror = self.rec_layer.celtas  # on t=0, prev batch state
            else:
                cerror = None  # for normal RNN
            self.rec_layer.bprop(error, cerror, t, numgrad=numgrad)
            error[:] = self.rec_layer.deltas  # [TODO] why need deepcopy?
Developer: nkhuyu, project: neon, lines: 28, source file: rnn.py
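The inner loop materializes the lazy compat range with list() before slicing it backwards; reversed() walks the same indices without the intermediate copy. A toy check of the equivalence:

    tau = 4
    print(list(range(0, tau))[::-1])   # [3, 2, 1, 0]
    print(list(reversed(range(tau))))  # [3, 2, 1, 0]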
Example 3: load
def load(self, backend=None, experiment=None):
    if self.inputs['train'] is not None:
        return
    if 'repo_path' in self.__dict__:
        self.repo_path = os.path.expandvars(os.path.expanduser(
            self.repo_path))
        save_dir = os.path.join(self.repo_path,
                                self.__class__.__name__)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        for url in (self.raw_train_input_gz, self.raw_train_target_gz,
                    self.raw_test_input_gz, self.raw_test_target_gz):
            name = os.path.basename(url).rstrip('.gz')
            repo_gz_file = os.path.join(save_dir, name + '.gz')
            repo_file = repo_gz_file.rstrip('.gz')
            if not os.path.exists(repo_file):
                self.download_to_repo(url, save_dir)
                with gzip.open(repo_gz_file, 'rb') as infile:
                    with open(repo_file, 'w') as outfile:
                        for line in infile:
                            outfile.write(line)
            logger.info('loading: %s', name)
            if 'images' in repo_file and 'train' in repo_file:
                indat = self.read_image_file(repo_file, 'float32')
                # flatten to 1D images
                self.inputs['train'] = indat
            elif 'images' in repo_file and 't10k' in repo_file:
                indat = self.read_image_file(repo_file, 'float32')
                self.inputs['test'] = indat[0:self.num_test_sample]
            elif 'labels' in repo_file and 'train' in repo_file:
                indat = self.read_label_file(repo_file)
                # Prep a 1-hot label encoding
                tmp = np.zeros((indat.shape[0], 10), dtype=np.float32)
                for col in range(10):
                    tmp[:, col] = indat == col
                self.targets['train'] = tmp
            elif 'labels' in repo_file and 't10k' in repo_file:
                indat = self.read_label_file(
                    repo_file)[0:self.num_test_sample]
                tmp = np.zeros((self.num_test_sample, 10),
                               dtype=np.float32)
                for col in range(10):
                    tmp[:, col] = indat == col
                self.targets['test'] = tmp
            else:
                logger.error('problems loading: %s', name)
        if 'sample_pct' in self.__dict__:
            self.sample_training_data()
        if hasattr(self, 'validation_pct'):
            self.split_set(
                self.validation_pct, from_set='train', to_set='validation')
        self.format()
    else:
        raise AttributeError('repo_path not specified in config')
Developer: neuroidss, project: neon, lines: 55, source file: mnist.py
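One subtlety in the loader above: str.rstrip('.gz') strips any trailing run of the characters '.', 'g', and 'z' rather than the literal suffix. It happens to work for the MNIST file names used here; a suffix-safe alternative, shown for illustration only:

    name = 'train-images-idx3-ubyte.gz'
    if name.endswith('.gz'):
        name = name[:-len('.gz')]
    print(name)  # train-images-idx3-ubyte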
Example 4: load
def load(self, backend=None, experiment=None):
    self.initialize()
    if self.inputs['train'] is not None:
        return
    if 'repo_path' in self.__dict__:
        self.repo_path = os.path.expandvars(os.path.expanduser(
            self.repo_path))
        save_dir = os.path.join(self.repo_path,
                                self.__class__.__name__)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        train_idcs = list(range(1000000))  # 1M letters out of 1.23M
        test_idcs = range(1000000, 1010000)
        if 'sample_pct' in self.__dict__:
            if self.sample_pct >= 1.0:
                self.sample_pct /= 100.0
                logger.info('sampling pct: %0.2f' % self.sample_pct)
            if self.sample_pct < 1.0:
                # numpy.random.shuffle(train_idcs)
                pass
            train_idcs = train_idcs[0:int(1000000 * self.sample_pct)]
        url = self.raw_base_url
        name = os.path.basename(url).rstrip('.txt')
        repo_file = os.path.join(save_dir, name + '.txt')
        if not os.path.exists(repo_file):
            self.download_to_repo(url, save_dir)
        logger.info('loading: %s' % name)
        indat = self.read_txt_file(repo_file)
        self.preinputs = dict()
        self.preinputs['train'] = indat[:, train_idcs]
        self.preinputs['test'] = indat[:, test_idcs]
        for dataset in ('train', 'test'):
            # note: integer division under Python 2
            num_batches = self.preinputs[dataset].shape[1]/self.batch_size
            idx_list = numpy.arange(num_batches * self.batch_size)
            idx_list = idx_list.reshape(self.batch_size, num_batches)
            splay_3d = self.preinputs[dataset][:, idx_list.T]
            splay_3d = numpy.transpose(splay_3d, (1, 0, 2))
            splay_3d = splay_3d.reshape(-1, self.batch_size)
            self.inputs[dataset] = splay_3d
            offbyone = numpy.zeros(splay_3d.shape)
            length = offbyone.shape[0]
            offbyone[0:length - self.data_dim, :] = splay_3d[self.data_dim:
                                                             length, :]
            self.targets[dataset] = offbyone
        if hasattr(self, 'validation_pct'):
            self.split_set(
                self.validation_pct, from_set='train', to_set='validation')
        self.format(dtype=self.backend_type)  # runs transpose_batches
    else:
        raise AttributeError('repo_path not specified in config')
Developer: neuroidss, project: neon, lines: 52, source file: mobydick.py
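The index juggling around idx_list arranges the character stream so that each column of a minibatch carries a contiguous slice of the text. A toy illustration of the same trick, with assumed shapes (12 samples, batch_size 3):

    import numpy as np

    batch_size, num_batches = 3, 4
    idx_list = np.arange(num_batches * batch_size).reshape(batch_size,
                                                           num_batches)
    print(idx_list.T)
    # [[ 0  4  8]
    #  [ 1  5  9]
    #  [ 2  6 10]
    #  [ 3  7 11]]
    # Reading down any column yields consecutive positions of the source,
    # so each of the batch_size streams stays contiguous across batches.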
Example 5: link_local
def link_local(self):
    req_param(self, ['nifm', 'ifmshape', 'fshape'])
    opt_param(self, ['ofmlocs', 'links'])
    opt_param(self, ['deltasbuf', 'outputbuf'])
    opt_param(self, ['nofm'], self.nifm)
    opt_param(self, ['pooling'], False)
    opt_param(self, ['stride'], 1)
    opt_param(self, ['pad'], 0)
    assert len(self.ifmshape) == len(self.fshape)
    ofmshape = []
    for dim in range(len(self.ifmshape)):
        assert self.ifmshape[dim] >= self.fshape[dim]
        num = self.ifmshape[dim] - self.fshape[dim] + 2 * self.pad
        ofmshape.extend([num // self.stride + 1])
    self.ofmshape = tuple(ofmshape)
    self.negpad = -self.pad
    self.ifmsize = np.prod(self.ifmshape)
    self.ofmsize = np.prod(self.ofmshape)
    self.fpsize = np.prod(self.fshape)
    self.fsize = self.nifm * self.fpsize
    self.nout = self.nofm * self.ofmsize
    logger.debug('name=%s, nifm=%d, ifmshape=%s, ofmshape=%s',
                 self.name, self.nifm, self.ifmshape, self.ofmshape)
Developer: neuroidss, project: neon, lines: 26, source file: layer.py
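The loop applies the standard output-size arithmetic for convolution and pooling, per dimension: ofm = (ifm - f + 2 * pad) // stride + 1. A quick worked check:

    # e.g. a 28-pixel dimension with a 5-pixel filter, no padding, stride 1:
    ifm, f, pad, stride = 28, 5, 0, 1
    print((ifm - f + 2 * pad) // stride + 1)  # 24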
Example 6: predict_fullset
def predict_fullset(self, dataset, setname):
    """
    Generate predictions and true labels for the given dataset.

    Note that this requires enough memory to hold the predictions and
    labels for the entire dataset at one time (not recommended for large
    datasets; see predict_generator instead).

    Arguments:
        dataset: A neon dataset instance
        setname: Which set to compute predictions for (test, train, val)

    Returns:
        tuple: on each call will yield a 2-tuple of outputs and references.
               The first item is the model probabilities for each class,
               and the second item is either the one-hot or raw labels
               with ground truth.

    See Also:
        predict_generator
    """
    self.data_layer.init_dataset(dataset)
    assert self.data_layer.has_set(setname)
    self.data_layer.use_set(setname, predict=True)
    self.data_layer.reset_counter()
    predlabels = self.backend.empty((1, self.batch_size))
    labels = self.backend.empty((1, self.batch_size))
    outputs_pred = self.backend.zeros((self.data_layer.num_batches *
                                       self.unrolls, self.batch_size))
    outputs_targ = self.backend.zeros((self.data_layer.num_batches *
                                       self.unrolls, self.batch_size))
    mb_id = 0
    self.data_layer.reset_counter()
    while self.data_layer.has_more_data():
        mb_id += 1
        self.reset(mb_id)
        self.fprop(debug=False)
        # time unrolling loop to disseminate fprop results
        for tau in range(self.unrolls):
            probs = self.class_layer.output_list[tau]
            targets = self.data_layer.targets[tau]
            self.backend.argmax(targets, axis=0, out=labels)
            self.backend.argmax(probs, axis=0, out=predlabels)
            # collect batches to re-assemble continuous data
            idx = self.unrolls * (mb_id - 1) + tau
            outputs_pred[idx, :] = predlabels
            outputs_targ[idx, :] = labels
    self.data_layer.cleanup()
    # flatten the 2d predictions into our canonical 1D format
    pred_flat = outputs_pred.transpose().reshape((1, -1))
    targ_flat = outputs_targ.transpose().reshape((1, -1))
    self.write_string(pred_flat, targ_flat, setname)
    return (pred_flat, targ_flat)
Developer: nkhuyu, project: neon, lines: 60, source file: rnn.py
Example 7: extract_images
def extract_images(self, overwrite=False):
    from neon.data import load_cifar10
    from PIL import Image
    dataset = dict()
    dataset['train'], dataset['val'], _ = load_cifar10(self.out_dir,
                                                       normalize=False)
    for setn in ('train', 'val'):
        data, labels = dataset[setn]
        img_dir = os.path.join(self.out_dir, setn)
        ulabels = np.unique(labels)
        for ulabel in ulabels:
            subdir = os.path.join(img_dir, str(ulabel))
            if not os.path.exists(subdir):
                os.makedirs(subdir)
        for idx in range(data.shape[0]):
            im = np.pad(data[idx].reshape((3, 32, 32)), self.pad_width,
                        mode='mean')
            im = np.uint8(np.transpose(im, axes=[1, 2, 0]).copy())
            im = Image.fromarray(im)
            path = os.path.join(img_dir, str(labels[idx][0]),
                                str(idx) + '.png')
            im.save(path, format='PNG')
        if setn == 'train':
            self.pixel_mean = list(data.mean(axis=0).reshape(3,
                                                             -1).mean(axis=1))
            self.pixel_mean.reverse()  # We will see this in BGR order b/c of opencv
Developer: AnnaZhou, project: neon, lines: 26, source file: batch_writer.py
Example 8: load_data
def load_data(self, shape):
    data = np.random.uniform(low=0.0, high=1.0, size=shape)
    labels = np.random.randint(low=0, high=self.nout, size=shape[0])
    onehot = np.zeros((len(labels), self.nout), dtype='float32')
    for col in range(self.nout):
        onehot[:, col] = (labels == col)
    return (data, onehot)
Developer: Eynaliyev, project: neon, lines: 7, source file: synthetic.py
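The column-by-column one-hot loop over range(self.nout) is equivalent to a single vectorized scatter. A standalone sketch, for comparison only:

    import numpy as np

    labels = np.array([2, 0, 1, 2])
    nout = 3
    onehot = np.zeros((len(labels), nout), dtype='float32')
    onehot[np.arange(len(labels)), labels] = 1.0
    # equivalently: onehot = np.eye(nout, dtype='float32')[labels]
    print(onehot)
    # [[0. 0. 1.]
    #  [1. 0. 0.]
    #  [0. 1. 0.]
    #  [0. 0. 1.]]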
Example 9: predict_generator
def predict_generator(self, dataset, setname):
    """
    Generate flattened predictions and true labels for the given dataset,
    one mini-batch at a time.

    Arguments:
        dataset: A neon dataset instance
        setname: Which set to compute predictions for (test, train, val)

    Returns:
        tuple: on each call will yield a 2-tuple of outputs and references.
               The first item is the model probabilities for each class,
               and the second item is either the one-hot or raw labels
               with ground truth.

    See Also:
        predict_fullset
    """
    # TODO: find some alternate way of re-assembling data that doesn't
    # require allocating space for the entire dataset so we can avoid the
    # call to predict_fullset
    (pred_flat, targ_flat) = self.predict_fullset(dataset, setname)
    for i in range(self.data_layer.num_batches):
        start = i * self.unrolls * self.batch_size
        end = start + (self.unrolls * self.batch_size)
        yield (pred_flat[start:end], targ_flat[start:end])
Developer: Eynaliyev, project: neon, lines: 27, source file: rnn.py
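Each iteration slices one mini-batch worth (unrolls * batch_size columns) out of the flattened predictions. The same arithmetic on a toy list:

    unrolls, batch_size, num_batches = 2, 3, 2
    flat = list(range(unrolls * batch_size * num_batches))
    for i in range(num_batches):
        start = i * unrolls * batch_size
        end = start + (unrolls * batch_size)
        print(flat[start:end])
    # [0, 1, 2, 3, 4, 5]
    # [6, 7, 8, 9, 10, 11]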
Example 10: write_csv_files
def write_csv_files(self):
    # Get the labels as the subdirs
    subdirs = glob(os.path.join(self.image_dir, '*'))
    self.label_names = sorted([os.path.basename(x) for x in subdirs])
    indexes = range(len(self.label_names))
    self.label_dict = {k: v for k, v in zip(self.label_names, indexes)}

    tlines = []
    vlines = []
    for subdir in subdirs:
        subdir_label = self.label_dict[os.path.basename(subdir)]
        files = glob(os.path.join(subdir, self.file_pattern))
        if self.class_samples_max is not None:
            files = files[:self.class_samples_max]
        lines = [(filename, subdir_label) for filename in files]
        v_idx = int(self.validation_pct * len(lines))
        tlines += lines[v_idx:]
        vlines += lines[:v_idx]
    np.random.shuffle(tlines)

    if not os.path.exists(self.out_dir):
        os.makedirs(self.out_dir)
    for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]):
        with gzip.open(ff, 'wb') as f:
            f.write('filename,l_id\n')
            for tup in ll:
                f.write('{},{}\n'.format(*tup))

    self.train_nrec = len(tlines)
    self.train_start = 0
    self.val_nrec = len(vlines)
    self.val_start = -(-self.train_nrec // self.macro_size)
Developer: AnnaZhou, project: neon, lines: 35, source file: batch_writer.py
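The final line uses the -(-a // b) idiom: integer ceiling division, here the number of macro-size chunks needed to cover train_nrec. A quick check:

    a, b = 10, 3
    print(-(-a // b))        # 4, i.e. ceil(10 / 3)
    print((a + b - 1) // b)  # 4, an equivalent spelling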
Example 11: write_csv_files
def write_csv_files(self):
    files = glob(os.path.join(self.image_dir, "*.jpg"))
    files.sort()
    if self.val_frac != 1.0:
        filemap, idmap, x1map, y1map, x2map, y2map = read_labels(
            self.image_dir, self.points1_file, self.points2_file,
            self.target_size
        )
    if self.id_label == 1:
        self.label_names = ["id"]
    else:
        self.label_names = ["x1", "y1", "x2", "y2"]
    indexes = range(len(self.label_names))
    self.label_dict = {k: v for k, v in zip(self.label_names, indexes)}

    tlines = []
    vlines = []
    np.random.shuffle(files)
    v_idx = int(self.val_frac * len(files))
    tfiles = files[v_idx:]
    vfiles = files[:v_idx]
    vfiles.sort()
    if self.id_label == 1:
        if self.val_frac == 1.0:
            vlines = [(f, 0) for f in vfiles]
        else:
            tlines = [(f, idmap[filemap[f]]) for f in tfiles]
    else:
        if self.val_frac == 1.0:
            vlines = [(f, 0, 0, 0, 0) for f in vfiles]
        else:
            tlines = [(f, x1map[f], y1map[f], x2map[f], y2map[f])
                      for f in tfiles]
    np.random.shuffle(tlines)

    if not os.path.exists(self.out_dir):
        os.makedirs(self.out_dir)
    for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]):
        with open(ff, "wb") as f:
            if self.id_label == 1:
                f.write("filename,id\n")
                for tup in ll:
                    f.write("{},{}\n".format(*tup))
            else:
                f.write("filename,x,y\n")
                for tup in ll:
                    f.write("{},{},{},{},{}\n".format(*tup))

    self.train_nrec = len(tlines)
    self.ntrain = -(-self.train_nrec // self.macro_size)
    self.train_start = 0
    self.val_nrec = len(vlines)
    self.nval = -(-self.val_nrec // self.macro_size)
    if self.ntrain == 0:
        self.val_start = 100
    else:
        self.val_start = 10 ** int(np.log10(self.ntrain * 10))
Developer: shettyrajesh, project: whale-2015, lines: 59, source file: batch_writer.py
Example 12: write_batches
def write_batches(self, name, start, labels, imfiles, targets=None,
                  is_tar=False):
    pool = Pool(processes=self.num_workers)
    psz = self.batch_size
    osz = self.output_image_size
    npts = (len(imfiles) + psz - 1) // psz
    imfiles = [imfiles[i*psz: (i+1)*psz] for i in range(npts)]
    if targets is not None:
        targets = [targets[i*psz: (i+1)*psz].T.copy() for i in range(npts)]
    # iteritems: Python 2 dict API
    labels = [{k: v[i*psz: (i+1)*psz] for k, v in labels.iteritems()}
              for i in range(npts)]
    accum_buf = np.zeros(self.train_mean.shape, dtype=np.int32)
    batch_mean = np.zeros(accum_buf.shape, dtype=np.uint8)
    logger.info("Writing %s batches...", name)
    for i, jpeg_file_batch in enumerate(imfiles):
        t = time()
        if is_tar:
            jpeg_file_batch = [j.read() for j in jpeg_file_batch]
        jpeg_strings = pool.map(
            functools.partial(proc_img, is_string=is_tar), jpeg_file_batch)
        targets_batch = None if targets is None else targets[i]
        labels_batch = labels[i]
        bfile = os.path.join(self.out_dir, 'data_batch_%d' % (start + i))
        serialize({'data': jpeg_strings,
                   'labels': labels_batch,
                   'targets': targets_batch},
                  bfile)
        logger.info("Wrote to %s (%s batch %d of %d) (%.2f sec)",
                    self.out_dir, name, i + 1, len(imfiles), time() - t)
        # get the means and accumulate
        imgworker.calc_batch_mean(jpglist=jpeg_strings, tgt=batch_mean,
                                  orig_size=osz, rgb=self.rgb,
                                  nthreads=self.num_workers)
        # scale for the case where we have an undersized batch
        if len(jpeg_strings) < self.batch_size:
            batch_mean *= len(jpeg_strings) / self.batch_size
        accum_buf += batch_mean
    pool.close()
    mean_buf = self.train_mean if name == 'train' else self.val_mean
    mean_buf[:] = accum_buf / len(imfiles)
Developer: nkhuyu, project: neon, lines: 46, source file: batch_writer.py
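The range(npts) comprehensions at the top chunk a flat file list into fixed-size batches, with npts computed by the same ceiling-division idiom seen in Example 10. A toy version of that slicing:

    items = list('abcdefgh')
    psz = 3
    npts = (len(items) + psz - 1) // psz  # ceil(8 / 3) = 3
    print([items[i * psz:(i + 1) * psz] for i in range(npts)])
    # [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h']]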
Example 13: allocate_output_bufs
def allocate_output_bufs(self):
    make_zbuf = self.backend.zeros
    opt_param(self, ['out_shape'], (self.nout, self.batch_size))
    self.output = make_zbuf(self.out_shape, self.output_dtype)
    self.pre_act = self.activation.pre_act_buffer(self.backend,
                                                  self.output,
                                                  self.pre_act_dtype)
    # TODO: Get rid of output and pre_act. But they seem to be used in the
    # cost to set a buffer size.
    self.pre_act_list = [self.pre_act] + \
        [make_zbuf(self.out_shape, self.pre_act_dtype)
         for k in range(1, self.unrolls)]
    self.output_list = [self.output] + \
        [make_zbuf(self.out_shape, self.output_dtype)
         for k in range(1, self.unrolls)]
Developer: JesseLivezey, project: neon, lines: 17, source file: recurrent.py
Example 14: allocate_output_bufs
def allocate_output_bufs(self):
    make_zbuf = self.backend.zeros
    super(RecurrentHiddenLayer, self).allocate_output_bufs()
    # these buffers are specific to RHL:
    # might want self.temp_in = temp_out, to save a buffer.
    self.temp_in = make_zbuf(self.weight_shape, self.weight_dtype)
    self.temp_rec = make_zbuf(self.weight_rec_shape)
    # Extra temp buffers z[0]=w*x and z[1]=w*input.
    self.z = [make_zbuf(self.out_shape) for k in range(2)]
Developer: AI-Cdrone, project: neon, lines: 10, source file: recurrent.py
Example 15: transpose_batches
def transpose_batches(self, data, dtype, is_target=False):
    """
    Transpose each minibatch within the dataset.
    """
    bs = self.data_dim * self.unrolls
    dd = self.data_dim
    if data.shape[0] % bs != 0:
        logger.warning('Incompatible batch size. '
                       'Discarding %d samples...',
                       data.shape[0] % bs)
    nbatches = data.shape[0] / bs  # integer division under Python 2
    batchwise = [[] for k in range(nbatches)]
    for batch in range(nbatches):
        batchdata = [self.backend.array(data[(batch * bs + k * dd):
                                             (batch * bs + (k + 1) * dd)],
                                        dtype)
                     for k in range(self.unrolls)]
        batchwise[batch] = batchdata
    return batchwise
Developer: neuroidss, project: neon, lines: 19, source file: mobydick.py
Example 16: ellipse
def ellipse(self, canvas, xrad, yrad):
    rcanvas = canvas.reshape((self.nifm, self.ifmheight, self.ifmwidth))
    smooth = 10
    angs = np.linspace(0, 2 * np.pi, smooth * 360)
    si = np.sin(angs)
    co = np.cos(angs)
    xvals = np.int32(xrad * co) + self.center[0]
    yvals = np.int32(yrad * si) + self.center[1]
    for fm in range(self.nifm):
        rcanvas[fm, xvals, yvals] = np.random.randint(256)
Developer: Eynaliyev, project: neon, lines: 10, source file: synthetic.py
Example 17: initialize
def initialize(self, kwargs):
    super(BranchLayer, self).initialize(kwargs)
    self.startidx = [0] * len(self.sublayers)
    self.endidx = [0] * len(self.sublayers)
    self.endidx[0] = self.sublayers[0].nout
    for i in range(1, len(self.sublayers)):
        self.endidx[i] = self.endidx[i - 1] + self.sublayers[i].nout
        self.startidx[i] = self.endidx[i - 1]
    self.allocate_output_bufs()
Developer: AI-Cdrone, project: neon, lines: 11, source file: compositional.py
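The loop maintains running start/end offsets marking each sublayer's slice of the concatenated output. The same bookkeeping can be expressed with itertools.accumulate (Python 3); shown here only as a cross-check, with hypothetical sublayer widths:

    from itertools import accumulate

    nouts = [4, 3, 5]                 # hypothetical sublayer widths
    endidx = list(accumulate(nouts))  # [4, 7, 12]
    startidx = [0] + endidx[:-1]      # [0, 4, 7]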
Example 18: fprop
def fprop(self, debug=False, eps_tau=-1, eps=0,
          num=None):
    """
    Adding numerical gradient functionality here to avoid duplicate fprops.
    TODO: Make a version where the for tau loop is inside the layer. The
    best way is to have a base class for both RNN and LSTM for this.
    """
    self.data_layer.fprop(None)  # get next mini batch
    inputs = self.data_layer.output
    y = self.rec_layer.output_list  # note: just a shorthand, no copy.
    c = [None for k in range(len(y))]
    if 'c_t' in self.rec_layer.__dict__:
        c = self.rec_layer.c_t

    # loop for rec_layer
    for tau in range(0, self.unrolls):
        if num and num['target'] and (tau == eps_tau):
            # inject epsilon for numerical gradient
            numpy_target = num['target'][num['i'], num['j']].asnumpyarray()
            num['target'][num['i'], num['j']] = (numpy_target + eps)
        if debug:
            logger.debug("in RNNB.fprop, tau %d, input %s" % (tau,
                         inputs[tau].asnumpyarray().argmax(0)[0:5]))
        self.rec_layer.fprop(y[tau-1], c[tau-1], inputs[tau], tau)
        if num and num['target'] and (tau == eps_tau):
            # remove epsilon
            num['target'][num['i'], num['j']] = numpy_target

    # loop for class_layer
    for tau in range(0, self.unrolls):
        if num and num['target'] and (tau == eps_tau):
            # inject epsilon for numerical gradient
            numpy_target = num['target'][num['i'], num['j']].asnumpyarray()
            num['target'][num['i'], num['j']] = (numpy_target + eps)
        if debug:
            logger.debug("in RNNB.fprop, tau %d, input %s" % (tau,
                         inputs[tau].asnumpyarray().argmax(0)[0:5]))
        self.class_layer.fprop(y[tau], tau)
        if num and num['target'] and (tau == eps_tau):
            # remove epsilon
            num['target'][num['i'], num['j']] = numpy_target
Developer: nkhuyu, project: neon, lines: 41, source file: rnn.py
Example 19: load_file
def load_file(self, filename, nclasses):
    logger.info("loading: %s", filename)
    dict = deserialize(filename)  # note: shadows the dict builtin
    full_image = np.float32(dict["data"])
    full_image /= 255.0
    labels = np.array(dict["labels"])
    onehot = np.zeros((len(labels), nclasses), dtype="float32")
    for col in range(nclasses):
        onehot[:, col] = labels == col
    return (full_image, onehot)
Developer: nkhuyu, project: neon, lines: 12, source file: cifar10.py
Example 20: load
def load(self, backend=None, experiment=None):
    """
    main function
    """
    import scipy.io
    if 'repo_path' in self.__dict__:
        self.repo_path = os.path.expandvars(os.path.expanduser(
            self.repo_path))
        save_dir = os.path.join(self.repo_path,
                                self.__class__.__name__)
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        train_idcs = list(range(10000))
        if 'sample_pct' in self.__dict__:
            if self.sample_pct > 1.0:
                self.sample_pct /= 100.0
            if self.sample_pct < 1.0:
                numpy.random.seed(self.backend.rng_seed)
                numpy.random.shuffle(train_idcs)
            train_idcs = train_idcs[0:int(10000 * self.sample_pct)]
        for url in (self.raw_train_unwhitened, self.raw_train_whitened):
            name = os.path.basename(url).rstrip('.mat')
            repo_mat_file = os.path.join(save_dir, name + '.mat')
            repo_file = repo_mat_file.rstrip('.mat')
            # download and create dataset
            if not os.path.exists(repo_file):
                self.download_to_repo(url, save_dir)
                infile = scipy.io.loadmat(repo_mat_file)
                with open(repo_file, 'wb') as outfile:
                    # Python 2 idioms: dict.keys() is indexable, and
                    # 512 / 16 is integer division
                    data = infile[infile.keys()[0]]
                    # patches are extracted so they can be cached
                    # doing non-overlapping 16x16 patches (1024 per image)
                    patches = data.reshape(512 / 16, 16, 512 / 16, 16, 10)
                    patches = patches.transpose(1, 3, 0, 2, 4)
                    patches = patches.reshape(16, 16, 1024 * 10)
                    logger.info("Caching to pickle file: %s", outfile)
                    pickle.dump(patches, outfile)
                    outfile.close()
            logger.info('loading: %s', name)
            # load existing data
            if 'IMAGES' in repo_file:
                indat = self.read_image_file(repo_file, 'float32')
                # flatten to 1D images
                indat = indat.reshape((256, 10240)).transpose()[train_idcs]
                self.inputs['train'] = indat
            else:
                logger.error('problems loading: %s', name)
        if hasattr(self, 'validation_pct'):
            self.split_set(
                self.validation_pct, from_set='train', to_set='validation')
        self.format()
    else:
        raise AttributeError('repo_path not specified in config')
Developer: neuroidss, project: neon, lines: 53, source file: sparsenet.py
Note: the neon.util.compat.range examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and similar source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors. Consult each project's license before using or redistributing the code, and do not repost without permission.