This article collects and summarizes typical usage examples of the numpy.split function in Python. If you have been wondering what exactly the split function does, how to call it, or what it looks like in real code, the curated examples here may help.
Below, 20 code examples of the split function are presented, sorted by popularity by default.
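Before the project examples, here is a minimal standalone sketch of the two calling conventions of numpy.split (an illustration written for this overview, not taken from any project below): an integer cuts the array into equal parts, while a list of indices cuts at explicit positions.

import numpy as np

a = np.arange(9)
# An integer divides the array into equal parts (the length must be divisible by it).
equal = np.split(a, 3)        # [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8])]
# A list of indices cuts at those positions: a[:2], a[2:5], a[5:].
uneven = np.split(a, [2, 5])  # [array([0, 1]), array([2, 3, 4]), array([5, 6, 7, 8])]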
Example 1: _read_tile
def _read_tile(self, filename):
with open(filename, "r") as tilefile:
        # order is reversed from the Fortran source because "in" is a reserved word in Python
        self.ni, self.nj, self.nk = np.fromfile(tilefile, dtype="int32",
                                                count=3, sep=" ")
        raw_data = np.genfromtxt(tilefile,
                                 dtype=("int32", "float64", "float64", "float64", "float64"),
                                 names=("idx", "a", "b", "vla", "vlb"))
self.ii, self.ij, self.ik = np.split(raw_data["idx"],
[self.ni,
self.ni+self.nj])
self.x1a, self.x2a, self.x3a = np.split(raw_data["a"],
[self.ni,
self.ni+self.nj])
self.x1b, self.x2b, self.x3b = np.split(raw_data["b"],
[self.ni,
self.ni+self.nj])
self.vl1a, self.vl2a, self.vl3a = np.split(raw_data["vla"],
[self.ni,
self.ni+self.nj])
self.vl1b, self.vl2b, self.vl3b = np.split(raw_data["vlb"],
[self.ni,
self.ni+self.nj])
return
Author: jschwab, Project: zeustools, Lines: 33, Source: grid.py
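The recurring split points [self.ni, self.ni+self.nj] above carve each flat column into its i, j, and k pieces. A small sketch of the same pattern with made-up sizes:

import numpy as np

ni, nj, nk = 3, 2, 4  # hypothetical grid dimensions
idx = np.arange(ni + nj + nk)
ii, ij, ik = np.split(idx, [ni, ni + nj])
assert len(ii) == ni and len(ij) == nj and len(ik) == nk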
Example 2: test_stratified_batches
def test_stratified_batches():
data = np.array([('a', -1), ('b', 0), ('c', 1), ('d', -1), ('e', -1)],
dtype=[('x', np.str_, 8), ('y', np.int32)])
assert list(data['x']) == ['a', 'b', 'c', 'd', 'e']
assert list(data['y']) == [-1, 0, 1, -1, -1]
batch_generator = training_batches(data, batch_size=3, n_labeled_per_batch=1)
first_ten_batches = list(islice(batch_generator, 10))
labeled_batch_portions = [batch[:1] for batch in first_ten_batches]
unlabeled_batch_portions = [batch[1:] for batch in first_ten_batches]
labeled_epochs = np.split(np.concatenate(labeled_batch_portions), 5)
unlabeled_epochs = np.split(np.concatenate(unlabeled_batch_portions), 4)
assert ([sorted(items['x'].tolist()) for items in labeled_epochs] ==
[['b', 'c']] * 5)
assert ([sorted(items['y'].tolist()) for items in labeled_epochs] ==
[[0, 1]] * 5)
assert ([sorted(items['x'].tolist()) for items in unlabeled_epochs] ==
[['a', 'b', 'c', 'd', 'e']] * 4)
assert ([sorted(items['y'].tolist()) for items in unlabeled_epochs] ==
[[-1, -1, -1, -1, -1]] * 4)
Author: ys2899, Project: mean-teacher, Lines: 25, Source: test_minibatching.py
Example 3: drop_samples
def drop_samples(game, prob):
"""Drop samples from a sample game
Samples are dropped independently with probability prob."""
sample_map = {}
for prof, pays in zip(np.split(game.profiles, game.sample_starts[1:]),
game.sample_payoffs):
num_profiles, _, num_samples = pays.shape
perm = rand.permutation(num_profiles)
prof = prof[perm]
pays = pays[perm]
new_samples, counts = np.unique(
rand.binomial(num_samples, prob, num_profiles), return_counts=True)
splits = counts[:-1].cumsum()
for num, prof_samp, pay_samp in zip(
new_samples, np.split(prof, splits), np.split(pays, splits)):
if num == 0:
continue
prof, pays = sample_map.setdefault(num, ([], []))
prof.append(prof_samp)
pays.append(pay_samp[..., :num])
if sample_map:
profiles = np.concatenate(list(itertools.chain.from_iterable(
x[0] for x in sample_map.values())), 0)
sample_payoffs = tuple(np.concatenate(x[1]) for x
in sample_map.values())
else: # No data
profiles = np.empty((0, game.num_role_strats), dtype=int)
sample_payoffs = []
return rsgame.samplegame_copy(game, profiles, sample_payoffs, False)
Author: yackj, Project: GameAnalysis, Lines: 32, Source: gamegen.py
Example 4: split_dataset
def split_dataset(dataset, N=4000):
perm = np.random.permutation(len(dataset['target']))
dataset['data'] = dataset['data'][perm]
dataset['target'] = dataset['target'][perm]
x_train, x_test = np.split(dataset['data'], [N])
y_train, y_test = np.split(dataset['target'], [N])
return x_train, y_train, x_test, y_test
Author: fukatani, Project: soinn, Lines: 7, Source: train_mnist.py
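A usage sketch with a synthetic dataset dict (shapes invented for illustration, not taken from the original train_mnist.py):

import numpy as np

dataset = {'data': np.random.rand(5000, 784).astype(np.float32),
           'target': np.random.randint(0, 10, 5000)}
x_train, y_train, x_test, y_test = split_dataset(dataset, N=4000)
print(x_train.shape, x_test.shape)  # (4000, 784) (1000, 784)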
Example 5: update_h
def update_h(sigma2, phi, y, mu, psi):
"""Updates the hidden variables using updated parameters.
This is an implementation of the equation:
.. math::
\\hat{h} = (\\sigma^2 I + \\sum_{n=1}^N \\Phi_n^T A^T A \\Phi_n)^{-1} \\sum_{n=1}^N \\Phi_n^T A^T (y_n - A \\mu_n - b)
"""
N = y.shape[0]
K = phi.shape[1]
A = psi.params[:2, :2]
b = psi.translation
partial_0 = 0
for phi_n in np.split(phi, N, axis=0):
partial_0 += phi_n.T @ A.T @ A @ phi_n
partial_1 = sigma2 * np.eye(K) + partial_0
partial_2 = np.zeros((K, 1))
for phi_n, y_n, mu_n in zip(np.split(phi, N, axis=0), y, mu.reshape(-1, 2)):
partial_2 += phi_n.T @ A.T @ (y_n - A @ mu_n - b).reshape(2, -1)
return np.linalg.inv(partial_1) @ partial_2
Author: jrdurrant, Project: vision, Lines: 25, Source: subspace_shape.py
Example 6: split_data
def split_data(X,Y,degree):
    Testing_error = []   # all the testing errors of the 10-fold cross-validation
    Training_error = []  # all the training errors of the 10-fold cross-validation
    X_sets = np.split(X, 10)
    Y_sets = np.split(Y, 10)
for i in range(len(X_sets)):
        X_test = np.vstack(X_sets[i])
        Y_test = np.vstack(Y_sets[i])
if i<len(X_sets)-1:
            X_train = np.vstack(X_sets[i+1:])
            Y_train = np.vstack(Y_sets[i+1:])
elif i==len(X_sets)-1 :
X_train = np.vstack(X_sets[:i])
Y_train = np.vstack(Y_sets[:i])
while i>0:
tempX = np.vstack(X_sets[i-1])
X_train = np.append(tempX,X_train)
tempY = np.vstack(Y_sets[i-1])
Y_train = np.append(tempY,Y_train)
i = i-1
X_train = np.vstack(X_train)
Y_train = np.vstack(Y_train)
Z_train,theta,Z_test = polynomial_withCV(X_train,Y_train,degree,X_test)
        Testing_error.append(mse(Z_test, theta, Y_test))
        Training_error.append(mse(Z_train, theta, Y_train))
return sum(Testing_error),sum(Training_error)
Author: ravitejachebrolu, Project: MachineLearning, Lines: 28, Source: singlefeature.py
Example 7: get_train_data
def get_train_data(self, label_types):
labeled_images = self.get_labeled_images()
x_train_all = np.asarray(map(
lambda labeled_image_file: labeled_image_file.get_image(),
labeled_images
))
y_train_all = np.asarray(map(
lambda labeled_image_file: label_to_output(labeled_image_file.get_label(), label_types),
labeled_images
))
length = len(labeled_images)
    # randomly shuffle the original data
indexes = np.random.permutation(length)
x_train_all_rand = x_train_all[indexes]
y_train_all_rand = y_train_all[indexes]
    # subtract the mean image
mean = self.get_mean_image()
if mean is not None:
x_train_all_rand -= mean
    # normalize to [0, 1]
    x_train_all_rand /= 255
    # hold out 1/5 of the data for testing
    data_size = length * 4 // 5
x_train, x_test = np.split(x_train_all_rand, [data_size])
y_train, y_test = np.split(y_train_all_rand, [data_size])
return x_train, x_test, y_train, y_test
Author: syundo0730, Project: deresta-cnn, Lines: 30, Source: training_data.py
Example 8: split_x
def split_x(x, split_pos):
    # NOTE: does not support multiple sentence tensors
    # handles sequence input, non-sequence input, and no non-sequence input
    # sequence input:
if type(x) is not list:
        x = [x]
if len(x) == 1:
# sec1, sec2, sec3,...
# sent1, sent2, sent5
        x01, x02 = tuple(np.split(x[0], [split_pos]))
        cond_list = [x02 >= 0, x02 < 0]
        offset = x02[0][0]
        choice_list = [x02 - offset, x02]
        x02 = np.select(cond_list, choice_list)
return ([x01],[x02])
# doc1 doc2 doc3
# sec1 sec2 ...
# sec1, sec2, ...
# sent1, sent2, ...
x01, x02 = tuple(np.split(x[0], [split_pos]))
offset = x02[0][0]
x1, x2 = split_x(x[1:], offset)
cond_list = [x02 >= 0, x02 < 0]
choice_list = [x02 - offset, x02]
x02 = np.select(cond_list, choice_list)
return ([x01] + x1, [x02]+x2)
Author: lxh5147, Project: cacdi_attention_model, Lines: 30, Source: attention_cacdi_exp_with_fuel.py
Example 9: generate_svm
def generate_svm():
digits, labels = load_digits(DIGITS_FN)
print('preprocessing...')
# shuffle digits
rand = np.random.RandomState(321)
shuffle = rand.permutation(len(digits))
digits, labels = digits[shuffle], labels[shuffle]
digits2 = list(map(deskew, digits))
samples = preprocess_hog(digits2)
train_n = int(0.9*len(samples))
cv2.imshow('test set', mosaic(25, digits[train_n:]))
digits_train, digits_test = np.split(digits2, [train_n])
samples_train, samples_test = np.split(samples, [train_n])
labels_train, labels_test = np.split(labels, [train_n])
print('training SVM...')
model = SVM(C=2.67, gamma=5.383)
model.train(samples_train, labels_train)
vis = evaluate_model(model, digits_test, samples_test, labels_test)
print('saving SVM as "digits_svm.dat"...')
    cv2.waitKey(0)
    return model
Author: shawnyanwang, Project: PIL_examples, Lines: 27, Source: digits.py
Example 10: k_fold_cross_validation_sets
def k_fold_cross_validation_sets(X, y, k, shuffle=True):
if shuffle:
X, y = shuffle_data(X, y)
n_samples = len(y)
left_overs = {}
n_left_overs = (n_samples % k)
if n_left_overs != 0:
left_overs["X"] = X[-n_left_overs:]
left_overs["y"] = y[-n_left_overs:]
X = X[:-n_left_overs]
y = y[:-n_left_overs]
X_split = np.split(X, k)
y_split = np.split(y, k)
sets = []
for i in range(k):
X_test, y_test = X_split[i], y_split[i]
X_train = np.concatenate(X_split[:i] + X_split[i + 1:], axis=0)
y_train = np.concatenate(y_split[:i] + y_split[i + 1:], axis=0)
sets.append([X_train, X_test, y_train, y_test])
    # Add left-over samples to the last set as extra training samples
    if n_left_overs != 0:
        # np.append returns a new array, so the result must be stored back
        sets[-1][0] = np.append(sets[-1][0], left_overs["X"], axis=0)
        sets[-1][2] = np.append(sets[-1][2], left_overs["y"], axis=0)
    # dtype=object because the per-fold arrays differ in shape
    return np.array(sets, dtype=object)
Author: NiranjanAgaram, Project: ML-From-Scratch, Lines: 28, Source: data_manipulation.py
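A usage sketch with invented data; shuffle=False avoids the shuffle_data helper, and the sample count is divisible by k so the leftover branch is not hit:

import numpy as np

X = np.arange(18).reshape(9, 2)
y = np.arange(9)
sets = k_fold_cross_validation_sets(X, y, k=3, shuffle=False)
for X_train, X_test, y_train, y_test in sets:
    print(X_train.shape, X_test.shape)  # (6, 2) (3, 2) for each of the 3 folds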
Example 11: to_json
def to_json(self):
base = super().to_json()
base['offsets'] = self.payoff_to_json(self._offset)
base['coefs'] = self.payoff_to_json(self._coefs)
lengths = {}
for role, strats, lens in zip(
self.role_names, self.strat_names,
np.split(self._lengths, self.role_starts[1:])):
lengths[role] = {s: self.payoff_to_json(l)
for s, l in zip(strats, lens)}
base['lengths'] = lengths
profs = {}
for role, strats, data in zip(
self.role_names, self.strat_names,
np.split(np.split(self._profiles, self._size_starts[1:]),
self.role_starts[1:])):
profs[role] = {strat: [self.profile_to_json(p) for p in dat]
for strat, dat in zip(strats, data)}
base['profiles'] = profs
alphas = {}
for role, strats, alphs in zip(
self.role_names, self.strat_names,
np.split(np.split(self._alpha, self._size_starts[1:]),
self.role_starts[1:])):
alphas[role] = {s: a.tolist() for s, a in zip(strats, alphs)}
base['alphas'] = alphas
base['type'] = 'rbf.1'
return base
Author: egtaonline, Project: GameAnalysis, Lines: 32, Source: learning.py
Example 12: update_stipples
def update_stipples(self, cells):
""" Updates stipple locations from an image
cells should be an image of the same size as self.img
with pixel values representing which Voronoi cell that
pixel falls into
"""
indices = np.argsort(cells.flat)
_, boundaries = np.unique(cells.flat[indices], return_index=True)
gxs = np.split(self.gx.flat[indices], boundaries)[1:]
gys = np.split(self.gy.flat[indices], boundaries)[1:]
gws = np.split(1 - self.img.flat[indices], boundaries)[1:]
w = self.img.shape[1] / 2.0
h = self.img.shape[0] / 2.0
for i, (gx, gy, gw) in enumerate(zip(gxs, gys, gws)):
weight = np.sum(gw)
if weight > 0:
x = np.sum(gx * gw) / weight
y = np.sum(gy * gw) / weight
self.stipples[i,:] = [(x - w) / w, (y - h) / h]
else:
self.stipples[i,:] = np.random.uniform(-1, 1, size=2)
Author: BenFrantzDale, Project: OpenFL, Lines: 25, Source: stippler.py
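The argsort / unique(return_index=True) / split idiom above groups flat pixel values by their cell label in a single pass. A minimal standalone illustration:

import numpy as np

cells = np.array([2, 0, 1, 0, 2, 1])
values = np.array([10., 20., 30., 40., 50., 60.])
order = np.argsort(cells)
_, boundaries = np.unique(cells[order], return_index=True)
groups = np.split(values[order], boundaries)[1:]  # drop the empty leading piece
print([g.tolist() for g in groups])  # [[20.0, 40.0], [30.0, 60.0], [10.0, 50.0]]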
Example 13: make_predictions
def make_predictions(net, data, labels, num_classes):
data = np.require(data, requirements='C')
labels = np.require(labels, requirements='C')
preds = np.zeros((data.shape[1], num_classes), dtype=np.single)
softmax_idx = net.get_layer_idx('probs', check_type='softmax')
t0 = time.time()
net.libmodel.startFeatureWriter(
[data, labels, preds], softmax_idx)
net.finish_batch()
print "Predicted %s cases in %.2f seconds." % (
labels.shape[1], time.time() - t0)
if net.multiview_test:
# We have to deal with num_samples * num_views
# predictions.
num_views = net.test_data_provider.num_views
num_samples = labels.shape[1] / num_views
split_sections = range(
num_samples, num_samples * num_views, num_samples)
preds = np.split(preds, split_sections, axis=0)
labels = np.split(labels, split_sections, axis=1)
preds = reduce(np.add, preds)
labels = labels[0]
return preds, labels
Author: invisibleroads, Project: noccn, Lines: 27, Source: predict.py
Example 14: train
def train(self, trainfile_name):
train_X, train_Y, num_classes = self.make_data(trainfile_name)
accuracies = []
fscores = []
if self.cv:
num_points = train_X.shape[0]
fol_len = num_points / self.folds
rem = num_points % self.folds
X_folds = numpy.split(train_X, self.folds) if rem == 0 else numpy.split(train_X[:-rem], self.folds)
Y_folds = numpy.split(train_Y, self.folds) if rem == 0 else numpy.split(train_Y[:-rem], self.folds)
for i in range(self.folds):
train_folds_X = []
train_folds_Y = []
for j in range(self.folds):
if i != j:
train_folds_X.append(X_folds[j])
train_folds_Y.append(Y_folds[j])
train_fold_X = numpy.concatenate(train_folds_X)
train_fold_Y = numpy.concatenate(train_folds_Y)
classifier = self.fit_model(train_fold_X, train_fold_Y, num_classes)
predictions = self.classify(classifier, X_folds[i])
accuracy, weighted_fscore, _ = self.evaluate(Y_folds[i], predictions)
accuracies.append(accuracy)
fscores.append(weighted_fscore)
accuracies = numpy.asarray(accuracies)
fscores = numpy.asarray(fscores)
print >>sys.stderr, "Accuracies:", accuracies
print >>sys.stderr, "Average: %0.4f (+/- %0.4f)"%(accuracies.mean(), accuracies.std() * 2)
print >>sys.stderr, "Fscores:", fscores
print >>sys.stderr, "Average: %0.4f (+/- %0.4f)"%(fscores.mean(), fscores.std() * 2)
self.classifier = self.fit_model(train_X, train_Y, num_classes)
        cPickle.dump(self.classifier, open(self.trained_model_name, "wb"))
#pickle.dump(tagset, open(self.stored_tagset, "wb"))
print >>sys.stderr, "Done"
Author: BMKEG, Project: exp-parser, Lines: 34, Source: nn_classifier.py
Example 15: conf2yap
def conf2yap(conf_fname, yap_filename):
print("Yap file : ", yap_filename)
positions, radii, meta = clff.read_conf_file(conf_fname)
positions[:, 0] -= float(meta['lx'])/2
positions[:, 1] -= float(meta['ly'])/2
positions[:, 2] -= float(meta['lz'])/2
if 'np_fixed' in meta:
# for conf with fixed particles
split_line = len(positions) - int(meta['np_fixed'])
pos_mobile, pos_fixed = np.split(positions, [split_line])
rad_mobile, rad_fixed = np.split(radii, [split_line])
yap_out = pyp.layer_switch(3)
yap_out = pyp.add_color_switch(yap_out, 3)
yap_out = np.row_stack((yap_out,
particles_yaparray(pos_mobile, rad_mobile)))
yap_out = pyp.add_layer_switch(yap_out, 4)
yap_out = pyp.add_color_switch(yap_out, 4)
yap_out = np.row_stack((yap_out,
particles_yaparray(pos_fixed, rad_fixed)))
else:
yap_out = pyp.layer_switch(3)
yap_out = pyp.add_color_switch(yap_out, 3)
yap_out = np.row_stack((yap_out,
particles_yaparray(positions, radii)))
pyp.savetxt(yap_filename, yap_out)
Author: rmari, Project: LF_DEM, Lines: 27, Source: yapgen.py
Example 16: gradient_p
def gradient_p(X,y,theta,alpha,m,numIterations):
errors1_x1 = 0
errors1_x2 = 0
errors2_x1 = 0
errors2_x2 = 0
x1,x2 = np.split(X,2)
y1,y2 = np.split(y,2)
for i in range(0,numIterations):
h1 = x1.dot(theta)
errors1_x1 = (h1 - y1) * x1[:, 0]
errors1_x2 = (h1 - y1) * x1[:, 1]
h2 = x2.dot(theta)
errors2_x1 = (h2 - y2) * x2[:, 0]
errors2_x2 = (h2 - y2) * x2[:, 1]
theta[0]=theta[0]-(alpha/m)*(errors1_x1.sum()+errors2_x1.sum())
theta[1]=theta[1]-(alpha/m)*(errors1_x2.sum()+errors2_x2.sum())
return theta
Author: arthurbatista, Project: ml, Lines: 25, Source: linreg.py
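A quick synthetic run (data and hyperparameters invented for illustration): with a bias column and the linear target y = 3 + 2x, theta should end up close to [3, 2].

import numpy as np

X = np.column_stack([np.ones(8), np.arange(8.0)])  # bias term plus one feature
y = 3 + 2 * np.arange(8.0)
theta = gradient_p(X, y, np.zeros(2), alpha=0.01, m=len(y), numIterations=5000)
print(theta)  # approximately [3. 2.]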
Example 17: make_batch
def make_batch(self):
# make datasets
x_dataset, y_dataset = ps.make_sente_datasets(1,100)
#print(x_dataset[110])
#print(y_dataset[110])
x_dataset = np.asarray(x_dataset)
y_dataset = np.asarray(y_dataset)
nb_data = x_dataset.shape[0]
    # np.split requires integer indices
    x_train, x_test = np.split(x_dataset, [int(nb_data * 0.9)])
    y_train, y_test = np.split(y_dataset, [int(nb_data * 0.9)])
#x_train = x_train.reshape(x_train.shape[0], 1, 15, 9)
#x_test = x_test.reshape(x_test.shape[0], 1, 15, 9)
x_train = x_train.reshape(x_train.shape[0], 1, 11, 9)
x_test = x_test.reshape(x_test.shape[0], 1, 11, 9)
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
return x_train, y_train, x_test, y_test
Author: Tachibana1993, Project: TACHIBANA, Lines: 25, Source: CNNpolicy.py
Example 18: main
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints',
help='checkpoint directory')
parser.add_argument('--save_every', type=int, default=1000,
help='save frequency')
args = parser.parse_args()
# Read the training data
inputFile = open("data/input.txt","rU")
trainingData = inputFile.read()
# Count vocab
counter = collections.Counter(trainingData)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
chars, _ = list(zip(*count_pairs))
vocabSize = len(chars)
print vocabSize
vocab = dict(zip(chars, range(len(chars))))
inputTensor = np.array(map(vocab.get, trainingData))
numBatches = inputTensor.size / (batchSize * numSteps)
print numBatches
inputTensor = inputTensor[:numBatches * batchSize * numSteps]
inData = inputTensor
targetData = np.copy(inputTensor)
targetData[:-1] = inData[1:]
targetData[-1] = inData[0]
inDataBatches = np.split(inData.reshape(batchSize, -1), numBatches, 1)
targetDataBatches = np.split(targetData.reshape(batchSize, -1), numBatches, 1)
lstmTrain(args)
Author: anujsampat, Project: CS767-MachineLearning, Lines: 34, Source: train.py
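The reshape-then-split step at the end produces time-major batches: each piece is a (batchSize, numSteps) slab, and consecutive pieces continue each row in time. A toy trace of the pattern:

import numpy as np

batchSize, numSteps = 2, 3
data = np.arange(12)  # already truncated to a multiple of batchSize * numSteps
numBatches = data.size // (batchSize * numSteps)
batches = np.split(data.reshape(batchSize, -1), numBatches, 1)
print([b.tolist() for b in batches])
# [[[0, 1, 2], [6, 7, 8]], [[3, 4, 5], [9, 10, 11]]]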
Example 19: spiralroll
def spiralroll(B, orient=1):
''' undo spiral flatten '''
k = int(np.sqrt(B.size))
if k**2-B.size != 0:
print('ERR: unable to form a square 2D array!')
else:
C = np.copy(B)
C = C[::-1]
if k%2:
A, C = np.split(C, [1])
A = A.reshape(1,1)
start = 2
else:
A, C = np.split(C, [4])
A = A[::-1].reshape(2,2)
A[-1] = A[-1, ::-1]
start = 3
for ix in range(start, k, 2):
A = np.pad(A, ((1, 1), (1, 1)), mode='constant')
C1, C2, C3, C4, C = np.split(C, [ix, ix*2, ix*3, ix*4])
A[1:, 0] = C1
A[-1, 1:] = C2
A[-2::-1, -1] = C3
A[0, -2::-1] = C4
        if orient == 0:
A = A.T
return A
Author: harrispirie, Project: stmpy, Lines: 27, Source: driftcorr.py
Example 20: blocksort2D
def blocksort2D(sfield, ofield, db):
"""
Takes two nx x ny fields and divides them into blocks - the new fields have
dimensions nx' x ny' where nx' = nx/db, ny' = ny/db
db is half the block width in number of grid cells.
the fields are averaged over the block area (db points) and then
ofield is sorted according to sfield (spatial structure is lost)
the returned value is a dictionary with sfield as the key and ofield as the value
assumes nx = ny = even integer.
db must be a multiple of nx
"""
nx = sfield.shape[0]
ny = sfield.shape[1]
    nxblock = nx // db
    nyblock = ny // db
#tave_field = np.mean(field[ti-ntave:ti,:,:])
#tave_field = np.squeeze(tave_field)
#split up field column-wise, take average row-wise. then split up resulting field row-wise, and take average column-wise.
blocksfield = np.average(np.split(np.average(np.split(sfield, nxblock, axis=1), axis=-1), nyblock, axis=1), axis=-1)
blockofield = np.average(np.split(np.average(np.split(ofield, nxblock, axis=1), axis=-1), nyblock, axis=1), axis=-1)
blocksfield = blocksfield.flatten()
blockofield = blockofield.flatten()
d = dict(zip(blocksfield, blockofield))
od = collections.OrderedDict(sorted(d.items()))
return od
Author: cpatrizio88, Project: SAM_init_plot, Lines: 35, Source: block_fns.py
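The nested split/average expression is the core trick: the inner split + average collapses each field into per-block means along one axis, and the outer pair repeats this along the other axis, leaving an (nxblock, nyblock) array of block means. A small check of the pattern for a 4x4 field with db = 2:

import numpy as np

f = np.arange(16.0).reshape(4, 4)
nb = 4 // 2  # two blocks per axis
blocks = np.average(np.split(np.average(np.split(f, nb, axis=1), axis=-1),
                             nb, axis=1), axis=-1)
print(blocks)  # [[ 2.5  4.5] [10.5 12.5]]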
Note: The numpy.split examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.