This page collects typical usage examples of Python's numpy.hsplit function. If you have been wondering what exactly hsplit does and how to use it in practice, the hand-picked code samples below should help.
Twenty code examples of the hsplit function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
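As a quick orientation before the examples, here is a minimal, self-contained sketch (array contents are made up): an integer second argument splits the array column-wise into that many equal-width pieces, while a list of indices splits before each listed column; both forms return a list of subarray views.

import numpy as np

a = np.arange(12.0).reshape(3, 4)
left, right = np.hsplit(a, 2)    # integer: two equal-width halves, each (3, 2)
first, rest = np.hsplit(a, [1])  # index list: split before column 1 -> (3, 1) and (3, 3)
print(left.shape, right.shape, first.shape, rest.shape)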
Example 1: otsuthresh
def otsuthresh(hist):
    # http://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html
    # find the normalized histogram and its cumulative distribution function
    hist_norm = old_div(hist.astype("float").ravel(), hist.max())
    Q = hist_norm.cumsum()
    bins = np.arange(len(hist_norm))
    fn_min = np.inf
    thresh = -1
    for i in range(1, len(hist_norm)):
        p1, p2 = np.hsplit(hist_norm, [i])  # probabilities
        q1, q2 = Q[i], Q[len(hist_norm) - 1] - Q[i]  # cumulative sums of the classes
        b1, b2 = np.hsplit(bins, [i])  # weights
        # finding means and variances
        m1, m2 = old_div(np.sum(p1 * b1), q1), old_div(np.sum(p2 * b2), q2)
        v1, v2 = old_div(np.sum(((b1 - m1) ** 2) * p1), q1), old_div(np.sum(((b2 - m2) ** 2) * p2), q2)
        # calculates the minimization function
        fn = v1 * q1 + v2 * q2
        if fn < fn_min:
            fn_min = fn
            thresh = i
    return thresh
Author: davtoh | Project: RRtools | Lines: 27 | Source: hypothesis2.py
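The hsplit calls are what partition the histogram into the two candidate classes at each threshold i. A minimal sketch of that step, with a made-up normalized histogram:

import numpy as np

hist_norm = np.array([0.10, 0.30, 0.20, 0.25, 0.15])  # hypothetical normalized histogram
bins = np.arange(len(hist_norm))
i = 2                                # candidate threshold
p1, p2 = np.hsplit(hist_norm, [i])   # class probabilities for bins [0, i) and [i, end)
b1, b2 = np.hsplit(bins, [i])        # matching bin indices
print(p1, b1)  # [0.1 0.3] [0 1]
print(p2, b2)  # [0.2  0.25 0.15] [2 3 4]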
Example 2: zero_directions
def zero_directions(zero_vec, tf, e=0.0001):
    """
    Parameters: zero_vec => a vector containing all the transmission zeros of a system
                tf       => the transfer function G(s) of the system
                e        => this avoids possible divide-by-zero errors in G(z)
    Returns:    zero_dir => zero directions in the form:
                            (zero, input direction, output direction)
    Notes: this method will give dubious answers if the function G has pole-zero cancellation.
    """
    zero_dir = []
    for z in zero_vec:
        num, den = cn.tfdata(tf)
        rows, cols = np.shape(num)
        G = np.empty(shape=(rows, cols))
        for x in range(rows):
            for y in range(cols):
                top = np.polyval(num[x][y], z)
                bot = np.polyval(den[x][y], z)
                if bot == 0.0:
                    bot = e
                entry = float(top) / bot
                G[x][y] = entry
        U, S, V = np.linalg.svd(G)
        V = np.transpose(np.conjugate(V))
        u_rows, u_cols = np.shape(U)
        v_rows, v_cols = np.shape(V)
        yz = np.hsplit(U, u_cols)[-1]
        uz = np.hsplit(V, v_cols)[-1]
        zero_dir.append((z, uz, yz))
    return zero_dir
Author: Lindeski | Project: Skogestad-Python | Lines: 34 | Source: MIMO_Tools.py
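Here np.hsplit(U, u_cols) splits U into one single-column piece per column and [-1] keeps the last, i.e. the direction belonging to the smallest singular value. A small sketch, assuming a square stand-in matrix, showing it is the same as a plain column slice:

import numpy as np

U = np.arange(9.0).reshape(3, 3)            # stand-in for an SVD factor
last_col = np.hsplit(U, U.shape[1])[-1]     # one piece per column, keep the last
assert np.array_equal(last_col, U[:, -1:])  # equivalent slice, shape (3, 1)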
Example 3: _projTGraph
def _projTGraph(self, g):
    sentry = TH1AddDirSentry()
    y = numpy.ndarray((g.GetN(),), dtype=numpy.double, buffer=g.GetY())
    ysplit = numpy.hsplit(y, self._nfold)
    p_y = numpy.sum(ysplit, axis=self._pax)
    eyh = numpy.ndarray((g.GetN(),), dtype=numpy.double, buffer=g.GetEYhigh())
    eyh2_split = numpy.hsplit(eyh**2, self._nfold)
    p_eyh = numpy.sqrt(numpy.sum(eyh2_split, axis=self._pax))
    eyl = numpy.ndarray((g.GetN(),), dtype=numpy.double, buffer=g.GetEYlow())
    eyl2_split = numpy.hsplit(eyl**2, self._nfold)
    p_eyl = numpy.sqrt(numpy.sum(eyl2_split, axis=self._pax))
    x = array.array('d', [0] * self._nbins)
    exh = array.array('d', [0] * self._nbins)
    exl = array.array('d', [0] * self._nbins)
    for i in xrange(self._nbins):
        x[i] = (self._axdef[i + 1] + self._axdef[i]) / 2.
        exh[i] = exl[i] = (self._axdef[i + 1] - self._axdef[i]) / 2.
    p_g = ROOT.TGraphAsymmErrors(self._nbins, x, p_y, exl, exh, p_eyl, p_eyh)
    p_g.SetNameTitle('%s_proj_%s' % (g.GetName(), self._proj), '%s proj %s' % (g.GetTitle(), self._proj))
    ROOT.TAttLine.Copy(g, p_g)
    ROOT.TAttFill.Copy(g, p_g)
    ROOT.TAttMarker.Copy(g, p_g)
    return p_g
Author: alessandrothea | Project: ginger | Lines: 30 | Source: testcoroner.py
Example 4: compute_candidate_connections
def compute_candidate_connections(self, paf, cand_a, cand_b, img_len, params):
    candidate_connections = []
    for joint_a in cand_a:
        for joint_b in cand_b:  # each joint is an (x, y) coordinate
            vector = joint_b[:2] - joint_a[:2]
            norm = np.linalg.norm(vector)
            if norm == 0:
                continue
            ys = np.linspace(joint_a[1], joint_b[1], num=params['n_integ_points'])
            xs = np.linspace(joint_a[0], joint_b[0], num=params['n_integ_points'])
            integ_points = np.stack([ys, xs]).T.round().astype('i')  # points on the segment joining joint_a and joint_b: [[y1, x1], [y2, x2], ...]
            paf_in_edge = np.hstack([paf[0][np.hsplit(integ_points, 2)], paf[1][np.hsplit(integ_points, 2)]])
            unit_vector = vector / norm
            inner_products = np.dot(paf_in_edge, unit_vector)
            integ_value = inner_products.sum() / len(inner_products)
            # penalize the score when the limb vector is longer than the reference length
            integ_value_with_dist_prior = integ_value + min(params['limb_length_ratio'] * img_len / norm - params['length_penalty_value'], 0)
            n_valid_points = sum(inner_products > params['inner_product_thresh'])
            if n_valid_points > params['n_integ_points_thresh'] and integ_value_with_dist_prior > 0:
                candidate_connections.append([int(joint_a[3]), int(joint_b[3]), integ_value_with_dist_prior])
    candidate_connections = sorted(candidate_connections, key=lambda x: x[2], reverse=True)
    return candidate_connections
Author: kaustubhharapanahalli | Project: Chainer_Realtime_Multi-Person_Pose_Estimation | Lines: 25 | Source: pose_detector.py
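The np.hsplit(integ_points, 2) call turns the (N, 2) array of (y, x) sample points into two (N, 1) index columns, which the snippet then uses as fancy indices into each PAF channel. Indexing with the returned list directly, as above, relies on an old NumPy behavior that treated a list of index arrays as a tuple; a sketch using the explicit two-array form, with a made-up PAF channel:

import numpy as np

paf_channel = np.arange(20.0).reshape(4, 5)        # hypothetical PAF channel, H x W
integ_points = np.array([[0, 1], [2, 3], [3, 4]])  # (y, x) sample points
rows, cols = np.hsplit(integ_points, 2)            # two (N, 1) index columns
print(paf_channel[rows, cols].ravel())             # [ 1. 13. 19.]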
Example 5: ostu_algorithm
def ostu_algorithm(img, blursize=3):
    blur = cv2.GaussianBlur(img, (blursize, blursize), 0)
    hist = cv2.calcHist([blur], [0], None, [256], [0, 256])
    hist_norm = hist.ravel() / hist.max()
    Q = hist_norm.cumsum()
    bins = np.arange(256)
    fn_min = np.inf
    thresh = -1
    for i in xrange(1, 256):
        p1, p2 = np.hsplit(hist_norm, [i])  # probabilities
        q1, q2 = Q[i], Q[255] - Q[i]  # cumulative sums of the classes
        b1, b2 = np.hsplit(bins, [i])  # weights
        if q1 == 0:
            continue
        if q2 == 0:
            continue
        m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
        v1, v2 = np.sum(((b1 - m1) ** 2) * p1) / q1, np.sum(((b2 - m2) ** 2) * p2) / q2
        fn = v1 * q1 + v2 * q2
        if fn < fn_min:
            fn_min = fn
            thresh = i
    _, otsu = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return otsu
Author: sunary | Project: image-process | Lines: 27 | Source: histogram_equalization.py
Example 6: plotDensityPCA
def plotDensityPCA(args, pcaMatrix):
    outName = args.outputFileName
    outName = outName + '_PCAdensity_' + str(NBINS)
    pcaXCoord = numpy.hsplit(pcaMatrix, [1])[0]
    pcaXCoord = pcaXCoord.reshape(1, len(pcaXCoord))[0]
    pcaXCoord = pcaXCoord.real
    pcaYCoord = numpy.hsplit(pcaMatrix, [1])[1]
    pcaYCoord = pcaYCoord.reshape(1, len(pcaYCoord))[0]
    pcaYCoord = pcaYCoord.real
    #fig2.set_title('Density plot of main PCA components')
    H, edgeX, edgeY = numpy.histogram2d(pcaXCoord, pcaYCoord, bins=NBINS)
    H = numpy.rot90(H)
    H = numpy.flipud(H)
    # mask zeroes
    maskedH = numpy.ma.masked_where(H == 0, H)
    # Plot the histogram
    fig2 = matplotlib.pyplot.figure()
    plt.pcolormesh(edgeX, edgeY, maskedH)
    plt.xlabel('Principal Component 1')
    plt.ylabel('Principal Component 2')
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('Counts')
    fig2.savefig(outName, format='png')
    #show()
    return fig2
Author: jueshengong | Project: psytrans | Lines: 25 | Source: plotPCA.py
Example 7: fit
def fit(self, features, classes):
    # TODO: implement the above algorithm to build a random forest of decision trees
    self.trees = []  # list of root nodes
    esr = self.example_subsample_rate
    asr = self.attr_subsample_rate
    means = np.mean(features, axis=0)
    for i in xrange(self.num_trees):
        # a) Subsample the examples provided (with replacement) in accordance with an example subsampling rate.
        features_np = np.asarray(features)
        classes_np = np.asarray([classes]).transpose()
        merged = np.concatenate((features_np, classes_np), axis=1)
        merged_rand = np.random.permutation(merged)
        merged_rand_esr = merged_rand[0:int(esr * len(features_np))]
        split_rand = np.hsplit(merged_rand_esr, np.array([4, 6]))
        features_split = split_rand[0]
        classes_split = split_rand[1]
        # b) From the sample in a), choose attributes at random to learn on, in accordance with an attribute subsampling rate.
        num_attrs = int(asr * features.shape[1])
        rand_attrs = np.random.randint(0, features.shape[1], num_attrs)  # fixed: the sample count belongs in the third (size) argument
        for i in xrange(len(rand_attrs)):
            attr_features_split = np.hsplit(features_split, np.array([rand_attrs[i], 6]))[0]  # needs rewriting
        # c) Fit a decision tree to the subsample of data we've chosen (to a certain depth)
        leaf0 = DecisionNode(None, None, None, class_label=0)
        leaf1 = DecisionNode(None, None, None, class_label=1)
        nodeA1 = DecisionNode(leaf1, leaf0, lambda x: 1 if x[0] < means[0] else 0)
        root_node = nodeA1
        self.trees.append(root_node)
Author: cheehieu | Project: artificial-intelligence | Lines: 33 | Source: decision_tree.py
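Step a) above concatenates the class labels onto the features, shuffles, and then uses np.hsplit to pull the two parts back apart. A self-contained sketch of that round trip (array contents are made up):

import numpy as np

features = np.arange(12.0).reshape(3, 4)  # 3 samples, 4 attributes
classes = np.array([0.0, 1.0, 0.0])
merged = np.concatenate((features, classes[:, None]), axis=1)  # shape (3, 5)
X, y = np.hsplit(merged, [4])             # columns [:4] and [4:]
assert np.array_equal(X, features)
assert np.array_equal(y.ravel(), classes)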
Example 8: calculate
def calculate(db):
    rep = "1"
    i = 0
    while (rep == "y" or rep == "Y" or rep == "1"):
        search = str(raw_input("Substance: "))
        k = i
        with open(db, 'r') as dbfile:
            for line in dbfile:
                if (search == line.split()[0]):
                    mass = float(input("Mass (g): "))
                    prop = mass / 100 * numpy.array([line.split()[1:]], dtype=float)
                    i = i + 1
                    if (rep == "1"):
                        propall = prop
                    else:
                        propall = numpy.vstack([propall, prop])
        if (i == k):
            print ("Substance " + search + " not found!")
        rep = str(raw_input("Repeat [y/n]: "))
    if (i != 0):
        prot = sum(numpy.hsplit(propall, (0, 1))[1])
        lip = sum(numpy.hsplit(propall, (1, 2))[1])
        carb = sum(numpy.hsplit(propall, (2, 3))[1])
        ccal = sum(numpy.hsplit(propall, (3, 4))[1])
        glyc = sum(numpy.hsplit(propall, (4, 4))[2])
        print ("\nProteins: " + str(round(prot, 2)) + "\nLipids: " + str(round(lip, 2)) + "\nCarbohydrates: " + str(round(carb, 2)) + "\nccal: " + str(round(ccal, 2)) + "\nGlycemic index: " + str(round(glyc, 2)))
Author: arcan1s | Project: food_gui | Lines: 30 | Source: food.py
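Each nutrient total above is a column sum obtained by hsplit-ing propall at a pair of indices and keeping the middle piece. A sketch with made-up rows, showing this equals a plain column slice:

import numpy as np

propall = np.array([[4.0, 1.0, 20.0, 105.0, 35.0],
                    [2.5, 0.5, 12.0,  60.0, 40.0]])  # hypothetical per-substance rows
protein = np.hsplit(propall, (0, 1))[1]              # middle piece: column 0, shape (2, 1)
assert np.array_equal(protein, propall[:, 0:1])
print(protein.sum())  # 6.5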
Example 9: run
def run(name, source, quick=False):
    print time.asctime(time.localtime()), "Filling BDT Branches"
    branch_names = joblib.load("pickle/variables.pkl")
    if quick == True:
        signal = joblib.load('pickle/all_signalq.pkl')
        clf = joblib.load("pickle/" + name + "quick.pkl")
    else:
        signal = joblib.load('pickle/all_signal.pkl')
        clf = joblib.load("pickle/" + name + ".pkl")
    # predict and write the probability of each MC event being signal
    bdt_MC_predicted = clf.predict_proba(signal)
    bdt_MC_predicted.dtype = [('GradBoost_prob', np.float64)]
    array2root((np.hsplit(bdt_MC_predicted, 2)[1]), "/net/storage03/data/users/dlafferty/NTuples/SignalMC/2012/combined/Bs2phiphi_MC_2012_combined_corrected_TupleA_BDT.root", "DecayTree")
    # predict and write the probability of every data event being signal
    all_data = root2array("/net/storage03/data/users/dlafferty/NTuples/data/2012/combined/Bs2phiphi_data_2012_corrected_TupleA_BDT.root", "DecayTree", branch_names)
    all_data = rec2array(all_data)
    bdt_data_predicted = clf.predict_proba(all_data)
    bdt_data_predicted.dtype = [('GradBoost_prob', np.float64)]
    array2root((np.hsplit(bdt_data_predicted, 2)[1]), "/net/storage03/data/users/dlafferty/NTuples/data/2012/combined/Bs2phiphi_data_2012_corrected_TupleA_BDT.root", "DecayTree")
    print time.asctime(time.localtime()), "Branches Filled!"
Author: david0811 | Project: RISE | Lines: 26 | Source: write.py
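clf.predict_proba returns one column per class, and np.hsplit(..., 2)[1] extracts the second (signal) column before it is written to the ROOT tree. A sketch with made-up probabilities:

import numpy as np

proba = np.array([[0.9, 0.1],
                  [0.2, 0.8],
                  [0.4, 0.6]])            # stand-in for clf.predict_proba output
background, signal = np.hsplit(proba, 2)  # one (N, 1) column per class
print(signal.ravel())                     # [0.1 0.8 0.6]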
Example 10: main
def main():
    global args
    args = parse()
    # Run through descriptive stats for file 1
    prefix = args.files[0].split(".")[0]
    fn = prefix + ".txt"
    outfile = prefix + "_results.txt"
    data = np.genfromtxt(fn)
    # ligindex = [4,5,6,7,8,27,28,29,30,31,32,33,34]  # Can be used to analyse a subset of ligands
    # data = data[ligindex]
    # print data
    data = np.hsplit(data, [1])  # Split expt values apart
    expt = data[0]
    comput = data[1]
    anova = np.copy(comput)  # Beginning of array for ANOVA
    origdata = np.hstack((expt, np.reshape(np.mean(comput, axis=1), (47, 1))))  # Reshape then stack; now each ligand has an entry of length 2
    write_data(outfile, origdata)
    # Now do stats and t-test for the others
    for argfile in args.files[1:]:
        prefix = argfile.split(".")[0]
        fn = prefix + ".txt"
        outfile = prefix + "_results_vs_file1.txt"
        data = np.genfromtxt(fn)
        # data = data[ligindex]
        data = np.hsplit(data, [1])  # Split expt values apart
        expt = data[0]
        comput = data[1]
        anova = np.hstack((anova, comput))  # Add data to the ANOVA array
        currdata = np.hstack((expt, np.reshape(np.mean(comput, axis=1), (47, 1))))  # Reshape then stack; now each ligand has an entry of length 2
        write_data(outfile, currdata)
        t_test_diffs(origdata, currdata, outfile)
    # Now do the ANOVA
    calc_anova(anova, "Anovas.txt")
Author: rtb1c13 | Project: scripts | Lines: 33 | Source: analyse_hfe.py
Example 11: logistic_test
def logistic_test(self, X, Y, train_results, predict_with_intercept=True,
                  predict_with_fixed_effects=True, use_prior_beta_split=True):
    training_betas = train_results.params
    print training_betas
    # please add fixed effects BEFORE intercept, for now!
    if self.fixed_effects_set:
        if not predict_with_fixed_effects:
            if use_prior_beta_split:
                print np.shape(X), self.prior_beta_split, len(training_betas)
                X = np.hsplit(X, [len(self.subject_indices.keys())])[1]
                training_betas = training_betas[self.prior_beta_split:]
                print np.shape(X), len(training_betas)
            else:
                X = np.hsplit(X, [len(self.subject_indices.keys())])[1]
                training_betas = training_betas[len(self.subject_indices.keys()):]
    if self.intercept_set:
        if not predict_with_intercept:
            X = np.hsplit(X, [1])[1]  # fixed: np.hsplit(X, 1) returns a single piece, so [1] would raise IndexError; the index list [1] drops the intercept column
            training_betas = training_betas[1:]
    test_eta = np.dot(X, training_betas)
    test_p = np.exp(test_eta) / (1. + np.exp(test_eta))
    test_predict = (test_p > 0.5)
    return (Y == test_predict).sum() * 1. / Y.shape[0]
Author: kieferkat | Project: kk-tools | Lines: 29 | Source: regression.py
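The fix above hinges on the difference between the two forms of the second argument: an integer requests that many equal-width pieces, while a list of indices marks split points. A quick sketch:

import numpy as np

X = np.arange(12.0).reshape(3, 4)
print(len(np.hsplit(X, 1)))  # 1 -- one equal piece, so indexing [1] would raise IndexError
pieces = np.hsplit(X, [1])   # [1] as a *list* splits before column 1
assert np.array_equal(pieces[1], X[:, 1:])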
Example 12: ChangeSize
def ChangeSize(self, n_nodes):
    self.masses.resize((1, n_nodes), refcheck=False)
    #self.masses = np.resize(self.masses, (1, n_nodes))
    #self.masses[0][-1]  # bug in resize??
    self.initDisp.resize((1, n_nodes), refcheck=False)
    self.initVel.resize((1, n_nodes), refcheck=False)
    #self.initDisp = np.resize(self.initDisp, (1, n_nodes))
    #self.initVel = np.resize(self.initVel, (1, n_nodes))
    if n_nodes > self.n_nodes:
        # Take care of 2D array manipulation
        delta = n_nodes - self.n_nodes
        hor = np.zeros((self.n_nodes, delta))
        ver = np.zeros((delta, n_nodes))
        self.springs = np.vstack((np.hstack((self.springs, hor)), ver))
        self.dampers = np.vstack((np.hstack((self.dampers, hor)), ver))
        # Take care of displacement and forces lists
        print self.n_nodes, n_nodes
        for i in range(0, n_nodes - self.n_nodes):
            #print i
            self.displacements.append(-1)
            self.forces.append(-1)
        #addArray = [0 for x in range(self.syst.n_nodes, n_nodes)]
    elif n_nodes < self.n_nodes:
        self.springs = np.hsplit(np.vsplit(self.springs, (n_nodes, n_nodes))[0], (n_nodes, n_nodes))[0]
        self.dampers = np.hsplit(np.vsplit(self.dampers, (n_nodes, n_nodes))[0], (n_nodes, n_nodes))[0]
        self.displacements = self.displacements[0:n_nodes]
        self.forces = self.forces[0:n_nodes]
    self.n_nodes = n_nodes
Author: snexus | Project: ODDS | Lines: 30 | Source: odds.py
Example 13: scale_up
def scale_up(a, x=2, y=2, num_z=None):
    """Scale the input array, repeating the array values up by the
    x and y factors.

    Requires:
    --------
    a : array
        An ndarray; 1D arrays will be upcast to 2D.
    x, y : numbers
        Factors to scale the array in x (col) and y (row). Scale factors
        must be greater than 2.
    num_z : number
        For 3D, produces the 3rd dimension, i.e. if num_z = 3 with the
        defaults, you will get an array with shape=(3, 6, 6). If
        num_z != None or 0, then the options are 'repeat', 'random'.
        With 'repeat' the extras are kept the same and you can add random
        values to particular slices of the 3rd dimension, or multiply them.

    Returns:
    -------
    >>> a = np.array([[0, 1, 2], [3, 4, 5]])
    >>> b = scale_up(a, x=2, y=2)
    array([[0, 0, 1, 1, 2, 2],
           [0, 0, 1, 1, 2, 2],
           [3, 3, 4, 4, 5, 5],
           [3, 3, 4, 4, 5, 5]])

    Notes:
    -----
    >>> a = np.arange(2*2).reshape(2,2)
    array([[0, 1],
           [2, 3]])

    >>> f_(scale_up(a, x=2, y=2, num_z=2))
    Array... shape (3, 4, 4), ndim 3, not masked
      0, 0, 1, 1    0, 0, 1, 1    0, 0, 1, 1
      0, 0, 1, 1    0, 0, 1, 1    0, 0, 1, 1
      2, 2, 3, 3    2, 2, 3, 3    2, 2, 3, 3
      2, 2, 3, 3    2, 2, 3, 3    2, 2, 3, 3
      sub (0)       sub (1)       sub (2)
    """
    if (x < 1) or (y < 1):
        print("x or y scale < 1... \n{}".format(scale_up.__doc__))
        return None
    a = np.atleast_2d(a)
    z0 = np.tile(a.repeat(x), y)  # repeat for x, then tile
    z1 = np.hsplit(z0, y)         # split into y parts horizontally
    z2 = np.vstack(z1)            # stack them vertically
    if a.shape[0] > 1:            # if there are more, repeat
        z3 = np.hsplit(z2, a.shape[0])
        z3 = np.vstack(z3)
    else:
        z3 = np.vstack(z2)
    if num_z not in (0, None):
        d = [z3]
        for i in range(num_z):
            d.append(z3)
        z3 = np.dstack(d)
        z3 = np.rollaxis(z3, 2, 0)
    return z3
Author: Dan-Patterson | Project: GIS | Lines: 60 | Source: grid.py
Example 14: phiSub
def phiSub(Q, k1, k2):
    """
    Calculate the initial vector for any subset.

    Parameters
    ----------
    mec : dcpyps.Mechanism
        The mechanism to be analysed.

    Returns
    -------
    phi : ndarray, shape (kA)
    """
    u = np.ones((k2 - k1 + 1, 1))
    p = pinf(Q)
    p1, p2, p3 = np.hsplit(p, (k1, k2 + 1))
    p1c = np.hstack((p1, p3))
    #Q = Q.copy()
    Q1, Q2, Q3 = np.hsplit(Q, (k1, k2 + 1))
    Q21, Q22, Q23 = np.hsplit(Q2.transpose(), (k1, k2 + 1))
    Q22c = Q22.copy()
    Q12 = np.vstack((Q21.transpose(), Q23.transpose()))
    nom = np.dot(p1c, Q12)
    denom = np.dot(nom, u)
    phi = nom / denom
    return phi, Q22c
Author: jenshnielsen | Project: DCPYPS | Lines: 29 | Source: qmatlib.py
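The hsplit calls at (k1, k2 + 1) carve the matrix into column blocks before, inside, and after the subset; combined with the transposes, this extracts the sub-blocks of Q the calculation needs. A sketch of the column partitioning alone, with a made-up matrix:

import numpy as np

Q = np.arange(25.0).reshape(5, 5)
k1, k2 = 1, 3
Q1, Q2, Q3 = np.hsplit(Q, (k1, k2 + 1))  # columns [:1], [1:4], [4:]
print(Q1.shape, Q2.shape, Q3.shape)      # (5, 1) (5, 3) (5, 1)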
Example 15: __init__
def __init__(self, data=list(), Lambda=.1, gamma=.5, theta=None):
    # SVM class
    #
    # @param data    [Nxd] array of observations, where N is the number of observations and d is the dimensionality of the abstract space
    # @param Lambda  regularizer to control the smoothness/accuracy trade-off. Preliminary experimental results show the range 0-1 controls this parameter.
    # @param gamma   list of gamma values which define the kernel smoothness
    try:
        self.N, self.d = data.shape
    except ValueError:
        self.N, self.d = (len(data), 1)
        self.X = data.reshape([self.N, self.d])
    else:
        self.X = data
    self.Lambda = Lambda
    self.gamma = gamma
    self.t = np.hsplit(self.X, [1])[0]
    self.offset = np.tile(np.hsplit(self.X, [1])[0], len(theta))
    self.theta = np.repeat(np.array(theta), self.N)
    self.D = self._K(self.X.reshape([self.N, 1, self.d]) - self.X.T.reshape([1, self.N, self.d]))
    self.S = np.array([[subset(self.X, self.D, t, theta) for t in self.t] for theta in self.theta]).flatten()
    self.SV = None     # X value array of SVs
    self.NSV = None    # cardinality of SVs
    self.alpha = None  # the full weight array for all observations
    self.beta = None   # weight array for SVs
    self.K = None      # precomputed kernel matrix
    self._compute()
Author: kerinin | Project: iEngine | Lines: 32 | Source: SVM_PW2.py
Example 16: extract_t5
def extract_t5(filename, t):
    '''Extract data at timeslice t from the propagator.

    Converts the raw data into complex numbers.
    '''
    # Loop structure: s c r/i s c t z y x.
    # Pluck out bits at t. Store in tmp.
    tmp = []
    for i in range(2 * nc * nc * ns * ns):
        with open(filename, "rb") as f:  # Inefficient?
            f.seek(i * 8 * nt * V + 8 * t * V, 0)
            data = np.fromfile(f, dtype='>d', count=V)
            tmp.append(data)
    tmp = ar(tmp, dtype=np.float).reshape((-1,))
    # Convert to complex numbers. Store in ctmp.
    ctmp_re = []
    ctmp_im = []
    for chunk in np.hsplit(tmp, ns * nc):
        chunk_re, chunk_im = np.hsplit(chunk, 2)
        ctmp_re.append(chunk_re)
        ctmp_im.append(chunk_im)
    ctmp_re = ar(ctmp_re, dtype=np.float).reshape((-1,))
    ctmp_im = ar(ctmp_im, dtype=np.float).reshape((-1,))
    ctmp = ctmp_re + 1j * ctmp_im
    return ctmp
Author: atlytle | Project: tifr | Lines: 26 | Source: read_overlap.py
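The nested hsplit calls deinterleave the flat buffer: the outer one cuts it into one chunk per spin-color component, and the inner one halves each chunk into its real and imaginary parts. A reduced sketch with a tiny made-up buffer (two chunks instead of ns*nc):

import numpy as np

raw = np.arange(8.0)             # hypothetical layout: [re re im im | re re im im]
re_parts, im_parts = [], []
for chunk in np.hsplit(raw, 2):  # split the flat buffer into equal chunks
    re, im = np.hsplit(chunk, 2) # first half real, second half imaginary
    re_parts.append(re)
    im_parts.append(im)
z = np.concatenate(re_parts) + 1j * np.concatenate(im_parts)
print(z)  # [0.+2.j 1.+3.j 4.+6.j 5.+7.j]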
Example 17: readdat
def readdat():
    with open("hw2_lssvm_all.dat", "r") as f:
        data = np.array([[float(i.strip()) for i in line.split()] for line in f.readlines()])
    X = np.hsplit(data, [len(data[0]) - 1, len(data)])[0]
    y = np.hsplit(data, [len(data[0]) - 1, len(data)])[1].reshape(np.shape(X)[0])
    return X, y
Author: yangarbiter | Project: ntu-ml-2014fall | Lines: 7 | Source: LSSVMCoursera.py
Example 18: Encoding
def Encoding(data, general_matrix=None):
    encoder = LabelBinarizer()
    count = 0
    # encoding
    for i in range(data.shape[1]):
        if type(data[0, i]) == str:
            count += 1
            col = data[:, i]
            unique = np.unique(col if general_matrix is None else general_matrix[:, i])
            try:
                encoder.fit(unique)
            except:
                pass
            new_col = encoder.transform(col)
            # split at i and i + 1
            before, removed, after = np.hsplit(data, [i, i + 1])
            # concatenate
            data = np.concatenate((before, new_col, after), axis=1)
            before, removed, after = np.hsplit(general_matrix, [i, i + 1])
            general_matrix = np.concatenate((before, encoder.transform(general_matrix[:, i]), after), axis=1)
    print "count : %d" % count
    # return data
    return data
Author: nhanloukiala | Project: AppsOfDataAnalysis | Lines: 27 | Source: cyber_attack_classification.py
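The hsplit at [i, i + 1] carves out the single string column so it can be replaced by the binarized columns. A sketch with a made-up object array and a stand-in for encoder.transform:

import numpy as np

data = np.array([['a', 1], ['b', 2], ['c', 3]], dtype=object)
one_hot = np.eye(3, dtype=object)                 # stand-in for encoder.transform(data[:, 0])
before, removed, after = np.hsplit(data, [0, 1])  # carve out column 0
data = np.concatenate((before, one_hot, after), axis=1)
print(data.shape)  # (3, 4): the string column replaced by three indicator columns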
Example 19: initialize
def initialize(self, cloud_file=""):
    """Configure the cloud information.

    This function gets the appropriate database file and creates the cloud information
    from it. The default behavior is to use the module-stored database. However, an
    alternate database file can be provided. The alternate database file needs to have a
    table called *Cloud* with the following columns:

    cloudId
        int : A unique index for each cloud entry.
    c_date
        int : The time (units=seconds) since the start of the simulation for the cloud observation.
    cloud
        float : The cloud coverage in 8ths of the sky.

    Parameters
    ----------
    cloud_file : str, optional
        The full path to an alternate cloud database.
    """
    if cloud_file != "":
        self.cloud_db = cloud_file
    else:
        self.cloud_db = os.path.join(os.path.dirname(__file__), self.CLOUD_DB)
    with sqlite3.connect(self.cloud_db) as conn:
        cur = conn.cursor()
        query = "select c_date, cloud from Cloud;"
        cur.execute(query)
        results = numpy.array(cur.fetchall())
        self.cloud_dates = numpy.hsplit(results, 2)[0].flatten()
        self.cloud_values = numpy.hsplit(results, 2)[1].flatten()
        cur.close()
Author: lsst-sims | Project: sims_ocs | Lines: 33 | Source: cloud_model.py
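The query returns (c_date, cloud) rows, and hsplit turns that two-column result into two flat arrays. A sketch with made-up rows:

import numpy as np

results = np.array([[0, 0.0], [3600, 2.0], [7200, 5.0]])  # hypothetical (c_date, cloud) rows
dates = np.hsplit(results, 2)[0].flatten()
values = np.hsplit(results, 2)[1].flatten()
print(dates)   # [   0. 3600. 7200.]
print(values)  # [0. 2. 5.]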
Example 20: __init__
def __init__(self, train_file="./data/traindata.txt", test_file="./data/testdata.txt"):
    self.train = np.hsplit(np.loadtxt(train_file), np.array([0, 9]))
    self.test = np.hsplit(np.loadtxt(test_file), np.array([0, 9]))
    self.train_sample = self.train[1]
    self.train_label = 2 * self.train[2] - 3
    self.test_sample = self.test[1]
    self.test_label = 2 * self.test[2] - 3
Author: walsvid | Project: Lab | Lines: 7 | Source: data.py
Note: The numpy.hsplit examples on this page were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and the copyright of each snippet belongs to its original author; before distributing or using them, please consult the corresponding project's License. Do not repost without permission.