This article collects typical usage examples of the scipy.load function in Python. If you have been wondering how exactly to use the Python load function, how to call it, or what real examples look like, the hand-picked code samples below may help.
Twenty code examples of the load function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
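Before the project examples, here is a minimal, self-contained sketch of the basic save/load round trip most of them rely on. It assumes an older SciPy in which scipy.load and scipy.save were simple re-exports of numpy.load and numpy.save; those aliases have since been removed from SciPy, so numpy is used directly here, and the file name is purely illustrative.

import numpy as np
import scipy as sp  # in older SciPy releases, sp.load/sp.save aliased np.load/np.save

# Save a single array to a .npy file and read it back.
weights = np.random.rand(10, 3).astype(np.float32)
np.save('weights.npy', weights)    # older code writes sp.save('weights.npy', weights)
restored = np.load('weights.npy')  # older code writes sp.load('weights.npy')
assert restored.shape == (10, 3)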
Example 1: cv
def cv(nn_name, d_num=10000, k_fold=7, score_metrics='accuracy', verbose=0):
    suff = str(nn_name[:2])
    if nn_name.find('calib') > 0:
        X_data_name = 'train_data_icalib_' + suff + '.npy'
        y_data_name = 'labels_icalib_' + suff + '.npy'
    else:
        X_data_name = 'train_data_' + suff + '.npy'
        y_data_name = 'labels_' + suff + '.npy'
    X, y = sp.load(X_data_name), sp.load(y_data_name)
    d_num = min(len(X), d_num)
    X = X[:d_num]
    y = y[:d_num]
    rates12 = sp.hstack((0.05 * sp.ones(25, dtype=sp.float32), 0.005 * sp.ones(15, dtype=sp.float32), 0.0005 * sp.ones(10, dtype=sp.float32)))
    rates24 = sp.hstack((0.01 * sp.ones(25, dtype=sp.float32), 0.0001 * sp.ones(15, dtype=sp.float32)))
    rates48 = sp.hstack([0.05 * sp.ones(15, dtype=sp.float32), 0.005 * sp.ones(10, dtype=sp.float32)])
    if nn_name == '48-net':
        X12 = sp.load('train_data_12.npy')[:d_num]
        X24 = sp.load('train_data_24.npy')[:d_num]
    elif nn_name == '24-net':
        X12 = sp.load('train_data_12.npy')[:d_num]
    if score_metrics == 'accuracy':
        score_fn = accuracy_score
    else:
        score_fn = f1_score
    scores = []
    iteration = 0
    for t_indx, v_indx in util.kfold(X, y, k_fold=k_fold):
        nn = None
        X_train, X_test, y_train, y_test = X[t_indx], X[v_indx], y[t_indx], y[v_indx]
        #print('\t \t', str(iteration+1), 'fold out of ', str(k_fold), '\t \t')
        if nn_name == '24-net':
            nn = Cnnl(nn_name=nn_name, l_rates=rates24, subnet=Cnnl(nn_name='12-net', l_rates=rates12).load_model(
                '12-net_lasagne_.pickle'))
            nn.fit(X=X_train, y=y_train, X12=X12[t_indx])
        elif nn_name == '48-net':
            nn = Cnnl(nn_name=nn_name, l_rates=rates48, subnet=Cnnl(nn_name='24-net', l_rates=rates24, subnet=Cnnl(nn_name='12-net', l_rates=rates12).load_model(
                '12-net_lasagne_.pickle')).load_model('24-net_lasagne_.pickle'))
            nn.fit(X=X_train, y=y_train, X12=X12[t_indx], X24=X24[t_indx])
        else:
            nn = Cnnl(nn_name=nn_name, l_rates=rates12, verbose=verbose)
            nn.fit(X=X_train, y=y_train)
        if nn_name == '24-net':
            y_pred = nn.predict(X_test, X12=X12[v_indx])
        elif nn_name == '48-net':
            y_pred = nn.predict(X_test, X12=X12[v_indx], X24=X24[v_indx])
        else:
            y_pred = nn.predict(X_test)
        score = score_fn(y_test, y_pred)
        #print(iteration, 'fold score', score)
        scores.append(score)
        iteration += 1
    score_mean = sp.array(scores).mean()
    print(d_num, 'mean score', score_mean)  # report the cross-validation mean, not the last fold's score
    return score_mean
Developer: CCSUZJJ, Project: Cascade-CNN-Face-Detection, Lines of code: 59, Source file: cv.py
Example 2: ParseToDataContainers
def ParseToDataContainers(self,
                          Delimiter=None):
    # Parse an input file into the DataContainers object
    DCs = DataContainer.DataContainers()
    if re.search('.npy', self.GetName()):
        Arrays = None
        if self.GetboCompressed():
            Arrays = scipy.load(self.GetDecomprName())
        else:
            Arrays = scipy.load(self.GetName())
        Header = Arrays[:, 0].tolist()
        for i in xrange(len(Header)):
            Name = Header[i]  # The names of the datacontainers are determined by the
                              # header column names.
            DCs.DataContainers[Name] = DataContainer.DataContainer()
            DCs.Names2Columns[Name] = i
            DCs.Columns2Names[i] = Name
            DCs.DataContainers[Name].SetDataArray(Arrays[i, 1:])
            DCs.DataContainers[Name].SetDataName(Name)
        del Arrays
    else:
        Line = self.GetFileHandle().readline()
        if self.GetboHeader():
            Line = re.sub('#', '', Line)
            Names = Line.strip().split(Delimiter)  # The file should be space or tab delimited!
            for i in range(len(Names)):
                Name = Names[i]  # The names of the datacontainers are determined by the
                                 # header column names.
                DCs.DataContainers[Name] = DataContainer.DataContainer()
                DCs.Names2Columns[Name] = i
                DCs.Columns2Names[i] = Name
                DCs.DataContainers[Name].InitDataArray()
                DCs.DataContainers[Name].SetDataName(Name)
        else:
            LSplit = Line.strip().split(Delimiter)
            for i in range(len(LSplit)):
                Name = str(i)
                DCs.DataContainers[Name] = DataContainer.DataContainer()
                DCs.Names2Columns[Name] = i
                DCs.Columns2Names[i] = Name
                DCs.DataContainers[Name].InitDataArray()
                DCs.DataContainers[Name].SetDataName(Name)
                Entry = LSplit[i]
                DCs.DataContainers[Name].AppendToArray(Entry)
        for Line in self.GetFileHandle():
            LSplit = Line.strip().split(Delimiter)
            for i in range(len(LSplit)):
                Name = DCs.Columns2Names[i]
                Entry = LSplit[i]
                DCs.DataContainers[Name].AppendToArray(Entry)
        for Key in DCs.DataContainers.iterkeys():
            DCs.DataContainers[Key].CastDataArrayToScipy()  # Make scipy.arrays of the lists.
    return DCs
Developer: rpool, Project: BioCrates, Lines of code: 56, Source file: File.py
Example 3: save_andor_load_arrays
def save_andor_load_arrays(endog, exog, true_params, save_arrays, load_old_arrays):
    if save_arrays:
        sp.save("endog.npy", endog)
        sp.save("exog.npy", exog)
        sp.save("true_params.npy", true_params)
    if load_old_arrays:
        endog = sp.load("endog.npy")
        exog = sp.load("exog.npy")
        true_params = sp.load("true_params.npy")
    return endog, exog, true_params
Developer: r0k3, Project: statsmodels, Lines of code: 10, Source file: demo.py
Example 4: save_andor_load_arrays
def save_andor_load_arrays(
        endog, exog, true_params, save_arrays, load_old_arrays):
    if save_arrays:
        sp.save('endog.npy', endog)
        sp.save('exog.npy', exog)
        sp.save('true_params.npy', true_params)
    if load_old_arrays:
        endog = sp.load('endog.npy')
        exog = sp.load('exog.npy')
        true_params = sp.load('true_params.npy')
    return endog, exog, true_params
Developer: 0ceangypsy, Project: statsmodels, Lines of code: 11, Source file: demo.py
Example 5: execute
def execute(self, nprocesses=1):
    params = self.params
    boxshape = params['boxshape']
    boxunit = params['boxunit']
    resultf = params['hr'][0]
    if len(params['last']) != 0:
        resultf = resultf + params['last'][0]
    resultf = resultf + '-' + params['hr'][1]
    if len(params['last']) != 0:
        resultf = resultf + params['last'][1]
    FKPweight = params['FKPweight']
    in_root = params['input_root']
    out_root = params['output_root']
    mid = params['mid']
    fkpp = params['FKPpk']
    WindowF_fname = out_root + 'WindowF_' +\
        str(boxshape[0]) + 'x' + str(boxshape[1]) + 'x' +\
        str(boxshape[2]) + 'x' + str(boxunit) + '_' + resultf
    kWindowF_fname = out_root + 'k_WindowF_' +\
        str(boxshape[0]) + 'x' + str(boxshape[1]) + 'x' +\
        str(boxshape[2]) + 'x' + str(boxunit) + '_' + resultf
    print WindowF_fname
    try:
        WindowF = sp.load(WindowF_fname + '.npy')
        k = sp.load(kWindowF_fname + '.npy')
    except IOError:
        print '\tWindow Function ReMake'
        WindowF, k = self.GetWindowFunctionData()
        non0 = WindowF.nonzero()
        sp.save(WindowF_fname, WindowF)
        sp.save(kWindowF_fname, k)
    #txtf = open(out_root+'window_for_idl.txt', 'w')
    #try:
    #    for i in range(len(WindowF)):
    #        if WindowF[i]==0: continue
    #        print >>txtf, '{0} {1}'.format(k[i], WindowF[i])
    #finally:
    #    txtf.close()
    return WindowF, k
Developer: YichaoLi, Project: PowerMaker, Lines of code: 48, Source file: windowf.py
Example 6: loadTFIDF
def loadTFIDF(path):
    weight = sp.load('tfidf_weight.npy')
    fp = codecs.open('tfidf_words.txt', 'r', 'utf-8')
    words = json.load(fp)
    fp.close()
    return words, weight
Developer: wjianwei126, Project: wordcut, Lines of code: 7, Source file: tfidf.py
Example 7: make_video
def make_video(image_dir, filename="vidout.avi", fixation_file=None):
    MPEG_FOURCC = 827148624
    vwriter = cv2.VideoWriter()
    if fixation_file is not None:
        fixations = sp.load(fixation_file)
        fixations[sp.isnan(fixations)] = -100
        fixations[abs(fixations) > 1000] = 1000
    else:
        fixations = []
    im_base_name = "cam1_frame_"
    im_extension = ".bmp"
    suc = vwriter.open(os.path.join(image_dir, filename), cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, (640, 480))
    if not suc:
        raise IOError("Failed to open movie")
    for frame_num in xrange(1000):
        im_name = "".join([im_base_name, str(frame_num), im_extension])
        im_path = os.path.join(image_dir, im_name)
        im = cv2.imread(im_path)
        if len(fixations) != 0:
            cv2.circle(im, tuple(fixations[frame_num]), 3, (255, 255, 255))
        vwriter.write(im)
Developer: Berkeley-BORIS, Project: NDS-Code, Lines of code: 27, Source file: play_movie.py
Example 8: from_file
def from_file(fname):
    """Load model from a npz file"""
    params = dict(sc.load(fname).items())
    model = Model(fname, **params)
    if "seed" in params:
        model.set_seed(model["seed"])
    return model
Developer: sidaw, Project: polymom, Lines of code: 8, Source file: Model.py
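This example, and several later ones (Examples 9, 12 and 17), read .npz archives rather than single .npy files. As a rough sketch of that pattern using numpy directly (the file name and array keys below are made up for illustration):

import numpy as np

# Write several named arrays into one .npz archive ...
np.savez('model_params.npz', k=np.array(5), O=np.eye(5), seed=np.array(42))

# ... and load them back; loading an .npz returns a lazy, dict-like NpzFile.
archive = np.load('model_params.npz')
params = dict(archive.items())   # same dict(...) idiom as from_file() above
print(sorted(params.keys()))     # ['O', 'k', 'seed']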
Example 9: test_brown_clustering
def test_brown_clustering():
    fname = "test-data/text-1e2.npz"
    F = sc.load(fname)
    C, D = F['C'], F['D']
    k = 100
    W = 1000
    bc = BrownClusteringAlgorithm(C)
    bc.run(k, W)
Developer: arunchaganty, Project: spectral, Lines of code: 9, Source file: BrownClustering.py
Example 10: train
def train(nn_name='12-net', k=12):
    """
    Function for training 12-net with testing on part of data
    using cross validation
    """
    suff = str(k)
    if nn_name.find('calib') > 0:
        X_data_name = 'train_data_icalib_' + suff + '.npy'
        y_data_name = 'labels_icalib_' + suff + '.npy'
    else:
        X_data_name = 'train_data_' + suff + '.npy'
        y_data_name = 'labels_' + suff + '.npy'
    rates12 = sp.hstack((0.05 * sp.ones(25, dtype=sp.float32), 0.005 * sp.ones(15, dtype=sp.float32), 0.0005 * sp.ones(10, dtype=sp.float32)))
    rates24 = sp.hstack((0.01 * sp.ones(25, dtype=sp.float32), 0.0001 * sp.ones(15, dtype=sp.float32)))
    rates48 = sp.hstack([0.05 * sp.ones(15, dtype=sp.float32), 0.005 * sp.ones(10, dtype=sp.float32)])
    if nn_name == '24-net':
        nn = Cnnl(nn_name=nn_name, l_rates=rates24, subnet=Cnnl(nn_name='12-net', l_rates=rates12).load_model(
            '12-net_lasagne_.pickle'))
    elif nn_name == '48-net':
        nn = Cnnl(nn_name=nn_name, l_rates=rates48, subnet=Cnnl(nn_name='24-net', l_rates=rates24, subnet=Cnnl(nn_name='12-net', l_rates=rates12).load_model(
            '12-net_lasagne_.pickle')).load_model('24-net_lasagne_.pickle'))
    else:
        nn = Cnnl(nn_name=nn_name, l_rates=rates12)
    if not os.path.exists(nn_name + '_lasagne_.pickle'):
        if nn_name.find('calib') > 0:
            ds.get_train_wider_calib_data(k=k)
        else:
            ds.get_train_data(k=k)
    X, y = sp.load(X_data_name), sp.load(y_data_name)
    X_train, y_train = X, y
    if not os.path.exists(nn_name + '_lasagne_.pickle'):
        if nn_name == '24-net':
            X_sub_train12 = sp.load('train_data_12.npy')
            nn.fit(X=X_train, y=y_train, X12=X_sub_train12)
        elif nn_name == '48-net':
            X_sub_train12 = sp.load('train_data_12.npy')
            X_sub_train24 = sp.load('train_data_24.npy')
            nn.fit(X=X_train, y=y_train, X12=X_sub_train12, X24=X_sub_train24)
        else:
            nn.fit(X=X_train, y=y_train)
        nn.save_model(nn_name + '_lasagne_.pickle')
Developer: CCSUZJJ, Project: Cascade-CNN-Face-Detection, Lines of code: 44, Source file: train.py
Example 11: draw_raw_signal_around_genes
def draw_raw_signal_around_genes(raw_signals, out_png, windowsize=20000):
    """draw the raw signals as computed by make_raw_signal_around_genes"""
    gene_expr = filter(lambda f: 'gene_expr' in f, raw_signals)
    reads = filter(lambda f: 'gene_expr' not in f and 'matched_size' not in f, raw_signals)
    pyplot.figure()
    f, plots = pyplot.subplots(1, len(reads) + 1, sharex=False, sharey=True, squeeze=False)
    #sig_min = reduce(min, map(min, map(sp.load, reads)))
    #sig_max = reduce(max, map(max, map(sp.load, reads)))
    for i, read_sig in enumerate(reads):
        #plots[i+1].imshow(sp.load(read_sig), interpolation='nearest', vmin=sig_min, vmax=sig_max)
        plots[0, i+1].imshow(sp.ma.filled(sp.load(read_sig), fill_value=0).T, interpolation='nearest', aspect=.05)
        plots[0, i+1].text(0, 0, read_sig.split('gene.expression.')[1].split('.')[0], rotation=30, verticalalignment='bottom')
    gexpr_ma = sp.load(gene_expr[0]).astype(float)
    plots[0, 0].imshow(sp.ma.filled(gexpr_ma.reshape(1, gexpr_ma.shape[0]), fill_value=0).T, interpolation='nearest', aspect=.002)
    #yticks(sp.arange())
    shape = sp.load(read_sig).shape
    pyplot.xticks(sp.arange(0, shape[0] + shape[0]/4, shape[0]/4), sp.arange(-windowsize/2, windowsize/2 + windowsize/4, windowsize/4))
    f.savefig(out_png)
    pyplot.close('all')
Developer: bunbun, Project: HTS-waterworks, Lines of code: 19, Source file: annotation.py
Example 12: from_file
def from_file(fname):
    """Load model from an npz file"""
    if not fname.endswith(".npz"):
        fname += ".npz"
    params = dict(sc.load(fname).items())
    model = Model(fname, **params)
    if "seed" in params:
        model.set_seed(model.get_parameter("seed"))
    return model
Developer: arunchaganty, Project: spectral, Lines of code: 10, Source file: Model.py
Example 13: read_file
def read_file(name):
    fname = fname_template % name
    if os.path.exists(fname + '_x.npy') and os.path.exists(fname + '_y.npy'):
        xs = scipy.load(fname + '_x.npy')
        ys = scipy.load(fname + '_y.npy')
        return xs, ys
    elif os.path.exists(fname):
        with open(fname) as fh:
            lines = fh.readlines()
        ns = len(lines)
        x, y = numpy.ndarray(ns), numpy.ndarray(ns, dtype=complex)
        for i, l in enumerate(lines):
            xr, yr = l.split('\t')
            x[i] = eval(xr)
            y[i] = eval(yr)
        return x, y
    else:
        print >>sys.stderr, 'File %s is missing' % (fname_template % name,)
        #sys.exit(1)
        return None, None
Developer: jakebarnwell, Project: PythonGenerator, Lines of code: 20, Source file: common.py
Example 14: loadMSER_npy
def loadMSER_npy(fn=nn_data_sets.NN_DATA_MSER, datadir=NN_DATA_DIR):
    '''
    As a shortcut to loading the MSER data set from the 1000's of files found in the
    ukbench_extract folder, one should call loadMSER() once, and save the resulting
    numpy array to a single file. This function assumes you have done so, and will
    load the MSER data from the specified numpy file.
    @Note: This function really doesn't do anything but put comments around the
    use of numpy.load(...). Use numpy.save( filename, M) to create the saved file
    in the first place.
    '''
    return scipy.load(os.path.join(datadir, fn))
Developer: Sciumo, Project: ProximityForest, Lines of code: 11, Source file: data_preparation.py
Example 15: reconstruct_target
def reconstruct_target(target_file, base_prefix, regul=None):
    """
    Reconstruct the target in 'target_file' using constrained,
    and optionally regularized, least square optimisation.
    arguments :
        target_file : file containing the target to fit
        base_prefix : prefix for the files of the base.
    """
    vlist = read_vertex_list(base_prefix + '_vertices.dat')
    t = read_target(target_file, vlist)
    U = load(base_prefix + "_U.npy").astype('float')
    S = load(base_prefix + "_S.npy").astype('float')
    V = load(base_prefix + "_V.npy").astype('float')
    ntargets, dim = V.shape
    nvert = len(t)
    pt = dot(U.T, t.reshape(nvert * 3, 1))
    pbase = S[:dim].reshape(dim, 1) * V.T
    A = param('A', value=matrix(pbase))
    b = param('b', value=matrix(pt))
    x = optvar('x', ntargets)
    if regul is None:
        prob = problem(minimize(norm2(A * x - b)), [x >= 0., x <= 1.])
    else:
        prob = problem(minimize(norm2(A * x - b) + regul * norm1(x)), [x >= 0., x <= 1.])
    prob.solve()
    targ_names_file = base_prefix + "_names.txt"
    with open(targ_names_file) as f:
        tnames = [line.strip() for line in f.readlines()]
    tnames.sort()
    base, ext = os.path.splitext(target_file)
    bs_name = base + ".bs"
    with open(bs_name, "w") as f:
        for tn, v in zip(tnames, x.value):
            if v >= 1e-3:
                f.write("%s %0.3f\n" % (tn, v))
Developer: ihavenick, Project: MakeHuman, Lines of code: 39, Source file: reconstruction.py
Example 16: read_fft_features
def read_fft_features(path, value):
    features = []
    t = time.time()
    for filename in os.listdir(path):
        if filename.endswith(".npy"):
            real_path = "{0}/{1}".format(path, filename)
            fft_features = scipy.load(real_path)
            features.append(np.asarray(fft_features[:1000]))
    print time.time() - t
    length = len(features)
    data = np.array(features)
    values = np.array([value] * length)
    return data, values
Developer: nexemjail, Project: mir, Lines of code: 13, Source file: data_loader.py
Example 17: main
def main(fname):
    """Run on sample in fname"""
    lda = sc.load(fname)
    k, d, a0, O, X = lda['k'], lda['d'], lda['a0'], lda['O'], lda['data']
    X1, X2, X3 = X
    P, T = sample_moments(X1, X2, X3, k, a0)
    O_ = recover_topics(P, T, k, a0)
    O_ = closest_permuted_matrix(O.T, O_.T).T
    print k, d, a0, norm(O - O_)
Developer: arunchaganty, Project: spectral, Lines of code: 13, Source file: TwoSVD.py
Example 18: read_fft
def read_fft(genre_list, base_dir):
    X = []
    y = []
    for label, genre in enumerate(genre_list):
        genre_dir = os.path.join(base_dir, genre, "*.fft.npy")
        # print genre_dir
        file_list = glob.glob(genre_dir)
        for fn in file_list:
            fft_features = scipy.load(fn)
            X.append(fft_features[:1000])
            y.append(label)
    return np.array(X), np.array(y)
Developer: Ritace, Project: musicMoodDetectionFFT, Lines of code: 13, Source file: loadingFFT.py
Example 19: get_train_data
def get_train_data(n_pos=31929, n_neg=164863, k=12):
    '''
    merge positive and negative examples
    '''
    suff = str(k)
    X_name = 'train_data_' + suff + '.npy'
    y_name = 'labels_' + suff + '.npy'
    if not (os.path.exists(X_name) and os.path.exists(y_name)):
        X_train_face, y_train_face = Datasets.get_train_face_wider_data(k=k)
        #X_pos = X_train_face[y_train_face==1]
        X_pos = X_train_face
        X_aflw, y_train_face_aflw = Datasets.get_aflw_face_data(k=k)
        X_pos = sp.vstack([X_pos, X_aflw])
        X_train_non_face, y_train_non_face = Datasets.get_train_non_face_data(k=k)
        print('c1_pos:', len(X_pos))
        if len(X_train_face[y_train_face == 0]) > 0:
            X_neg = sp.vstack((X_train_face[y_train_face == 0], X_train_non_face))
        else:
            X_neg = X_train_non_face
        X_pos = shuffle(X_pos, random_state=42)
        X_neg = shuffle(X_neg, random_state=42)
        X_pos = X_pos[:n_pos]
        X_neg = X_neg[:n_neg]
        n_neg = len(X_neg)
        n_pos = len(X_pos)
        y_pos = sp.ones(n_pos, int)
        y_neg = sp.zeros(n_neg, int)
        X = sp.vstack((X_pos, X_neg))
        y = sp.hstack((y_pos, y_neg))
        X, y = shuffle(X, y, random_state=42)
        sp.save(X_name, X)
        sp.save(y_name, y)
    else:
        X = sp.load(X_name)
        y = sp.load(y_name)
    print("Done", "Positive examples count, Negative examples count:", len(y[y == 1]), len(y[y == 0]))
Developer: CCSUZJJ, Project: Cascade-CNN-Face-Detection, Lines of code: 37, Source file: datasets.py
Example 20: play_movie
def play_movie(image_dir, fixation_file=None, fps=30):
    pygame.init()
    clock = pygame.time.Clock()
    flags = pygame.NOFRAME
    depth = 32
    surf = pygame.display.set_mode((640, 480), flags, depth)
    im_base_name = "cam1_frame_"
    im_extension = ".bmp"
    if fixation_file is not None:
        fixations = sp.load(fixation_file)
        fixations[sp.isnan(fixations)] = -100
        fixations[abs(fixations) > 1000] = 1000
    else:
        fixations = []
    try:
        pygame.event.clear()
        pygame.event.set_allowed(None)
        pygame.event.set_allowed(pygame.KEYDOWN)
        for framenum in xrange(4000):
            im_name = "".join([im_base_name, str(framenum), im_extension])
            im_path = os.path.join(image_dir, im_name)
            im = cv2.imread(im_path)
            if len(fixations) != 0:
                if sp.floor(fixations[framenum, 1]) == 237:
                    continue
                cv2.circle(im, tuple(fixations[framenum]), 3, (255, 255, 255))
            im_buf = im.tostring()
            im = pygame.image.frombuffer(im_buf, (640, 480), "RGB")
            surf.blit(im, (0, 0))
            pygame.display.flip()
            print "Frame", framenum
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        sys.exit(0)
            clock.tick(30)
    except KeyboardInterrupt:
        print "Quitting"
    except Exception as e:
        print "An exception!"
        print e
        raise
    finally:
        pygame.quit()
Developer: Berkeley-BORIS, Project: NDS-Code, Lines of code: 50, Source file: play_movie.py
Note: The scipy.load examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use must follow each project's license. Do not reproduce without permission.