This article collects typical usage examples of the numpy.loadtxt function in Python. If you are wondering how to use Python's loadtxt function, how it works, or what it looks like in real code, the curated examples below may help.
The following presents 20 code examples of the loadtxt function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
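Before the examples, here is a minimal sketch of the core call. The in-memory CSV stands in for a real file and is purely illustrative:

import io
import numpy as np

# A small in-memory stand-in for a CSV file (hypothetical data).
csv = io.StringIO("# x,y,z\n1.0,2.0,3.0\n4.0,5.0,6.0\n")

# skiprows skips the header line; usecols keeps only columns 1 and 2;
# unpack=True returns one array per column instead of a single 2-D array.
y, z = np.loadtxt(csv, delimiter=',', skiprows=1, usecols=(1, 2), unpack=True)
print(y, z)  # [2. 5.] [3. 6.]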
Example 1: merge_csvs
def merge_csvs(in_list):
    for idx, in_file in enumerate(in_list):
        try:
            in_array = np.loadtxt(in_file, delimiter=',')
        except ValueError:
            try:
                in_array = np.loadtxt(in_file, delimiter=',', skiprows=1)
            except ValueError:
                with open(in_file, 'r') as first:
                    header_line = first.readline()
                header_list = header_line.split(',')
                n_cols = len(header_list)
                try:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1,
                        usecols=list(range(1, n_cols))
                    )
                except ValueError:
                    in_array = np.loadtxt(
                        in_file, delimiter=',', skiprows=1,
                        usecols=list(range(1, n_cols - 1)))
        if idx == 0:
            out_array = in_array
        else:
            out_array = np.dstack((out_array, in_array))
    out_array = np.squeeze(out_array)
    iflogger.info('Final output array shape:')
    iflogger.info(np.shape(out_array))
    return out_array
Author: ashgillman, Project: nipype, Lines: 29, Source: misc.py
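The nested try/except chain in merge_csvs can also be written as a loop over progressively more forgiving option sets. A minimal sketch of that refactor, assuming numpy is imported as np; the helper name and its n_cols parameter are hypothetical:

def load_csv_with_fallbacks(in_file, n_cols):
    # Try progressively more forgiving loadtxt option sets until one parses.
    candidates = [
        dict(delimiter=','),                                                   # plain CSV
        dict(delimiter=',', skiprows=1),                                       # header row
        dict(delimiter=',', skiprows=1, usecols=list(range(1, n_cols))),       # drop label column
        dict(delimiter=',', skiprows=1, usecols=list(range(1, n_cols - 1))),   # drop label and trailing column
    ]
    for kwargs in candidates:
        try:
            return np.loadtxt(in_file, **kwargs)
        except ValueError:
            continue
    raise ValueError('no option set could parse %s' % in_file)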
Example 2: get_dates
def get_dates(file):
    """
    Read the first file in the input directory and create an ordinal-based
    timeseries. Also find the indices to split the time series into months
    and years.
    """
    hours = (0, 1, 2, 3)
    days = (0, 1, 2)
    try:
        data = np.loadtxt(file, usecols=hours, dtype=int)
        datelist = [datetime(*d) for d in data]
    except (ValueError, TypeError):
        data = np.loadtxt(file, usecols=days, dtype=int)
        datelist = [datetime(*d) for d in data]
    # check to make sure we haven't used daily data by mistake
    # (creating a bunch of duplicate times)
    newlist = []
    for i in datelist:
        if i not in newlist:
            newlist.append(i)
        else:
            raise ValueError('Found duplicate datetimes in datelist')
    print('VIC startdate: {0}'.format(datelist[0]))
    print('VIC enddate: {0}'.format(datelist[-1]))
    return datelist
Author: mbaptiste, Project: tonic, Lines: 28, Source: vic2netcdf.py
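One aside on the duplicate check above: the membership test "i not in newlist" rescans a growing list, so the loop is quadratic in the number of timesteps. A sketch of the same check with a set, which is linear:

seen = set()
for d in datelist:
    if d in seen:
        raise ValueError('Found duplicate datetimes in datelist')
    seen.add(d)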
Example 3: loadFilter
def loadFilter(fname):
    # load filter transmission
    try:
        fdata = np.loadtxt(fname, dtype=float)
    except ValueError:
        fdata = np.loadtxt(fname, dtype=float, delimiter=',')
    filtx = np.array(fdata[:, 0])
    filty = np.array(fdata[:, 1])
    filty[filty < 0.0] = 0.0
    if max(filty) > 1.0:
        filty /= 100.0
    if max(filtx) < 2000:  # fix: original tested max(filtx < 2000), a boolean-array slip
        filtx *= 10.0
    print(filty)
    if fname == 'Rfilter.txt':
        filty = filty / max(filty) * 0.8218  # normalize to our Johnson R filter max transmission
    if fname == 'Vfilter.txt':
        filty = filty / max(filty) * 0.8623  # normalize to our Johnson V filter max transmission
    if filtx[-1] < filtx[0]:  # if the array runs high to low, reverse it so the integration does not go negative
        filtx = filtx[::-1]
        filty = filty[::-1]
    filtWidth = filtx[-1] - filtx[0]
    print("filter width =", filtWidth)
    filtCorrection = integrate.simps(filty, x=filtx) / filtWidth  # correction factor for filter width
    print("filter correction =", filtCorrection)
    return filtx, filty, filtWidth, filtCorrection
Author: RupertDodkins, Project: ARCONS-pipeline-1, Lines: 29, Source: plotCrabSpectrum_N1.py
Example 4: plot_igraph_modules_conf_cor_mat_rada
def plot_igraph_modules_conf_cor_mat_rada(rada_lol_file, Pajek_net_file, coords_file, net_List_file, gm_mask_coords_file):
    import numpy as np
    import nibabel as nib
    import os
    import csv
    from dmgraphanalysis.utils_cor import return_mod_mask_corres, read_lol_file, read_Pajek_corres_nodes, read_List_net_file
    from dmgraphanalysis.plot_igraph import plot_3D_igraph_modules_Z_list

    print('Loading node_corres')
    node_corres = read_Pajek_corres_nodes(Pajek_net_file)
    print(node_corres)
    print(node_corres.shape)

    print('Loading coords')
    # with open(coords_file, 'Ur') as f:
    #     coords_list = list(tuple(map(float, rec))[0:2] for rec in csv.reader(f, delimiter=' '))
    coords = np.array(np.loadtxt(coords_file), dtype='int64')
    print(coords.shape)

    print('Loading gm mask coords')
    gm_mask_coords = np.array(np.loadtxt(gm_mask_coords_file), dtype='int64')
    print(gm_mask_coords.shape)

    print('Loading community belonging file ' + rada_lol_file)
    community_vect = read_lol_file(rada_lol_file)
    # print(community_vect)
    print(community_vect.shape)

    print('Loading net_List_net as list')
    Z_list = read_List_net_file(net_List_file)
    print(Z_list)

    print('Extracting node coords')
    node_coords = coords[node_corres, :]
    print(node_coords.shape)

    print('Plotting conf_cor_mat_modules_file with igraph')
    Z_list_all_modules_file = plot_3D_igraph_modules_Z_list(community_vect, node_coords, Z_list, gm_mask_coords)
    return Z_list_all_modules_file
Author: Lx37, Project: dmgraphanalysis, Lines: 60, Source: modularity.py
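A side note on the two coordinate loads above: wrapping loadtxt in np.array(..., dtype='int64') converts the float array that loadtxt returns; the equivalent astype form reads slightly more directly (a sketch, not the project's code):

coords = np.loadtxt(coords_file).astype('int64')  # same result as np.array(np.loadtxt(...), dtype='int64')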
Example 5: testEpsilon_MOEA_NegativeDTLZ2
def testEpsilon_MOEA_NegativeDTLZ2(self):
    random = pyotl.utility.Random(1)
    problemGen = lambda: pyotl.problem.real.NegativeDTLZ2(3)
    problem = problemGen()
    pathProblem = os.path.join(self.pathData, type(problem).__name__.replace('Negative', ''), str(problem.GetNumberOfObjectives()))
    crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
    mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
    epsilon = pyotl.utility.PyList2Vector_Real([0.06] * problem.GetNumberOfObjectives())
    pfList = []
    for _ in range(self.repeat):
        problem = problemGen()
        initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
        optimizer = pyotl.optimizer.couple_couple.real.Epsilon_MOEA(random, problem, initial, crossover, mutation, epsilon)
        while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
            optimizer()
        pf = pyotl.utility.PyListList2VectorVector_Real(
            [list(solution.objective_) for solution in optimizer.GetSolutionSet()])
        for objective in pf:
            problem.Fix(objective)
        pfList.append(pf)
    pathCrossover = os.path.join(pathProblem, type(crossover).__name__)
    pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
    pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
    # GD
    indicator = pyotl.indicator.real.DTLZ2GD()
    metricList = [indicator(pf) for pf in pfList]
    rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
    self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
    # IGD
    indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
    metricList = [indicator(pf) for pf in pfList]
    rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
    self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
Author: O-T-L, Project: PyOTL, Lines: 33, Source: epsilon_moea.py
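One loadtxt subtlety in tests like this: a PF.csv containing a single row comes back as a 1-D array, which changes what .tolist() produces. Passing ndmin=2 pins the shape; a hedged sketch, independent of PyOTL:

pf_true = numpy.loadtxt(os.path.join(pathProblem, 'PF.csv'), ndmin=2)
assert pf_true.ndim == 2  # one row per solution, even if the file has a single line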
Example 6: ascii_specfem2d
def ascii_specfem2d(**kwargs):
    """ Reads seismic traces from text files
    """
    files = glob(solver='specfem2d', **kwargs)
    t = _np.loadtxt(files[0])[:, 0]
    h = Struct()
    h['t0'] = t[0]
    h['nr'] = len(files)
    h['ns'] = 1
    h['dt'] = _np.mean(_np.diff(t))
    h['nt'] = len(t)
    # read data
    s = _np.zeros((h['nt'], h['nr']))
    for i, file in enumerate(files):
        s[:, i] = _np.loadtxt(file)[:, 1]
    # keep track of file names
    h.files = []
    for file in files:
        file = basename(file)
        h.files.append(file)
    return s, h
Author: chukren, Project: seisflows, Lines: 26, Source: readers.py
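Since each trace file holds a time column and an amplitude column, loadtxt's unpack=True can split them in a single read instead of slicing. A sketch under the same two-column assumption, using the module's _np alias:

t, amplitude = _np.loadtxt(files[0], unpack=True)  # column 0 -> t, column 1 -> amplitude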
Example 7: prune_layer
def prune_layer(layer_name, model, netspec, original_activations_dir, current_activations_dir, args):
    log(args, "Starting to prune Layer %s\n" % layer_name)
    layer = get_layer(layer_name, model)
    log(args, "Old Weight Shape: %s" % str(layer.blobs[0].data.shape))
    log(args, "Old Bias Shape: %s" % str(layer.blobs[1].data.shape))
    layer_param = get_layer_param(layer_name, netspec)
    if layer_param is None:
        raise Exception("Layer %s does not exist in file %s" % (layer_name, args.network_file))
    bottom_blob_name = layer_param.bottom[0]
    bottom_activations_file = os.path.join(current_activations_dir, "%s.txt" % bottom_blob_name)
    bottom_activations = np.loadtxt(bottom_activations_file)
    log(args, "Bottom shape: %s" % str(bottom_activations.shape))
    top_blob_name = layer_param.top[0]
    top_activations_file = os.path.join(original_activations_dir, "%s.txt" % top_blob_name)
    top_activations = np.loadtxt(top_activations_file)
    log(args, "Top shape: %s" % str(top_activations.shape))
    # row = instance, col = neuron, so to get neuron similarity, we transpose
    gram_matrix = gram(bottom_activations.transpose(), args)
    log(args, "Gram Matrix shape: %s" % str(gram_matrix.shape))
    neuron_indices_to_keep = sample_neurons(gram_matrix, args)
    weights, bias = update_weights(bottom_activations, neuron_indices_to_keep, top_activations)
    log(args, "New Weight shape: %s" % str(weights.shape))
    log(args, "New Bias shape: %s" % str(bias.shape))
    layer.blobs[1].data[:] = bias[:]
    layer.blobs[0].reshape(weights.shape[0], weights.shape[1])
    layer.blobs[0].data[:] = weights[:]
    prune_prev_layer(layer_name, neuron_indices_to_keep, model, netspec, args)
Author: waldol1, Project: caffe, Lines: 34, Source: divnet2.py
Example 8: get_target_feature
def get_target_feature(model):
    """ Get target features """
    name = model.subdir
    iteration = model.Mut_iteration
    cwd = os.getcwd()
    sub = "%s/%s/Mut_%d" % (cwd, name, iteration)
    ## To Do:
    ## - Check if a target set of contact probabilities is given,
    ##   else construct target as <Q_i^TS> = Q^TS (uniform TS).
    ## ---- For future
    ## Format for target_Qi.dat
    ## - three columns:
    ##   <res_a> <res_b> <Q_ab^TS>
    ##   computes contact as within native contact distance.
    ## ---- For now
    ## Just a column with the desired contact probability in the TS
    if os.path.exists("%s/target_Qi.dat" % name):
        target = np.loadtxt("%s/target_Qi.dat" % name)
        target_err = np.loadtxt("%s/target_Qi_err.dat" % name)
    else:
        ## Compute the average Q of the TS: average of the endpoints.
        os.chdir("%s" % sub)
        bounds, state_labels = get_state_bounds()
        Q_TS = 0.5 * (bounds[2] + bounds[3]) / float(model.n_contacts)
        target = Q_TS * np.ones(model.n_contacts, float)
        target_err = 0.05 * np.ones(model.n_contacts, float)
        os.chdir(cwd)
    return target, target_err
Author: TensorDuck, Project: project_tools, Lines: 34, Source: compute_Jacobian.py
Example 9: __init__
def __init__(self, beta=0.99, sigma=2.0, gamma=1, aH=5.0, aL=0.0, y=-1, dti=0.5,
             aN=101, psi=0.03, tol=0.01, neg=-1e10, W=45, R=30, a0=0, tcost=0.0, ltv=0.7):
    self.beta, self.sigma, self.gamma, self.psi = beta, sigma, gamma, psi
    self.R, self.W, self.y = R, W, y
    self.tcost, self.ltv, self.dti = tcost, ltv, dti
    self.T = T = (y + 1 if (y >= 0) and (y <= W + R - 2) else W + R)
    self.aH, self.aL, self.aN, self.aa = aH, aL, aN, aL + (aH - aL) * linspace(0, 1, aN)
    self.tol, self.neg = tol, neg
    """ house sizes and number of feasible house sizes """
    self.hh = [0.0, 0.2, 0.5, 0.7, 1.0]
    # self.hh = loadtxt('hh.txt', delimiter='\n')
    self.sp = loadtxt('sp.txt', delimiter='\n')  # survival probability
    self.hN = len(self.hh)
    """ age-specific productivity """
    self.ef = loadtxt('ef.txt', delimiter='\n')
    """ value function and its interpolation """
    self.v = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
    self.vtilde = [[[] for h in range(self.hN)] for y in range(T)]
    """ policy functions used in value function method """
    self.na = [[[0 for i in range(2)] for h in range(self.hN)] for y in range(T)]
    self.ao = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
    self.ho = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
    self.co = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
    self.ro = array([[[0 for i in range(aN)] for h in range(self.hN)] for y in range(T)], dtype=float)
    """ The following paths for a, c, n and u are used in both the direct and the value
        function methods. In the direct method the paths are calculated directly, while
        in the value function method they are derived from the value and policy functions. """
    self.apath = array([a0 for y in range(T)], dtype=float)
    self.hpath = array([0 for y in range(T)], dtype=float)
    self.cpath = array([0 for y in range(T)], dtype=float)
    self.rpath = array([0 for y in range(T)], dtype=float)
    self.spath = array([0 for y in range(T)], dtype=float)
    self.epath = array([0 for y in range(T)], dtype=float)  # labor supply in efficiency units
    self.upath = array([0 for y in range(T)], dtype=float)
Author: hyunchangyi, Project: olg, Lines: 34, Source: ltv_ha2_o.py
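Note that delimiter='\n' works in the loads above only because each line of sp.txt and ef.txt holds a single number; for such one-value-per-line files the default whitespace delimiter parses identically (a sketch in the snippet's bare-import style):

sp = loadtxt('sp.txt')  # one value per line parses fine with the default delimiter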
Example 10: testISNPS_DTLZ2
def testISNPS_DTLZ2(self):
    random = pyotl.utility.Random(1)
    problemGen = lambda: pyotl.problem.real.DTLZ2(3)
    problem = problemGen()
    pathProblem = os.path.join(self.pathData, type(problem).__name__, str(problem.GetNumberOfObjectives()))
    _crossover = pyotl.crossover.real.SimulatedBinaryCrossover(random, 1, problem.GetBoundary(), 20)
    crossover = pyotl.crossover.real.CoupleCoupleCrossoverAdapter(_crossover, random)
    mutation = pyotl.mutation.real.PolynomialMutation(random, 1 / float(len(problem.GetBoundary())), problem.GetBoundary(), 20)
    angle1 = 2.3 * math.pi / 180
    angle2 = 45 * math.pi / 180
    amplification = 3
    pfList = []
    for _ in range(self.repeat):
        problem = problemGen()
        initial = pyotl.initial.real.BatchUniform(random, problem.GetBoundary(), 100)
        convergenceDirection = pyotl.utility.PyList2BlasVector_Real([1] * problem.GetNumberOfObjectives())
        optimizer = pyotl.optimizer.real.ISNPS(random, problem, initial, crossover, mutation, convergenceDirection, angle1, angle2, amplification)
        while optimizer.GetProblem().GetNumberOfEvaluations() < 30000:
            optimizer()
        pf = pyotl.utility.PyListList2VectorVector_Real(
            [list(solution.objective_) for solution in optimizer.GetSolutionSet()])
        pfList.append(pf)
    pathCrossover = os.path.join(pathProblem, type(crossover.GetCrossover()).__name__)
    pathOptimizer = os.path.join(pathCrossover, type(optimizer).__name__)
    pfTrue = pyotl.utility.PyListList2VectorVector_Real(numpy.loadtxt(os.path.join(pathProblem, 'PF.csv')).tolist())
    # GD
    indicator = pyotl.indicator.real.DTLZ2GD()
    metricList = [indicator(pf) for pf in pfList]
    rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'GD.csv')).tolist()
    self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
    # IGD
    indicator = pyotl.indicator.real.InvertedGenerationalDistance(pfTrue)
    metricList = [indicator(pf) for pf in pfList]
    rightList = numpy.loadtxt(os.path.join(pathOptimizer, 'IGD.csv')).tolist()
    self.assertGreater(scipy.stats.ttest_ind(rightList, metricList)[1], 0.05, [numpy.mean(rightList), numpy.mean(metricList), metricList])
Author: O-T-L, Project: PyOTL, Lines: 35, Source: isnps.py
Example 11: create_class_vec
def create_class_vec(new_name):
    cfa_dir = PLS_code_dir + "data/cfaspec_snIa/"
    SNe_data = np.loadtxt(cfa_dir + '/cfasnIa_param_mod.dat', dtype={'names': ('SN_name', 'zhel', 'tMaxB', 'err_tMaxB', 'ref', 'Dm15', 'err_Dm15', 'ref2', 'M_B', 'err_M_B', 'BmV', 'err_BmV', 'BmmVm', 'err_BmmVm', 'Phot_ref'), 'formats': ('S15', 'f8', 'f8', 'f8', 'S15', 'f8', 'f8', 'S15', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'S15')})
    spectra_data = np.loadtxt(cfa_dir + '/cfasnIa_mjdspec.dat', dtype={'names': ('spectrum_name', 'time'), 'formats': ('S40', 'f8')})
    SNe_BranchWang_class = np.loadtxt(cfa_dir + '/branchwangclass_mod.dat', dtype={'names': ('SN_name', 'pEW5972', 'pEW6355', 'vabs6355', 'phase', 'Branch', 'Wang'), 'formats': ('S15', 'f8', 'f8', 'f8', 'f8', 'S15', 'S15')})
    name_regex = re.compile(r'(.+)\-\d+\.\d+')
    name_vector = []
    for spectrum_name in enumerate(spectra_data["spectrum_name"]):
        name_vector.append(name_regex.search(spectrum_name[1]).group(1))
    # Create the vectors of the Branch and Wang classifications.
    # SN_name_vec = []
    pEW5972_vec = []
    pEW6355_vec = []
    vabs6355_vec = []
    Branch_vec = []
    Wang_vec = []
    for i, supernova in enumerate(new_name):
        pEW5972_tmp = np.nan
        pEW6355_tmp = np.nan
        vabs6355_tmp = np.nan
        Branch_tmp = np.nan
        Wang_tmp = np.nan
        for name_sn in enumerate(SNe_BranchWang_class["SN_name"]):
            if name_sn[1] == supernova:
                SN_name_tmp, pEW5972_tmp, pEW6355_tmp, vabs6355_tmp, phase_tmp, Branch_tmp, Wang_tmp = SNe_BranchWang_class[name_sn[0]]
                # SN_name_vec.append(SN_name_tmp)
        pEW5972_vec.append(pEW5972_tmp)
        pEW6355_vec.append(pEW6355_tmp)
        vabs6355_vec.append(vabs6355_tmp)
        Branch_vec.append(Branch_tmp)
        Wang_vec.append(Wang_tmp)
    # color plot for Branch
    color_plot_Branch = []
    for i in range(0, np.size(new_name)):
        if Branch_vec[i] == "CN":
            color_plot_Branch.append('r')
        elif Branch_vec[i] == "SS":
            color_plot_Branch.append('g')
        elif Branch_vec[i] == "BL":
            color_plot_Branch.append('b')
        elif Branch_vec[i] == "CL":
            color_plot_Branch.append('y')
        else:
            color_plot_Branch.append('w')
    # color plot for Wang
    color_plot_Wang = []
    for i in range(0, np.size(new_name)):
        if Wang_vec[i] == "91T":
            color_plot_Wang.append('r')
        elif Wang_vec[i] == "N":
            color_plot_Wang.append('g')
        elif Wang_vec[i] == "pec":
            color_plot_Wang.append('b')
        elif Wang_vec[i] == "HV":
            color_plot_Wang.append('y')
        elif Wang_vec[i] == "91bg":
            color_plot_Wang.append('c')
        else:
            color_plot_Wang.append('w')
    return color_plot_Wang, color_plot_Branch
Author: sasdelli, Project: lc_predictor, Lines: 60, Source: color_plots.py
Example 12: datos
def datos():
    '''
    Load the data from a file and return it as columns.
    '''
    lambda1 = np.loadtxt('espectro.dat', usecols=(-2,))
    flujo = np.loadtxt('espectro.dat', usecols=(-1,))
    return lambda1, flujo
Author: hpozojose, Project: 11Tarea, Lines: 7, Source: p12.py
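Reading espectro.dat twice is harmless but redundant; a single loadtxt call with unpack=True returns both columns at once (a sketch under the same file layout):

lambda1, flujo = np.loadtxt('espectro.dat', usecols=(-2, -1), unpack=True)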
Example 13: _readVICOutputFromFile
def _readVICOutputFromFile(self, lat, lon, depths, filespath):
    """Read DSSAT inputs from VIC output files for a specific pixel."""
    startdate = date(self.startyear, self.startmonth, self.startday)
    enddate = date(self.endyear, self.endmonth, self.endday)
    filename = "{0}/output/eb_{1:.{3}f}_{2:.{3}f}".format(
        filespath, lat, lon, self.grid_decimal)
    viceb = np.loadtxt(filename)
    filename = "{0}/output/sub_{1:.{3}f}_{2:.{3}f}".format(
        filespath, lat, lon, self.grid_decimal)
    vicsm = np.loadtxt(filename)
    filename = "{0}/output/sur_{1:.{3}f}_{2:.{3}f}".format(
        filespath, lat, lon, self.grid_decimal)
    vicsr = np.loadtxt(filename)
    filename = "{0}/forcings/data_{1:.{3}f}_{2:.{3}f}".format(
        filespath, lat, lon, self.grid_decimal)
    met = np.loadtxt(filename)
    sm = vicsm[:, 3:len(depths) + 3]
    weather = np.vstack(
        (viceb[:, 3] + viceb[:, 4], met[:, 1], met[:, 2], met[:, 0])).T
    year = vicsm[:, 0].astype(int)
    month = vicsm[:, 1].astype(int)
    day = vicsm[:, 2].astype(int)
    tidx = [i for i in range(len(year))
            if startdate <= date(year[i], month[i], day[i]) <= enddate]
    lai = dict(zip([date(year[i], month[i], day[i])
                    for i in range(len(year)) if i in tidx], vicsr[:, 12]))
    return year[tidx], month[tidx], day[tidx], weather[tidx, :], sm[tidx, :], lai
Author: maduhu, Project: RHEAS, Lines: 27, Source: dssat.py
Example 14: run_test
def run_test(name):
    basepath = os.path.join('results', name)
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    ctrl = LBSimulationController(TestLDCSim)
    ctrl.run(ignore_cmdline=True)
    horiz = np.loadtxt('ldc_golden/re400_horiz', skiprows=1)
    vert = np.loadtxt('ldc_golden/re400_vert', skiprows=1)
    plt.plot(2 * (horiz[:, 0] - 0.5), -2 * (horiz[:, 1] - 0.5), '.', label='Sheu, Tsai paper')
    plt.plot(2 * (vert[:, 0] - 0.5), -2 * (vert[:, 1] - 0.5), '.', label='Sheu, Tsai paper')
    save_output(basepath, MAX_ITERS)
    plt.legend(loc='lower right')
    plt.gca().yaxis.grid(True)
    plt.gca().xaxis.grid(True)
    plt.gca().xaxis.grid(True, which='minor')
    plt.gca().yaxis.grid(True, which='minor')
    plt.title('Lid Driven Cavity, Re = 400')
    print(os.path.join(basepath, 're400.pdf'))
    plt.savefig(os.path.join(basepath, 're400.pdf'), format='pdf')
    plt.clf()
    plt.cla()
    plt.show()
    shutil.rmtree(tmpdir)
Author: PokerN, Project: sailfish, Lines: 27, Source: ldc_3d.py
Example 15: Visualize
def Visualize(self, path=None, filename=None, viz_type='difference'):
    if path is None:
        path = self.result_path
    if filename is None:
        filename = '/results'
    im = []
    if self.n <= 1:
        fig = mpl.figure()
        x = np.linspace(0, 1, self.m)
        counter = 1
        for step in sorted(glob.glob(path + filename + '*.txt')):
            tmp = np.loadtxt(step)
            if viz_type == 'difference':
                im.append(mpl.plot(x, (self.exact(x, np.zeros(self.m), counter * self.dt) - tmp), 'b-'))
            else:
                im.append(mpl.plot(x, tmp, 'b-'))
            counter += 1
        ani = animation.ArtistAnimation(fig, im)
        mpl.show()
    else:
        X, Y = np.meshgrid(np.linspace(0, 1, self.m), np.linspace(0, 1, self.n))
        mpl.ion()
        fig = mpl.figure()
        ax = fig.add_subplot(111, projection='3d')
        counter = 1
        for step in sorted(glob.glob(path + filename + '*.txt')):
            tmp = np.loadtxt(step)
            wframe = ax.plot_wireframe(X, Y, (self.exact(X, Y, (counter * self.dt)) - tmp))
            mpl.draw()
            if counter == 1:
                pass
                # ax.set_autoscaley_on(False)
            ax.collections.remove(wframe)
            counter += 1
Author: fepettersen, Project: thesis, Lines: 34, Source: new_experiment.py
Example 16: main
def main():
    args = _args()
    tests = {
        'squared_diff': test_squared_diff,
        'chi_square': chi_square,
        'chi_square_shape': chi_square_shape,
        'ks': kolmogorov_smirnov
    }
    if args.test_type not in tests:
        print('--test_type not found, available: {}'.format(tests.keys()))
        return
    # Create histograms with the same bins.
    if args.raw_data:
        print('Reading {}'.format(args.data_1))
        data_1 = np.loadtxt(args.data_1)
        print('Reading {}'.format(args.data_2))
        data_2 = np.loadtxt(args.data_2)
        print('Data read')
        if args.frames and args.raw_data:
            data_1 = data_1[:args.frames]
            data_2 = data_2[:args.frames]
        min_bins, max_bins = map(float, args.min_max.split(':'))
        bins = np.arange(min_bins, max_bins, (max_bins - min_bins) / args.bins)
        histogram_1, _ = np.histogram(data_1, bins=bins, density=False)
        histogram_2, _ = np.histogram(data_2, bins=bins, density=False)
    else:
        histogram_1 = np.loadtxt(args.data_1, usecols=(0, 1))
        histogram_2 = np.loadtxt(args.data_2, usecols=(0, 1))
    print('Running test {}'.format(args.test_type))
    tests[args.test_type](histogram_1, histogram_2)
Author: MrTheodor, Project: lab-tools, Lines: 35, Source: compare_histograms.py
Example 17: single_eval
def single_eval(pred_filepath, true_filepath):
    '''
    @brief: do evaluation on a single prediction
    '''
    # read and flatten
    y_pred_raw = np.loadtxt(open(pred_filepath, "rb"), delimiter=",").flatten()
    y_true_raw = np.loadtxt(open(true_filepath, "rb"), delimiter=",").flatten()
    assert len(y_pred_raw) == len(y_true_raw), 'len(y_pred_raw)!=len(y_true_raw)'
    # remove void
    void_num = 255
    nonvoid_idxes = [i for i in range(len(y_true_raw)) if y_true_raw[i] != void_num]
    y_pred = [y_pred_raw[i] for i in nonvoid_idxes]
    y_true = [y_true_raw[i] for i in nonvoid_idxes]
    assert len(y_pred) == len(y_true), 'len(y_pred)!=len(y_true)'
    # get confusion mat
    cm = confusion_matrix(y_true, y_pred)
    # get present classes in ascending order, map to their accuracy
    present_classes = sorted(list(set(y_pred + y_true)))
    class_perf_map = {}
    for i in range(len(present_classes)):
        class_perf_map[i] = get_perf(cm, i)
    return class_perf_map
Author: grafikaj, Project: lab1231-sun-prj, Lines: 28, Source: evaluator_tor.py
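Since loadtxt already returns ndarrays here, the index-list comprehensions that strip void pixels can be replaced by a boolean mask, which is shorter and vectorized. A sketch under the same 255-means-void convention:

mask = y_true_raw != void_num  # boolean mask of non-void pixels
y_pred = y_pred_raw[mask]
y_true = y_true_raw[mask]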
Example 18: test_usecols
def test_usecols(self):
    a = np.array([[1, 2], [3, 4]], float)
    c = StringIO()  # assumes: from io import StringIO (Python 3)
    np.savetxt(c, a)
    c.seek(0)
    x = np.loadtxt(c, dtype=float, usecols=(1,))
    assert_array_equal(x, a[:, 1])
    a = np.array([[1, 2, 3], [3, 4, 5]], float)
    c = StringIO()
    np.savetxt(c, a)
    c.seek(0)
    x = np.loadtxt(c, dtype=float, usecols=(1, 2))
    assert_array_equal(x, a[:, 1:])
    # Testing with arrays instead of tuples.
    c.seek(0)
    x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
    assert_array_equal(x, a[:, 1:])
    # Checking with dtypes defined converters.
    data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
    c = StringIO(data)
    names = ['stid', 'temp']
    dtypes = ['S4', 'f8']
    arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
    assert_equal(arr['stid'], [b"JOE", b"BOB"])
    assert_equal(arr['temp'], [25.3, 27.9])
Author: GunioRobot, Project: numpy-refactor, Lines: 30, Source: test_io.py
Example 19: _stimcorr_core
def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
    """
    Core routine for determining stimulus correlation
    """
    if not cwd:
        cwd = os.getcwd()
    # read in motion parameters
    mc_in = np.loadtxt(motionfile)
    g_in = np.loadtxt(intensityfile)
    g_in.shape = g_in.shape[0], 1
    dcol = designmatrix.shape[1]
    mccol = mc_in.shape[1]
    concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
    cm = np.corrcoef(concat_matrix, rowvar=0)
    corrfile = self._get_output_filenames(motionfile, cwd)
    # write output to outputfile
    file = open(corrfile, 'w')
    file.write("Stats for:\n")
    file.write("Stimulus correlated motion:\n%s\n" % motionfile)
    for i in range(dcol):
        file.write("SCM.%d:" % i)
        for v in cm[i, dcol + np.arange(mccol)]:
            file.write(" %.2f" % v)
        file.write('\n')
    file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
    for i in range(dcol):
        file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
    file.close()
Author: DimitriPapadopoulos, Project: nipype, Lines: 29, Source: rapidart.py
Example 20: CoAddFinal
def CoAddFinal(frames, mode='mean', display=True):
    # co-add FINISHED, reduced spectra
    # only trick: resample on to the wavelength grid of the 1st frame
    files = np.loadtxt(frames, dtype=str, unpack=True)
    # read in first file
    wave_0, flux_0 = np.loadtxt(files[0], dtype='float', skiprows=1,
                                unpack=True, delimiter=',')
    for i in range(1, len(files)):
        wave_i, flux_i = np.loadtxt(files[i], dtype='float', skiprows=1,
                                    unpack=True, delimiter=',')
        # linear interp on to wavelength grid of 1st frame
        flux_i0 = np.interp(wave_0, wave_i, flux_i)
        flux_0 = np.dstack((flux_0, flux_i0))
    if mode == 'mean':
        flux_out = np.squeeze(flux_0.sum(axis=2) / len(files))
    if mode == 'median':
        flux_out = np.squeeze(np.median(flux_0, axis=2))
    if display is True:
        plt.figure()
        plt.plot(wave_0, flux_out)
        plt.xlabel('Wavelength')
        plt.ylabel('Co-Added Flux')
        plt.show()
    return wave_0, flux_out
Author: holtzmanjon, Project: pydis, Lines: 31, Source: wrappers.py
Note: The numpy.loadtxt examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors; copyright remains with those authors, and distribution and use should follow each project's license. Do not repost without permission.