This article collects typical usage examples of Python's numpy.genfromtxt function. If you have been wondering what genfromtxt does, or how to use it in practice, the curated code examples here may help.
Below are 20 code examples of genfromtxt, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
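Before the examples, a minimal sketch of a typical call may help orient you. The file name and column layout below are hypothetical, but the parameters (delimiter, skip_header, dtype) are the ones that recur throughout the examples:

import numpy as np

# Hypothetical CSV with one header row: year,magnitude,depth
data = np.genfromtxt('catalog.csv', delimiter=',', skip_header=1, dtype=float)
years = data[:, 0]
# Unlike np.loadtxt, missing or unparseable fields become nan instead of raising.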
Example 1: compare
def compare(mooseCsv, nrnCsv):
    mooseData = None
    nrnData = None
    with open(mooseCsv, "r") as f:
        mooseTxt = f.read().split("\n")
        # genfromtxt also accepts a list of lines, so the header row can be
        # split off first and the remaining lines parsed as numbers.
        mooseHeader, mooseData = mooseTxt[0].split(","), np.genfromtxt(mooseTxt[1:],
                                                                       delimiter=',').T
    with open(nrnCsv, "r") as f:
        nrnTxt = f.read().split("\n")
        nrnHeader, nrnData = nrnTxt[0].split(','), 1e-3 * np.genfromtxt(nrnTxt[1:],
                                                                        delimiter=',').T
    nrnTimeVec, nrnData = nrnData[0], nrnData[1:]
    mooseTimeVec, mooseData = mooseData[0], mooseData[1:]
    for i, comptName in enumerate(nrnHeader[1:]):
        if i == 1:
            break
        nrnComptName = comptName.replace("table_", "")
        mooseComptId, mooseComptName = get_index(nrnComptName, mooseHeader[1:])
        print("%s %s - moose equivalent %s %s" % (i, nrnComptName, mooseComptId,
                                                  mooseComptName))
        # Label each trace with the simulator it actually came from
        # (the original had the two labels swapped).
        pylab.plot(mooseTimeVec, mooseData[mooseComptId], label="MOOSE: " + mooseComptName)
        pylab.plot(nrnTimeVec, nrnData[i], label="Neuron: " + nrnComptName)
    pylab.legend(loc='best', framealpha=0.4)
    pylab.show()
Developer: BhallaLab, Project: benchmarks, Lines: 25, Source: compare.py
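A detail worth noting in Example 1: genfromtxt accepts any iterable of lines, not just a file name or file object, which is why passing mooseTxt[1:] (a list of strings already read into memory) works. A minimal sketch of the same pattern, with made-up data:

import numpy as np

lines = ['0.0,1.5,2.5', '0.1,1.6,2.4']  # e.g. the tail of a CSV already read into memory
arr = np.genfromtxt(lines, delimiter=',')  # shape (2, 3)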
Example 2: setUp
def setUp(self):
    """
    Read the test catalogues and the expected recurrence tables.
    """
    # Read initial dataset
    filename = os.path.join(self.BASE_DATA_PATH,
                            'completeness_test_cat.csv')
    test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
    # Create catalogue A
    self.catalogueA = Catalogue.make_from_dict(
        {'year': test_data[:, 3], 'magnitude': test_data[:, 17]})
    # Read second dataset
    filename = os.path.join(self.BASE_DATA_PATH,
                            'recurrence_test_cat_B.csv')
    test_data = np.genfromtxt(filename, delimiter=',', skip_header=1)
    # Create catalogue B
    self.catalogueB = Catalogue.make_from_dict(
        {'year': test_data[:, 3], 'magnitude': test_data[:, 17]})
    # Read the verification table A
    filename = os.path.join(self.BASE_DATA_PATH,
                            'recurrence_table_test_A.csv')
    self.true_tableA = np.genfromtxt(filename, delimiter=',')
    # Read the verification table B
    filename = os.path.join(self.BASE_DATA_PATH,
                            'recurrence_table_test_B.csv')
    self.true_tableB = np.genfromtxt(filename, delimiter=',')
Developer: g-weatherill, Project: hmtk, Lines: 28, Source: utils_test.py
Example 3: learn
def learn(tuned_parameters, model):
    # produceFeature(trainfile)
    dataset = genfromtxt(open('Data/' + trainfile, 'r'), delimiter=',', dtype='f8')[0:]
    target = [x[0] for x in dataset]
    train = [x[1:] for x in dataset]
    # produceFeature(testfile)
    test = genfromtxt(open('Data/' + testfile, 'r'), delimiter=',', dtype='f8')[0:]
    test_target = [x[1:] for x in test]
    trainnp = np.asarray(train)
    targetnp = np.asarray(target)
    # turn the data into a (samples, features) matrix:
    X, y = trainnp, targetnp
    # Split the dataset into two equal parts
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=0)
    scores = ['precision', 'recall']
    for score in scores:
        print("# Tuning hyper-parameters for %s" % score)
        print()
        clf = GridSearchCV(model, tuned_parameters, cv=5,
                           scoring='%s_weighted' % score)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set:")
        print()
        print(clf.best_params_)
        print()
        print("Grid scores on development set:")
        print()
        # loop variable renamed so it does not shadow the outer `scores` list
        for params, mean_score, cv_scores in clf.grid_scores_:
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean_score, cv_scores.std() * 2, params))
        print()
        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()
Developer: evanslight, Project: Exploring-Twitter-Sentiment-Analysis-and-the-Weather, Lines: 60, Source: Sentimentanalysis_parameter_gridsearch.py
Example 4: make_dtopo
def make_dtopo():
    '''
    Make geoclaw dtopo file
    '''
    from numpy import genfromtxt, zeros
    # Run params
    f = '/Users/dmelgarm/Research/Slip_Inv/tohoku_tsunami/'
    stafile = 'tohoku.sta'
    dlon = 0.033333
    dlat = 0.033333
    dt = 5
    stat_or_dyn = 's'
    # Get station list
    sta = genfromtxt(f + 'data/station_info/' + stafile, usecols=0, dtype='S4')
    s = genfromtxt(f + 'data/station_info/' + stafile, usecols=[1, 2])
    lon = s[:, 0]
    lat = s[:, 1]
    if stat_or_dyn.lower() == 's':
        n = zeros(len(sta))
        e = n.copy()
        u = n.copy()
        for ksta in range(len(sta)):
            print(ksta)
            # read static north/east/up offsets for this station
            neu = genfromtxt(f + 'output/forward_models/' + str(sta[ksta]) + '.static.neu')
            n[ksta] = neu[0]
            e[ksta] = neu[1]
            u[ksta] = neu[2]
            print(neu[2])
Developer: christineruhl, Project: MudPy, Lines: 31, Source: clawtools.py
Example 5: wide_dataset_large
def wide_dataset_large():
    print("Reading in Arcene training data for binomial modeling.")
    trainDataResponse = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_train_labels.labels"), delimiter=' ')
    trainDataResponse = np.where(trainDataResponse == -1, 0, 1)
    trainDataFeatures = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_train.data"), delimiter=' ')
    xtrain = np.transpose(trainDataFeatures).tolist()
    ytrain = trainDataResponse.tolist()
    trainData = h2o.H2OFrame.fromPython([ytrain] + xtrain)
    trainData[0] = trainData[0].asfactor()
    print("Run model on 3250 columns of Arcene with strong rules off.")
    model = H2OGeneralizedLinearEstimator(family="binomial", lambda_search=False, alpha=1)
    model.train(x=range(1, 3250), y=0, training_frame=trainData)
    print("Test model on validation set.")
    validDataResponse = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_valid_labels.labels"), delimiter=' ')
    validDataResponse = np.where(validDataResponse == -1, 0, 1)
    validDataFeatures = np.genfromtxt(pyunit_utils.locate("smalldata/arcene/arcene_valid.data"), delimiter=' ')
    xvalid = np.transpose(validDataFeatures).tolist()
    yvalid = validDataResponse.tolist()
    validData = h2o.H2OFrame.fromPython([yvalid] + xvalid)
    prediction = model.predict(validData)
    print("Check performance of predictions.")
    performance = model.model_performance(validData)
    print("Check that prediction AUC better than guessing (0.5).")
    assert performance.auc() > 0.5, "predictions should be better than pure chance"
Developer: Vishnu24, Project: h2o-3, Lines: 29, Source: pyunit_wide_dataset_glm_large.py
Example 6: read
def read(input_file="POSITIONS.OUT"):
    """ Reads a geometry """
    m = np.genfromtxt(input_file).transpose()
    g = geometry()  # create geometry object
    g.dimensionality = 0
    g.x = m[0]
    g.y = m[1]
    g.x = g.x - sum(g.x) / len(g.x)  # center x coordinates
    g.y = g.y - sum(g.y) / len(g.y)  # center y coordinates
    g.z = m[2]
    g.xyz2r()  # create r coordinates
    try:
        lat = np.genfromtxt("LATTICE.OUT")  # read lattice
    except:
        lat = None
    try:  # two dimensional
        g.a1 = np.array([lat[0, 0], lat[0, 1], 0.0])
        g.a2 = np.array([lat[1, 0], lat[1, 1], 0.0])
        g.dimensionality = 2
        return g
    except:
        pass
    try:  # one dimensional
        g.celldis = lat
        g.dimensionality = 1
        return g
    except:
        pass
    g.dimensionality = 0
    return g
Developer: joselado, Project: pygra, Lines: 26, Source: geometry.py
Example 7: test_dtype_with_object
def test_dtype_with_object(self):
    "Test using an explicit dtype with an object"
    from datetime import datetime
    from io import StringIO
    data = """
    1; 2001-01-01
    2; 2002-01-31
    """
    ndtype = [('idx', int), ('code', object)]
    func = lambda s: datetime.strptime(s.strip(), "%Y-%m-%d")
    converters = {1: func}
    # encoding='utf-8' ensures the converter receives str, not bytes
    test = np.genfromtxt(StringIO(data), delimiter=";", dtype=ndtype,
                         converters=converters, encoding='utf-8')
    control = np.array([(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
                       dtype=ndtype)
    assert_equal(test, control)
    #
    ndtype = [('nest', [('idx', int), ('code', object)])]
    try:
        test = np.genfromtxt(StringIO(data), delimiter=";",
                             dtype=ndtype, converters=converters,
                             encoding='utf-8')
    except NotImplementedError:
        pass
    else:
        errmsg = "Nested dtype involving objects should be supported."
        raise AssertionError(errmsg)
Developer: GunioRobot, Project: numpy-refactor, Lines: 26, Source: test_io.py
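Reduced to a standalone sketch, the mechanics Example 7 tests are: converters maps a column index to a function that turns the raw field into the stored value, and a structured dtype lets each column keep its own type. The inline data here is illustrative; encoding='utf-8' is passed so the converter receives str rather than bytes on modern NumPy:

import numpy as np
from io import StringIO
from datetime import datetime

data = '1; 2001-01-01\n2; 2002-01-31'
arr = np.genfromtxt(StringIO(data), delimiter=';',
                    dtype=[('idx', int), ('code', object)],
                    converters={1: lambda s: datetime.strptime(s.strip(), '%Y-%m-%d')},
                    encoding='utf-8')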
Example 8: plotme
def plotme(typeid, num, h):
    yMax = '6'
    cuibmFolder = '/scratch/src/cuIBM'
    caseFolder = cuibmFolder + '/validation/error/cylinder/' + typeid + num
    validationData = cuibmFolder + '/validation-data/cylinderRe40-KL95.txt'
    print(caseFolder + '/forces')
    my_data = genfromtxt(caseFolder + '/forces', dtype=float, delimiter='\t')
    time = [my_data[i][0] for i in range(1, len(my_data))]
    force = [my_data[i][1] * 2 for i in range(1, len(my_data))]
    validation_data = genfromtxt(validationData, dtype=float, delimiter='\t')
    validation_time = [validation_data[i][0] * 0.5 for i in range(1, len(validation_data))]
    validation_force = [validation_data[i][1] for i in range(1, len(validation_data))]
    plt.plot(validation_time, validation_force, 'o', color='red', markersize=8,
             label='Koumoutsakos and Leonard, 1995')
    plt.plot(time, force, '-', color='blue', linewidth=2, label='Present Work')
    plt.title('Flow over an impulsively started cylinder at Reynolds number 40')
    plt.legend(loc='upper right', numpoints=1, fancybox=True)
    plt.xlabel('Non-dimensional time')
    plt.ylabel('Drag Coefficient')
    plt.xlim([0, 3])
    plt.ylim([0, int(yMax)])
    plt.savefig('/scratch/src/cuIBM/validation/error/cylinder/' + typeid + h + '.pdf')
    plt.clf()
Developer: Niemeyer-Research-Group, Project: cuIBM, Lines: 26, Source: error_order_cylinder.py
Example 9: co
def co():
    import numpy as np
    import os
    home = os.path.expanduser('~')
    band = input('Select the band:')
    if band == 'pacs':
        upper, lower = 200, 54
    if band == 'spire':
        upper, lower = 671, 200
    level = np.genfromtxt(home + '/data/co_level.txt', dtype='str')
    ref = np.genfromtxt(home + '/data/co_ref.txt', dtype='str')
    for i in range(0, len(level[0:])):
        for j in range(0, len(ref[0:])):
            if ref[j, 1] == level[i, 0]:
                ref[j, 0] = level[i, 2]
                ref[j, 1] = level[i, 3]
            if ref[j, 2] == level[i, 0]:
                ref[j, 2] = level[i, 3]
    c = 2.998e8  # speed of light, m/s
    ref[:, 4] = c / ref[:, 4].astype(float) / 1e9 * 1e6  # convert frequency column to wavelength
    ref = ref[np.argsort(ref[:, 4].astype(float))]
    ref_sort = ref
    dummy = np.copy(ref[:, 0])
    ref_sort[:, 0], ref_sort[:, 1], ref_sort[:, 2], ref_sort[:, 3], ref_sort[:, 4] = \
        ref[:, 1], ref[:, 2], ref[:, 4], ref[:, 3], ref[:, 5]
    ref_sort[:, 5] = dummy
    ind = np.where((ref_sort[:, 2].astype(float) >= lower) &
                   (ref_sort[:, 2].astype(float) <= upper))
    slt_trans = ref_sort[ind, :]
    print(slt_trans)
    print(len(slt_trans[0, :, 0]))
    foo = open(home + '/data/co_ref_sort.txt', 'w')
    np.savetxt(foo, ref_sort, fmt='%s')
    foo.close()
Developer: yaolun, Project: sa, Lines: 32, Source: line_ref.py
Example 10: load
def load(self):
    # load data
    values = []
    if verbose: print()
    if verbose: print("visualization: loading chains ...")
    f = "prob-chain0.dump"
    if not os.path.exists(f):
        raise Exception("visualization: chains not available yet.")
    try:
        # I think the first column is the probabilities, the second is without prior
        probabilities = numpy.genfromtxt(f, skip_footer=1, dtype='f')[:, 0]
    except Exception as e:
        raise Exception("visualization: chains couldn't be loaded; perhaps no data yet: " + str(e))
    for p in self.params:
        f = "%s-chain-0.prob.dump" % p['name']
        if verbose: print(" loading chain %s" % f)
        if not os.path.exists(f):
            raise Exception("visualization: chains not available yet.")
        try:
            v = numpy.genfromtxt(f, skip_footer=1, dtype='f')
        except Exception as e:
            raise Exception("visualization: chains couldn't be loaded; perhaps no data yet: " + str(e))
        values.append(v)
    nvalues = min(map(len, values))
    if verbose: print("visualization: loading chains finished; %d values" % nvalues)
    self.values = [v[:nvalues][-self.nlast::nevery] for v in values]
    self.probabilities = probabilities[:nvalues][-self.nlast::nevery]
Developer: bsipocz, Project: PyMultiNest, Lines: 27, Source: analyse.py
Example 11: read_gf_from_txt
def read_gf_from_txt(block_txtfiles, block_name):
    """
    Read a GfReFreq from text files with the format (w, Re(G), Im(G)) for a single block.

    Notes
    -----
    A BlockGf must be constructed from multiple GfReFreq objects if desired.
    The mesh must be the same for all files read in.
    Non-uniform meshes are not supported.

    Parameters
    ----------
    block_txtfiles: Rank 2 square np.array(str) or list[list[str]]
        The text files containing the GF data that need to be read for the block.
        e.g. [['up_eg1.dat']] for a one-dimensional block and
        [['up_eg1_1.dat','up_eg2_1.dat'],
         ['up_eg1_2.dat','up_eg2_2.dat']] for a 2x2 block.
    block_name: str
        Name of the block.

    Returns
    -------
    g: GfReFreq
        The real-frequency Green's function read in.
    """
    block_txtfiles = np.array(block_txtfiles)  # must be an array to use certain functions
    N1, N2 = block_txtfiles.shape
    mesh = np.genfromtxt(block_txtfiles[0, 0], usecols=[0])  # mesh needs to be the same for all blocks
    g = GfReFreq(indices=range(N1), window=(np.min(mesh), np.max(mesh)),
                 n_points=len(mesh), name=block_name)
    for i, j in product(range(N1), range(N2)):
        data = np.genfromtxt(block_txtfiles[i, j], usecols=[1, 2])
        g.data[:, i, j] = data[:, 0] + 1j * data[:, 1]
    return g
Developer: TRIQS, Project: triqs, Lines: 33, Source: tools.py
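As a usage illustration for the function above, a hypothetical call matching the docstring (assuming 'up_eg1.dat' exists and contains three whitespace-separated columns w, Re(G), Im(G)):

g_up = read_gf_from_txt([['up_eg1.dat']], 'up')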
Example 12: load_hop
def load_hop(s, hop=hop_script_path):
    """
    Loads the hop catalog for the given RAMSES snapshot. If the
    catalog doesn't exist, it tries to run hop to create one via the
    'script_hop.sh' script found in the RAMSES distribution. The hop
    output should be in a 'hop' directory in the base directory of the
    simulation.

    **Input**:

    *s* : loaded RAMSES snapshot

    **Optional Keywords**:

    *hop* : path to `script_hop.sh`
    """
    if s.filename[-1] == '/':
        name = s.filename[-6:-1]
        filename = s.filename[:-13] + 'hop/grp%s.pos' % name
    else:
        name = s.filename[-5:]
        filename = s.filename[:-12] + 'hop/grp%s.pos' % name
    try:
        data = np.genfromtxt(filename, unpack=True)
    except IOError:
        import os
        dir = s.filename[:-12] if len(s.filename[:-12]) else './'
        os.system('cd %s;/home/itp/roskar/ramses/galaxy_formation/script_hop.sh %d;cd ..' % (dir, int(name)))
        data = np.genfromtxt(filename, unpack=True)
    return data
Developer: imclab, Project: pynbody, Lines: 35, Source: ramses_util.py
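The unpack=True flag used in Example 12 simply transposes the result, so each column of the file comes back as a row and can be tuple-unpacked. A minimal sketch with inline data:

import numpy as np
from io import StringIO

x, y = np.genfromtxt(StringIO('1 10\n2 20\n3 30'), unpack=True)
# x is [1. 2. 3.], y is [10. 20. 30.]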
Example 13: main
def main():
    print("Solve small matrix...")
    R = array([0, 0, 1, 1, 1, 2, 2])
    C = array([0, 1, 0, 1, 2, 1, 2])
    V = array([4.0, -1.0, -1.0, 4.0, -1.0, -1.0, 4.0])
    b = array([3.0, 2.0, 3.0])
    A = coo_matrix((V, (R, C)), shape=(3, 3))
    # convert to csr format for efficiency
    x = spsolve(A.tocsr(), b)
    print("x = ", x)
    print("Solve psd matrix...")
    # skip the first row (n, nnz); skip_header replaces the removed skiprows argument
    A = numpy.genfromtxt('../data/psd.txt', skip_header=1)
    b = numpy.genfromtxt('../data/b.txt')
    coo = coo_matrix((A[:, 2], (A[:, 0], A[:, 1])))
    x = spsolve(coo.tocsr(), b)
    print('x = ', x)
    print("Solve big matrix...")
    A = numpy.genfromtxt('../data/mat_helmholtz.txt', skip_header=1)
    coo = coo_matrix((A[:, 2], (A[:, 0], A[:, 1])))
    n = coo.shape[0]
    b = numpy.ones(n)
    x = spsolve(coo.tocsr(), b)
    print('x = ', x)
Developer: NP95, Project: coursera, Lines: 26, Source: demo.py
Example 14: main
def main(options):
    freq_range = range(options["from"], options["to"] + 1)
    # open gzipped inputs and outputs in text mode
    gt_file = gzip.open(options["gt_file"], "rt")
    pos_file = gzip.open(options["pos_file"], "rt")
    out_haps = gzip.open(options["out_root"] + "/haps.gz", "wt")
    out_haps_fn = [gzip.open(options["out_root"] + "/haps.f" + str(x) + ".gz", "wt")
                   for x in freq_range]
    out_samples = open(options["out_root"] + "/samples.txt", "w")
    # delimiter=1 reads fixed-width fields one character wide
    gt = np.genfromtxt(gt_file, delimiter=1)
    pos = np.genfromtxt(pos_file)
    pos = np.floor(pos * options["chr_len"]).astype(int)
    gt = gt.transpose().astype(int)
    # This is because on some platforms np.genfromtxt tries to import the line endings...
    gt = gt[range(len(pos)), ]
    (nsnp, nind) = gt.shape
    ACs = np.sum(gt, axis=1)  # allele counts
    MACs = np.minimum(ACs, nind - ACs)  # minor allele counts
    for i in range(nsnp):
        out_haps.write(("\t".join(["%d"] * (nind + 1)) + "\n") % ((pos[i],) + tuple(gt[i, ])))
        if MACs[i] >= options["from"] and MACs[i] <= options["to"]:
            idx = MACs[i] - options["from"]
            out_haps_fn[idx].write(("\t".join(["%d"] * (nind + 1)) + "\n") % ((pos[i],) + tuple(gt[i, ])))
    for i in range(int(nind / 2)):
        out_samples.write("SIM%d\n" % (i + 1,))
    for fil in [gt_file, pos_file, out_haps] + out_haps_fn:
        fil.close()
Developer: mathii, Project: f2, Lines: 34, Source: macs_genotype_to_hap_files.py
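A less common genfromtxt feature appears in Example 14: when delimiter is an integer (here delimiter=1), values are read as fixed-width fields of that many characters rather than split on a separator, which is how an undelimited block of 0/1 genotype characters is parsed one digit per column. A minimal sketch:

import numpy as np
from io import StringIO

gt = np.genfromtxt(StringIO('0101\n1100'), delimiter=1)  # shape (2, 4), one column per character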
Example 15: main
def main():
    trainset = np.genfromtxt(open('train.csv', 'r'), delimiter=',')[1:]
    X = np.array([x[1:8] for x in trainset])
    y = np.array([x[8] for x in trainset])
    import math
    # replace missing values (NaN) with a default
    for i, x in enumerate(X):
        for j, xx in enumerate(x):
            if math.isnan(xx):
                X[i][j] = 26.6
    testset = np.genfromtxt(open('test.csv', 'r'), delimiter=',')[1:]
    test = np.array([x[1:8] for x in testset])
    for i, x in enumerate(test):
        for j, xx in enumerate(x):
            if math.isnan(xx):
                test[i][j] = 26.6
    X, test = decomposition_pca(X, test)
    bdt = AdaBoostClassifier(base_estimator=KNeighborsClassifier(n_neighbors=20, algorithm='auto'),
                             algorithm="SAMME", n_estimators=200)
    bdt.fit(X, y)
    print('PassengerId,Survived')
    for i, t in enumerate(test):
        # predict expects a 2-D array, so wrap the single sample in a list
        print('%d,%d' % (i + 892, int(bdt.predict([t])[0])))
Developer: kingr13, Project: entire-src, Lines: 32, Source: adaboost.py
Example 16: RunQDAShogun
def RunQDAShogun(q):
    totalTimer = Timer()
    Log.Info("Loading dataset", self.verbose)
    try:
        # Load train and test dataset.
        trainData = np.genfromtxt(self.dataset[0], delimiter=',')
        trainFeat = modshogun.RealFeatures(trainData[:, :-1].T)
        if len(self.dataset) == 2:
            testData = np.genfromtxt(self.dataset[1], delimiter=',')
            testFeat = modshogun.RealFeatures(testData.T)
        # Labels are the last row of the training set.
        labels = modshogun.MulticlassLabels(trainData[:, (trainData.shape[1] - 1)])
        with totalTimer:
            model = modshogun.QDA(trainFeat, labels)
            model.train()
            if len(self.dataset) == 2:
                model.apply(testFeat).get_labels()
    except Exception as e:
        q.put(-1)
        return -1
    time = totalTimer.ElapsedTime()
    q.put(time)
    return time
Developer: MarcosPividori, Project: benchmarks, Lines: 29, Source: qda.py
Example 17: RunNBCShogun
def RunNBCShogun(q):
    totalTimer = Timer()
    Log.Info("Loading dataset", self.verbose)
    try:
        # Load train and test dataset.
        trainData = np.genfromtxt(self.dataset[0], delimiter=',')
        testData = np.genfromtxt(self.dataset[1], delimiter=',')
        # Labels are the last row of the training set.
        labels = MulticlassLabels(trainData[:, (trainData.shape[1] - 1)])
        with totalTimer:
            # Transform into features.
            trainFeat = RealFeatures(trainData[:, :-1].T)
            testFeat = RealFeatures(testData.T)
            # Create and train the classifier.
            nbc = GaussianNaiveBayes(trainFeat, labels)
            nbc.train()
            # Run Naive Bayes Classifier on the test dataset.
            nbc.apply(testFeat).get_labels()
    except Exception as e:
        q.put(-1)
        return -1
    time = totalTimer.ElapsedTime()
    q.put(time)
    return time
Developer: Saurabh7, Project: benchmarks, Lines: 30, Source: nbc.py
Example 18: get_spec
def get_spec(file, nfreq=300, mu=-1, nmu=10):
    # Read file assumed to be in the format of a tlusty unit 14 file: lines with
    # wavelengths and h, followed by blocks with intensities and polarizations.
    spec = np.genfromtxt(file, skip_header=1, invalid_raise=False)
    wlh = np.genfromtxt(file, invalid_raise=False)
    spec = spec.flatten()
    wlh = wlh.flatten()
    spec = spec[::2]
    wl = wlh[::2]
    # mu is an index for the polar angle to use; -1 corresponds to returning
    # the emergent flux from the disk.
    if mu == -1:
        h = wlh[1::2]
        spec = 4 * np.pi * h
        spec = np.vstack((wl, spec))
        return spec
    spec = spec.reshape((nfreq, nmu))
    spec = spec[:, mu]
    spec = np.vstack((wl, spec))
    return spec
Developer: alekseygenerozov, Project: tlusty_wrappers, Lines: 25, Source: interpolation.py
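The invalid_raise=False flag used twice in Example 18 tells genfromtxt to skip any line whose column count is inconsistent (emitting a warning) instead of raising, which is what lets the same file be read once for its wavelength/h lines and once for the intensity blocks. A minimal sketch:

import numpy as np
from io import StringIO

arr = np.genfromtxt(StringIO('1 2\n3 4 5\n6 7'), invalid_raise=False)
# the three-column line is skipped with a warning; arr ends up with shape (2, 2)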
Example 19: load_lexicons
def load_lexicons():
    """
    This function loads in Harvard's positive/negative word lexicons.
    """
    positive_words = np.genfromtxt('Positive_&_Negative_Words.csv', skip_header=1,
                                   usecols=(0,), delimiter=',', dtype='str')
    negative_words = np.genfromtxt('Positive_&_Negative_Words.csv', skip_header=1,
                                   usecols=(1,), delimiter=',', dtype='str')
    return positive_words, negative_words
Developer: FrankLongueira, Project: Natural-Language-Processing, Lines: 7, Source: SA_Functions.py
Example 20: loadTables
def loadTables(self, filesDir, varianceTbl=True):
    """
    Read in and store in a dictionary the IGM Lookup Tables that contain IGM transmission
    for a given redshift. Tables must be formatted in two columns:
    (wavelength (nm), IGM Transmission %) or, for variance,
    (wavelength (nm), IGM Transmission % Variance). Variance tables are not required and
    can be turned off as a requirement. Names in the directory are formatted as
    'MeanLookupTable_zSourceX.X.tbl' or 'VarLookupTable_zSourceX.X.tbl' where X.X is the
    redshift of the given lookup table.

    @param [in] filesDir is the location of the directory where lookup tables are stored

    @param [in] varianceTbl is a boolean that is True if variance tables are present in the
    directory for loading.
    """
    self.meanLookups = {}
    self.varLookups = {}
    for zValue in self.zRange:
        self.meanLookups['%.1f' % zValue] = np.genfromtxt(str(filesDir + '/MeanLookupTable_zSource' +
                                                              '%.1f' % zValue + '.tbl.gz'))
        if varianceTbl == True:
            try:
                self.varLookups['%.1f' % zValue] = np.genfromtxt(str(filesDir + '/VarLookupTable_zSource' +
                                                                     '%.1f' % zValue + '.tbl.gz'))
            except IOError:
                raise IOError("Cannot find variance tables.")
    self.tablesPresent = True
Developer: lsst, Project: sims_catUtils, Lines: 31, Source: applyIGM.py
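One convenience Example 20 relies on: genfromtxt transparently decompresses files whose names end in .gz (or .bz2), so the lookup tables can stay compressed on disk. A minimal sketch with a hypothetical file name:

import numpy as np

table = np.genfromtxt('MeanLookupTable_zSource2.0.tbl.gz')  # decompressed on the fly
wavelength, transmission = table[:, 0], table[:, 1]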
Note: the numpy.genfromtxt examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.