This page collects typical usage examples of the Python function numpy.recfromtxt. If you are wondering what recfromtxt does, how to call it, or want to see it used in real code, the curated examples below should help.
Twenty code examples of recfromtxt are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
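Before the project examples, here is a minimal, self-contained sketch of the function's basic behavior: recfromtxt is a thin wrapper around np.genfromtxt that returns a record array whose columns can be accessed by name. The file name and column layout below are hypothetical, and note that recent NumPy releases deprecate recfromtxt in favor of calling np.genfromtxt directly.

import numpy as np

# Hypothetical comma-separated file "data.csv" with a header row:
#   id,ra,dec,flux
#   1,150.1,2.2,0.01
#   2,150.3,2.4,0.02
data = np.recfromtxt("data.csv", delimiter=",", names=True, dtype=None)
print(data.dtype.names)   # column names taken from the header row
print(data["flux"])       # columns are accessible by field name...
print(data.flux)          # ...or as attributes of the record array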
Example 1: check_n_in_aper
def check_n_in_aper(radius_factor=1, k=100):
    for catfile in find_files(bcdphot_out_path, "*_combined_hdr_catalog.txt"):
        print
        print catfile
        names = open(catfile).readline().split()[1:]
        cat = np.recfromtxt(catfile, names=names)
        xscfile = catfile.replace('combined_hdr_catalog.txt', '2mass_xsc.tbl')
        print xscfile
        names = open(xscfile).read().split('\n')[76].split('|')[1:-1]
        xsc = np.recfromtxt(xscfile, skip_header=80, names=names)
        n_in_aper = []
        coords = radec_to_coords(cat.ra, cat.dec)
        kdt = KDT(coords)
        for i in range(xsc.size):
            r_deg = xsc.r_ext[i] / 3600.
            idx, ds = spherematch2(xsc.ra[i], xsc.dec[i], cat.ra, cat.dec,
                                   kdt, tolerance=radius_factor * r_deg, k=k)
            n_in_aper.append(ds.size)
        for i in [(i, n_in_aper.count(i)) for i in set(n_in_aper)]:
            print i
Developer: john-livingston, Project: bcdphot, Lines: 25, Source file: xsc.py
Example 2: read_data
def read_data(data_files):
    x_files, y_files = data_files
    XX, Y = None, None
    for x_file in x_files:
        print 'Reading..', x_file
        sys.stdout.flush()
        X_subject = np.recfromtxt(x_file, delimiter=',')
        if XX is None:
            XX = X_subject
        else:
            XX = np.concatenate((XX, X_subject))
    XX = XX.reshape((XX.shape[0], 3, config.width, config.height, config.time_slice))
    #print XX.shape
    for y_file in y_files:
        print 'Reading..', y_file
        sys.stdout.flush()
        Y_subject = np.recfromtxt(y_file)
        if Y is None:
            Y = Y_subject
        else:
            Y = np.concatenate((Y, Y_subject))
    return XX, Y
Developer: rodion-zheludkov, Project: kaggle, Lines: 25, Source file: spatial_read.py
Example 3: initialize_database
def initialize_database(self, configuration_dir=None):
    """Read in GMOS filter/grating information, for matching to headers."""
    if configuration_dir is None:
        configuration_dir = default_configuration_dir
    logger.info('Reading Filter information')
    gmos_filters = np.recfromtxt(
        os.path.join(configuration_dir, 'GMOSfilters.dat'),
        names=['name', 'wave_start', 'wave_end', 'fname'])
    for line in gmos_filters:
        new_filter = GMOSFilter(name=line['name'],
                                wavelength_start_value=line['wave_start'],
                                wavelength_start_unit='nm',
                                wavelength_end_value=line['wave_end'],
                                wavelength_end_unit='nm',
                                fname=line['fname'],
                                path=os.path.join(configuration_dir,
                                                  'filter_data'))
        self.session.add(new_filter)
    open_filter = GMOSFilter(name='open', wavelength_start_value=0,
                             wavelength_start_unit='nm',
                             wavelength_end_value=np.inf,
                             wavelength_end_unit='nm',
                             fname=None, path=None)
    self.session.add(open_filter)
    gmos_gratings = np.recfromtxt(
        os.path.join(configuration_dir, 'GMOSgratings.dat'),
        names=['name', 'ruling_density', 'blaze_wave', 'R', 'coverage',
               'wave_start', 'wave_end', 'wave_offset', 'y_offset'])
    logger.info('Reading grating information')
    for line in gmos_gratings:
        new_grating = GMOSGrating(
            name=line['name'],
            ruling_density_value=line['ruling_density'],
            blaze_wavelength_value=line['blaze_wave'],
            R=line['R'],
            coverage_value=line['coverage'],
            wavelength_start_value=line['wave_start'],
            wavelength_end_value=line['wave_end'],
            wavelength_offset_value=line['wave_offset'],
            y_offset_value=line['y_offset'])
        self.session.add(new_grating)
    mirror = GMOSGrating(name='mirror',
                         ruling_density_value=0.0,
                         blaze_wavelength_value=0.0, R=0.0,
                         coverage_value=np.inf,
                         wavelength_start_value=0.0,
                         wavelength_end_value=np.inf,
                         wavelength_offset_value=0.0,
                         y_offset_value=0.0)
    self.session.add(mirror)
    self.session.commit()
Developer: dlakaplan, Project: geminiutil, Lines: 56, Source file: gmos_project.py
Example 4: _get_data
def _get_data():
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    with open(filepath + '/statecrime.csv', 'rb') as f:
        try:
            data = np.recfromtxt(f, delimiter=",", names=True,
                                 dtype=None, encoding='utf-8')
        except TypeError:
            data = np.recfromtxt(f, delimiter=",", names=True, dtype=None)
    return data
Developer: dieterv77, Project: statsmodels, Lines: 10, Source file: data.py
Example 5: handle_special
def handle_special(self, q, p):
    if q[0] == '\\':
        NULL = p.stdout.readline()
        if len(q.split()) > 1:
            names = p.stdout.readline().split(',')
            r = np.recfromtxt(p.stdout, skip_footer=2, delimiter=',', names=names)
        else:
            names = p.stdout.readline().split(',')
            r = np.recfromtxt(p.stdout, skip_footer=1, delimiter=',', names=names)
    else:
        names = p.stdout.readline().split(',')
        r = np.recfromtxt(p.stdout, skip_footer=1, delimiter=',', names=names)
    return r
Developer: ryanmaas, Project: eztables, Lines: 14, Source file: sqlhelpers.py
Example 6: layoutFromTxt
def layoutFromTxt(filename):
    """Read plate layout from text file and return a structured array."""
    if not os.path.isfile(filename):
        msg = "No Plate Layout provided. File not found {}".format(filename)
        raise IOError(msg)
    try:
        rec = np.recfromtxt(filename, dtype=LayoutDtype, skip_header=True)
    except ValueError:
        rec = np.recfromtxt(filename, dtype=LayoutDtype, delimiter="\t",
                            skip_header=True)
    return rec
Developer: CellCognition, Project: cecog, Lines: 14, Source file: hdf.py
Example 7: getList
def getList(file):
    posList = np.recfromtxt(file)
    l = [posList['f0'], posList['f2'], posList['f3']]
    l = np.array(l)
    l = l.T
    names = posList['f4']
    return l, names
Developer: bmazin, Project: SDR, Lines: 7, Source file: leftRightPlot.py
Example 8: _get_data
def _get_data():
    filepath = dirname(abspath(__file__))
    with open(filepath + "/anes96.csv", "rb") as f:
        data = recfromtxt(f, delimiter="\t", names=True, dtype=float)
    logpopul = log(data["popul"] + 0.1)
    data = nprf.append_fields(data, "logpopul", logpopul, usemask=False,
                              asrecarray=True)
    return data
Developer: Inoryy, Project: statsmodels, Lines: 7, Source file: data.py
Example 9: _get_data
def _get_data():
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    with open(filepath + '/engel.csv', 'rb') as f:
        data = np.recfromtxt(f, delimiter=",", names=True, dtype=float)
    return data
Developer: Bonfils-ebu, Project: statsmodels, Lines: 7, Source file: data.py
Example 10: __init__
def __init__(self, path,
             coordkeys=("time time_bounds TFLAG ETFLAG latitude latitude_bounds "
                        "longitude longitude_bounds lat lat_bnds lon lon_bnds "
                        "etam_pressure etai_pressure layer_bounds layer47 layer").split(),
             delimiter=',', names=True, **kwds):
    """
    path      - place to find csv file
    coordkeys - use these keys as dimensions and coordinate variables
    delimiter - use this as delimiter (default = ',')
    names     - see help in recfromtxt (default = True)
    kwds      - np.recfromtxt keywords

    * Note: currently only works when all coordinate variables are 1-d
    """
    kwds['names'] = names
    kwds['delimiter'] = delimiter
    data = np.recfromtxt(path, **kwds)
    dimkeys = [dk for dk in data.dtype.names if dk in coordkeys]
    varkeys = [vk for vk in data.dtype.names if vk not in coordkeys]
    for dk in dimkeys:
        dv = np.unique(data[dk])
        dv.sort()
        self.createDimension(dk, len(dv))
        dvar = self.createVariable(dk, dv.dtype.char, (dk,))
        dvar[:] = dv
    for vk in varkeys:
        vv = data[vk]
        var = self.createVariable(vk, vv.dtype.char, tuple(dimkeys))
        for idx in np.ndindex(var.shape):
            thisidx = np.sum([data[dk] == self.variables[dk][di]
                              for di, dk in zip(idx, dimkeys)], axis=0) == len(dimkeys)
            if thisidx.any():
                var[idx] = vv[thisidx]
Developer: tatawang, Project: pseudonetcdf, Lines: 29, Source file: _delimited.py
Example 11: SNrest
def SNrest():
    path = "../data/restframe/"
    objnames, band, mjd, mag, magerr, stype = [], [], [], [], [], []
    formatcode = ('|S16,'.rstrip('#') + 'f8,' * 6 + '|S16,' + 4 * 'f8,'
                  + '|S16,' * 3 + 'f8,' * 2 + '|S16,' + 'f8,' * 2)
    filenames = os.listdir(path)
    for filename in filenames:
        data = np.recfromtxt(os.path.join(path, filename), usecols=(0, 1, 2, 3, 4),
                             dtype=formatcode, names=True, skip_header=13,
                             case_sensitive='lower', invalid_raise=False)
        name = np.empty(len(data.band), dtype='S20')
        name.fill(filename)
        objnames.append(name)
        data.band = [x.lower() for x in data.band]
        band.append(data.band)
        mjd.append(data.phase)
        mag.append(data.mag)
        magerr.append(data.err)
    objnames = np.fromiter(itertools.chain.from_iterable(objnames), dtype='S20')
    band = np.fromiter(itertools.chain.from_iterable(band), dtype='S16')
    mjd = np.fromiter(itertools.chain.from_iterable(mjd), dtype='float')
    mag = np.fromiter(itertools.chain.from_iterable(mag), dtype='float')
    magerr = np.fromiter(itertools.chain.from_iterable(magerr), dtype='float')
    stype = np.full(len(objnames), 1)
    LC = Lightcurve(objnames, band, mjd, mag, magerr, stype)
    return LC
Developer: tayebzaidi, Project: snova_analysis, Lines: 25, Source file: readin.py
Example 12: readin_aavso
def readin_aavso(filename):
    formatcode = ('f8,' * 4 + '|S16,' * 20).rstrip(',')
    data = np.recfromtxt(filename, delimiter='\t', names=True, dtype=formatcode,
                         autostrip=True, case_sensitive='lower', invalid_raise=False)
    ind = np.where((data.band == 'V') & (data.uncertainty > 0) &
                   (np.isnan(data.uncertainty) == 0) & (data.uncertainty < 0.02))
    banddata = data[ind]
    return banddata
Developer: tayebzaidi, Project: snova_analysis, Lines: 7, Source file: readin.py
Example 13: read
def read(filename):
    """
    Read a table (.tbl) file.

    Parameters:

    * filename  Name of table file to read

    Returns: (comments, rec)

    * comments  List of comments (strings terminated with newline)
    * rec       Records array with named fields.
    """
    # pull out the comment lines from the file (start with #)
    f = open(filename, 'r')
    comments = [l for l in f if l[0] == '#']
    f.close()
    # find the line beginning with # NAMES and parse out the column names
    nl = [i for i, l in enumerate(comments) if l[:7] == "# NAMES"]
    if len(nl) != 1:
        raise IOError("%s does not have a # NAMES line" % (filename))
    dtd = {'names': comments.pop(nl[0])[7:].split()}
    # find the line beginning with # FORMATS and parse out the column formats
    dl = [i for i, l in enumerate(comments) if l[:9] == "# FORMATS"]
    if len(dl) != 1:
        raise IOError("%s does not have a # FORMATS line" % (filename))
    dtd['formats'] = comments.pop(dl[0])[9:].split()
    # return the data as a records array
    return comments, np.atleast_1d(np.recfromtxt(filename, dtype=dtd))
Developer: rpbarnes, Project: nmrglue, Lines: 33, Source file: table.py
Example 14: __init__
def __init__(self, pathlike):
    if isinstance(pathlike, str):
        paths = glob(pathlike)
    else:
        paths = pathlike
    paths.sort()
    pfile = self
    pfile._vars = dict()
    files = [open(path) for path in paths]
    datas = [np.recfromtxt(f, names=True, case_sensitive=True) for f in files]
    data = np.ma.concatenate(datas)
    desired_unit = dict(O3='ppb', GMAO_TEMP='K', PRESS='hPa', TEMP='K')
    unit_factor = {'ppt': 1e12, 'ppb': 1e9}
    pfile.createDimension('time', data.shape[0])
    for ki, key in enumerate(data.dtype.names):
        typecode = data[key].dtype.char
        if typecode not in ('c', 'S'):
            unit = desired_unit.get(key, 'ppt')
            factor = unit_factor.get(unit, 1)
            values = np.ma.masked_values(data[key], -1000) * factor
        else:
            unit = 'unknown'
            values = data[key]
        pfile.createVariable(key, typecode, dimensions=('time',), units=unit,
                             values=values)
Developer: tatawang, Project: pseudonetcdf, Lines: 25, Source file: _planelog.py
Example 15: load
def load():
    """
    Load the star98 data and return a Dataset class instance.

    Returns
    -------
    Load instance:
        a class of the data with array attributes 'endog' and 'exog'
    """
    filepath = dirname(abspath(__file__))
    ##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
    names = ["NABOVE", "NBELOW", "LOWINC", "PERASIAN", "PERBLACK", "PERHISP",
             "PERMINTE", "AVYRSEXP", "AVSALK", "PERSPENK", "PTRATIO", "PCTAF",
             "PCTCHRT", "PCTYRRND", "PERMINTE_AVYRSEXP", "PERMINTE_AVSAL",
             "AVYRSEXP_AVSAL", "PERSPEN_PTRATIO", "PERSPEN_PCTAF", "PTRATIO_PCTAF",
             "PERMINTE_AVYRSEXP_AVSAL", "PERSPEN_PTRATIO_PCTAF"]
    data = recfromtxt(open(filepath + '/star98.csv', "rb"), delimiter=",",
                      names=names, skip_header=1, dtype=float)
    names = list(data.dtype.names)
    # endog = (successes, failures)
    NABOVE = array(data[names[1]]).astype(float)  # successes
    NBELOW = array(data[names[0]]).astype(float) \
        - array(data[names[1]]).astype(float)  # now it's failures
    endog = column_stack((NABOVE, NBELOW))
    endog_name = names[:2]
    exog = column_stack(data[i] for i in names[2:]).astype(float)
    exog_name = names[2:]
    dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
                      endog_name=endog_name, exog_name=exog_name)
    return dataset
Developer: chrisjordansquire, Project: statsmodels, Lines: 30, Source file: data.py
Example 16: get_model_conditions
def get_model_conditions(self, model):
    """Return a description of all conditions for a given model.

    Parameters
    ----------
    model : int
        Model identifier.

    Returns
    -------
    list(dict)
        A list of the model's conditions is returned, where each item is a
        dictionary with keys ``id`` (numerical condition ID), ``task``
        (numerical task ID for the task containing this condition), and
        ``name`` (the literal condition name). This information is
        returned in a list (instead of a dictionary), because the openfmri
        specification of model conditions contains no unique condition
        identifier. Conditions are only uniquely described by the
        combination of task and condition ID.
    """
    def_fname = _opj(self._basedir, 'models', _model2id(model),
                     'condition_key.txt')
    def_data = np.recfromtxt(def_fname)
    conds = []
    # load model meta data
    for dd in def_data:
        cond = {}
        cond['task'] = _id2int(dd[0])
        cond['id'] = _id2int(dd[1])
        cond['name'] = dd[2]
        conds.append(cond)
    return conds
Developer: Marphy, Project: PyMVPA, Lines: 32, Source file: openfmri.py
Example 17: get_region_corners
def get_region_corners(catalog):
    cat = np.recfromtxt(catalog, names=open(catalog).readline().split()[1:])
    c1 = (cat.ra.min(), cat.dec[cat.ra == cat.ra.min()][0])
    c2 = (cat.ra[cat.dec == cat.dec.max()][0], cat.dec.max())
    c3 = (cat.ra.max(), cat.dec[cat.ra == cat.ra.max()][0])
    c4 = (cat.ra[cat.dec == cat.dec.min()][0], cat.dec.min())
    return reduce(lambda x, y: x + y, [c1, c2, c3, c4])
Developer: john-livingston, Project: bcdphot, Lines: 7, Source file: xsc.py
Example 18: _initialize_gmos_filters
def _initialize_gmos_filters(self, configuration_dir):
    """
    Read in GMOS filter/grating information, for matching to headers.
    """
    logger.info('Reading Filter information')
    gmos_filters = np.recfromtxt(
        os.path.join(configuration_dir, 'GMOSfilters.dat'),
        names=['name', 'wave_start', 'wave_end', 'fname'])
    for line in gmos_filters:
        new_filter = GMOSFilter(name=line['name'],
                                wavelength_start_value=line['wave_start'],
                                wavelength_start_unit='nm',
                                wavelength_end_value=line['wave_end'],
                                wavelength_end_unit='nm',
                                fname=line['fname'],
                                path=os.path.join('gmos', 'filter_data'))
        self.session.add(new_filter)
    open_filter = GMOSFilter(name='open', wavelength_start_value=0,
                             wavelength_start_unit='nm',
                             wavelength_end_value=np.inf,
                             wavelength_end_unit='nm',
                             fname=None, path=None)
    self.session.add(open_filter)
Developer: ThierrySN, Project: geminiutil, Lines: 25, Source file: gmos_project.py
Example 19: sigma_clip_non_hdr
def sigma_clip_non_hdr(filepath):
    """
    Eliminates sources with SNR less than the 'sigma_clip' parameter from the
    setup file. Also checks for negative flux sources which may remain after
    this step (can happen when uncertainty values are also negative).
    """
    work_dir = "/".join(filepath.split("/")[:-1])
    meta = json.load(open(work_dir + "/metadata.json"))
    names = open(filepath).readline().split()[1:]
    data = np.recfromtxt(filepath, names=names)
    # get rid of low SNR sources
    snr = data["flux"] / data["unc"]
    good = snr >= meta["sigma_clip"]
    data = data[good]
    # get rid of any remaining negative flux sources
    good = data["flux"] > 0
    data = data[good]
    # get rid of id column
    data = data[["ra", "dec", "flux", "unc", "n_obs"]]
    # write to disk
    fmt = ["%0.8f"] * 2 + ["%.4e"] * 2 + ["%i"]
    out_path = filepath.replace(".txt", "_sigclip.txt")
    header = " ".join(names[1:])
    np.savetxt(out_path, data, fmt=fmt, header=header, comments="")
    print("created file: " + out_path)
Developer: john-livingston, Project: bcdphot, Lines: 32, Source file: phot.py
Example 20: query_region_db
def query_region_db(ra_center, dec_center, radius, region_dir):
    """
    Returns a list of files for the regions that fall within the provided cone.

    :param ra_center: R.A. of the center of the cone
    :param dec_center: Dec. of the center of the cone
    :param radius: Radius of the cone (in arcmin)
    :param region_dir: Path of the directory containing the database file and the region files
    :return: list of region files
    """
    database_file = os.path.join(region_dir, 'region_database.txt')
    data = np.recfromtxt(database_file, names=True)
    # Compute the angular distance between all regions and the center of the cone
    distances = angular_distance(float(ra_center), float(dec_center),
                                 data["RA"], data["DEC"], unit='arcmin')
    # Select all regions within the cone
    idx = (distances <= float(radius))
    # Return the corresponding region files
    data_list = []
    for i in data['REGION_FILE'][idx]:
        data_list.append(os.path.join(region_dir, i))
    return data_list
Developer: nitikayad96, Project: chandra_suli, Lines: 31, Source file: query_region_db.py
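Taken together, the examples above exercise most of the keyword arguments that recfromtxt forwards to np.genfromtxt. The following hedged recap collects them in one call; the file name and column layout are hypothetical, and the values are illustrative rather than recommended defaults.

import numpy as np

# Hypothetical tab-separated file with 2 leading comment lines, a header row,
# and a trailing summary line.
rec = np.recfromtxt(
    "table.txt",
    delimiter="\t",          # field separator (None splits on any whitespace)
    names=True,              # read field names from the first non-skipped row
    dtype=None,              # let NumPy infer a dtype per column
    skip_header=2,           # drop leading lines before the header
    skip_footer=1,           # drop trailing lines
    usecols=(0, 1, 2),       # keep only selected columns
    autostrip=True,          # strip whitespace around each field
    case_sensitive="lower",  # normalize field-name case
    invalid_raise=False,     # skip malformed rows instead of raising
)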
Note: The numpy.recfromtxt examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not repost without permission.