This article collects typical usage examples of the Python function numpy.lib.recfunctions.stack_arrays. If you are wondering exactly what stack_arrays does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following presents 20 code examples of stack_arrays, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
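Before the project examples, here is a minimal orientation sketch (not taken from any of the projects below; the arrays a and b are purely illustrative). It shows the core behavior that the examples rely on: stack_arrays concatenates structured arrays field by field, builds the union of their dtypes, and by default returns a masked array in which fields missing from a source array are masked.

import numpy as np
from numpy.lib.recfunctions import stack_arrays

# Two structured arrays whose fields only partially overlap.
a = np.array([(1, 10.0), (2, 20.0)], dtype=[('A', int), ('B', float)])
b = np.array([(3, b'x')], dtype=[('A', int), ('C', '|S3')])

# Default call: the result is a masked array over the union of fields
# ('A', 'B', 'C'); entries that a source array lacks are masked.
stacked = stack_arrays((a, b))
print(stacked.dtype.names)   # ('A', 'B', 'C')

# usemask=False together with asrecarray=True instead returns a plain
# recarray with fill values, the form used by many of the examples below.
flat = stack_arrays((a, b), usemask=False, asrecarray=True)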
Example 1: test_unnamed_and_named_fields
def test_unnamed_and_named_fields(self):
    # Test combination of arrays w/ & w/o named fields
    (_, x, _, z) = self.data

    test = stack_arrays((x, z))
    control = ma.array(
        [(1, -1, -1), (2, -1, -1), (-1, "A", 1), (-1, "B", 2)],
        mask=[(0, 1, 1), (0, 1, 1), (1, 0, 0), (1, 0, 0)],
        dtype=[("f0", int), ("A", "|S3"), ("B", float)],
    )
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)

    test = stack_arrays((z, x))
    control = ma.array(
        [("A", 1, -1), ("B", 2, -1), (-1, -1, 1), (-1, -1, 2)],
        mask=[(0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)],
        dtype=[("A", "|S3"), ("B", float), ("f2", int)],
    )
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)

    test = stack_arrays((z, z, x))
    control = ma.array(
        [("A", 1, -1), ("B", 2, -1), ("A", 1, -1), ("B", 2, -1), (-1, -1, 1), (-1, -1, 2)],
        mask=[(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (1, 1, 0), (1, 1, 0)],
        dtype=[("A", "|S3"), ("B", float), ("f2", int)],
    )
    assert_equal(test, control)
Author: haadkhan, Project: cerebri, Lines: 29, Source: test_recfunctions.py
Example 2: load_data
def load_data(data_path, branch_names, dataset_names, dataset_ranges=[]):
    """ Import data from several ROOT files to a recarray """
    l_raw_vars = []
    l_weight = []
    l_origin = []
    for i, d_name in enumerate(dataset_names):
        f_name = "{}{}.root".format(data_path, d_name)
        if "BTagCSV" in d_name:
            d_weight = 1.
        else:
            d_weight = mc_samples[d_name]["xs"]/mc_samples[d_name]["gen_events"]
        if len(dataset_ranges) == len(dataset_names):
            l_raw_vars.append(root2array(f_name, "tree", branch_names,
                                         stop=dataset_ranges[i]))
        else:
            l_raw_vars.append(root2array(f_name, "tree", branch_names))
        n_ev = l_raw_vars[-1].shape[0]
        l_weight.append(np.full((n_ev), d_weight, 'f8'))
        l_origin.append(np.full((n_ev), d_name, 'a20'))
    raw_vars = stack_arrays(l_raw_vars, asrecarray=True, usemask=False)
    weight = stack_arrays(l_weight, asrecarray=True, usemask=False)
    origin = stack_arrays(l_origin, asrecarray=True, usemask=False)
    raw_vars = append_fields(raw_vars, ["origin", "weight"], [origin, weight],
                             asrecarray=True, usemask=False)
    return raw_vars
Author: pablodecm, Project: hh2bbbb_mva, Lines: 25, Source: load_data.py
Example 3: test_matching_named_fields
def test_matching_named_fields(self):
    # Test combination of arrays w/ matching field names
    (_, x, _, z) = self.data
    zz = np.array(
        [("a", 10.0, 100.0), ("b", 20.0, 200.0), ("c", 30.0, 300.0)],
        dtype=[("A", "|S3"), ("B", float), ("C", float)],
    )

    test = stack_arrays((z, zz))
    control = ma.array(
        [("A", 1, -1), ("B", 2, -1), ("a", 10.0, 100.0), ("b", 20.0, 200.0), ("c", 30.0, 300.0)],
        dtype=[("A", "|S3"), ("B", float), ("C", float)],
        mask=[(0, 0, 1), (0, 0, 1), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
    )
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)

    test = stack_arrays((z, zz, x))
    ndtype = [("A", "|S3"), ("B", float), ("C", float), ("f3", int)]
    control = ma.array(
        [
            ("A", 1, -1, -1),
            ("B", 2, -1, -1),
            ("a", 10.0, 100.0, -1),
            ("b", 20.0, 200.0, -1),
            ("c", 30.0, 300.0, -1),
            (-1, -1, -1, 1),
            (-1, -1, -1, 2),
        ],
        dtype=ndtype,
        mask=[(0, 0, 1, 1), (0, 0, 1, 1), (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), (1, 1, 1, 0), (1, 1, 1, 0)],
    )
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
Author: haadkhan, Project: cerebri, Lines: 33, Source: test_recfunctions.py
Example 4: test_matching_named_fields
def test_matching_named_fields(self):
    # Test combination of arrays w/ matching field names
    (_, x, _, z) = self.data
    zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                  dtype=[('A', '|S3'), ('B', float), ('C', float)])

    test = stack_arrays((z, zz))
    control = ma.array([('A', 1, -1), ('B', 2, -1),
                        ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                       dtype=[('A', '|S3'), ('B', float), ('C', float)],
                       mask=[(0, 0, 1), (0, 0, 1),
                             (0, 0, 0), (0, 0, 0), (0, 0, 0)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)

    test = stack_arrays((z, zz, x))
    ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
    control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                        ('a', 10., 100., -1), ('b', 20., 200., -1),
                        ('c', 30., 300., -1),
                        (-1, -1, -1, 1), (-1, -1, -1, 2)],
                       dtype=ndtype,
                       mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                             (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                             (1, 1, 1, 0), (1, 1, 1, 0)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
Author: vbasu, Project: numpy, Lines: 27, Source: test_recfunctions.py
Example 5: test_unnamed_and_named_fields
def test_unnamed_and_named_fields(self):
    # Test combination of arrays w/ & w/o named fields
    (_, x, _, z) = self.data

    test = stack_arrays((x, z))
    control = ma.array([(1, -1, -1), (2, -1, -1),
                        (-1, 'A', 1), (-1, 'B', 2)],
                       mask=[(0, 1, 1), (0, 1, 1),
                             (1, 0, 0), (1, 0, 0)],
                       dtype=[('f0', int), ('A', '|S3'), ('B', float)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)

    test = stack_arrays((z, x))
    control = ma.array([('A', 1, -1), ('B', 2, -1),
                        (-1, -1, 1), (-1, -1, 2), ],
                       mask=[(0, 0, 1), (0, 0, 1),
                             (1, 1, 0), (1, 1, 0)],
                       dtype=[('A', '|S3'), ('B', float), ('f2', int)])
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)

    test = stack_arrays((z, z, x))
    control = ma.array([('A', 1, -1), ('B', 2, -1),
                        ('A', 1, -1), ('B', 2, -1),
                        (-1, -1, 1), (-1, -1, 2), ],
                       mask=[(0, 0, 1), (0, 0, 1),
                             (0, 0, 1), (0, 0, 1),
                             (1, 1, 0), (1, 1, 0)],
                       dtype=[('A', '|S3'), ('B', float), ('f2', int)])
    assert_equal(test, control)
Author: vbasu, Project: numpy, Lines: 31, Source: test_recfunctions.py
Example 6: analyze_chamber_data
def analyze_chamber_data(self, raw_chamber_data):
    ethanol_data = raw_chamber_data[raw_chamber_data['status'] == 'Ethanol']
    analyzed_ethanol_data = self.analyze_data(ethanol_data)
    status_array = numpy.array(['Ethanol']*len(analyzed_ethanol_data), dtype='|S25')
    analyzed_chamber_data = recfunctions.append_fields(analyzed_ethanol_data,
                                                       'status',
                                                       status_array,
                                                       dtypes='|S25',
                                                       usemask=False)

    air_before_data = raw_chamber_data[raw_chamber_data['status'] == 'AirBefore']
    if air_before_data.size != 0:
        analyzed_air_before_data = self.analyze_data(air_before_data)
        status_array = numpy.array(['AirBefore']*len(analyzed_air_before_data), dtype='|S25')
        analyzed_air_before_data = recfunctions.append_fields(analyzed_air_before_data,
                                                              'status',
                                                              status_array,
                                                              dtypes='|S25',
                                                              usemask=False)
        analyzed_chamber_data = recfunctions.stack_arrays((analyzed_air_before_data, analyzed_chamber_data), usemask=False)

    air_after_data = raw_chamber_data[raw_chamber_data['status'] == 'AirAfter']
    if air_after_data.size != 0:
        analyzed_air_after_data = self.analyze_data(air_after_data)
        status_array = numpy.array(['AirAfter']*len(analyzed_air_after_data), dtype='|S25')
        analyzed_air_after_data = recfunctions.append_fields(analyzed_air_after_data,
                                                             'status',
                                                             status_array,
                                                             dtypes='|S25',
                                                             usemask=False)
        analyzed_chamber_data = recfunctions.stack_arrays((analyzed_chamber_data, analyzed_air_after_data), usemask=False)
    return analyzed_chamber_data
Author: janelia-idf, Project: fly-alcohol-assay, Lines: 34, Source: tracking_data_processor.py
Example 7: combine_datasets
def combine_datasets(dataset_list):
    """
    Definition:
    -----------
        Function that combines a list of datasets into a single dataset
        Each of the inputs (and the output) should have the form {"X":data, "y":recarray, "w":recarray}
        This allows us to combine datasets from different input files

    Args:
    -----
        dataset_list = array of dictionaries of the form {"X":data, "y":recarray, "w":recarray}

    Returns:
    --------
        dictionary of the form {"X":data, "y":recarray, "w":recarray} containing all input information
    """
    # -- y and w are 1D arrays which are simple to combine
    y_combined = stack_arrays([dataset["y"] for dataset in dataset_list], asrecarray=True, usemask=False)
    w_combined = stack_arrays([dataset["w"] for dataset in dataset_list], asrecarray=True, usemask=False)
    # print dataset_list[0]["X"].dtype

    # -- Construct the desired output shape using the known size of y_combined
    #    Necessary shape is (N_elements, N_categories)
    X_shape = (y_combined.shape[0], dataset_list[0]["X"].shape[1])

    # -- Stack X arrays and then reshape
    X_combined = stack_arrays([dataset["X"] for dataset in dataset_list], asrecarray=True, usemask=False)
    X_combined.resize(X_shape)

    # -- Recombine into a dictionary and return
    return {"X": X_combined, "y": y_combined, "w": w_combined}
Author: jemrobinson, Project: bbyy_jet_classifier, Lines: 32, Source: process_data.py
Example 8: root2panda
def root2panda(files_path, tree_name, mask=False, **kwargs):
    '''
    Args:
    -----
        files_path: a string like './data/*.root', for example
        tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
                   file that we want to open
        kwargs: arguments taken by root2rec, such as branches to consider, etc

    Returns:
    --------
        output_panda: a panda dataframe like allbkg_df in which all the info from the root file will be stored

    Note:
    -----
        if you are working with .root files that contain different branches, you might have to mask your data
        in that case, return pd.DataFrame(ss.data)
    '''
    files = glob.glob(files_path)

    # -- check whether a name was passed for the tree_name --> for root files with only one tree and no folders,
    # -- you do not need to specify any name (I believe)
    if (tree_name == ''):
        ss = stack_arrays([root2rec(fpath, **kwargs) for fpath in files])
    else:
        ss = stack_arrays([root2rec(fpath, tree_name, **kwargs) for fpath in files])

    if (mask):
        return pd.DataFrame(ss.data)
    else:
        try:
            return pd.DataFrame(ss)
        except Exception, e:
            return pd.DataFrame(ss.data)
Author: ChunyangDing, Project: IPNN, Lines: 34, Source: pandautils.py
Example 9: get_raw_chamber_data
def get_raw_chamber_data(self, filtered_data):
    # chamber_dtype = numpy.dtype([('time_secs', '<u4'),
    #                              ('time_nsecs', '<u4'),
    #                              ('time_rel', '<f4'),
    #                              ('status', '|S25'),
    #                              ('tunnel', '<u2'),
    #                              ('fly_x', '<f4'),
    #                              ('fly_y', '<f4'),
    #                              ('fly_angle', '<f4'),
    #                              ])
    header = list(FILE_TOOLS.chamber_dtype.names)
    tracking_chamber_data = filtered_data[filtered_data['status'] != 'Walk To End']
    tracking_chamber_data = tracking_chamber_data[header]
    tracking_chamber_data = tracking_chamber_data.astype(FILE_TOOLS.chamber_dtype)
    tracking_chamber_data['tunnel'] = tracking_chamber_data['tunnel']+1

    indicies = tracking_chamber_data['status'] == 'End Chamber Ethanol'
    raw_chamber_data_ethanol = tracking_chamber_data[indicies]
    raw_chamber_data_ethanol = recfunctions.drop_fields(raw_chamber_data_ethanol,
                                                        'status',
                                                        usemask=False)
    status_array = numpy.array(['Ethanol']*len(raw_chamber_data_ethanol), dtype='|S25')
    raw_chamber_data_ethanol = recfunctions.append_fields(raw_chamber_data_ethanol,
                                                          'status',
                                                          status_array,
                                                          dtypes='|S25',
                                                          usemask=False)
    raw_chamber_data = raw_chamber_data_ethanol
    ethanol_start_time = raw_chamber_data_ethanol['time_rel'][0]

    indicies = tracking_chamber_data['status'] == 'End Chamber Air'
    indicies &= tracking_chamber_data['time_rel'] < ethanol_start_time
    raw_chamber_data_air_before = tracking_chamber_data[indicies]
    raw_chamber_data_air_before = recfunctions.drop_fields(raw_chamber_data_air_before,
                                                           'status',
                                                           usemask=False)
    status_array = numpy.array(['AirBefore']*len(raw_chamber_data_air_before), dtype='|S25')
    raw_chamber_data_air_before = recfunctions.append_fields(raw_chamber_data_air_before,
                                                             'status',
                                                             status_array,
                                                             dtypes='|S25',
                                                             usemask=False)
    raw_chamber_data = recfunctions.stack_arrays((raw_chamber_data_air_before, raw_chamber_data), usemask=False)

    indicies = tracking_chamber_data['status'] == 'End Chamber Air'
    indicies &= tracking_chamber_data['time_rel'] > ethanol_start_time
    raw_chamber_data_air_after = tracking_chamber_data[indicies]
    raw_chamber_data_air_after = recfunctions.drop_fields(raw_chamber_data_air_after,
                                                          'status',
                                                          usemask=False)
    status_array = numpy.array(['AirAfter']*len(raw_chamber_data_air_after), dtype='|S25')
    raw_chamber_data_air_after = recfunctions.append_fields(raw_chamber_data_air_after,
                                                            'status',
                                                            status_array,
                                                            dtypes='|S25',
                                                            usemask=False)
    raw_chamber_data = recfunctions.stack_arrays((raw_chamber_data, raw_chamber_data_air_after), usemask=False)
    return raw_chamber_data
Author: janelia-idf, Project: fly-alcohol-assay, Lines: 58, Source: tracking_data_processor.py
Example 10: test_solo
def test_solo(self):
    # Test stack_arrays on single arrays
    (_, x, _, _) = self.data

    test = stack_arrays((x,))
    assert_equal(test, x)
    self.assertTrue(test is x)

    test = stack_arrays(x)
    assert_equal(test, x)
    self.assertTrue(test is x)
Author: vbasu, Project: numpy, Lines: 10, Source: test_recfunctions.py
Example 11: test_autoconversion
def test_autoconversion(self):
    # Tests autoconversion
    adtype = [('A', int), ('B', bool), ('C', float)]
    a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
    bdtype = [('A', int), ('B', float), ('C', float)]
    b = ma.array([(4, 5, 6)], dtype=bdtype)
    control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                       dtype=bdtype)
    test = stack_arrays((a, b), autoconvert=True)
    assert_equal(test, control)
    assert_equal(test.mask, control.mask)
    with assert_raises(TypeError):
        stack_arrays((a, b), autoconvert=False)
Author: ales-erjavec, Project: numpy, Lines: 13, Source: test_recfunctions.py
Example 12: test_unnamed_fields
def test_unnamed_fields(self):
    # Tests combinations of arrays w/o named fields
    (_, x, y, _) = self.data

    test = stack_arrays((x, x), usemask=False)
    control = np.array([1, 2, 1, 2])
    assert_equal(test, control)

    test = stack_arrays((x, y), usemask=False)
    control = np.array([1, 2, 10, 20, 30])
    assert_equal(test, control)

    test = stack_arrays((y, x), usemask=False)
    control = np.array([10, 20, 30, 1, 2])
    assert_equal(test, control)
Author: vbasu, Project: numpy, Lines: 15, Source: test_recfunctions.py
Example 13: summarize_data
def summarize_data(self, analyzed_data):
    initialized = False
    tunnels = set(analyzed_data['tunnel'])
    for tunnel in tunnels:
        tunnel_data_analyzed = analyzed_data[analyzed_data['tunnel'] == tunnel]
        tunnel_array = numpy.ones(1, dtype=numpy.uint16)*tunnel
        tunnel_array.dtype = numpy.dtype([('tunnel', '<u2')])
        tunnel_data_summarized = tunnel_array
        delta_time = tunnel_data_analyzed['delta_time']
        total_time = delta_time.sum()
        distance = tunnel_data_analyzed['distance']
        total_distance = distance.sum()
        velocity = tunnel_data_analyzed['velocity']
        mean_velocity = velocity.mean()
        angular_velocity = tunnel_data_analyzed['angular_velocity']
        mean_angular_velocity = angular_velocity.mean()
        names = ['total_time', 'total_distance', 'mean_velocity', 'mean_angular_velocity']
        tunnel_data_seq = [total_time, total_distance, mean_velocity, mean_angular_velocity]
        tunnel_data_summarized = recfunctions.append_fields(tunnel_data_summarized,
                                                            names,
                                                            tunnel_data_seq,
                                                            dtypes=numpy.float32,
                                                            usemask=False)
        if initialized:
            summarized_data = recfunctions.stack_arrays((summarized_data, tunnel_data_summarized), usemask=False)
        else:
            summarized_data = tunnel_data_summarized
            initialized = True
    return summarized_data
Author: janelia-idf, Project: fly-alcohol-assay, Lines: 33, Source: tracking_data_processor.py
Example 14: resampleNSEMdataAtFreq
def resampleNSEMdataAtFreq(NSEMdata, freqs):
    """
    Function to resample NSEMdata at set of frequencies
    """
    # Make a rec array
    NSEMrec = NSEMdata.toRecArray().data
    # Find unique locations
    uniLoc = np.unique(NSEMrec[['x', 'y', 'z']])
    uniFreq = NSEMdata.survey.freqs
    # Get the comps
    dNames = NSEMrec.dtype

    # Loop over all the locations and interpolate
    for loc in uniLoc:
        # Find the index of the station
        ind = np.sqrt(np.sum((rec_to_ndarr(NSEMrec[['x', 'y', 'z']]) - rec_to_ndarr(loc))**2, axis=1)) < 1.  # Find dist of 1 m accuracy
        # Make a temporary recArray and interpolate all the components
        tArrRec = np.concatenate((simpeg.mkvc(freqs, 2), np.ones((len(freqs), 1))*rec_to_ndarr(loc), np.nan*np.ones((len(freqs), 12))), axis=1).view(dNames)
        for comp in ['zxxr', 'zxxi', 'zxyr', 'zxyi', 'zyxr', 'zyxi', 'zyyr', 'zyyi', 'tzxr', 'tzxi', 'tzyr', 'tzyi']:
            int1d = sciint.interp1d(NSEMrec[ind]['freq'], NSEMrec[ind][comp], bounds_error=False)
            tArrRec[comp] = simpeg.mkvc(int1d(freqs), 2)
        # Join together
        try:
            outRecArr = recFunc.stack_arrays((outRecArr, tArrRec))
        except NameError:
            outRecArr = tArrRec

    # Make the NSEMdata and return
    return Data.fromRecArray(outRecArr)
Author: jsc1129, Project: simpeg, Lines: 33, Source: dataUtils.py
Example 15: main
def main(iso_filename, XCov_filename, interpolate=True, overwrite=False):
    # FOR PARSEC ISOCHRONE (reversing it for interpolation)
    iso = ascii.read(iso_filename, header_start=13)[:114][::-1]
    iso = nprf.stack_arrays((iso[:25], iso[27:]), usemask=False)  # because of stupid red clump turnaround

    # FOR DARTMOUTH ISOCHRONE (reversing it for interpolation)
    # iso = ascii.read(iso_filename, header_start=8)[::-1]

    # output hdf5 file
    with h5py.File(XCov_filename, mode='r+') as f:
        # feature and covariance matrices for all stars
        X = ps1_isoc_to_XCov(iso, W=mixing_matrix, interpolate=interpolate)

        if 'isochrone' in f and overwrite:
            f.__delitem__('isochrone')
            logger.debug("Overwriting isochrone data")

        if 'isochrone' not in f:
            g = f.create_group('isochrone')
        else:
            g = f['isochrone']

        if 'X' not in f['isochrone']:
            g.create_dataset('X', X.shape, dtype='f', data=X)

        f.flush()
        logger.debug("Saved isochrone to {}".format(XCov_filename))
Author: adrn, Project: globber, Lines: 29, Source: isochrone-to-xcov.py
Example 16: from_rows
def from_rows(cls, sample_id, row_data, extra_keys=()):
    dtype = list(cls._dtype)
    if extra_keys:
        blank_kwargs = {k: [] for k in extra_keys}
        new_cna = cls(sample_id, [], [], [], [], [], **blank_kwargs)
        if 'gc' in extra_keys:
            dtype.append(cls._dtype_gc)
        if 'rmask' in extra_keys:
            dtype.append(cls._dtype_rmask)
        if 'spread' in extra_keys:
            dtype.append(cls._dtype_spread)
        if 'weight' in extra_keys:
            dtype.append(cls._dtype_weight)
        if 'probes' in extra_keys:
            dtype.append(cls._dtype_probes)
    else:
        new_cna = cls(sample_id, [], [], [], [], [])

    if len(row_data) == 1:
        row_data = [tuple(row_data[0])]
    try:
        # Rows might be plain tuples
        new_array = numpy.asarray(row_data, dtype=dtype)
    except ValueError:
        # "Setting void-array with object members using buffer"
        # All rows are numpy.ndarray
        new_array = rfn.stack_arrays(row_data, usemask=False,
                                     asrecarray=True, autoconvert=False)
    # print(new_array.dtype)
    new_cna.data = new_array
    return new_cna
Author: roryk, Project: cnvkit, Lines: 32, Source: cnarray.py
Example 17: root2panda
def root2panda(file_paths, tree_name, **kwargs):
    '''
    Args:
    -----
        files_path: a string like './data/*.root', for example
        tree_name: a string like 'Collection_Tree' corresponding to the name of the folder inside the root
                   file that we want to open
        kwargs: arguments taken by root2rec, such as branches to consider, etc

    Returns:
    --------
        output_panda: a panda dataframe like allbkg_df in which all the info from the root file will be stored

    Note:
    -----
        if you are working with .root files that contain different branches, you might have to mask your data
        in that case, return pd.DataFrame(ss.data)
    '''
    if isinstance(file_paths, basestring):
        files = glob.glob(file_paths)
    else:
        files = [matched_f for f in file_paths for matched_f in glob.glob(f)]

    ss = stack_arrays([root2rec(fpath, tree_name, **kwargs) for fpath in files])
    try:
        return pd.DataFrame(ss)
    except Exception:
        return pd.DataFrame(ss.data)
Author: mickypaganini, Project: YaleATLAS, Lines: 27, Source: pandautils.py
Example 18: computeDataPointCounts
def computeDataPointCounts():
    dataSet = getDataSet('20150129', '20150331', '../../Data/Autopassdata/Singledatefiles/Dataset/raw/', 'dataset')
    dataPointCounts = np.zeros((288, 62))
    firstDate = dataSet['dateAndTime'][1]
    firstDateStr = firstDate.strftime('%Y%m%d')
    date_list = [firstDate.date() + timedelta(days=x) for x in range(0, 62)]
    interval_list = [(datetime(2015, 1, 1, 0, 0, 0) + timedelta(minutes=x)).time() for x in range(0, 1440, 5)]
    interval_list.append(datetime(2015, 1, 1, 23, 59, 59).time())
    for i in range(0, len(date_list)):
        endDate = date_list[i]
        print(endDate)
        endDateStr = endDate.strftime('%Y%m%d')
        dataDateSubSet = []
        if i == 0:
            dataDateSubSet = getRowsWithinDateRange(firstDateStr, endDateStr, dataSet)
        else:
            dataDateSubSet = getRowsWithinDateRange(endDateStr, endDateStr, dataSet)
        for j in range(0, len(interval_list)-1):
            i1 = interval_list[j]
            i2 = interval_list[j+1]
            dataDateIntervalSubSet = getRowsWithinTimeIntervalRange(i1, i2, dataDateSubSet)
            if i == 0:
                dataPointCounts[j][i] = len(dataDateIntervalSubSet)
            else:
                dataPointCounts[j][i] = len(dataDateIntervalSubSet)
        print(dataPointCounts[:, i])
    dataPointCounts = rfn.stack_arrays(dataPointCounts, usemask=False)
    np.savetxt("dataPointCountsIndividualDates.csv", dataPointCounts, fmt="%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f;%f")
Author: ajanigyasi, Project: master, Lines: 28, Source: utils.py
Example 19: veto_all
def veto_all(auxiliary, segmentlist):
    """Remove events from all auxiliary channel tables based on a segmentlist

    Parameters
    ----------
    auxiliary : `dict` of `numpy.recarray`
        a `dict` of event arrays to veto
    segmentlist : `~glue.segments.segmentlist`
        the list of veto segments to use

    Returns
    -------
    survivors : `dict` of `numpy.recarray`
        a dict of the reduced arrays of events for each input channel

    See Also
    --------
    core.veto
        for details on the veto algorithm itself
    """
    channels = auxiliary.keys()
    rec = stack_arrays(auxiliary.values(), usemask=False,
                       asrecarray=True, autoconvert=True)
    keep, _ = veto(rec, segmentlist)
    return dict((c, keep[keep['channel'] == c]) for c in channels)
Author: andrew-lundgren, Project: hveto, Lines: 25, Source: core.py
Example 20: test_subdtype
def test_subdtype(self):
    z = np.array([
        ('A', 1), ('B', 2)
    ], dtype=[('A', '|S3'), ('B', float, (1,))])
    zz = np.array([
        ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
    ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])

    res = stack_arrays((z, zz))
    expected = ma.array(
        data=[
            (b'A', [1.0], 0),
            (b'B', [2.0], 0),
            (b'a', [10.0], 100.0),
            (b'b', [20.0], 200.0),
            (b'c', [30.0], 300.0)],
        mask=[
            (False, [False], True),
            (False, [False], True),
            (False, [False], False),
            (False, [False], False),
            (False, [False], False)
        ],
        dtype=zz.dtype
    )
    assert_equal(res.dtype, expected.dtype)
    assert_equal(res, expected)
    assert_equal(res.mask, expected.mask)
Author: vbasu, Project: numpy, Lines: 28, Source: test_recfunctions.py
Note: The numpy.lib.recfunctions.stack_arrays examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow each project's License. Do not republish without permission.