This article collects typical usage examples of Python's numpy.isin function. If you have been wondering what isin does, how to call it, or what real-world uses look like, the curated code samples below should help.
The section below presents 20 code examples of the isin function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
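Before the project-level examples, here is a minimal, self-contained sketch of the basic behaviour of numpy.isin (an element-wise membership test that preserves the shape of its first argument, with an optional invert flag); the array values are arbitrary.

import numpy as np

element = np.array([[0, 2], [4, 6]])
test_elements = [1, 2, 4, 8]

# isin keeps the shape of `element` and tests each entry for membership
mask = np.isin(element, test_elements)
print(mask)            # [[False  True] [ True False]]
print(element[mask])   # [2 4]

# invert=True flips the test, which is handy for filtering values out
print(element[np.isin(element, test_elements, invert=True)])  # [0 6]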
Example 1: _apply_BCs
def _apply_BCs(self):
r"""
Applies all the boundary conditions that have been specified, by
adding values to the *A* and *b* matrices.
"""
if 'pore.bc_rate' in self.keys():
# Update b
ind = np.isfinite(self['pore.bc_rate'])
self.b[ind] = self['pore.bc_rate'][ind]
if 'pore.bc_value' in self.keys():
f = np.abs(self.A.diagonal()).mean()
# Update b (impose bc values)
ind = np.isfinite(self['pore.bc_value'])
self.b[ind] = self['pore.bc_value'][ind] * f
# Update b (subtract quantities from b to keep A symmetric)
x_BC = np.zeros(self.b.shape)
x_BC[ind] = self['pore.bc_value'][ind]
self.b[~ind] -= (self.A.tocsr() * x_BC)[~ind]
# Update A
P_bc = self.toindices(ind)
indrow = np.isin(self.A.row, P_bc)
indcol = np.isin(self.A.col, P_bc)
self.A.data[indrow] = 0 # Remove entries from A for all BC rows
self.A.data[indcol] = 0 # Remove entries from A for all BC cols
datadiag = self.A.diagonal() # Add diagonal entries back into A
datadiag[P_bc] = np.ones_like(P_bc, dtype=np.float64) * f
self.A.setdiag(datadiag)
self.A.eliminate_zeros() # Remove 0 entries
Author: PMEAL, Project: OpenPNM, Lines: 28, Source: GenericTransport.py
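The pattern in Example 1 — masking boundary rows and columns of a sparse COO matrix with np.isin on its row/col index arrays, then restoring the diagonal — can be reproduced on a toy system. The sketch below is illustrative only: the 4x4 matrix and the boundary indices are made up, and scipy.sparse is assumed to be available.

import numpy as np
import scipy.sparse as sp

# Toy symmetric system with two "boundary" rows/columns (indices 0 and 3)
A = sp.coo_matrix(np.array([[ 2., -1.,  0.,  0.],
                            [-1.,  2., -1.,  0.],
                            [ 0., -1.,  2., -1.],
                            [ 0.,  0., -1.,  2.]]))
P_bc = np.array([0, 3])  # hypothetical boundary (Dirichlet) indices

# Zero every stored entry whose row or column index belongs to a boundary node
A.data[np.isin(A.row, P_bc)] = 0
A.data[np.isin(A.col, P_bc)] = 0

# Restore unit entries on the boundary diagonal and drop the explicit zeros
A = A.tocsr()
diag = A.diagonal()
diag[P_bc] = 1.0
A.setdiag(diag)
A.eliminate_zeros()
print(A.toarray())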
Example 2: run_single
def run_single(path, min_flow_area, max_gradient):
logger.info("Analyzing scenario at {}".format(path))
gr = GridH5ResultAdmin(os.path.join(path, GRIDADMIN_NAME),
os.path.join(path, RESULTS_NAME))
lines2d2d_valid, lines1d2d_active, lines1d2d_valid = filter_lines(
gr,
min_flow_area=min_flow_area,
max_gradient=max_gradient,
)
groups = group_nodes(lines2d2d_valid.line)
cell_data = gr.cells.subset('2D_ALL').only("id", "cell_coords").data
overlast_ids, plas_ids, modelfout_ids = classify_nodes(
node_id_2d=cell_data['id'],
groups=groups,
lines1d2d_active=lines1d2d_active,
lines1d2d_valid=lines1d2d_valid,
)
cell_data['case'] = np.full(cell_data['id'].size, '', dtype='S10')
cell_data['case'][np.isin(cell_data['id'], plas_ids)] = 'plas'
cell_data['case'][np.isin(cell_data['id'], overlast_ids)] = 'overlast'
cell_data['case'][np.isin(cell_data['id'], modelfout_ids)] = 'modelfout'
return cell_data, gr.epsg_code
Author: nens, Project: raster-tools, Lines: 29, Source: maskerkaart.py
Example 3: __init__
def __init__(self, skim_dict, orig_zones, dest_zones, transpose=False):
omx_shape = skim_dict.skim_info['omx_shape']
logger.info("init AccessibilitySkims with %d dest zones %d orig zones omx_shape %s" %
(len(dest_zones), len(orig_zones), omx_shape, ))
assert len(orig_zones) <= len(dest_zones)
assert np.isin(orig_zones, dest_zones).all()
assert len(np.unique(orig_zones)) == len(orig_zones)
assert len(np.unique(dest_zones)) == len(dest_zones)
self.skim_dict = skim_dict
self.transpose = transpose
if omx_shape[0] == len(orig_zones):
# no slicing required
self.slice_map = None
else:
# 2-d boolean slicing in numpy is a bit tricky
# data = data[orig_map, dest_map] # <- WRONG!
# data = data[orig_map, :][:, dest_map] # <- RIGHT
# data = data[np.ix_(orig_map, dest_map)] # <- ALSO RIGHT
skim_index = list(range(omx_shape[0]))
orig_map = np.isin(skim_index, skim_dict.offset_mapper.map(orig_zones))
dest_map = np.isin(skim_index, skim_dict.offset_mapper.map(dest_zones))
if not dest_map.all():
# not using the whole skim matrix
logger.info("%s skim zones not in dest_map: %s" %
((~dest_map).sum(), np.ix_(~dest_map)))
self.slice_map = np.ix_(orig_map, dest_map)
Author: UDST, Project: activitysim, Lines: 33, Source: accessibility.py
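The inline comments in Example 3 about 2-D boolean slicing deserve a standalone illustration; the small matrix and the kept rows/columns below are arbitrary.

import numpy as np

data = np.arange(16).reshape(4, 4)
orig_map = np.isin(np.arange(4), [0, 2])   # rows to keep
dest_map = np.isin(np.arange(4), [1, 3])   # columns to keep

# data[orig_map, dest_map] pairs the two masks element-wise, which is not the
# intended row/column selection; np.ix_ builds an open mesh so the rows and
# columns are selected independently.
sub = data[np.ix_(orig_map, dest_map)]
print(sub)  # [[ 1  3]
            #  [ 9 11]]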
Example 4: _decision_function
def _decision_function(self, X, labels):
# Initialize the score array
scores = np.zeros([X.shape[0], ])
small_indices = np.where(
np.isin(labels, self.small_cluster_labels_))[0]
large_indices = np.where(
np.isin(labels, self.large_cluster_labels_))[0]
if small_indices.shape[0] != 0:
# Calculate the outlier factor for the samples in small clusters
dist_to_large_center = cdist(X[small_indices, :],
self._large_cluster_centers)
scores[small_indices] = np.min(dist_to_large_center, axis=1)
if large_indices.shape[0] != 0:
# Calculate the outlier factor for the samples in large clusters
large_centers = self.cluster_centers_[labels[large_indices]]
scores[large_indices] = pairwise_distances_no_broadcast(
X[large_indices, :], large_centers)
if self.use_weights:
# Weights are calculated as the number of elements in the cluster
scores = scores * self.cluster_sizes_[labels]
return scores.ravel()
Author: flaviassantos, Project: pyod, Lines: 28, Source: cblof.py
Example 5: test_isin
def test_isin(self):
bv = self.bv
test_bv = BlockVector(2)
a = np.array([1.1, 3.3])
b = np.array([5.5, 7.7])
test_bv[0] = a
test_bv[1] = b
res = pn.isin(bv, test_bv)
for bid, blk in enumerate(bv):
self.assertEqual(blk.size, res[bid].size)
res_flat = np.isin(blk, test_bv[bid])
self.assertTrue(np.allclose(res[bid], res_flat))
c = np.concatenate([a, b])
res = pn.isin(bv, c)
for bid, blk in enumerate(bv):
self.assertEqual(blk.size, res[bid].size)
res_flat = np.isin(blk, c)
self.assertTrue(np.allclose(res[bid], res_flat))
res = pn.isin(bv, test_bv, invert=True)
for bid, blk in enumerate(bv):
self.assertEqual(blk.size, res[bid].size)
res_flat = np.isin(blk, test_bv[bid], invert=True)
self.assertTrue(np.allclose(res[bid], res_flat))
c = np.concatenate([a, b])
res = pn.isin(bv, c, invert=True)
for bid, blk in enumerate(bv):
self.assertEqual(blk.size, res[bid].size)
res_flat = np.isin(blk, c, invert=True)
self.assertTrue(np.allclose(res[bid], res_flat))
Author: Pyomo, Project: pyomo, Lines: 34, Source: test_intrinsics.py
Example 6: _get_cat_and_ncat
def _get_cat_and_ncat(self, X):
if self.category_name_is_set_ is False:
raise NeedToSetCategoriesException()
cat_X = X[np.isin(self.corpus_.get_category_names_by_row(),
[self.category_name] + self.neutral_category_names), :]
ncat_X = X[np.isin(self.corpus_.get_category_names_by_row(),
self.not_category_names + self.neutral_category_names), :]
if len(self.neutral_category_names) > 0:
neut_X = [np.isin(self.corpus_.get_category_names_by_row(), self.neutral_category_names)]
cat_X = vstack([cat_X, neut_X])
ncat_X = vstack([ncat_X, neut_X])
return cat_X, ncat_X
Author: JasonKessler, Project: scattertext, Lines: 12, Source: CorpusBasedTermScorer.py
Example 7: add_single_detected_image_info
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
standard_fields.DetectionResultFields.detection_boxes: A numpy array of
structures with shape [N, 1], representing N tuples, each tuple
containing the same number of named bounding boxes.
Each box is of the format [y_min, x_min, y_max, x_max] (as an example
see datatype vrd_box_data_type, single_box_data_type above).
standard_fields.DetectionResultFields.detection_scores: float32 numpy
array of shape [N] containing detection scores for the boxes.
standard_fields.DetectionResultFields.detection_classes: A numpy array
of structures shape [N, 1], representing the class labels of the
corresponding bounding boxes and possibly additional classes (see
datatype label_data_type above).
"""
if image_id not in self._image_ids:
logging.warn('No groundtruth for the image with id %s.', image_id)
# Since the evaluator assumes that groundtruth is inserted first, we make
# sure to break the code if that is not the case.
self._image_ids.update([image_id])
self._negative_labels[image_id] = np.array([])
self._evaluatable_labels[image_id] = np.array([])
num_detections = detections_dict[
standard_fields.DetectionResultFields.detection_boxes].shape[0]
detection_class_tuples = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
detection_box_tuples = detections_dict[
standard_fields.DetectionResultFields.detection_boxes]
negative_selector = np.zeros(num_detections, dtype=bool)
selector = np.ones(num_detections, dtype=bool)
# Only check boxable labels
for field in detection_box_tuples.dtype.fields:
# Verify if one of the labels is negative (this is sure FP)
negative_selector |= np.isin(detection_class_tuples[field],
self._negative_labels[image_id])
# Verify if all labels are verified
selector &= np.isin(detection_class_tuples[field],
self._evaluatable_labels[image_id])
selector |= negative_selector
self._evaluation.add_single_detected_image_info(
image_key=image_id,
detected_box_tuples=self._process_detection_boxes(
detection_box_tuples[selector]),
detected_scores=detections_dict[
standard_fields.DetectionResultFields.detection_scores][selector],
detected_class_tuples=detection_class_tuples[selector])
Author: ALISCIFP, Project: models, Lines: 50, Source: vrd_evaluation.py
Example 8: window_periods_in_states
def window_periods_in_states(self, window_row_ids, periods, states):
"""
Return boolean array indicating whether specified window periods are in list of states.
Internal DRY method to implement previous_tour_ends and previous_tour_begins
Parameters
----------
window_row_ids : pandas Series int
series of window_row_ids indexed by tour_id
periods : pandas series int
series of tdd_alt ids, index irrelevant (one period per window_row_id)
states : list of int
presumably (e.g. I_EMPTY, I_START...)
Returns
-------
pandas Series boolean
indexed by window_row_ids.index
"""
assert len(window_row_ids) == len(periods)
window = self.slice_windows_by_row_id_and_period(window_row_ids, periods)
return pd.Series(np.isin(window, states), window_row_ids.index)
Author: UDST, Project: activitysim, Lines: 26, Source: timetable.py
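A minimal sketch of the return pattern in Example 8 — running np.isin over a sliced window array and re-attaching the caller's index via pandas — using made-up window codes and state constants (the real states such as I_EMPTY or I_START live in the timetable module).

import numpy as np
import pandas as pd

# Hypothetical coded window value per tour
window = np.array([0, 4, 2, 4, 0])
states = [2, 4]
tour_ids = pd.Index([11, 12, 13, 14, 15], name='tour_id')

# Element-wise membership test, re-attached to the caller's index
in_states = pd.Series(np.isin(window, states), index=tour_ids)
print(in_states)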
Example 9: tour_available
def tour_available(self, window_row_ids, tdds):
"""
test whether time window allows tour with specific tdd alt's time window
Parameters
----------
window_row_ids : pandas Series
series of window_row_ids indexed by tour_id
tdds : pandas series
series of tdd_alt ids, index irrelevant
Returns
-------
available : pandas Series of bool
with same index as window_row_ids.index (presumably tour_id, but we don't care)
"""
assert len(window_row_ids) == len(tdds)
# numpy array with one tdd_footprints_df row for tdds
tour_footprints = self.tdd_footprints[tdds.values.astype(int)]
# numpy array with one windows row for each person
windows = self.slice_windows_by_row_id(window_row_ids)
# t0 = tracing.print_elapsed_time("slice_windows_by_row_id", t0, debug=True)
x = tour_footprints + (windows << I_BIT_SHIFT)
available = ~np.isin(x, COLLISION_LIST).any(axis=1)
available = pd.Series(available, index=window_row_ids.index)
return available
Author: UDST, Project: activitysim, Lines: 33, Source: timetable.py
Example 10: test_return_values
def test_return_values(self):
out = draw(self.cdf)
ok_(out in range(self.n))
size = 10
out = draw(self.cdf, size)
ok_(np.isin(out, range(self.n)).all())
Author: AsiaBartnik, Project: QuantEcon.py, Lines: 7, Source: test_utilities.py
Example 11: isin
def isin(element, test_elements, assume_unique=False, invert=False):
if isinstance(element, BlockVector) and isinstance(test_elements, BlockVector):
assert not element.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not test_elements.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert element.nblocks == test_elements.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
res = BlockVector(element.nblocks)
for i in range(element.nblocks):
res[i] = isin(element[i],
test_elements[i],
assume_unique=assume_unique,
invert=invert)
return res
elif isinstance(element, BlockVector) and isinstance(test_elements, np.ndarray):
assert not element.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
res = BlockVector(element.nblocks)
for i in range(element.nblocks):
res[i] = isin(element[i],
test_elements,
assume_unique=assume_unique,
invert=invert)
return res
elif isinstance(element, np.ndarray) and isinstance(test_elements, np.ndarray):
return np.isin(element,
test_elements,
assume_unique=assume_unique,
invert=invert)
else:
raise NotImplementedError()
Author: Pyomo, Project: pyomo, Lines: 34, Source: intrinsic.py
Example 12: postprocess_clustered_data
def postprocess_clustered_data(which_cluster_each_point_is_in, points_to_be_clustered, min_cluster_size):
clusters_to_keep, which_cluster_each_point_is_in_to_keep, points_to_be_clustered_to_keep = discard_noise(which_cluster_each_point_is_in, points_to_be_clustered)
cluster_indices, cluster_sizes = np.unique(which_cluster_each_point_is_in_to_keep, return_counts=True)
logging.info("Cluster sizes: {}".format(cluster_sizes))
big_enough_cluster_indices = cluster_indices[cluster_sizes >= min_cluster_size]
is_in_big_enough_cluster = np.isin(which_cluster_each_point_is_in_to_keep, big_enough_cluster_indices)
return len(big_enough_cluster_indices), reindex_clusters(big_enough_cluster_indices, which_cluster_each_point_is_in_to_keep[is_in_big_enough_cluster]), points_to_be_clustered_to_keep[is_in_big_enough_cluster]
Author: tinybike, Project: WormCam, Lines: 7, Source: clustering.py
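Example 12 combines np.unique(..., return_counts=True) with np.isin to drop undersized clusters; a compact, self-contained version of that filtering step, with invented labels, looks like this.

import numpy as np

labels = np.array([0, 0, 0, 1, 2, 2, 2, 2, 1])  # made-up cluster labels
min_cluster_size = 3

cluster_indices, cluster_sizes = np.unique(labels, return_counts=True)
big_enough = cluster_indices[cluster_sizes >= min_cluster_size]

# Boolean mask over the original points: True where a point's cluster is big enough
keep = np.isin(labels, big_enough)
print(big_enough)    # [0 2]
print(labels[keep])  # [0 0 0 2 2 2 2]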
Example 13: lofar2image
def lofar2image(all_data, all_trgt,
index_info, window_size, stride,
run_indices_info,
filepath=None,
dtype=np.float64):
fold_runs = np.concatenate([np.extract([np.isin(run, index_info).all() for run in cls_runs], cls_runs)
for cls_runs in run_indices_info.runs.values()])
pruned_indexes = np.concatenate([range(run[0], run[-1] - window_size, stride) for run in fold_runs])
data_shape = (pruned_indexes.shape[0],
window_size,
all_data.shape[1],
1)
if filepath is not None:
image_data = np.memmap(filename=filepath, shape=data_shape, mode='w+', dtype=dtype)
else:
image_data = np.zeros(shape=data_shape, dtype=dtype)
trgt_image = np.zeros(shape=data_shape[0])
for image_index, spectre_index in enumerate(pruned_indexes):
new_data = all_data[spectre_index:spectre_index + window_size, :]
new_data = np.array(new_data.reshape(new_data.shape[0], new_data.shape[1], 1), np.float64)
image_data[image_index] = new_data
trgt_image[image_index] = all_trgt[spectre_index]
print('trgt')
print(np.unique(trgt_image))
return [image_data, trgt_image]
Author: natmourajr, Project: SonarAnalysis, Lines: 28, Source: DataTransformation.py
Example 14: test_in_transit
def test_in_transit():
t = np.linspace(-20, 20, 1000)
m_planet = np.array([0.3, 0.5])
m_star = 1.45
r_star = 1.5
orbit = KeplerianOrbit(
m_star=m_star,
r_star=r_star,
t0=np.array([0.5, 17.4]),
period=np.array([10.0, 5.3]),
ecc=np.array([0.1, 0.8]),
omega=np.array([0.5, 1.3]),
m_planet=m_planet,
)
r_pl = np.array([0.1, 0.03])
coords = theano.function([], orbit.get_relative_position(t))()
r2 = coords[0]**2 + coords[1]**2
inds = theano.function([], orbit.in_transit(t, r=r_pl))()
m = np.isin(np.arange(len(t)), inds)
in_ = r2[inds] <= ((r_star + r_pl)**2)[None, :]
in_ &= coords[2][inds] > 0
assert np.all(np.any(in_, axis=1))
out = r2[~m] > ((r_star + r_pl)**2)[None, :]
out |= coords[2][~m] <= 0
assert np.all(out)
Author: dfm, Project: exoplanet, Lines: 28, Source: keplerian_test.py
Example 15: check_probe
def check_probe(self):
if not np.isin(self.chans, self.probe.chans).all():
raise ValueError("Data chans are not a subset of probe chans. Wrong probe "
"specified in .json file?\n"
"Data chans:\n%s\n"
"Probe %r chans:\n%s"
% (self.chans, self.probename, self.probe.chans))
Author: spyke, Project: spyke, Lines: 7, Source: dat.py
Example 16: flagStats_single
def flagStats_single(self, fname):
'''counter of all the primary and secondary flags
'''
import pandas as pd
df = Dataset(fname, 'r')
arr = [pd.Series({'time size': df['time'].size})]
for vrbl in df.variables:
if '_flagPrimary' in vrbl:
dict = {}
v = vrbl.split('_')[0]
flagP = vrbl
flagS = v+'_flagSecondary'
pArr = df[flagP][:]
for p in [1,2,3,4,9]:
# print flagP, p,':', df[flagP][:].tolist().count(p)
dict[flagP+'.'+str(p)] = df[flagP][:].tolist().count(p)
for s in [1,2,3]:
# print flagS, s, ':', df[flagS][:].tolist().count(s)
pAtsArr = df[flagP][np.isin(df[flagS][:],s)]
# print flagS, s, '(3):', pAtsArr.tolist().count(3)
# print flagS, s, '(4):', pAtsArr.tolist().count(4)
dict[flagS+'.'+str(s)+'.3']= pAtsArr.tolist().count(3)
dict[flagS+'.'+str(s)+'.4']= pAtsArr.tolist().count(4)
arr.append(pd.Series(dict))
result = pd.concat(arr)
df.close()
return result
Author: sarahheim, Project: ncObjects, Lines: 28, Source: nc.py
Example 17: add_component
def add_component(self, c):
"""Initialize a new model component and prepare to save its optimized outputs.
The component name should be consistent across all order models.
Note that if a component name was initialized in the models for 1+ orders but
was not included in all order models, its RV values/uncertainties will be set
to NaNs and all other properties set to 0 for the excluded order(s).
Parameters
----------
c : a wobble.Model.Component object
"""
if np.isin(c.name, self.component_names):
print("Results: A component of name {0} has already been added.".format(c.name))
return
self.component_names.append(c.name)
basename = c.name+'_'
setattr(self, basename+'rvs', np.empty((self.R,self.N)) + np.nan)
setattr(self, basename+'ivars_rvs', np.empty((self.R,self.N)) + np.nan)
setattr(self, basename+'template_xs', [0 for r in range(self.R)])
setattr(self, basename+'template_ys', [0 for r in range(self.R)])
setattr(self, basename+'template_ivars', [0 for r in range(self.R)])
if c.K > 0:
setattr(self, basename+'basis_vectors', [0 for r in range(self.R)])
setattr(self, basename+'basis_weights', [0 for r in range(self.R)])
setattr(self, basename+'ys_predicted', [0 for r in range(self.R)])
attrs = COMPONENT_NP_ATTRS
if c.K > 0:
attrs = np.append(attrs, OPT_COMPONENT_NP_ATTRS)
for attr in attrs:
setattr(self, basename+attr, [0 for r in range(self.R)])
Author: megbedell, Project: wobble, Lines: 31, Source: results.py
Example 18: euc_dist
def euc_dist(a, origins=0, cell_size=1):
"""Calculate the euclidean distance and/or allocation
Parameters:
-----------
a : array
numpy float or integer array
origins : number, list or tuple
The locations to calculate distance for. Anything that is not a mask
is an origin. If a single number is provided, a `mask` will be created
using it. A list/tuple of values can be used for multiple value
masking.
cell_size : float, int
The cell size of the raster, i.e. what each cell represents on the
ground. A value of 1.0 is assumed.
"""
from scipy import ndimage as nd
#
cell_size = abs(cell_size)
if cell_size == 0:
cell_size = 1
msk = (~np.isin(a, origins)).astype('int')
dist = nd.distance_transform_edt(msk,
sampling=cell_size,
return_distances=True)
return dist
Author: Dan-Patterson, Project: GIS, Lines: 26, Source: grid.py
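A small usage sketch for euc_dist above, assuming the function is defined exactly as shown; the 5x5 grid with a single origin cell and the 10.0 cell size are invented.

import numpy as np

a = np.zeros((5, 5), dtype=int)
a[2, 2] = 1  # a single origin cell in the middle

# Cells matching the origin value get distance 0; every other cell gets its
# euclidean distance (in ground units) to the nearest origin cell.
dist = euc_dist(a, origins=1, cell_size=10.0)
print(dist[2])  # [20. 10.  0. 10. 20.]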
Example 19: append_column
def append_column(xray_evtfile,grp_fitsfile):
hdu_xray = fits.open(xray_evtfile)
xray_mod_pulse_number = hdu_xray['EVENTS'].data['MOD_PULSE_NUMBER']
num_of_xrays = len(xray_mod_pulse_number)
print(xray_mod_pulse_number.dtype)
hdu_grp = fits.open(grp_fitsfile)
grp_mod_pulse_number = hdu_grp['GRP'].data['NSEQpulse']
num_of_grps = len(grp_mod_pulse_number)
print(grp_mod_pulse_number.dtype)
sys.stdout.write('Number of X-rays: %d\n' % num_of_xrays)
sys.stdout.write('Number of GRPs: %d\n' % num_of_grps)
xray_flag_isin_grp = np.isin(xray_mod_pulse_number,grp_mod_pulse_number)
num_of_xrays_in_grp = np.sum(xray_flag_isin_grp==True)
sys.stdout.write('Number of X-rays within GRPs: %d\n' % num_of_xrays_in_grp)
new_columns = fits.ColDefs([
fits.Column(name='MPGRP5.5',format='L',array=xray_flag_isin_grp)
])
hdu_xray_events_columns = hdu_xray['EVENTS'].columns
xray_evtfile_grp = '%s_grp.evt' % os.path.splitext(xray_evtfile)[0]
cmd = 'rm -f %s' % xray_evtfile_grp
print(cmd);os.system(cmd)
hdu_primary = fits.PrimaryHDU()
hdu_events = fits.BinTableHDU.from_columns(hdu_xray_events_columns+new_columns,name='EVENTS')
hdulist = fits.HDUList([hdu_primary,hdu_events])
hdulist.writeto(xray_evtfile_grp)
Author: tenoto, Project: giantradiopulse, Lines: 32, Source: run_append_grpflag.py
Example 20: set_gl_hl_mask
def set_gl_hl_mask(self, artist, hit_id = None,
cmask = 0.0, amask = 0.65):
#
# logic is
# if artist_id is found within radius from (x, y)
# and
# if it is the closest artist in the area of checking
# then return True
if self._gl_id_data is None: return False
if self._gl_mask_artist is None: return False
# do not do this when hittest_map is updating... this is when
# mouse dragging is going on
if not get_glcanvas()._hittest_map_update: return
x0, y0, id_dict, im, imd, im2 = self._gl_id_data
arr = self._gl_mask_artist.get_array()
for k in id_dict.keys():
if (id_dict[k]() == artist):
if hit_id is not None:
if len(hit_id) > 0:
mask = np.isin(imd, hit_id)
m = np.logical_and(im == k, mask)
else:
m = (im == k)
else:
m = (im == k)
c = self.figure.canvas.hl_color
arr[:,:,:3][m] = np.array(c, copy=False)
arr[:,:,3][m] = amask
break
Author: piScope, Project: piScope, Lines: 34, Source: axes3d_mod.py
Note: The numpy.isin examples in this article were compiled by 纯净天空 from source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by the community; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not republish without permission.