This article collects typical usage examples of the numpy.not_equal function in Python. If you are wondering what not_equal does, how to call it, or want to see it used in real code, the curated examples below should help.
Twenty code examples of not_equal are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
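Before the project examples, a minimal sketch of what numpy.not_equal does: it compares two arrays element-wise (with broadcasting), returns a boolean array, and accepts an optional out argument that writes the result into a preallocated array — an idiom several examples below rely on. The values here are illustrative:

import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([1, 0, 3, 0])

# element-wise comparison; equivalent to a != b
print(np.not_equal(a, b))   # [False  True False  True]

# broadcasting against a scalar
print(np.not_equal(a, 3))   # [ True  True False  True]

# writing into a preallocated boolean array via `out`
flag = np.empty(4, dtype=bool)
np.not_equal(a, b, out=flag)
print(flag.sum())           # 2 -> number of differing positions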
Example 1: parseArgs

import numpy

def parseArgs(data, targetClass, otherClass=None, **args):
    '''parse arguments for a feature scoring function'''
    if 'feature' in args:
        feature = args['feature']
    else:
        feature = None
    if 'Y' in args:
        Y = args['Y']
        if otherClass is None:
            otherI = numpy.nonzero(numpy.not_equal(Y, targetClass))[0]
        else:
            otherI = numpy.nonzero(numpy.equal(Y, otherClass))[0]
        targetClassSize = numpy.sum(numpy.equal(Y, targetClass))
    else:
        Y = None
        if otherClass is None:
            otherI = numpy.nonzero(numpy.not_equal(data.labels.Y, targetClass))[0]
        else:
            otherI = data.labels.classes[otherClass]
        targetClassSize = len(data.labels.classes[targetClass])
    otherClassSize = len(otherI)
    return Y, targetClassSize, otherClassSize, otherI, feature

Developer: bpartridge | Project: PyML | Lines: 25 | Source: featsel.py
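The numpy.nonzero(numpy.not_equal(...)) combination above is a common way to collect the indices of all samples outside a target class. In isolation, with toy labels:

import numpy as np

Y = np.array([0, 1, 1, 2, 0, 1])
targetClass = 1
otherI = np.nonzero(np.not_equal(Y, targetClass))[0]
print(otherI)  # [0 3 4] -> indices of samples not in class 1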
Example 2: node_can_drain

# np is numpy; LOCAL_BAD_INDEX_VALUE, _CURRENT_LAKE and _FLOODED are
# module-level constants of the landlab lake mapper
def node_can_drain(self, the_node):
    """Check if a node has drainage away from the current lake/depression.

    Parameters
    ----------
    the_node : int
        The node to test.
    nodes_this_depression : array_like of int
        Nodes that form a pit.

    Returns
    -------
    boolean
        ``True`` if the node can drain. Otherwise, ``False``.
    """
    nbrs = self._node_nbrs[the_node]
    not_bad = nbrs != LOCAL_BAD_INDEX_VALUE
    not_too_high = self._elev[nbrs] < self._elev[the_node]
    not_current_lake = np.not_equal(self.flood_status[nbrs], _CURRENT_LAKE)
    not_flooded = np.not_equal(self.flood_status[nbrs], _FLOODED)
    all_probs = np.logical_and(
        np.logical_and(not_bad, not_too_high),
        np.logical_and(not_current_lake, not_flooded))
    if np.any(all_probs):
        return True
    else:
        return False

Developer: mcflugen | Project: landlab | Lines: 27 | Source: lake_mapper.py
Example 3: best_grid

import numpy as np

best_grid_cache = {}  # module-level result cache assumed by this snippet

def best_grid(wavelengths1, wavelengths2, key):
    """
    Return the best wavelength grid to regrid two arrays.

    Considering the two wavelength grids passed as parameters, this function
    computes the best new grid that will be used to regrid the two spectra
    before combining them. We do not use np.unique as it is much slower than
    finding the unique elements by hand.

    Parameters
    ----------
    wavelengths1, wavelengths2: array of floats
        The wavelength grids to be 'regridded'.
    key: tuple
        Key under which the result is cached.

    Returns
    -------
    new_grid: array of floats
        Array containing all the wavelengths found in the input arrays.
    """
    if key in best_grid_cache:
        return best_grid_cache[key]
    wl = np.concatenate((wavelengths1, wavelengths2))
    wl.sort(kind='mergesort')
    # flag marks positions whose value differs from the previous one,
    # i.e. the unique elements of the sorted array
    flag = np.ones(len(wl), dtype=bool)
    np.not_equal(wl[1:], wl[:-1], out=flag[1:])
    best_grid_cache[key] = wl[flag]
    return wl[flag]

Developer: JohannesBuchner | Project: cigale | Lines: 31 | Source: utils.py
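The concatenate / mergesort / not_equal pattern in best_grid (it appears again in Examples 8 and 9 below) is a general fast-unique idiom. A minimal standalone sketch with illustrative values:

import numpy as np

def fast_unique_sorted(a, b):
    """Merge two arrays and drop duplicates, like np.unique but cheaper."""
    c = np.concatenate((a, b))
    c.sort(kind='mergesort')
    flag = np.ones(len(c), dtype=bool)         # the first element is always kept
    np.not_equal(c[1:], c[:-1], out=flag[1:])  # True wherever the value changes
    return c[flag]

print(fast_unique_sorted(np.array([1.0, 2.0, 3.0]), np.array([2.0, 2.5])))
# [1.  2.  2.5 3. ]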
Example 4: compute_distances

# np is numpy; _distance is the project's compiled Cython module
def compute_distances(self, x1, x2):
    """
    The method uses a function implemented in Cython. Data (`x1` and `x2`)
    is accompanied by two tables. One is a 2-d table in which elements of
    `x1` (`x2`) are replaced by 0's and 1's. The other is a vector
    indicating rows (or columns) with nan values.

    The function in Cython uses a fast loop without any conditions to
    compute distances between rows without missing values, and a slower
    loop for those with missing values.
    """
    nonzeros1 = np.not_equal(x1, 0).view(np.int8)
    if self.axis == 1:
        nans1 = _distance.any_nan_row(x1)
        if x2 is None:
            nonzeros2, nans2 = nonzeros1, nans1
        else:
            nonzeros2 = np.not_equal(x2, 0).view(np.int8)
            nans2 = _distance.any_nan_row(x2)
        return _distance.jaccard_rows(
            nonzeros1, nonzeros2,
            x1, x1 if x2 is None else x2,
            nans1, nans2,
            self.ps,
            x2 is not None)
    else:
        nans1 = _distance.any_nan_row(x1.T)
        return _distance.jaccard_cols(
            nonzeros1, x1, nans1, self.ps)

Developer: acopar | Project: orange3 | Lines: 29 | Source: distance.py
Example 5: average_without_padding

import numpy as np
import torch
from torch.autograd import Variable

def average_without_padding(x, ids, padding_id, cuda=False, eps=1e-8):
    # 1 where ids holds a real token, 0 where it is padding
    mask_np = np.not_equal(ids, padding_id).astype(int)[:, :, np.newaxis]
    mask = Variable(torch.from_numpy(mask_np)).float()
    if cuda:
        mask = mask.cuda()
    mask = mask.permute(1, 2, 0).expand_as(x)
    # masked mean: sum of real entries divided by the number of real entries
    s = torch.sum(x * mask, dim=2) / (torch.sum(mask, dim=2) + eps)
    return s

Developer: sepiatone | Project: information_retrieval | Lines: 7 | Source: utils.py
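The padding-mask idea is independent of PyTorch; a numpy-only sketch (shapes and the padding id are hypothetical):

import numpy as np

ids = np.array([[5, 9, 0, 0],   # two sequences, padded with id 0
                [7, 0, 0, 0]])
x = np.ones((2, 4, 3))          # (batch, seq_len, embed_dim) embeddings
padding_id = 0

mask = np.not_equal(ids, padding_id).astype(float)[:, :, np.newaxis]
avg = (x * mask).sum(axis=1) / (mask.sum(axis=1) + 1e-8)
print(avg.shape)                # (2, 3): mean embedding over real tokens only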
Example 6: get_calipso_phase_inner

# np is numpy; get_bits and the CALIPSO_* constants come from the
# surrounding module
def get_calipso_phase_inner(features, qual_min=CALIPSO_QUAL_VALUES['medium'],
                            max_layers=1, same_phase_in_top_three_lay=True):
    """
    Returns Calipso cloud phase.

    Pixels with quality lower than *qual_min* are masked out.
    Screen out pixels with more than *max_layers* layers.
    """
    if same_phase_in_top_three_lay:
        phase1 = get_bits(features[:, 0], CALIPSO_PHASE_BITS, shift=True)
        phase2 = get_bits(features[:, 1], CALIPSO_PHASE_BITS, shift=True)
        phase3 = get_bits(features[:, 2], CALIPSO_PHASE_BITS, shift=True)
        two_layer_pixels = features[:, 2] > 1
        three_layer_pixels = features[:, 3] > 1
        lay1_lay2_differ = np.logical_and(two_layer_pixels,
                                          np.not_equal(phase1, phase2))
        lay2_lay3_differ = np.logical_and(three_layer_pixels,
                                          np.not_equal(phase2, phase3))
        varying_phases_in_top_3lay = np.logical_or(lay1_lay2_differ,
                                                   lay2_lay3_differ)
    # Reduce to single layer, masking any multilayer pixels
    features = np.ma.array(features[:, 0],
                           mask=(features[:, max_layers:] > 1).any(axis=-1))
    if same_phase_in_top_three_lay:
        features = np.ma.array(features,
                               mask=varying_phases_in_top_3lay)
    phase = get_bits(features, CALIPSO_PHASE_BITS, shift=True)
    qual = get_bits(features, CALIPSO_QUAL_BITS, shift=True)
    # Don't care about pixels with lower than *qual_min* quality
    return np.ma.array(phase, mask=qual < qual_min)

Developer: adybbroe | Project: atrain_match | Lines: 29 | Source: validate_cph_util.py
Example 7: test_prelu_param_updates

import numpy as np
from neupy import algorithms, layers

# simple_classification is a helper from the neupy test suite
def test_prelu_param_updates(self):
    x_train, _, y_train, _ = simple_classification()
    prelu_layer1 = layers.PRelu(20, alpha=0.25)
    prelu_layer2 = layers.PRelu(1, alpha=0.25)
    gdnet = algorithms.GradientDescent(
        [
            layers.Input(10),
            prelu_layer1,
            prelu_layer2,
        ]
    )

    prelu1_alpha_before_training = prelu_layer1.alpha.get_value()
    prelu2_alpha_before_training = prelu_layer2.alpha.get_value()

    gdnet.train(x_train, y_train, epochs=10)

    prelu1_alpha_after_training = prelu_layer1.alpha.get_value()
    prelu2_alpha_after_training = prelu_layer2.alpha.get_value()

    # every alpha entry should have moved during training
    self.assertTrue(all(np.not_equal(
        prelu1_alpha_before_training,
        prelu1_alpha_after_training,
    )))
    self.assertTrue(all(np.not_equal(
        prelu2_alpha_before_training,
        prelu2_alpha_after_training,
    )))

Developer: InSertCod3 | Project: neupy | Lines: 29 | Source: test_layers.py
Example 8: scoreDuplicates

import numpy

# peek, idType, grouper and ScoringFunction are dedupe helpers
def scoreDuplicates(records, data_model, pool, threshold=0):
    record, records = peek(records)
    id_type = idType(record)
    score_dtype = [('pairs', id_type, 2), ('score', 'f4', 1)]

    record_chunks = grouper(records, 100000)
    scoring_function = ScoringFunction(data_model,
                                       threshold,
                                       score_dtype)

    results = [pool.apply_async(scoring_function, (chunk,))
               for chunk in record_chunks]
    for r in results:
        r.wait()

    scored_pairs = numpy.concatenate([r.get() for r in results])
    scored_pairs.sort()

    # keep only the first occurrence of each pair (the array is now sorted)
    flag = numpy.ones(len(scored_pairs), dtype=bool)
    numpy.not_equal(scored_pairs[1:],
                    scored_pairs[:-1],
                    out=flag[1:])
    return scored_pairs[flag]

Developer: nidhog | Project: dedupe | Lines: 30 | Source: core.py
Example 9: merge

import numpy as np

def merge(a, b):
    # http://stackoverflow.com/questions/12427146/combine-two-arrays-and-sort
    c = np.concatenate((a, b))
    c.sort(kind='mergesort')
    flag = np.ones(len(c), dtype=bool)
    np.not_equal(c[1:], c[:-1], out=flag[1:])
    return c[flag]

Developer: yrapop01 | Project: treecoreset | Lines: 8 | Source: coreset.py
Example 10: _calc_errors

import numpy as np

def _calc_errors(truth, prediction, class_number=1):
    # confusion-matrix counts for a one-vs-rest evaluation of `class_number`
    tp = np.sum(np.equal(truth, class_number) * np.equal(prediction, class_number))
    tn = np.sum(np.not_equal(truth, class_number) * np.not_equal(prediction, class_number))
    fp = np.sum(np.not_equal(truth, class_number) * np.equal(prediction, class_number))
    fn = np.sum(np.equal(truth, class_number) * np.not_equal(prediction, class_number))
    return tp, tn, fp, fn

Developer: gzuidhof | Project: luna16 | Lines: 8 | Source: metrics.py
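A quick check of the counting logic above on toy labels (values are illustrative):

import numpy as np

truth      = np.array([1, 1, 0, 0, 1])
prediction = np.array([1, 0, 0, 1, 1])
print(_calc_errors(truth, prediction))  # (2, 1, 1, 1) -> tp, tn, fp, fn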
Example 11: oht_model

import numpy
import cdms2
from math import pi

# latAxis and ocean_mask are helpers from the surrounding module
def oht_model(gw, oro, fsns, flns, shfl, lhfl):
    """parameters; must be dimensioned as specified:
    gw  : gaussian weights (lat)
    oro : orography data array (lat,lon)
          requires that lat and lon are attached coordinates of oro
          and that oro and the following variables are 2D arrays (lat,lon).
    fsns: net shortwave solar flux at surface (lat,lon)
    flns: net longwave flux at surface (lat,lon)
    shfl: sensible heat flux at surface (lat,lon)
    lhfl: latent heat flux at surface (lat,lon)
    """
    re = 6.371e6          # radius of earth
    coef = re**2 / 1.e15  # scaled by PW
    heat_storage = 0.3    # W/m^2 adjustment for ocean heat storage
    nlat = oro.shape[0]
    nlon = oro.shape[1]
    dlon = 2. * pi / nlon  # dlon in radians
    lat = latAxis(oro)
    i65n = numpy.where(lat[:] >= 65)[0][0]    # assumes that lat[i+1] > lat[i]
    i65s = numpy.where(lat[:] <= -65)[0][-1]  # assumes that lat[i+1] > lat[i]
    # get the mask for the ocean basins
    basins_mask = ocean_mask(oro)  # returns 2D array (lat,lon)
    # compute net surface energy flux
    netflux = fsns - flns - shfl - lhfl - heat_storage
    # compute the net flux for the basins
    netflux_basin = numpy.ma.empty((3, nlat, nlon))
    netflux_basin[0, :, :] = netflux[:, :]
    netflux_basin[1, :, :] = netflux[:, :]
    netflux_basin[2, :, :] = netflux[:, :]
    netflux_basin[:, :, :] = numpy.ma.masked  # make sure the mask array gets created
    netflux_basin._mask[0, :, :] = numpy.not_equal(basins_mask, 1)  # False on Pacific
    netflux_basin._mask[1, :, :] = numpy.not_equal(basins_mask, 2)  # False on Atlantic
    netflux_basin._mask[2, :, :] = numpy.not_equal(basins_mask, 3)  # False on Indian
    # sum flux over the longitudes in each basin
    heatflux = numpy.ma.sum(netflux_basin, axis=2)
    # compute implied heat transport in each basin
    oft = cdms2.createVariable(numpy.ma.masked_all((4, nlat)))
    oft.setAxisList([cdms2.createAxis([0, 1, 2, 3], id='basin number'), lat])
    # In the original NCL, ! assigns a name to a dimension of oft:
    # oft!0 = "basin number"  # 0: pacific, 1: atlantic, 2: indian, 3: total
    # oft!1 = "lat"
    for n in range(3):
        for j in range(i65n, i65s - 1, -1):  # start sum at most northern point
            # ...assumes that lat[i+1] > lat[i]
            oft[n, j] = -coef * dlon * numpy.ma.sum(heatflux[n, j:i65n + 1] * gw[j:i65n + 1])
    # compute total implied ocean heat transport at each latitude
    # as the sum over the basins at that latitude
    for j in range(i65n, i65s - 1, -1):
        oft[3, j] = numpy.ma.sum(oft[0:3, j])
    return oft  # 2D array (4,lat)

Developer: susburrows | Project: uvcmetrics | Lines: 58 | Source: ncl_isms.py
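The basin-selection trick above — a masked array whose mask is numpy.not_equal(labels, basin_id) — works on any labeled grid. A toy 2x3 sketch:

import numpy as np

basins_mask = np.array([[1, 2, 3],
                        [1, 1, 2]])
netflux = np.arange(6.0).reshape(2, 3)

# keep only cells labeled 1 ("Pacific"); everything else is masked out
pacific = np.ma.array(netflux, mask=np.not_equal(basins_mask, 1))
print(pacific.sum())  # 0 + 3 + 4 = 7.0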
Example 12: shrink_hyperrect

import numpy as np

def shrink_hyperrect(x0, x1, L, R):
    """Shrink the bounds [L, R] toward the rejected point x1 (slice-sampling step)."""
    L_or_R = (x1 >= x0)                 # modifications to R
    R[L_or_R] = x1[L_or_R]
    np.not_equal(L_or_R, True, L_or_R)  # invert the mask in place; modifications to L
    L[L_or_R] = x1[L_or_R]
    return L, R

Developer: ktchrn | Project: MVSlice | Lines: 9 | Source: sampling.py
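The np.not_equal(L_or_R, True, L_or_R) call is an in-place boolean inversion: it compares the mask against True and writes the result back into the same array, equivalent to np.logical_not(L_or_R, out=L_or_R) but without allocating a new mask:

import numpy as np

mask = np.array([True, False, True])
np.not_equal(mask, True, mask)  # invert in place
print(mask)                     # [False  True False]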
Example 13: _numpy

# method of a histogrammar Bin aggregator; Count and identity come from the package
def _numpy(self, data, weights, shape):
    q = self.quantity(data)
    self._checkNPQuantity(q, shape)
    self._checkNPWeights(weights, shape)
    weights = self._makeNPWeights(weights, shape)
    newentries = weights.sum()

    import numpy

    selection = numpy.isnan(q)
    numpy.bitwise_not(selection, selection)
    subweights = weights.copy()
    subweights[selection] = 0.0
    self.nanflow._numpy(data, subweights, shape)

    # avoid nan warning in calculations by flinging the nans elsewhere
    numpy.bitwise_not(selection, selection)
    q = numpy.array(q, dtype=numpy.float64)
    q[selection] = self.high
    weights = weights.copy()
    weights[selection] = 0.0

    numpy.greater_equal(q, self.low, selection)
    subweights[:] = weights
    subweights[selection] = 0.0
    self.underflow._numpy(data, subweights, shape)

    numpy.less(q, self.high, selection)
    subweights[:] = weights
    subweights[selection] = 0.0
    self.overflow._numpy(data, subweights, shape)

    if (all(isinstance(value, Count) and value.transform is identity
            for value in self.values)
            and numpy.all(numpy.isfinite(q))
            and numpy.all(numpy.isfinite(weights))):
        # Numpy defines histograms as including the upper edge of the last bin only, so drop that
        weights[q == self.high] = 0.0
        h, _ = numpy.histogram(q, self.num, (self.low, self.high), weights=weights)
        for hi, value in zip(h, self.values):
            value.fill(None, float(hi))
    else:
        q = numpy.array(q, dtype=numpy.float64)
        numpy.subtract(q, self.low, q)
        numpy.multiply(q, self.num, q)
        numpy.divide(q, self.high - self.low, q)
        numpy.floor(q, q)
        q = numpy.array(q, dtype=int)
        for index, value in enumerate(self.values):
            # select everything outside bin `index` and zero its weight
            numpy.not_equal(q, index, selection)
            subweights[:] = weights
            subweights[selection] = 0.0
            value._numpy(data, subweights, shape)

    # no possibility of exception from here on out (for rollback)
    self.entries += float(newentries)

Developer: histogrammar | Project: histogrammar-python | Lines: 57 | Source: bin.py
Example 14: _build_y

# np is numpy; the check_* and isotonic helpers are scikit-learn internals
def _build_y(self, X, y, sample_weight, trim_duplicates=True):
    """Build the y_ IsotonicRegression."""
    check_consistent_length(X, y, sample_weight)
    X, y = [check_array(x, ensure_2d=False) for x in [X, y]]
    y = as_float_array(y)
    self._check_fit_data(X, y, sample_weight)

    # Determine increasing if auto-determination requested
    if self.increasing == 'auto':
        self.increasing_ = check_increasing(X, y)
    else:
        self.increasing_ = self.increasing

    # If sample_weights is passed, remove zero-weight values and clean order
    if sample_weight is not None:
        sample_weight = check_array(sample_weight, ensure_2d=False)
        mask = sample_weight > 0
        X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
    else:
        sample_weight = np.ones(len(y))

    order = np.lexsort((y, X))
    X, y, sample_weight = [astype(array[order], np.float64, copy=False)
                           for array in [X, y, sample_weight]]
    unique_X, unique_y, unique_sample_weight = _make_unique(
        X, y, sample_weight)

    # Store _X_ and _y_ to maintain backward compat during the deprecation
    # period of X_ and y_
    self._X_ = X = unique_X
    self._y_ = y = isotonic_regression(unique_y, unique_sample_weight,
                                       self.y_min, self.y_max,
                                       increasing=self.increasing_)

    # Handle the left and right bounds on X
    self.X_min_, self.X_max_ = np.min(X), np.max(X)

    if trim_duplicates:
        # Remove unnecessary points for faster prediction
        keep_data = np.ones((len(y),), dtype=bool)
        # Aside from the 1st and last point, remove points whose y values
        # are equal to both the point before and the point after it.
        keep_data[1:-1] = np.logical_or(
            np.not_equal(y[1:-1], y[:-2]),
            np.not_equal(y[1:-1], y[2:])
        )
        return X[keep_data], y[keep_data]
    else:
        # The ability to turn off trim_duplicates is only used to make it
        # easier to unit test that removing duplicates in y does not have
        # any impact on the resulting interpolation function (besides
        # prediction speed).
        return X, y

Developer: AlexandreAbraham | Project: scikit-learn | Lines: 55 | Source: isotonic.py
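The trim step keeps only points where y changes relative to a neighbor, so flat runs collapse to their endpoints. In isolation, with toy values:

import numpy as np

y = np.array([0., 0., 0., 1., 1., 2.])
keep = np.ones(len(y), dtype=bool)
keep[1:-1] = np.logical_or(np.not_equal(y[1:-1], y[:-2]),
                           np.not_equal(y[1:-1], y[2:]))
print(y[keep])  # [0. 0. 1. 1. 2.] -> the interior duplicate is dropped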
Example 15: LabelPerimeter

import numpy as np

def LabelPerimeter(L, Connectivity=4):
    """Converts a label or binary mask image to a binary perimeter image.

    Uses 4-neighbor or 8-neighbor shifts to detect pixels whose values do
    not agree with their neighbors.

    Parameters
    ----------
    L : array_like
        A label or binary mask image.
    Connectivity : double or int
        Neighborhood connectivity to evaluate. Valid values are 4 or 8.
        Default value = 4.

    Returns
    -------
    Mask : array_like
        A binary image where object perimeter pixels have value 1, and
        non-perimeter pixels have value 0.

    See Also
    --------
    EmbedBounds
    """
    # initialize temporary variables
    Mask = np.zeros(L.shape)
    Temp = np.zeros(L.shape)

    # check left-right neighbors
    Temp[:, 0:-2] = np.not_equal(L[:, 0:-2], L[:, 1:-1])
    Temp[:, 1:-1] = np.logical_or(Temp[:, 1:-1], Temp[:, 0:-2])
    Mask = np.logical_or(Mask, Temp)

    # check up-down neighbors
    Temp[0:-2, :] = np.not_equal(L[0:-2, :], L[1:-1, :])
    Temp[1:-1, :] = np.logical_or(Temp[1:-1, :], Temp[0:-2, :])
    Mask = np.logical_or(Mask, Temp)

    # additional calculations if Connectivity == 8
    if Connectivity == 8:
        # slope 1 diagonal shift
        Temp[1:-1, 0:-2] = np.not_equal(L[0:-2, 1:-1], L[1:-1, 0:-2])
        Temp[0:-2, 1:-1] = np.logical_or(Temp[0:-2, 1:-1], Temp[1:-1, 0:-2])
        Mask = np.logical_or(Mask, Temp)

        # slope -1 diagonal shift
        Temp[1:-1, 1:-1] = np.not_equal(L[0:-2, 0:-2], L[1:-1, 1:-1])
        Temp[0:-2, 0:-2] = np.logical_or(Temp[0:-2, 0:-2], Temp[1:-1, 1:-1])
        Mask = np.logical_or(Mask, Temp)

    # generate label-valued output
    return Mask.astype(np.uint32) * L

Developer: directorscut82 | Project: HistomicsTK | Lines: 54 | Source: LabelPerimeter.py
Example 16: fact

from numpy import add, equal, multiply, not_equal, subtract

def fact(x):
    """Element-wise factorial of an array of non-negative integers."""
    p = equal(x, 0)                  # bump zeros to one, since 0! == 1!
    k = add(x, p)
    i = not_equal(k, 1).astype(int)  # 1 where an entry still has factors left
    z = 1
    while i.any().astype(bool):
        z = multiply(z, k)
        subtract(k, i, k)            # decrement only the unfinished entries
        i = not_equal(k, 1).astype(int)
    return z

Developer: will3216 | Project: optimization | Lines: 12 | Source: custom_np_fxns.py
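Tracing fact on a small array shows the role of not_equal: each pass multiplies in the current values and decrements only the entries whose countdown has not yet reached 1:

import numpy as np

print(fact(np.array([0, 1, 3, 5])))  # [  1   1   6 120]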
Example 17: getModelData

import numpy as np

def getModelData(model, air, selected_mogs, type1, type2=""):
    data = np.array([])
    type2 = ""
    tt = np.array([])
    et = np.array([])
    in_vect = np.array([])
    mogs = []
    for i in selected_mogs:
        mogs.append(model.mogs[i])

    if type1 == "tt":
        fac_dt = 1
        mog = mogs[0]
        ind = np.not_equal(mog.tt, -1).T
        tt, t0 = mog.getCorrectedTravelTimes(air)
        tt = tt.T
        et = fac_dt * mog.f_et * mog.et.T
        in_vect = mog.in_vect.T
        no = np.arange(mog.data.ntrace).T
        if len(mogs) > 1:
            for n in range(1, len(mogs)):
                mog = mogs[n]
                ind = np.concatenate((ind, np.not_equal(mog.tt, -1).T), axis=0)
                tt = np.concatenate((tt, mog.getCorrectedTravelTimes(air)[0].T), axis=0)
                et = np.concatenate((et, fac_dt * mog.et * mog.f_et.T), axis=0)
                in_vect = np.concatenate((in_vect, mog.in_vect.T), axis=0)
                no = np.concatenate((no, np.arange(mog.ntrace + 1).T), axis=0)
        # keep traces that are both picked (tt != -1) and flagged as in use
        ind = np.equal((ind.astype(int) + in_vect.astype(int)), 2)
        data = np.array([tt[ind], et[ind], no[ind]]).T
        return data, ind

    if type2 == "depth":
        data, ind = getModelData(model, air, selected_mogs, type1)  # @UndefinedVariable
        mog = mogs[0]
        tt = mog.Tx_z_orig.T
        et = mog.Rx_z_orig.T
        in_vect = mog.in_vect.T
        if len(mogs) > 1:
            for n in range(1, len(mogs)):
                tt = np.concatenate((tt, mogs[n].Tx_z_orig.T), axis=0)
                et = np.concatenate((et, mogs[n].Rx_z_orig.T), axis=0)
                in_vect = np.concatenate((in_vect, mogs[n].in_vect.T), axis=0)
        ind = np.equal((ind.astype(int) + in_vect.astype(int)), 2)
        data = np.array([tt[ind], et[ind], no[ind]]).T
    return data, ind

Developer: groupeLIAMG | Project: BhTomoPy | Lines: 52 | Source: model.py
Example 18: step_callback

# nested inside a GPflow monitor test: np is numpy, and model, optimiser,
# context, mon, w, b, var and call_count live in the enclosing scope
def step_callback(*args, **kwargs):
    nonlocal model, optimiser, context, w, b, var, call_count
    context.optimiser_updated = False
    mon.update_optimiser(context, *args, **kwargs)
    w_new, b_new, var_new = model.enquire_session().run(
        [model.w.unconstrained_tensor,
         model.b.unconstrained_tensor,
         model.var.unconstrained_tensor])
    # every parameter must have changed since the previous step
    self.assertTrue(np.alltrue(np.not_equal(w, w_new)))
    self.assertTrue(np.alltrue(np.not_equal(b, b_new)))
    self.assertTrue(np.alltrue(np.not_equal(var, var_new)))
    self.assertTrue(context.optimiser_updated)
    call_count += 1
    w, b, var = w_new, b_new, var_new

Developer: sanket-kamthe | Project: GPflow | Lines: 13 | Source: test_monitor.py
Example 19: test_simplify

# np is numpy; cv2 is OpenCV; dm, ch and et are project modules
def test_simplify(self):
    origEdgeImages = dm.load_rasters_from_dir('../images/manual/edges')
    redr_edge_images = []
    simp_edge_images = []
    edgeSets = []
    simp_edge_sets = []
    for img in origEdgeImages:
        edges = ch.chain(img)
        edgeSets.append(edges)
        simplified_set = []
        edge_image = np.zeros_like(img)
        simp_edge_image = np.zeros_like(img)
        for chain in edges:
            edge = et.Edge(chain)
            edge.draw(edge_image, 255)
            simp_chain = ch.simplify_chain(chain, 1)
            simp_edge = et.Edge(simp_chain)
            simp_edge.draw(simp_edge_image, 255)
            simplified_set.append(simp_chain)
        redr_edge_images.append(edge_image)
        simp_edge_images.append(simp_edge_image)
        simp_edge_sets.append(simplified_set)

        redr_matches_orig = np.array_equal(img, edge_image)
        if not redr_matches_orig:
            # highlight mismatched pixels for debugging
            bad_pixels = np.argwhere(np.not_equal(img, edge_image).astype(np.uint8))
            copy = cv2.cvtColor(np.copy(img), cv2.COLOR_GRAY2BGR)
            for pixel in bad_pixels:
                copy[pixel[0], pixel[1]] = (0, 0, 255)
            for chain in edges:
                for pt in chain:
                    copy[pt[0], pt[1]] = (255, 128, 2)
            cv2.imwrite('bad_pixels.png', copy)
        self.assertTrue(redr_matches_orig,
                        "The redrawn edge image does not match the original image." +
                        " Percentage of unmatched pixels: %f" %
                        (float(np.not_equal(img, edge_image).sum()) / img.size))

        simp_matches_orig = np.array_equal(img, simp_edge_image)
        if not simp_matches_orig:
            bad_pixels = np.argwhere(np.not_equal(img, simp_edge_image).astype(np.uint8))
            copy = cv2.cvtColor(np.copy(img), cv2.COLOR_GRAY2BGR)
            for pixel in bad_pixels:
                if simp_edge_image[pixel[0], pixel[1]] != 0:
                    copy[pixel[0], pixel[1]] = (0, 0, 255)
            for simp_chain in simplified_set:
                for pt in simp_chain:
                    copy[pt[0], pt[1]] = (255, 128, 2)
            cv2.imwrite('bad_pixels.png', copy)
        self.assertTrue(simp_matches_orig,
                        "The simplified edge image does not match the original image." +
                        " Percentage of unmatched pixels: %f" %
                        (float(np.not_equal(img, simp_edge_image).sum()) / img.size))

Developer: anupamguha | Project: midas | Lines: 51 | Source: test_simplify.py
Example 20: svm_bench

# Python 2 code; dataset and sen_dig2word are project helpers
import numpy as np
from sklearn import svm
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

def svm_bench():
    data_file = "./data/dataset.pkl"
    train_set, valid_set, test_set, word2id, pop2id, type2id = dataset.load_data(data_file)
    train_set_x, train_set_y = train_set
    train_set_pop_y, train_set_type_y, train_set_loc_y = train_set_y
    valid_set_x, valid_set_y = valid_set
    valid_set_pop_y, valid_set_type_y, valid_set_loc_y = valid_set_y
    test_set_x, test_set_y = test_set
    test_set_pop_y, test_set_type_y, test_set_loc_y = test_set_y

    id2word = {v: k for k, v in word2id.items()}
    word_train_set_x = [sen_dig2word(doc, id2word) for doc in train_set_x]
    word_valid_set_x = [sen_dig2word(doc, id2word) for doc in valid_set_x]
    word_test_set_x = [sen_dig2word(doc, id2word) for doc in test_set_x]

    # construct the word count matrix
    count_vect = CountVectorizer()
    x_train_count = count_vect.fit_transform(word_train_set_x)
    x_valid_count = count_vect.transform(word_valid_set_x)
    x_test_count = count_vect.transform(word_test_set_x)

    tfidf_transformer = TfidfTransformer()
    x_train_tfidf = tfidf_transformer.fit_transform(x_train_count)
    x_valid_tfidf = tfidf_transformer.transform(x_valid_count)
    x_test_tfidf = tfidf_transformer.transform(x_test_count)

    # train the population model
    pop_clf = svm.LinearSVC().fit(x_train_tfidf, train_set_pop_y)
    pop_pred = pop_clf.predict(x_valid_tfidf)
    pop_pred_test = pop_clf.predict(x_test_tfidf)

    # compute the performance (misclassification rate)
    pop_errors = np.mean(np.not_equal(pop_pred, valid_set_pop_y))
    pop_errors_test = np.mean(np.not_equal(pop_pred_test, test_set_pop_y))

    # train the event type model
    type_clf = svm.LinearSVC().fit(x_train_tfidf, train_set_type_y)
    type_pred = type_clf.predict(x_valid_tfidf)
    type_pred_test = type_clf.predict(x_test_tfidf)

    # compute the performance
    type_errors = np.mean(np.not_equal(type_pred, valid_set_type_y))
    type_errors_test = np.mean(np.not_equal(type_pred_test, test_set_type_y))

    print "SVM Valid--> Type error: %0.2f, Population error: %0.2f" % (type_errors, pop_errors)
    print "SVM Test--> Type error: %0.2f, Population error: %0.2f" % (type_errors_test, pop_errors_test)

Developer: Tskatom | Project: Protest_Event_Encoder | Lines: 51 | Source: benchmark.py
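The np.mean(np.not_equal(pred, truth)) expression used throughout svm_bench is the standard misclassification-rate idiom; in isolation, with toy labels:

import numpy as np

y_true = np.array([0, 1, 2, 1, 0])
y_pred = np.array([0, 2, 2, 1, 1])
print(np.mean(np.not_equal(y_pred, y_true)))  # 0.4 -> 2 of 5 predictions differ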
Note: The numpy.not_equal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by their authors. The code remains under its original copyright; consult each project's license before using or redistributing it, and do not reproduce without permission.