This article collects typical usage examples of the numpy.bool8 function in Python. If you have been wondering what bool8 actually does, how it is used, or what real-world bool8 code looks like, the hand-picked examples below should help.
Shown below are 20 code examples of the bool8 function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
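Before the collected examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of what np.bool8 itself is. It assumes a NumPy version that still exposes the bool8 alias; newer releases deprecate it in favor of np.bool_:

import numpy as np

# np.bool8 is an alias for np.bool_, NumPy's single-byte boolean scalar type.
flag = np.bool8(True)
print(type(flag))        # <class 'numpy.bool_'>
print(flag.nbytes)       # 1 -- one byte per boolean

# A pattern that recurs in the examples below: converting a 0/255 integer
# mask into a boolean mask usable for fancy indexing.
raw_mask = np.array([[0, 255], [255, 0]], dtype=np.uint8)
data = np.array([[1, 2], [3, 4]])
bool_mask = np.bool8(raw_mask)   # nonzero -> True, zero -> False
print(data[bool_mask])           # [2 3]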
Example 1: test_invalid
def test_invalid(self):
    prop = bcpp.Int()
    assert not prop.is_valid(0.0)
    assert not prop.is_valid(1.0)
    assert not prop.is_valid(1.0+1.0j)
    assert not prop.is_valid("")
    assert not prop.is_valid(())
    assert not prop.is_valid([])
    assert not prop.is_valid({})
    assert not prop.is_valid(_TestHasProps())
    assert not prop.is_valid(_TestModel())
    assert not prop.is_valid(np.bool8(False))
    assert not prop.is_valid(np.bool8(True))
    assert not prop.is_valid(np.float16(0))
    assert not prop.is_valid(np.float16(1))
    assert not prop.is_valid(np.float32(0))
    assert not prop.is_valid(np.float32(1))
    assert not prop.is_valid(np.float64(0))
    assert not prop.is_valid(np.float64(1))
    assert not prop.is_valid(np.complex64(1.0+1.0j))
    assert not prop.is_valid(np.complex128(1.0+1.0j))
    if hasattr(np, "complex256"):
        assert not prop.is_valid(np.complex256(1.0+1.0j))
Author: jakirkham, Project: bokeh, Lines: 25, Source: test_primitive.py
Example 2: test_valid
def test_valid(self):
    prop = bcpp.Bool()
    assert prop.is_valid(None)
    assert prop.is_valid(False)
    assert prop.is_valid(True)
    assert prop.is_valid(np.bool8(False))
    assert prop.is_valid(np.bool8(True))
Author: jakirkham, Project: bokeh, Lines: 10, Source: test_primitive.py
Example 3: main
def main():
    cap = cv2.VideoCapture(0)
    prev_grey_frame = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        grey = np.uint8(np.mean(frame, axis=2))
        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
        if prev_grey_frame is not None:
            flow = cv2.calcOpticalFlowFarneback(prev_grey_frame, grey,
                                                pyr_scale=0.5, levels=3, winsize=15,
                                                iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
            mag_flow = np.uint8(np.sum(np.abs(5 * flow), axis=2))
            mask_flow = np.uint8(255 * (mag_flow > 50))
            mask_flow = cv2.dilate(mask_flow,
                                   cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15)))
            vis_frame = frame.copy()
            fx, fy = flow[:, :, 0], flow[:, :, 1]
            for contour in cv2.findContours(mask_flow,
                                            cv2.cv.CV_RETR_EXTERNAL,
                                            cv2.cv.CV_CHAIN_APPROX_SIMPLE)[0]:
                rect = cv2.minAreaRect(contour)
                center, size, _ = rect
                if np.min(size) < 100:
                    continue
                cur_mask = np.zeros(grey.shape)
                cv2.drawContours(cur_mask, [contour], 0, 255, -1)
                mean_fx = np.mean(fx[np.bool8(cur_mask)])
                mean_fy = np.mean(fy[np.bool8(cur_mask)])
                p2 = (int(center[0] + mean_fx * 10),
                      int(center[1] + mean_fy * 10))
                cv2.line(vis_frame, (int(center[0]), int(center[1])),
                         p2, (0, 255, 0))
                box = cv2.cv.BoxPoints(rect)
                box = np.int0(box)
                for i in xrange(len(box)):
                    cv2.line(vis_frame, tuple(box[i - 1]),
                             tuple(box[i]), (0, 0, 255), 2)
            cv2.imshow('mag_flow', vis_frame)
        prev_grey_frame = grey.copy()
Author: ansgri, Project: rsdt-tasks, Lines: 53, Source: obj_from_motion.py
Example 4: test_Bool
def test_Bool(self):
    prop = Bool()
    self.assertTrue(prop.is_valid(None))
    self.assertTrue(prop.is_valid(False))
    self.assertTrue(prop.is_valid(True))
    self.assertFalse(prop.is_valid(0))
    self.assertFalse(prop.is_valid(1))
    self.assertFalse(prop.is_valid(0.0))
    self.assertFalse(prop.is_valid(1.0))
    self.assertFalse(prop.is_valid(1.0 + 1.0j))
    self.assertFalse(prop.is_valid(""))
    self.assertFalse(prop.is_valid(()))
    self.assertFalse(prop.is_valid([]))
    self.assertFalse(prop.is_valid({}))
    self.assertFalse(prop.is_valid(Foo()))
    try:
        import numpy as np
        self.assertTrue(prop.is_valid(np.bool8(False)))
        self.assertTrue(prop.is_valid(np.bool8(True)))
        self.assertFalse(prop.is_valid(np.int8(0)))
        self.assertFalse(prop.is_valid(np.int8(1)))
        self.assertFalse(prop.is_valid(np.int16(0)))
        self.assertFalse(prop.is_valid(np.int16(1)))
        self.assertFalse(prop.is_valid(np.int32(0)))
        self.assertFalse(prop.is_valid(np.int32(1)))
        self.assertFalse(prop.is_valid(np.int64(0)))
        self.assertFalse(prop.is_valid(np.int64(1)))
        self.assertFalse(prop.is_valid(np.uint8(0)))
        self.assertFalse(prop.is_valid(np.uint8(1)))
        self.assertFalse(prop.is_valid(np.uint16(0)))
        self.assertFalse(prop.is_valid(np.uint16(1)))
        self.assertFalse(prop.is_valid(np.uint32(0)))
        self.assertFalse(prop.is_valid(np.uint32(1)))
        self.assertFalse(prop.is_valid(np.uint64(0)))
        self.assertFalse(prop.is_valid(np.uint64(1)))
        self.assertFalse(prop.is_valid(np.float16(0)))
        self.assertFalse(prop.is_valid(np.float16(1)))
        self.assertFalse(prop.is_valid(np.float32(0)))
        self.assertFalse(prop.is_valid(np.float32(1)))
        self.assertFalse(prop.is_valid(np.float64(0)))
        self.assertFalse(prop.is_valid(np.float64(1)))
        self.assertFalse(prop.is_valid(np.complex64(1.0 + 1.0j)))
        self.assertFalse(prop.is_valid(np.complex128(1.0 + 1.0j)))
        self.assertFalse(prop.is_valid(np.complex256(1.0 + 1.0j)))
    except ImportError:
        pass
Author: Jessime, Project: bokeh, Lines: 49, Source: test_properties.py
Example 5: regions
def regions(img):
    '''
    CURRENTLY (6pm 8 Aug):
    To fix: ksize (and maybe iterations) is based on the big image. Need to make
    it work for resized images, or else make it adaptive to the image size.
    Maybe compare the current ksize to the image length.
    Original (non-resized) values: ksize1=15, iterations=30, ksize=41.
    # update: reduced values, still not adaptive
    # Also: the thresh value in threshold() is also not adaptive, but works for now
    '''
    img_copy = img[:].copy()
    #eroded = cv2.erode(img, None, iterations=10)
    gam = gamma(img, 2.2)
    blur = cv2.GaussianBlur(src=gam, dst=img_copy, ksize=(3, 3), sigmaX=0,
                            sigmaY=0)
    eroded = cv2.dilate(blur, None, iterations=1)
    #gam = gamma(eroded, 2)
    blur2 = cv2.GaussianBlur(src=eroded, dst=img_copy, ksize=(9, 9), sigmaX=0,
                             sigmaY=0)
    thresh_val = np.int(np.mean(blur2))
    ret, threshold_data = cv2.threshold(blur2, 50, 255, cv2.THRESH_BINARY)
    #threshold_data = cv2.adaptiveThreshold(blur2, 255,
    #                                       cv2.ADAPTIVE_THRESH_MEAN_C,
    #                                       cv2.THRESH_BINARY, 301, 2)
    # Create two masked images: one masks out the darker areas, one masks out the lighter areas
    boole = np.bool8(threshold_data)
    light_img = boole * img
    dark_img = img * np.uint8(boole == 0)
    return light_img, dark_img
Author: polar-computing, Project: 3DSeals, Lines: 32, Source: region_extract.py
Example 6: filterPrepare
def filterPrepare(self, e, data, keys, ndata, events):
    import numpy as np
    import pyopencl as cl
    mf = cl.mem_flags
    ndata = data.size
    if keys.size != ndata: raise Exception()
    filtbytes = np.bool8(False).nbytes * ndata
    if not isinstance(data, cl.Buffer):
        data_buf = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=data)
    else:
        data_buf = data
    if not isinstance(keys, cl.Buffer):
        keys_buf = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=keys)
    else:
        keys_buf = keys
    filt_buf = cl.Buffer(self.ctx, mf.READ_WRITE, filtbytes)
    kernel = self.prg.filterPrepare
    kernel.set_args(data_buf, keys_buf, np.uint64(ndata), np.uint8(33), np.uint8(66), filt_buf)
    global_dims = self.get_global(self.get_grid_dims(ndata))
    print "filterPrepare"
    if e is None:
        e = [ cl.enqueue_nd_range_kernel(self.queue, kernel, global_dims, self.localDims), ]
    else:
        e = [ cl.enqueue_nd_range_kernel(self.queue, kernel, global_dims, self.localDims, wait_for=e), ]
    events += e
    return (e, data_buf, keys_buf, filt_buf)
Author: Kobtul, Project: documents, Lines: 34, Source: filter.py
Example 7: testDefaultFlatAndBackNonIdentical
def testDefaultFlatAndBackNonIdentical(self):
    """
    Test flattening/unflattening of objects which change type.
    No type requirements are given in these tests. In other words, we allow
    pylabrad to choose a default type for flattening.
    In this test, we do not expect A == unflatten(*flatten(A)). This is
    mostly because lists of numbers, both with and without units, should
    unflatten to ndarray or ValueArray, rather than actual python lists.
    """
    def compareValueArrays(a, b):
        """I check near equality of two ValueArrays"""
        self.assertTrue(a.allclose(b))
    tests = [
        ([1, 2, 3], np.array([1, 2, 3], dtype="int32"), np.testing.assert_array_equal),
        ([1.1, 2.2, 3.3], np.array([1.1, 2.2, 3.3], dtype="float64"), np.testing.assert_array_almost_equal),
        (np.array([3, 4], dtype="int32"), np.array([3, 4], dtype="int32"), np.testing.assert_array_equal),
        (np.array([1.2, 3.4]), np.array([1.2, 3.4]), np.testing.assert_array_almost_equal),
        ([Value(1.0, "m"), Value(3.0, "m")], ValueArray([1.0, 3.0], "m"), compareValueArrays),
        ([Value(1.0, "m"), Value(10, "cm")], ValueArray([1.0, 0.1], "m"), compareValueArrays),
        (ValueArray([1, 2], "Hz"), ValueArray([1, 2], "Hz"), compareValueArrays),
        (ValueArray([1.0, 2], ""), np.array([1.0, 2]), np.testing.assert_array_almost_equal),
        # Numpy scalar types
        (np.bool8(True), True, self.assertEqual),
    ]
    for input, expected, comparison_func in tests:
        unflat = T.unflatten(*T.flatten(input))
        if isinstance(unflat, np.ndarray):
            self.assertEqual(unflat.dtype, expected.dtype)
        comparison_func(unflat, expected)
Author: ckometter, Project: pylabrad, Lines: 33, Source: test_types.py
Example 8: test_int
def test_int(self):
    self.assert_equal_with_lambda_check(_flexible_type(1), 1)
    self.assert_equal_with_lambda_check(_flexible_type(1L), 1)
    self.assert_equal_with_lambda_check(_flexible_type(True), 1)
    self.assert_equal_with_lambda_check(_flexible_type(False), 0)
    # numpy types
    self.assert_equal_with_lambda_check(_flexible_type(np.int_(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.int64(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.int32(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.int16(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.uint64(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.uint32(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.uint16(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.bool(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.bool(0)), 0)
    self.assert_equal_with_lambda_check(_flexible_type(np.bool_(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.bool_(0)), 0)
    self.assert_equal_with_lambda_check(_flexible_type(np.bool8(1)), 1)
    self.assert_equal_with_lambda_check(_flexible_type(np.bool8(0)), 0)
Author: andreacrescini, Project: SFrame, Lines: 19, Source: test_flexible_type.py
Example 9: toNumpyScalar
def toNumpyScalar(num, dtype=None):
    ''' convert a Python number to an equivalent Numpy scalar type '''
    if isinstance(dtype, np.dtype):
        num = dtype.type(num)
    else:
        # bool must be checked before int/float, since bool is a subclass of int
        if isinstance(num, bool): num = np.bool8(num)
        elif isinstance(num, float): num = np.float64(num)
        elif isinstance(num, int): num = np.int64(num)
        else: raise NotImplementedError(num)
    return num
Author: xiefengy, Project: GeoPy, Lines: 10, Source: misc.py
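A brief usage sketch for the toNumpyScalar helper above (illustrative only, not taken from the GeoPy project; it assumes the function is in scope and that the bool check runs before the int check, as in the listing above):

import numpy as np

print(type(toNumpyScalar(True)))    # expected: <class 'numpy.bool_'> (i.e. np.bool8)
print(type(toNumpyScalar(3.14)))    # expected: <class 'numpy.float64'>
print(type(toNumpyScalar(7)))       # expected: <class 'numpy.int64'>
print(type(toNumpyScalar(5, dtype=np.dtype('int16'))))  # expected: <class 'numpy.int16'>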
Example 10: back_extract
def back_extract(img):
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    trash = gray_img[:].copy()
    eq_img = cv2.equalizeHist(src=gray_img, dst=trash)
    gammed = gamma(eq_img, gamma=15)
    blur = gammed
    cv2.GaussianBlur(src=gammed, dst=blur, ksize=(35, 35), sigmaX=0, sigmaY=0)
    cont = cv2.findContours(blur, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    areaArray = []
    for i, c in enumerate(cont):
        area = cv2.contourArea(c)
        areaArray.append(area)
    sorteddata = sorted(zip(areaArray, cont), key=lambda x: x[0],
                        reverse=True)
    largest1 = sorteddata[0][1]
    points1 = np.array([point[0] for point in largest1])
    points2 = [0, 0]
    if len(sorteddata) > 1:  # Some images don't have 2 segments
        largest2 = sorteddata[1][1]
        points2 = np.array([point[0] for point in largest2])
    else:
        largest2 = np.asarray((0, 0))
    blank = np.zeros(shape=gray_img.shape)
    if len(points2) > 2:  # If there are two segments
        filled = cv2.fillPoly(blank, [points1, points2], 1)
    else:
        filled = cv2.fillPoly(blank, [points1], 1)
    boole = ~np.bool8(filled)  # inverts so background is 0
    boole = np.uint8(boole)
    masked = gray_img * boole
    ######## Secondary: GrabCut
    mask = np.zeros(img.shape[:2], np.uint8)
    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (0, 0, img.shape[1]-1, len(img)-1)
    cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 2, cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask==2)|(mask==0), 0, 1).astype('uint8')
    masked2 = img * mask2[:, :, np.newaxis]
    masked2 = cv2.cvtColor(masked2, cv2.COLOR_BGR2GRAY)
    masked = masked2 * boole
    # Find how much white there is. Integrates into inversion decision later
    how_mask = masked.size - np.count_nonzero(masked)
    cv2.imshow("masked img", masked)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return masked, how_mask
Author: polar-computing, Project: 3DSeals, Lines: 53, Source: back_extract.py
Example 11: _read_image
def _read_image(name):
    """Read an image from a file_handle"""
    if name == "image":
        if file_handle["phased"][0]:
            image = _numpy.squeeze(file_handle['real'][...] + 1.j*file_handle['imag'][...])
        else:
            image = _numpy.real(_numpy.squeeze(file_handle['real'][...]))
    elif name == "mask":
        image = _numpy.bool8(_numpy.squeeze(file_handle["mask"][...]))
    else:
        raise ValueError("Can not load {0}.".format(name))
    return image
Author: ekeberg, Project: Python-tools, Lines: 12, Source: sphelper.py
Example 12: execute
def execute(positions, num_particles, num_frames):
    # Get host positions:
    cpuPos = numpy.array(positions, dtype=numpy.float32)
    # Allocate position space on device:
    devPos = cuda.mem_alloc(cpuPos.nbytes)
    # Copy positions:
    cuda.memcpy_htod(devPos, cpuPos)
    # Allocate device velocities:
    devVels = cuda.mem_alloc(2 * num_particles * numpy.float32().nbytes)
    cuda.memset_d32(devVels, 0, 2 * num_particles)
    # #Copy velocities:
    # cuda.memcpy_htod(devVels, cpuVels)
    # Allocate and initialize device in-bounds flags:
    # inBounds = numpy.zeros(num_particles, dtype=bool)
    devInBounds = cuda.mem_alloc(num_particles * numpy.bool8().nbytes)
    cuda.memset_d8(devInBounds, True, num_particles)
    # inB = numpy.zeros(num_particles, dtype=numpy.bool)
    # cuda.memcpy_dtoh(inB, devInBounds)
    # print inB
    # cuda.memcpy_htod(devInBounds, inBounds)
    # numBlocks = 1  # (num_particles // 512) + 1
    grid_dim = ((num_particles // NUM_THREADS) + 1, 1)
    print grid_dim
    runframe = module.get_function("runframe")
    frames = [None] * num_frames
    for i in range(num_frames):
        runframe(devPos, devVels, devInBounds,
                 numpy.int32(num_particles),
                 grid=grid_dim,
                 block=(NUM_THREADS, 1, 1))
        # Get the positions from device:
        cuda.memcpy_dtoh(cpuPos, devPos)
        frames[i] = cpuPos.copy()
        # frames[i] = copy(cpuPos)
        # write_frame(out, cpuPos, num_particles)
    # Simulation destination file:
    # out = open(OUTPUT_FILE, 'w')
    # write_header(out, num_particles)
    # for frame in frames:
    #     write_frame(out, frame, num_particles)
    # clean up...
    # out.close()
    devPos.free()
    devVels.free()
    devInBounds.free()
Author: rbpittman, Project: CUDA, Lines: 51, Source: gpuSimulator.py
Example 13: dft_2d_masked
def dft_2d_masked(y_side, x_side, mask_real, mask_fourier):
    """
    The dft matrix that is returned works on complex vectors
    and returns a complex vector. Data is stored consistent with
    numpy's flatten(). Only the cols and rows corresponding to pixels
    in the real and Fourier mask respectively are calculated.
    """
    o_1 = _numpy.exp(-2.0j * _numpy.pi / y_side)
    o_2 = _numpy.exp(-2.0j * _numpy.pi / x_side)
    i = _numpy.zeros(x_side * y_side)
    j = _numpy.zeros(x_side * y_side)
    for k in xrange(y_side):
        j[x_side * k : x_side * (k + 1)] = _numpy.arange(x_side)
    for k in xrange(x_side):
        i[k::x_side] = _numpy.arange(y_side)
    i_mask_real = i[_numpy.bool8(mask_real.flatten())]
    i_mask_fourier = i[_numpy.bool8(mask_fourier.flatten())]
    j_mask_real = j[_numpy.bool8(mask_real.flatten())]
    j_mask_fourier = j[_numpy.bool8(mask_fourier.flatten())]
    dft = o_1 ** (i_mask_real[:, _numpy.newaxis] * i_mask_fourier[_numpy.newaxis, :]) * o_2 ** (
        j_mask_real[:, _numpy.newaxis] * j_mask_fourier[_numpy.newaxis, :]
    )
    return dft
Author: ekeberg, Project: Python-tools, Lines: 23, Source: dft.py
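A hedged usage sketch for dft_2d_masked above (written for this article, not taken from the Python-tools repository; the mask values, the np.dot application, and the FFT cross-check are assumptions based only on the code shown; note the function uses xrange, so it needs Python 2 or xrange replaced with range):

import numpy as np

y_side, x_side = 4, 4
mask_real = np.ones((y_side, x_side), dtype=bool)      # keep every real-space pixel
mask_fourier = np.zeros((y_side, x_side), dtype=bool)
mask_fourier[:2, :2] = True                            # keep only a 2x2 corner of Fourier space

# The returned matrix has one row per True pixel in mask_real and one column
# per True pixel in mask_fourier.
dft = dft_2d_masked(y_side, x_side, mask_real, mask_fourier)

image = np.random.rand(y_side, x_side)
fourier_masked = np.dot(image.flatten()[mask_real.flatten()], dft)

# Since mask_real keeps everything here, the result should match numpy's full
# FFT evaluated at the masked frequencies.
reference = np.fft.fft2(image).flatten()[mask_fourier.flatten()]
print(np.allclose(fourier_masked, reference))          # expected: True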
Example 14: process
def process(self, src, **kwargs):
    sw = SW('Optical Flow')
    frame_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    p0 = self.p0
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_gray, frame_gray, p0, None,
                                           winSize=(15, 15),
                                           maxLevel=2,
                                           criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    rstmsk = np.zeros(good_new.shape[0], dtype=np.bool8)
    for i, pt in enumerate(good_new):
        rstmsk[i] = np.bool8(math.sqrt((pt[0]-self.center.x)**2 + (pt[1]-self.center.y)**2) <= (self.radius + SELECTPADDING))
    good_new = good_new[rstmsk]
    good_old = good_old[rstmsk]
    if (good_new.shape[0] * 2) < self.nump0:
        raise OpticalFlow.ObjectMissError
    tmp = np.average(good_new, axis=0)
    self.center = util.Point(int(tmp[0]), int(tmp[1]))
    dst = src.copy()
    try:
        self.oflines  # optical flow trajectory lines
    except AttributeError:
        self.oflines = np.zeros_like(src, dtype=np.uint8)
    for n, o in zip(good_new, good_old):
        cv2.circle(dst, tuple(n), 3, OBJECT_MATCH_COLOR, -1)  # filled circle
        cv2.line(self.oflines, tuple(n), tuple(o), OBJECT_MATCH_COLOR, 3)  # line
    self.old_gray = frame_gray
    self.p0 = good_new.reshape(-1, 1, 2)
    sw.stop()
    return dst, [self.center], cv2.add(self.oflines, src)
Author: dalinhuang, Project: GroundStation, Lines: 44, Source: ObjectTracking.py
Example 15: radial_average
def radial_average(image, mask=None):
    """Calculates the radial average of an array of any shape,
    the center is assumed to be at the physical center."""
    if mask is None:
        mask = _numpy.ones(image.shape, dtype="bool8")
    else:
        mask = _numpy.bool8(mask)
    axis_values = [_numpy.arange(l) - l / 2.0 + 0.5 for l in image.shape]
    radius = _numpy.zeros((image.shape[-1]))
    for i in range(len(image.shape)):
        radius = radius + (axis_values[-(1 + i)][(slice(0, None),) + (_numpy.newaxis,) * i]) ** 2
    radius = _numpy.int32(_numpy.sqrt(radius))
    number_of_bins = radius[mask].max() + 1
    radial_sum = _numpy.zeros(number_of_bins)
    weight = _numpy.zeros(number_of_bins)
    for value, this_radius in zip(image[mask], radius[mask]):
        radial_sum[this_radius] += value
        weight[this_radius] += 1.0
    radial_sum[weight > 0] /= weight[weight > 0]
    radial_sum[weight == 0] = _numpy.nan
    return radial_sum
Author: ekeberg, Project: Python-tools, Lines: 21, Source: tools.py
Example 16: cluster_withsubsets
def cluster_withsubsets(spike_table, reorder_clus=True):
    if reorder_clus:
        print "Cluster reordering not implemented!"
    ST_nc = np.bool8(spike_table.cols.channel_mask[:])
    Fet_nc3 = spike_table.cols.fet[:]
    # TODO: implement this and remove the raise exception
    raise NotImplementedError(
        "To use cluster_withsubsets you will need to implement some code to find the groups from the probe graph.")
    # These are all 4-channel subsets to be computed (based on the probe's topology)
    ChSubsets = probes.SORT_GROUPS
    # For each subset: the consecutive numbers of spikes that are relevant
    SpkSubsets = spike_subsets(ST_nc, ChSubsets)
    print "%i subsets total" % len(SpkSubsets)
    # _FPC is the number of features per channel
    n_spikes, n_ch, _FPC = Fet_nc3.shape
    # for i_subset, ChHere, SpkHere in zip(it.count(), ChSubsets, SpkSubsets):  # SpkHere - the consecutive numbers of spikes belonging to this subset
    #     print("Sorting channels %s" % ChHere.__repr__())
    #     FetHere_nc3 = Fet_nc3[np.ix_(SpkHere, ChHere)]  # features of spikes in this subset
    #     # FetHere_nc3 is a 3D array of size (no. of spikes in this subset) x 4 (subsets are of 4 channels) x 3 (no. of features per channel)
    #     CluArr = klustakwik_cluster(FetHere_nc3, i_subset, ChHere, SpkHere)
    #     print 'KlustaKwik returned', max(CluArr), 'clusters.'
    args = []
    # SpkHere - the consecutive numbers of spikes belonging to this subset
    for i_subset, ChHere, SpkHere in zip(it.count(), ChSubsets, SpkSubsets):
        print("Sorting channels %s" % ChHere.__repr__())
        # features of spikes in this subset
        FetHere_nc3 = Fet_nc3[np.ix_(SpkHere, ChHere)]
        # FetHere_nc3 is a 3D array of size (no. of spikes in this subset) x
        # 4 (subsets are of 4 channels) x 3 (no. of features per channel)
        args.append((FetHere_nc3, i_subset, ChHere, SpkHere))
        #CluArr = klustakwik_cluster(FetHere_nc3, i_subset, ChHere, SpkHere)
        #print 'KlustaKwik returned', max(CluArr), 'clusters.'
    pool = multiprocessing.Pool(NUMPROCESSES)
    pool.map(klustakwik_cluster_args, args)
Author: kylerbrown, Project: spikedetekt, Lines: 40, Source: subsets.py
Example 17: back_extract
def back_extract(img):
    '''
    Attempts to find the background and turn it black. Equalizes the histogram,
    boosts gamma way up to 15 so that the only contour is the seal (usually),
    blurs, then finds that contour, builds a filled polygon from the points,
    and then multiplies the (inverted) boolean values by the original image
    so that the background (black, 0) turns all corresponding background
    pixels in the original black as well.
    Requires: cv2, numpy as np
    '''
    trash = img[:].copy()
    eq_img = cv2.equalizeHist(src=img, dst=trash)
    gammed = gamma(eq_img, gamma=15)
    blur = gammed
    cv2.GaussianBlur(src=gammed, dst=blur, ksize=(35, 35), sigmaX=0, sigmaY=0)
    cont = cv2.findContours(blur, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    areaArray = []
    for i, c in enumerate(cont):
        area = cv2.contourArea(c)
        areaArray.append(area)
    sorteddata = sorted(zip(areaArray, cont), key=lambda x: x[0],
                        reverse=True)
    largest1 = sorteddata[0][1]
    points1 = np.array([point[0] for point in largest1])
    points2 = [0, 0]
    if len(sorteddata) > 1:  # Some images don't have 2 segments
        largest2 = sorteddata[1][1]
        points2 = np.array([point[0] for point in largest2])
    else:
        largest2 = np.asarray((0, 0))
    blank = np.zeros(shape=img.shape)
    if len(points2) > 2:  # If there are two segments
        filled = cv2.fillPoly(blank, [points1, points2], 1)
    else:
        filled = cv2.fillPoly(blank, [points1], 1)
    boole = ~np.bool8(filled)  # inverts so background is 0 (boolean arrays use ~, not unary -)
    boole = np.uint8(boole)
    masked = img * boole
    return masked
Author: polar-computing, Project: 3DSeals, Lines: 40, Source: Watershed+segmentation.py
Example 18: svmPlotExtrRep
def svmPlotExtrRep(event=0, plot=True, suf=''):
    from Pixel import initPath
    if plot: plt.close()
    P = 32; F = 34
    dat = []
    for vp in range(1, 5):
        path, inpath, figpath = initPath(vp, event)
        fn = inpath + 'svm%s/hc/hcWorker' % suf
        dat.append([])
        for g in range(2):
            for k in range(4):
                try: temp = np.load(fn + '%d.npy' % (k*2+g))
                except IOError:
                    print 'File missing: ', vp, event, suf
                    temp = np.zeros(P*P*F, dtype=np.bool8)
                temp = np.reshape(temp, [P, P, F])
                dat[-1].append(np.bool8(g - 1**g * temp))
    lbl = []
    for i in range(4): lbl.append([FIG[7][0]+str(i+1), 20, 18+i*40, FIG[7][1]])
    lbl.append([FIG[7][2], 20, -10, 70]); lbl.append([FIG[7][3], 20, -10, 245])
    if plot: plotGifGrid(dat, fn=figpath+'svm%sExtremaE%d' % (suf, event)+FMT,
                         F=34, P=32, text=lbl, bcgclr=0.5)
    return dat
Author: simkovic, Project: Chase, Lines: 23, Source: FiguresMoviesTables.py
Example 19: cluster_withsubsets
def cluster_withsubsets(spike_table, clusterdir, reorder_clus=True):
    "TODO: write docstring"
    if reorder_clus: print "Cluster reordering not implemented!"
    ST_nc = np.bool8(spike_table.cols.st[:])
    Fet_nc3 = spike_table.cols.fet[:]
    ChSubsets = probe_stuff.SORT_GROUPS
    SpkSubsets = spike_subsets(ST_nc, ChSubsets)
    print("%i subsets total" % len(SpkSubsets))
    n_spikes, n_ch, _FPC = Fet_nc3.shape
    key2subset, key2members, key2spkmean, key2mag = {}, {}, {}, {}
    for i_subset, ChHere, SpkHere in zip(it.count(), ChSubsets, SpkSubsets):
        print("Sorting channels %s" % ChHere.__repr__())
        FetHere_nc3 = Fet_nc3[np.ix_(SpkHere, ChHere)]  # features of spikes in this subset
        CluArr = klustakwik_cluster(FetHere_nc3, '/'.join((clusterdir, "cluster_%i" % i_subset)))
        CluMembersList = [(SpkHere[inds]) for inds in subset_inds(CluArr)]  # go back to original indices
        # We are ignoring cluster 0 here, because of [1:] above. No not now
        for (i_clu, Members) in enumerate(CluMembersList):
            if len(Members) > MIN_CLU_SIZE:
                SpkMean = np.array([spike_table[member]["wave"][:, ChHere] for member in Members]).mean(axis=0)
                key = (i_subset, i_clu)
                key2subset[key] = ChHere
                key2members[key] = Members
                key2spkmean[key] = SpkMean
                key2mag[key] = SpkMean.ptp(axis=0).sum()
    ImprovingKeys = sorted(key2mag.keys(), key=lambda key: key2mag[key])
    # problem: most spikes aren't members of any cluster?!
    key2oldcount = dict((key, len(members)) for key, members in key2members.items())
    FinalClu = np.zeros(n_spikes, dtype=np.dtype([("subset", int), ("clu", int)]))
    # maybe i should have a key2int kind of function?
    fromto2stolen = collections.defaultdict(int)
    for key in ImprovingKeys:
        if DEBUG:
            for oldkey in FinalClu[key2members[key]]: fromto2stolen[tuple(oldkey), key] += 1
        FinalClu[key2members[key]] = key
    for fromkey, tokey in fromto2stolen.keys():
        if DEBUG:
            if fromkey == (0, 0): del fromto2stolen[(fromkey, tokey)]
    key2newcount = dict((key, ((FinalClu["subset"] == key[0]) & (FinalClu["clu"] == key[1])).sum()) for key in ImprovingKeys)
    key2good = dict((key,
                     key2newcount[key]/key2oldcount[key] > ACCEPTABLE_FRAC and
                     key2oldcount[key] > MIN_CLU_SIZE)
                    for key in ImprovingKeys)
    good_keys = filter(lambda key: key2good[key], reversed(ImprovingKeys))
    #with open("counts.txt","w") as fd:
    #    for i_clu,(new,old) in enumerate(zip(NewCount,OrigCount)):
    #        fd.write("%i: %i/%i\n"%(i_clu,new,old) if new/old < .8 else "%i: %i/%i ==> %i\n"%(i_clu,new,old,RelabelArr[i_clu]))
    # problem: relabel cluster indices so they're in the right order
    key2rank = dict((key, rank) for (rank, key) in enumerate(reversed(ImprovingKeys)))
    key2left = dict((key, len(members)) for key, members in key2members.items())
    if DEBUG:
        merge_diagnostics(n_ch, key2subset, key2rank, key2left, key2good, key2spkmean, fromto2stolen)
    key2ind = dict((key, ind) for (ind, key) in enumerate(sorted(good_keys, key=lambda key: np.mean(key2subset[key]))))
    FinalCluInd = np.array([key2ind.get(tuple(key), 0) for key in FinalClu], dtype=np.int32)
    return FinalCluInd
Author: braingram, Project: caton, Lines: 66, Source: subset_sorting.py
Example 20: script fragment
fnames.sort()
fnames = [f for f in fnames if f.find('image_') >= 0]
n = len(fnames)/2
# Store some values in order to keep track of FPS
if (showFPS):
    startTime = time()
    FPS = 0
    lastI = 0
# Get our plot points ready
timePoints = [[], []]
plotPoints = [[[], [], []], [[], [], []]]
# Create the mask and table model
mask = ~np.bool8(cv2.imread(os.path.join(folder, 'mask.png'), -1))
tablemodel = util.buildMinMap(os.path.join(folder, 'table'))
i = 0
waitAmount = 5
handList = None
camShifter = None
colors = None
# Loop until we are out of images
while (i < n):
    print "Processing Frame ", i
    # Show the FPS if desired
Author: erichare, Project: Hand-Tracking, Lines: 31, Source: handTrack.py
Note: The numpy.bool8 function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.