This article collects typical usage examples of the sensor.snapshot function in Python. If you are wondering what sensor.snapshot does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
Below are 20 code examples of the snapshot function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
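Before the longer examples, here is a minimal sketch of the call pattern almost all of them share: configure the sensor, wait for the settings to settle, then call sensor.snapshot() in a loop. The frame size and pixel format below are illustrative choices taken from the examples, not requirements.

import sensor, time

sensor.reset()                        # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)   # Or sensor.GRAYSCALE.
sensor.set_framesize(sensor.QVGA)     # Or another supported frame size.
sensor.skip_frames(time=2000)         # Let the new settings take effect.

clock = time.clock()                  # Tracks FPS.
while True:
    clock.tick()
    img = sensor.snapshot()           # Capture one frame and return it as an Image.
    print(clock.fps())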
Example 1: start_streaming
def start_streaming(s):
    print('Waiting for connections..')
    client, addr = s.accept()
    # Set client socket timeout to 2s.
    client.settimeout(2.0)
    print('Connected to ' + addr[0] + ':' + str(addr[1]))

    # Read request from client.
    data = client.recv(1024)
    # Should parse client request here.

    # Send multipart header.
    client.send("HTTP/1.1 200 OK\r\n" \
                "Server: OpenMV\r\n" \
                "Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
                "Cache-Control: no-cache\r\n" \
                "Pragma: no-cache\r\n\r\n")

    # FPS clock
    clock = time.clock()

    # Start streaming images.
    # NOTE: Disable IDE preview to increase streaming FPS.
    while (True):
        clock.tick() # Track elapsed milliseconds between snapshots().
        frame = sensor.snapshot()
        cframe = frame.compressed(quality=35)
        header = "\r\n--openmv\r\n" \
                 "Content-Type: image/jpeg\r\n" \
                 "Content-Length:" + str(cframe.size()) + "\r\n\r\n"
        client.send(header)
        client.send(cframe)
        print(clock.fps())
Developer: MaurinElectroTextile, Project: openmv, Lines: 33, Source file: mjpeg_streamer_ap.py
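The listening socket s is created outside of this excerpt (the full mjpeg_streamer_ap.py also brings up the Wi-Fi interface first, which is omitted here). A hypothetical sketch of how such a socket could be prepared and the function driven; the port number and error handling are assumptions, not taken from the excerpt:

import usocket

s = usocket.socket(usocket.AF_INET, usocket.SOCK_STREAM)
s.bind(('', 8080))   # Listen on all interfaces; the port is illustrative.
s.listen(1)          # Accept one client at a time.

while True:
    try:
        start_streaming(s)           # Serve one client until it disconnects.
    except OSError as e:
        print("socket error:", e)    # Client went away; wait for the next one.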
Example 2: find_face
def find_face():
    # Skip some frames to let the sensor settle.
    for i in range(0, 100):
        img = sensor.snapshot()

    # face_cascade is loaded earlier in the full script (Example 5 shows a variant
    # that loads it inside the function).
    while (True):
        img = sensor.snapshot()
        objects = img.find_features(face_cascade, threshold=0.65, scale=1.65)
        if objects:
            print(objects[0])
            img.draw_rectangle(objects[0])
            try:
                kpts1 = img.find_keypoints(threshold=32, normalized=False, roi=objects[0])
            except:
                continue
            if kpts1:
                img.draw_keypoints(kpts1)
                time.sleep(1000)
                return kpts1
Developer: RayPhon, Project: openmv, Lines: 17, Source file: face_tracking.py
Example 3: unittest
def unittest(data_path, temp_path):
    import sensor
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    img.set_pixel(img.width()//2+50, 120, 255)
    img.set_pixel(img.width()//2-50, 120, 255)
    img.draw_line([img.width()//2-50, 50, img.width()//2+50, 50])
    img.draw_rectangle([img.width()//2-25, img.height()//2-25, 50, 50])
    img.draw_circle(img.width()//2, img.height()//2, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(img.width()//2, img.height()//2)
    sensor.flush()
    img.difference(data_path+"/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
Developer: openmv, Project: openmv, Lines: 17, Source file: 20-drawing.py
Example 4: test_color_bars
def test_color_bars():
    sensor.reset()
    # Set sensor settings
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)

    # Set sensor pixel format
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)

    # Enable colorbar test mode
    sensor.set_colorbar(True)

    # Skip a few frames to allow the sensor to settle down.
    # Note: This takes more time when exec from the IDE.
    for i in range(0, 100):
        image = sensor.snapshot()

    # Color bars thresholds
    t = [lambda r, g, b: r < 50  and g < 50  and b < 50,   # Black
         lambda r, g, b: r < 50  and g < 50  and b > 200,  # Blue
         lambda r, g, b: r > 200 and g < 50  and b < 50,   # Red
         lambda r, g, b: r > 200 and g < 50  and b > 200,  # Purple
         lambda r, g, b: r < 50  and g > 200 and b < 50,   # Green
         lambda r, g, b: r < 50  and g > 200 and b > 200,  # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White

    # 320x240 image with 8 color bars, each approx. 40 pixels wide.
    # We start from the center of the frame buffer and average the
    # values of 10 sample pixels from the center of each color bar.
    for i in range(0, 8):
        avg = (0, 0, 0)
        idx = 40*i+20  # center of colorbars
        for off in range(0, 10):  # avg 10 pixels
            rgb = image.get_pixel(idx+off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))
        if not t[i](avg[0]/10, avg[1]/10, avg[2]/10):
            raise Exception("COLOR BARS TEST FAILED. "
                            "BAR#(%d): RGB(%d,%d,%d)" % (i+1, avg[0]/10, avg[1]/10, avg[2]/10))

    print("COLOR BARS TEST PASSED...")
Developer: Killercotton, Project: OpenMV_medialab, Lines: 46, Source file: selftest.py
Example 5: find_face
def find_face():
    global sensor, time
    # Load Haar Cascade
    face_cascade = HaarCascade("/frontalface.cascade")
    while (True):
        image = sensor.snapshot()
        objects = image.find_features(face_cascade, threshold=0.65, scale=1.85)
        if objects:
            print(objects[0])
            image.draw_rectangle(objects[0])
            try:
                kpts1 = image.find_keypoints(threshold=32, normalized=False, roi=objects[0])
            except:
                continue
            if kpts1:
                image.draw_keypoints(kpts1)
                time.sleep(1000)
                return kpts1
Developer: VTzoganis, Project: openmv, Lines: 18, Source file: face_tracking.py
示例6: while
-1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(100, 255)] # grayscale thresholds
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
sensor.__write_reg(0xAC, 0xDF)
sensor.__write_reg(0x8F, 0xFF)
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
img.morph(kernel_size, kernel)
img.binary(thresholds)
# Erode pixels with less than 2 neighbors using a 3x3 image kernel
img.erode(1, threshold = 2)
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:30,代码来源:edge_detection.py
Example 7: test_image_processing
def test_image_processing():
    for i in range(0, 50):
        clock.tick()  # Update the FPS clock.
        img = sensor.snapshot()  # Take a picture and return the image.
        img.find_edges(image.EDGE_CANNY, threshold=(50, 80))
Developer: openmv, Project: openmv, Lines: 5, Source file: cpufreq_scaling.py
Example 8: while
import sensor, time

sensor.set_pixformat(sensor.RGB565)
clock = time.clock()

while (True):
    clock.tick()
    # Take snapshot.
    image = sensor.snapshot()
    # Get a binary image.
    binary = image.threshold((255, 127, 127), 25)
    # Run median filter.
    binary.median(3)
    # Detect blobs in image.
    blobs = binary.find_blobs()
    # Draw rectangles around detected blobs.
    for r in blobs:
        image.draw_rectangle(r)
    print(clock.fps())
Developer: PierreBizouard, Project: openmv, Lines: 17, Source file: blob_detection.py
Example 9: find_circles
# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform
#
# Note that the find_circles() method will only find circles which are completely
# inside of the image. Circles which go outside of the image/roi are ignored...
import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)  # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot().lens_corr(1.8)

    # Circle objects have four values: x, y, r (radius), and magnitude. The
    # magnitude is the strength of the detection of the circle. Higher is
    # better...
    # `threshold` controls how many circles are found. Increase its value
    # to decrease the number of circles detected...
    # `x_margin`, `y_margin`, and `r_margin` control the merging of similar
    # circles in the x, y, and r (radius) directions.
    for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10):
        img.draw_circle(c.x(), c.y(), c.r(), color = (255, 0, 0))
        print(c)
Developer: MaurinElectroTextile, Project: openmv, Lines: 30, Source file: find_circles.py
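As the comments in Example 9 note, each circle object also carries a magnitude. A common follow-up is to keep only strong detections; the sketch below is illustrative and not part of the original script, and the cutoff value is an arbitrary assumption:

# Inside the same while(True) loop, after taking the snapshot:
circles = img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10)
strong = [c for c in circles if c.magnitude() > 4000]  # cutoff chosen arbitrarily
for c in strong:
    img.draw_circle(c.x(), c.y(), c.r(), color = (0, 255, 0))
    print(c.x(), c.y(), c.r(), c.magnitude())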
Example 10: GRAYSCALE
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128)   # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000)         # Wait for settings to take effect.
clock = time.clock()                    # Create a clock object to track the FPS.

# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to run into RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())

# BLOCK_W and BLOCK_H are defined earlier in the full script (e.g. 16x16 blocks).
while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    for y in range(0, sensor.height(), BLOCK_H):
        for x in range(0, sensor.width(), BLOCK_W):
            displacement = extra_fb.find_displacement(img, \
                roi = (x, y, BLOCK_W, BLOCK_H), template_roi = (x, y, BLOCK_W, BLOCK_H))
            # Below 0.1 or so (YMMV) and the results are just noise.
            if(displacement.response() > 0.1):
                pixel_x = x + (BLOCK_W//2) + int(displacement.x_translation())
                pixel_y = y + (BLOCK_H//2) + int(displacement.y_translation())
                img.draw_line((x + BLOCK_W//2, y + BLOCK_H//2, pixel_x, pixel_y), \
                              color = 255)  # The color argument is an assumed completion; the excerpt ends mid-call.
Developer: michaelchi08, Project: openmv, Lines: 31, Source file: image-patches-differential-translation.py
Example 11: interrupt
Important:
    This script should be copied to the OpenMV Cam as `main.py`.
Source:
    https://github.com/openmv/openmv/blob/master/scripts/examples/02-Board-Control/usb_vcp.py
"""
import sensor
import ustruct
import pyb

usb_vcp = pyb.USB_VCP()
# Disable USB interrupt (CTRL-C by default) when sending raw data (i.e. images).
# See: https://docs.openmv.io/library/pyb.USB_VCP.html#pyb.USB_VCP.setinterrupt
usb_vcp.setinterrupt(-1)

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.skip_frames(time=2000)  # wait for settings to take effect!

while True:
    command = usb_vcp.recv(4, timeout=5000)
    if command == b'snap':
        image = sensor.snapshot().compress()
        usb_vcp.send(ustruct.pack('<L', image.size()))
        usb_vcp.send(image)
Developer: fabianschilling, Project: openmv_cam, Lines: 29, Source file: main.py
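The loop above implements a simple request/response protocol over the USB virtual COM port: the host sends the 4-byte command b'snap', and the camera replies with a 4-byte little-endian length followed by the JPEG bytes. A hypothetical host-side sketch using pyserial; the serial port name and the pyserial dependency are assumptions, not part of the excerpt:

import struct
import serial  # pip install pyserial (assumed host-side dependency)

port = serial.Serial('/dev/ttyACM0', baudrate=115200, timeout=5)  # Port name is illustrative.

port.write(b'snap')                           # Request one frame.
size = struct.unpack('<L', port.read(4))[0]   # 4-byte little-endian length prefix.
jpeg = port.read(size)                        # The compressed image itself.

with open('snapshot.jpg', 'wb') as f:
    f.write(jpeg)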
Example 12: while
# up processing at the expense of accuracy. The frontalface HaarCascade has 25
# stages.
face_cascade = image.HaarCascade("frontalface", stages=25)

while(True):
    pyb.LED(RED_LED_PIN).on()
    print("About to start detecting faces...")
    sensor.skip_frames(60)  # Give the user time to get ready.

    pyb.LED(RED_LED_PIN).off()
    print("Now detecting faces!")
    pyb.LED(BLUE_LED_PIN).on()

    diff = 10  # We'll say we detected a face after 10 frames.
    while(diff):
        img = sensor.snapshot()
        # Threshold can be between 0.0 and 1.0. A higher threshold results in a
        # higher detection rate with more false positives. The scale value
        # controls the matching scale allowing you to detect smaller faces.
        faces = img.find_features(face_cascade, threshold=0.5, scale=1.5)
        if faces:
            diff -= 1
            for r in faces:
                img.draw_rectangle(r)

    pyb.LED(BLUE_LED_PIN).off()
    print("Face detected! Saving image...")
    sensor.snapshot().save("snapshot-%d.jpg" % pyb.rng())  # Save Pic.
Developer: Killercotton, Project: OpenMV_medialab, Lines: 30, Source file: snapshot_on_face_detection.py
Example 13: print
#
# This example demonstrates using frame differencing with your OpenMV Cam. It's
# called basic frame differencing because there's no background image update.
# So, as time passes the background image may change, resulting in issues.
import sensor, image, pyb, os, time

sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)  # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)  # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000)  # Let new settings take effect.
sensor.set_auto_whitebal(False)  # Turn off white balance.
clock = time.clock()  # Tracks FPS.

if not "temp" in os.listdir(): os.mkdir("temp")  # Make a temp directory

print("About to save background image...")
sensor.skip_frames(time = 2000)  # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while(True):
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")

    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
                        # connected to your computer. The FPS should increase once disconnected.
Developer: MaurinElectroTextile, Project: openmv, Lines: 30, Source file: basic_frame_differencing.py
示例14: while
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(10) # Let new settings take affect.
sensor.set_whitebal(False) # Turn off white balance.
if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
while(True):
pyb.LED(RED_LED_PIN).on()
print("About to save background image...")
sensor.skip_frames(60) # Give the user time to get ready.
pyb.LED(RED_LED_PIN).off()
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now detecting motion!")
pyb.LED(BLUE_LED_PIN).on()
diff = 10 # We'll say we detected motion after 10 frames of motion.
while(diff):
img = sensor.snapshot()
img.difference("temp/bg.bmp")
for blob_l in img.find_blobs([(20, 100, -128, 127, -128, 127)]):
for blob in blob_l:
# Over 100 pixels need to change to detect motion.
if (diff and (blob[4] > 100)): diff -= 1
m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
clock = time.clock() # Tracks FPS.
开发者ID:mfauziz,项目名称:openmv,代码行数:31,代码来源:mjpeg_on_movement.py
示例15: while
# Linear Polar Mapping Example
#
# This example shows off re-projecting the image using a linear polar
# transformation. Linear polar images are useful in that rotations
# become translations in the X direction and linear changes
# in scale become linear translations in the Y direction.
import sensor, image, time
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot().linpolar(reverse=False)
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
开发者ID:michaelchi08,项目名称:openmv,代码行数:21,代码来源:linear_polar.py
示例16: while
# stages.
face_cascade = image.HaarCascade("frontalface", stages=25)
while(True):
pyb.LED(RED_LED_PIN).on()
print("About to start detecting faces...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
pyb.LED(RED_LED_PIN).off()
print("Now detecting faces!")
pyb.LED(BLUE_LED_PIN).on()
diff = 10 # We'll say we detected a face after 10 frames.
while(diff):
img = sensor.snapshot()
# Threshold can be between 0.0 and 1.0. A higher threshold results in a
# higher detection rate with more false positives. The scale value
# controls the matching scale allowing you to detect smaller faces.
faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
if faces:
diff -= 1
for r in faces:
img.draw_rectangle(r)
g = gif.Gif("example-%d.gif" % pyb.rng(), loop=True)
clock = time.clock() # Tracks FPS.
print("You're on camera!")
for i in range(100):
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:31,代码来源:gif_on_face_detection.py
示例17: while
# stages.
face_cascade = image.HaarCascade("frontalface", stages=25)
while(True):
pyb.LED(RED_LED_PIN).on()
print("About to start detecting faces...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
pyb.LED(RED_LED_PIN).off()
print("Now detecting faces!")
pyb.LED(BLUE_LED_PIN).on()
diff = 10 # We'll say we detected a face after 10 frames.
while(diff):
img = sensor.snapshot()
# Threshold can be between 0.0 and 1.0. A higher threshold results in a
# higher detection rate with more false positives. The scale value
# controls the matching scale allowing you to detect smaller faces.
faces = img.find_features(face_cascade, threshold=0.5, scale_factor=1.5)
if faces:
diff -= 1
for r in faces:
img.draw_rectangle(r)
m = mjpeg.Mjpeg("example-%d.mjpeg" % pyb.rng())
clock = time.clock() # Tracks FPS.
print("You're on camera!")
for i in range(200):
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:31,代码来源:mjpeg_on_face_detection.py
Example 18: resolution
        # (Tail of the LCD driver method above; the class definition is truncated in this excerpt.)
        # Display on
        self.write_command(0x29)

if __name__ == "__main__":
    import sensor, time
    #from lcd import LCD

    # Reset sensor
    sensor.reset()

    # Sensor settings
    sensor.set_contrast(2)
    sensor.set_brightness(0)
    sensor.set_saturation(2)
    sensor.set_pixformat(sensor.RGB565)

    # LCD resolution (128x160)
    sensor.set_framesize(sensor.QQVGA2)

    # Init LCD
    lcd = LCD()
    lcd.clear(0x00)
    lcd.set_backlight(True)

    clock = time.clock()
    while (True):
        clock.tick()
        # Capture a frame and draw it to the LCD.
        lcd.write_image(sensor.snapshot())
        print(clock.fps())
Developer: 12019, Project: openmv, Lines: 30, Source file: lcd.py
示例19: print
s.settimeout(1.0)
print ('Waiting for connections..')
client, addr = s.accept()
print ('Connected to ' + addr[0] + ':' + str(addr[1]))
# Read request from client
data = client.recv(1024)
# Should parse client request here
# Send multipart header
client.send("HTTP/1.1 200 OK\r\n" \
"Server: OpenMV\r\n" \
"Content-Type: multipart/x-mixed-replace;boundary=openmv\r\n" \
"Cache-Control: no-cache\r\n" \
"Pragma: no-cache\r\n\r\n")
# FPS clock
clock = time.clock()
# Start streaming images
while (True):
clock.tick() # Track elapsed milliseconds between snapshots().
frame = sensor.snapshot()
client.send("\r\n--openmv\r\n" \
"Content-Type: image/jpeg\r\n"\
"Content-Length:"+str(frame.size())+"\r\n\r\n")
client.send(frame.compress(35))
print(clock.fps())
client.close()
开发者ID:mfauziz,项目名称:openmv,代码行数:31,代码来源:mjpeg_streamer.py
示例20: while
# TV Example
#
# Note: To run this example you will need a wireless tv shield for your OpenMV Cam.
#
# The wireless video tv Shield allows you to view your OpenMV Cam's frame buffer on the go.
import sensor, image, tv
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA)
tv.init() # Initialize the tv.
tv.channel(8) # For wireless video transmitter shield
while(True):
tv.display(sensor.snapshot()) # Take a picture and display the image.
开发者ID:openmv,项目名称:openmv,代码行数:15,代码来源:tv.py
Note: The sensor.snapshot examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not republish without permission.