本文整理汇总了Python中sensor.set_framesize函数的典型用法代码示例。如果您正苦于以下问题:Python set_framesize函数的具体用法?Python set_framesize怎么用?Python set_framesize使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了set_framesize函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: unittest
def unittest(data_path, temp_path):
    """Drawing-primitives self test.

    Renders a fixed set of primitives onto a blank grayscale frame and
    compares the result against the stored reference image
    ``data_path/drawing.pgm``.

    Returns True when the pixel-wise difference against the reference is
    all zeros (i.e. an exact match).
    """
    import sensor
    # Grab one grayscale QVGA frame and wipe it to black.
    sensor.reset()
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.GRAYSCALE)
    img = sensor.snapshot().clear()
    # Draw primitives at deterministic positions relative to the center.
    half_w = img.width() // 2
    half_h = img.height() // 2
    img.set_pixel(half_w + 50, 120, 255)
    img.set_pixel(half_w - 50, 120, 255)
    img.draw_line([half_w - 50, 50, half_w + 50, 50])
    img.draw_rectangle([half_w - 25, half_h - 25, 50, 50])
    img.draw_circle(half_w, half_h, 40)
    img.draw_string(11, 10, "HelloWorld!")
    img.draw_cross(half_w, half_h)
    sensor.flush()
    # abs(img - reference): a perfect match leaves every pixel at 0.
    img.difference(data_path + "/drawing.pgm")
    stats = img.get_statistics()
    return (stats.max() == 0) and (stats.min() == 0)
开发者ID:openmv,项目名称:openmv,代码行数:17,代码来源:20-drawing.py
示例2: test_color_bars
def test_color_bars():
    """Sensor self test.

    Enables the sensor's built-in color-bar generator and verifies that
    each of the 8 bars averages out to its expected RGB color. Raises
    Exception naming the first bar that fails.
    """
    sensor.reset()
    # Sensor tuning used for the test pattern.
    sensor.set_brightness(0)
    sensor.set_saturation(0)
    sensor.set_gainceiling(8)
    sensor.set_contrast(2)
    # QVGA RGB565: 320 px wide -> 8 bars of ~40 px each.
    sensor.set_framesize(sensor.QVGA)
    sensor.set_pixformat(sensor.RGB565)
    # Switch the sensor into color-bar test-pattern mode.
    sensor.set_colorbar(True)
    # Discard frames while the sensor settles.
    # Note: This takes more time when exec from the IDE.
    for _ in range(100):
        image = sensor.snapshot()
    # One predicate per bar, left to right.
    t = [lambda r, g, b: r < 50 and g < 50 and b < 50,     # Black
         lambda r, g, b: r < 50 and g < 50 and b > 200,    # Blue
         lambda r, g, b: r > 200 and g < 50 and b < 50,    # Red
         lambda r, g, b: r > 200 and g < 50 and b > 200,   # Purple
         lambda r, g, b: r < 50 and g > 200 and b < 50,    # Green
         lambda r, g, b: r < 50 and g > 200 and b > 200,   # Aqua
         lambda r, g, b: r > 200 and g > 200 and b < 50,   # Yellow
         lambda r, g, b: r > 200 and g > 200 and b > 200]  # White
    # 320x240 image with 8 color bars, each approx 40 pixels wide.
    # Average 10 sample pixels taken from the middle row of each bar.
    for i in range(8):
        idx = 40 * i + 20  # horizontal center of bar i
        avg = (0, 0, 0)
        for off in range(10):  # accumulate 10 samples
            rgb = image.get_pixel(idx + off, 120)
            avg = tuple(map(sum, zip(avg, rgb)))
        if not t[i](avg[0] / 10, avg[1] / 10, avg[2] / 10):
            raise Exception("COLOR BARS TEST FAILED. "
                            "BAR#(%d): RGB(%d,%d,%d)" % (i + 1, avg[0] / 10, avg[1] / 10, avg[2] / 10))
    print("COLOR BARS TEST PASSED...")
开发者ID:Killercotton,项目名称:OpenMV_medialab,代码行数:46,代码来源:selftest.py
示例3: find_displacement
# find_displacement() rotation/scale example (excerpt).
#
# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). The algorithm is driven by phase correlation,
# which compares images using FFTs; a non-power-of-2 resolution needs
# padding to a power of 2, which degrades the results. Please use a
# resolution like B64X64 or B64X32 (2x faster).
#
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
import sensor, image, time, math

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE).
sensor.set_framesize(sensor.B64X64)     # Set frame size to 64x64... (or 64x32)...
sensor.skip_frames(time = 2000)         # Wait for settings take effect.
clock = time.clock()                    # Create a clock object to track the FPS.

# Allocate a second frame buffer out of the main frame buffer's RAM.
# There's a lot more RAM in the frame buffer than in the MicroPython
# heap - but carving it up leaves less RAM for some algorithms, so it
# is now much easier to run into out-of-RAM issues.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.RGB565)
extra_fb.replace(sensor.snapshot())

while(True):
    clock.tick()                # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()     # Take a picture and return the image.
    # This algorithm is hard to test without a perfect jig... So, here's a cheat to see it works.
开发者ID:michaelchi08,项目名称:openmv,代码行数:31,代码来源:differential-rotation-scale.py
示例4: write_command
# SPI LCD control example (excerpt).
# NOTE(review): this snippet begins mid-script - `rst`, `write_command`,
# and `write_image` are defined earlier in the original file, so the
# code below is left exactly as found.
# LCD panel init sequence (command bytes written over SPI).
rst.high()
time.sleep(100)
write_command(0x11) # Sleep Exit
time.sleep(120)
# Memory Data Access Control
write_command(0x36, 0xC0)
# Interface Pixel Format
write_command(0x3A, 0x05)
# Display On
write_command(0x29)
# Camera setup: stream RGB565 QQVGA2 (128x160) frames to the display.
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # must be this
sensor.set_framesize(sensor.QQVGA2) # must be this
sensor.skip_frames(time = 2000) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# Main loop: grab a frame and push it to the LCD each iteration.
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
write_command(0x2C) # Write image command...
write_image(img)
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
# connected to your computer. The FPS should increase once disconnected.
开发者ID:openmv,项目名称:openmv,代码行数:30,代码来源:spi_control.py
示例5: FFTs
# Optical Flow Example
#
# Your OpenMV Cam can use optical flow to determine the displacement between
# two images. This allows your OpenMV Cam to track movement like how your
# laser mouse tracks movement. By taking the difference between successive
# images you can determine instantaneous displacement with your OpenMV Cam too!
import sensor, image, time

sensor.reset()                          # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # find_displacement() input format.
# BUGFIX: the framesize constant is spelled with an uppercase X
# (sensor.B64X32), matching the other power-of-2 sizes used in this
# project (e.g. sensor.B64X64, sensor.B128X128).
sensor.set_framesize(sensor.B64X32)     # or sensor.B64X64.
clock = time.clock()                    # Tracks FPS.

# NOTE: The find_displacement function works by taking the 2D FFTs of the old
# and new images and compares them using phase correlation. Your OpenMV Cam
# only has enough memory to work on two 64x64 FFTs (or 128x32, 32x128, or etc).
old = sensor.snapshot()

while(True):
    clock.tick()                # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()     # Take a picture and return the image.
    # Displacement of the new frame relative to the previous one, plus a
    # quality-of-result score from the phase correlation.
    [delta_x, delta_y, response] = old.find_displacement(img)
    old = img.copy()
    print("%0.1f X\t%0.1f Y\t%0.2f QoR\t%0.2f FPS" % \
        (delta_x, delta_y, response, clock.fps()))
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:29,代码来源:optical_flow.py
示例6: find_displacement
# find_displacement() image-patches translation example (excerpt).
# NOTE(review): this snippet is truncated - the body of the final
# "for y in ..." loop and the BLOCK_H constant it uses are defined in
# the original file but not shown here, so the code is left exactly as
# found.
import sensor, image, time
# NOTE!!! You have to use a small power of 2 resolution when using
# find_displacement(). This is because the algorithm is powered by
# something called phase correlation which does the image comparison
# using FFTs. A non-power of 2 resolution requires padding to a power
# of 2 which reduces the usefulness of the algorithm results. Please
# use a resolution like B128X128 or B128X64 (2x faster).
# Your OpenMV Cam supports power of 2 resolutions of 64x32, 64x64,
# 128x64, and 128x128. If you want a resolution of 32x32 you can create
# it by doing "img.pool(2, 2)" on a 64x64 image.
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE (or RGB565)
sensor.set_framesize(sensor.B128X128) # Set frame size to 128x128... (or 128x64)...
sensor.skip_frames(time = 2000) # Wait for settings take effect.
clock = time.clock() # Create a clock object to track the FPS.
# Take from the main frame buffer's RAM to allocate a second frame buffer.
# There's a lot more RAM in the frame buffer than in the MicroPython heap.
# However, after doing this you have a lot less RAM for some algorithms...
# So, be aware that it's a lot easier to get out of RAM issues now.
extra_fb = sensor.alloc_extra_fb(sensor.width(), sensor.height(), sensor.GRAYSCALE)
extra_fb.replace(sensor.snapshot())
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
# Iterate the frame in horizontal bands of BLOCK_H rows (loop body truncated).
for y in range(0, sensor.height(), BLOCK_H):
开发者ID:michaelchi08,项目名称:openmv,代码行数:31,代码来源:image-patches-differential-translation.py
示例7: snapshots
# Basic Frame Differencing Example
#
# Note: You will need an SD card to run this example.
#
# Frame differencing with a fixed background image: the background is
# captured once and never updated, so as time passes scene changes may
# cause spurious differences.
import sensor, image, pyb, os, time

sensor.reset()                          # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)     # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA)       # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000)         # Let new settings take affect.
sensor.set_auto_whitebal(False)         # Turn off white balance.
clock = time.clock()                    # Tracks FPS.

# Make sure a scratch directory exists for the background image.
if not "temp" in os.listdir(): os.mkdir("temp")

print("About to save background image...")
sensor.skip_frames(time = 2000)         # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image - Now frame differencing!")

while(True):
    clock.tick()                # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()     # Take a picture and return the image.
    # Replace the image with the "abs(NEW-OLD)" frame difference.
    img.difference("temp/bg.bmp")
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:30,代码来源:basic_frame_differencing.py
示例8: cartoon
# Cartoon Filter
#
# The cartoon filter joins similar pixel areas of an image and replaces
# the pixels in those areas with the area mean, giving a cartoon look.
import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)     # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)       # or QQVGA...
sensor.skip_frames(time = 2000)
clock = time.clock()

while(True):
    clock.tick()
    # seed_threshold limits how far a colored region may grow from its
    # seed pixel; larger values merge more pixels.
    # floating_thresholds limits the pixel-to-pixel difference while
    # growing a region; setting it very high quickly combines all pixels
    # in the image, so keep it small.
    # cartoon() grows regions while both thresholds are satisfied...
    img = sensor.snapshot().cartoon(seed_threshold=0.05, floating_thresholds=0.05)
    print(clock.fps())
开发者ID:openmv,项目名称:openmv,代码行数:29,代码来源:cartoon_filter.py
示例9: print
# Lepton object-temperature example (excerpt): in measurement mode the
# sensor maps grayscale values onto a fixed temperature range.

# Set the target temp range here
min_temp_in_celsius = 20.0
max_temp_in_celsius = 35.0

print("Resetting Lepton...")
# These settings are applied on reset
sensor.reset()
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_MODE, True)
sensor.ioctl(sensor.IOCTL_LEPTON_SET_MEASUREMENT_RANGE, min_temp_in_celsius, max_temp_in_celsius)
print("Lepton Res (%dx%d)" % (sensor.ioctl(sensor.IOCTL_LEPTON_GET_WIDTH),
                              sensor.ioctl(sensor.IOCTL_LEPTON_GET_HEIGHT)))
print("Radiometry Available: " + ("Yes" if sensor.ioctl(sensor.IOCTL_LEPTON_GET_RADIOMETRY) else "No"))

sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.LCD)
sensor.skip_frames(time=5000)
clock = time.clock()
lcd.init()

# Only blobs with more pixels than "pixel_threshold" and more area than
# "area_threshold" are returned by "find_blobs" below. Change
# "pixels_threshold" and "area_threshold" if you change the camera
# resolution. "merge=True" merges all overlapping blobs in the image.

def map_g_to_temp(g):
    """Map a grayscale value (0-255) back to degrees Celsius within the
    configured measurement range."""
    return ((g * (max_temp_in_celsius - min_temp_in_celsius)) / 255.0) + min_temp_in_celsius

while(True):
    clock.tick()
    img = sensor.snapshot()
    blob_stats = []
开发者ID:openmv,项目名称:openmv,代码行数:30,代码来源:lepton_get_object_temp_color_lcd.py
示例10: value
# AprilTags Example
#
# Shows the power of the OpenMV Cam M7 to detect April Tags.
# The M4 versions cannot detect April Tags.
import sensor, image, time, math

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)    # we run out of memory if the resolution is much bigger...
sensor.set_windowing((160, 120))    # Look at center 160x120 pixels of the VGA resolution.
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)         # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False)     # must turn this off to prevent image washout...
clock = time.clock()

# Note! Unlike find_qrcodes the find_apriltags method does not need lens
# correction on the image to work.
#
# Tag families trade range for robustness: TAG16H5 is effectively a 4x4
# square tag, so it can be seen at a longer distance than a 6x6 TAG36H11
# tag. However its lower H value (H5 versus H11) makes its false positive
# rate much, much, much higher. Unless you have a reason to use another
# family, just use TAG36H11 (the default).

while(True):
    clock.tick()
    img = sensor.snapshot()
    for tag in img.find_apriltags():    # defaults to TAG36H11
        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
        # Tag id plus rotation converted from radians to degrees.
        print_args = (tag.id(), (180 * tag.rotation()) / math.pi)
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:31,代码来源:find_apriltags_w_lens_zoom.py
示例11: get_regression
# Robust linear regression example (excerpt).
#
# robust=True makes get_regression() compute the linear regression with a
# much more robust - but potentially much slower - algorithm that runs in
# O(N^2) time on the image. YOU NEED TO LIMIT THE NUMBER OF PIXELS it
# works on, or a single result can take seconds... THRESHOLD VERY
# CAREFULLY!
THRESHOLD = (0, 100)    # Grayscale threshold for dark things...
BINARY_VISIBLE = True   # Binarize first so you can see what the linear
                        # regression runs on... might lower FPS though.
import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQQVGA)     # 80x60 (4,800 pixels) - O(N^2) max = 23,040,000.
sensor.skip_frames(time = 2000)         # WARNING: If you use QQVGA it may take seconds
clock = time.clock()                    # to process a frame sometimes.

while(True):
    clock.tick()
    img = sensor.snapshot().binary([THRESHOLD]) if BINARY_VISIBLE else sensor.snapshot()

    # Returns a line object similar to those returned by find_lines() and
    # find_line_segments(): x1(), y1(), x2(), y2(), length(),
    # theta() (rotation in degrees), rho(), and magnitude().
    #
    # magnitude() represents how well the linear regression worked. It
    # means something different for the robust linear regression - in
    # general, the larger the value the better...
    line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True)
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:31,代码来源:linear_regression_robust.py
示例12: snapshot
# Global Shutter Triggered Mode Example
#
# Puts the global shutter camera into triggered mode. In triggered mode
# snapshot() controls EXACTLY when integration of the camera pixels
# starts, so picture taking can be synced to some external movement.
# Since the camera captures all pixels at the same time (global shutter
# versus rolling shutter), movement in the image is only captured for the
# integration time - not the integration time multiplied by the number of
# rows. Sensor noise is also reduced in triggered mode, as rows are only
# read out after exposing, which results in a higher quality image.
#
# The trade-off: maximum frame rate drops by 2 to 3 because frames are no
# longer generated continuously and readout must wait for integration to
# finish.
import sensor, image, time

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.VGA)        # Set frame size to VGA (640x480)
sensor.skip_frames(time = 2000)         # Wait for settings take effect.
clock = time.clock()                    # Create a clock object to track the FPS.

# Enable triggered mode.
sensor.ioctl(sensor.IOCTL_SET_TRIGGERED_MODE, True)

while(True):
    clock.tick()                # Update the FPS clock.
    img = sensor.snapshot()     # Take a picture and return the image.
    print(clock.fps())          # Note: OpenMV Cam runs about half as fast when
                                # connected to the IDE. The FPS should increase once disconnected.
开发者ID:openmv,项目名称:openmv,代码行数:29,代码来源:triggered_mode.py
示例13: object
# Find small AprilTags (excerpt): use blob tracking to locate the area
# where a tag might be first, then call find_apriltags on that blob.
# Note: this script works well assuming most parts of the image do not
# pass the thresholding test... otherwise you don't get a distance
# benefit.
import sensor, image, time, math, omv

# Thresholds to find a white object (i.e. the tag border).
thresholds = (150, 255)

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
# Pick the largest resolution the board can handle.
board = omv.board_type()
if board == "H7":
    sensor.set_framesize(sensor.VGA)
elif board == "M7":
    sensor.set_framesize(sensor.QVGA)
else:
    raise Exception("You need a more powerful OpenMV Cam to run this script")
sensor.skip_frames(time = 200)  # increase this to let the auto methods run for longer
sensor.set_auto_gain(False)     # must be turned off for color tracking
sensor.set_auto_whitebal(False) # must be turned off for color tracking
clock = time.clock()

# The apriltag code supports up to 6 tag families which can be processed
# at the same time. Returned tag objects carry their tag family and their
# id within that family.
tag_families = 0
tag_families |= image.TAG16H5   # comment out to disable this family
tag_families |= image.TAG25H7   # comment out to disable this family
tag_families |= image.TAG25H9   # comment out to disable this family
tag_families |= image.TAG36H10  # comment out to disable this family
tag_families |= image.TAG36H11  # comment out to disable this family (default family)
开发者ID:openmv,项目名称:openmv,代码行数:31,代码来源:find_small_apriltags.py
示例14:
# Example 1 - LCD Shield Demo
#
# Note: To run this example you will need a LCD Shield for your OpenMV Cam.
#
# The LCD Shield allows you to view your OpenMV Cam's frame buffer on the go.
import sensor, image, lcd

sensor.reset()                          # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565)     # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QQVGA2)     # Special 128x160 framesize for LCD Shield.
lcd.init()                              # Initialize the lcd screen.

while(True):
    # Take a picture and display the image.
    lcd.display(sensor.snapshot())
开发者ID:mfauziz,项目名称:openmv,代码行数:15,代码来源:lcd.py
示例15: snapshots
# Marker tracking: find red and blue blobs and merge them into markers.
import sensor, image, time

# For color tracking to work really well you should ideally be in a very,
# very, very, controlled environment where the lighting is constant.
# Additionally, if you want to track more than 2 colors you need to set
# their boundaries very narrowly. Tracking "generally" red, green, and
# blue ends up just tracking everything, which you don't want.
red_threshold = ( 40, 60, 60, 90, 50, 70)
blue_threshold = ( 0, 20, -10, 30, -60, 10)
# You may need to tweak the above settings for tracking red and blue things...
# Select an area in the Framebuffer to copy the color settings.

sensor.reset()                      # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # use RGB565.
sensor.set_framesize(sensor.QQVGA)  # use QQVGA for speed.
sensor.skip_frames(10)              # Let new settings take affect.
sensor.set_whitebal(False)          # turn this off.
clock = time.clock()                # Tracks FPS.

while(True):
    clock.tick()                    # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()         # Take a picture and return the image.
    blobs = img.find_blobs([red_threshold, blue_threshold])
    merged_blobs = img.find_markers(blobs)
    if merged_blobs:
        for blob in merged_blobs:
            # Draw a rect around the blob.
            img.draw_rectangle(blob[0:4])   # rect
            img.draw_cross(blob[5], blob[6])  # cx, cy
开发者ID:Killercotton,项目名称:OpenMV_medialab,代码行数:30,代码来源:marker_tracking.py
示例16:
# Blob detection (excerpt): threshold the frame on three RGB colors,
# clean the mask with a morphological close, then find blobs.
import sensor, time, pyb

# Status LEDs (red, green, blue).
led_r = pyb.LED(1)
led_g = pyb.LED(2)
led_b = pyb.LED(3)

#sensor.reset()
sensor.set_contrast(2)
sensor.set_framesize(sensor.QCIF)
sensor.set_pixformat(sensor.RGB565)

clock = time.clock()
while (True):
    clock.tick()
    # Take snapshot
    image = sensor.snapshot()
    # Threshold image with RGB
    binary = image.threshold([(255, 0, 0),
                              (0, 255, 0),
                              (0, 0, 255)], 80)
    # Image closing (dilate then erode)
    binary.dilate(3)
    binary.erode(3)
    # Detect blobs in image
    blobs = binary.find_blobs()
    led_r.off()
    led_g.off()
开发者ID:jithurbide,项目名称:openmv,代码行数:31,代码来源:blob_detection.py
示例17: resolution
# LCD driver demo (excerpt).
# NOTE(review): this snippet begins inside an LCD driver class - the
# `self.write_command` call below is the tail of a method whose `def`
# line (and the LCD class definition itself, used as `LCD()` below) are
# outside this excerpt, so the code is left exactly as found.
# Display on
self.write_command(0x29)
# Demo entry point: stream camera frames to the LCD shield.
if __name__ == "__main__":
import sensor, time
#from lcd import LCD
# Reset sensor
sensor.reset()
# Sensor settings
sensor.set_contrast(2)
sensor.set_brightness(0)
sensor.set_saturation(2)
sensor.set_pixformat(sensor.RGB565)
# LCD resolution (128x160)
sensor.set_framesize(sensor.QQVGA2)
# Init LCD
lcd = LCD()
lcd.clear(0x00)
lcd.set_backlight(True)
clock = time.clock()
while (True):
clock.tick()
# Capture a frame and draw it to the LCD
lcd.write_image(sensor.snapshot())
print(clock.fps())
开发者ID:12019,项目名称:openmv,代码行数:30,代码来源:lcd.py
示例18:
# Edge Detection Example:
#
# Demonstrates edge detection by running the morph function on an image
# and then thresholding and filtering the result.
import sensor, image

kernel_size = 1  # kernel width = (size*2)+1, kernel height = (size*2)+1
# High pass filter kernel. See here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
kernel = [-1, -1, -1,
          -1, +8, -1,
          -1, -1, -1]
thresholds = [(100, 255)]  # grayscale thresholds

sensor.reset()
sensor.set_framesize(sensor.QQVGA)      # smaller resolution to go faster
sensor.set_pixformat(sensor.GRAYSCALE)

while(True):
    img = sensor.snapshot()
    img.morph(kernel_size, kernel)
    img.binary(thresholds)
    # erode pixels with less than 2 neighbors
    img.erode(1, threshold = 2)
开发者ID:12019,项目名称:openmv,代码行数:23,代码来源:edge_detection.py
示例19: correction
# QRCode Example
#
# Detects QR Codes using lens correction (see the
# qrcodes_with_lens_corr.py script for higher performance).
import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QQVGA)  # can be QVGA on M7...
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)         # must turn this off to prevent image washout...
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    img.lens_corr(1.8)  # strength of 1.8 is good for the 2.8mm lens.
    for code in img.find_qrcodes():
        img.draw_rectangle(code.rect(), color = (255, 0, 0))
        print(code)
    print(clock.fps())
开发者ID:MaurinElectroTextile,项目名称:openmv,代码行数:22,代码来源:qrcodes_with_lens_corr.py
示例20: RGB565
# CIFAR-10 Search Just Center Example
#
# CIFAR is a convolutional neural network designed to classify its field
# of view into several different object types, working on RGB video data.
#
# This example slides the detector window over the image and gets a list
# of activations where there might be an object. Note that using a CNN
# with a sliding window is extremely compute expensive, so do not expect
# an exhaustive search to be real-time.
import sensor, image, time, os, nn

sensor.reset()                          # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)       # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))        # Set 128x128 window.
sensor.skip_frames(time=750)            # Don't let autogain run very long.
sensor.set_auto_gain(False)             # Turn off autogain.
sensor.set_auto_exposure(False)         # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
开发者ID:openmv,项目名称:openmv,代码行数:30,代码来源:nn_cifar10_search_just_center.py
注:本文中的sensor.set_framesize函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论