One thing I could not find is whether it is possible to detect letters with the OpenMV H7. If so, how would that work?
Thank you,
Finn

Code: Select all
# Find Line Segments Example
#
# This example shows off how to find line segments in the image. For each line
# segment found in the image, a line object is returned which includes the line's rotation.
# find_line_segments() finds finite length lines (but is slow).
# Use find_lines() instead if infinite lines are enough (it is faster).
enable_lens_corr = False # turn on for straighter lines...
import sensor, image, time
sensor.reset()
sensor.set_pixformat(sensor.RGB565) # grayscale is faster
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)
clock = time.clock()
# All lines also have `x1()`, `y1()`, `x2()`, and `y2()` methods to get their end-points
# and a `line()` method to get all the above as one 4 value tuple for `draw_line()`.
while(True):
    clock.tick()
    img = sensor.snapshot()
    if enable_lens_corr: img.lens_corr(1.8) # for 2.8mm lens...

    # `merge_distance` controls the merging of nearby lines. At 0 (the default), no
    # merging is done. At 1, any line 1 pixel away from another is merged... and so
    # on as you increase this value. You may wish to merge lines as line segment
    # detection produces a lot of line segment results.

    # `max_theta_diff` controls the maximum amount of rotation difference between
    # any two lines about to be merged. The default setting allows for 15 degrees.

    for l in img.find_line_segments(merge_distance = 0, max_theta_diff = 5):
        img.draw_line(l.line(), color = (255, 0, 0))
        print(l)

    print("FPS %f" % clock.fps())
Code: Select all
import time, sensor, image
from pyb import UART
from image import SEARCH_EX, SEARCH_DS
ser = UART(3,115200,timeout_char=1000)
# Reset sensor
sensor.reset()
# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
# Max resolution for template matching with SEARCH_EX is QQVGA
sensor.set_framesize(sensor.QQCIF)
# You can set windowing to reduce the search image.
#sensor.set_windowing(((640-80)//2, (480-60)//2, 80, 60))
sensor.set_pixformat(sensor.GRAYSCALE)
# Load template.
# Template should be a small (eg. 32x32 pixels) grayscale image.
template1 = image.Image("/H_Letter.pgm")
template2 = image.Image("/U_Letter.pgm")
template3 = image.Image("/S_Letter.pgm")
clock = time.clock()
# Run template matching
while (True):
    clock.tick()
    img = sensor.snapshot()

    # find_template(template, threshold, [roi, step, search])
    # ROI: The region of interest tuple (x, y, w, h).
    # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster.
    # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search
    #
    # Note1: ROI has to be smaller than the image and bigger than the template.
    # Note2: In diamond search, step and ROI are both ignored.
    harmed = img.find_template(template1, 0.70, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60))
    unharmed = img.find_template(template2, 0.70, step=4, search=SEARCH_EX)
    stable = img.find_template(template3, 0.70, step=4, search=SEARCH_EX)

    if harmed:
        img.draw_rectangle(harmed, 5)
        ser.write(b'\x01') # UART.write() needs a buffer object; the plain int 0x01 raised "TypeError: Object with buffer protocol required."
        print("Detected H")

    if unharmed:
        img.draw_rectangle(unharmed, 5)
        ser.write(b'\x02') # bytes literal instead of a plain int, same fix as above
        print("Detected U")

    if stable:
        img.draw_rectangle(stable, 5)
        ser.write(b'\x03')
        print("Detected S")

    #if l:
    #    img.draw_rectangle(l, 5)
    #    ser.write(b'\x02')

    print(clock.fps())
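UART.write() expects an object that supports the buffer protocol (bytes, bytearray, or str), which is why writing a plain int like 0x01 raised the TypeError noted above. Wrapping the value in a bytes object fixes it:

Code: Select all
# A plain int has no buffer protocol; send a bytes object instead.
ser.write(b'\x01')        # one-byte literal
ser.write(bytes([0x02]))  # or build the byte at runtime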
Code: Select all
import sensor, image, time, math, nn

# Color Tracking Thresholds (Grayscale Min, Grayscale Max)
# The below grayscale threshold is set to only find dark areas (the letters).
thresholds = (0, 40)
blobby = False
sensor.reset() # Reset and initialize the sensor.
# Set sensor settings
sensor.set_contrast(1)
sensor.set_gainceiling(16)
sensor.set_pixformat(sensor.GRAYSCALE) # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((96, 96)) # Set 96x96 window.
sensor.skip_frames(time=500)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)
# Load chars74k network
net = nn.load('/fnt-chars74k.network') # works on printed font
# net = nn.load('/hnd-chars74k.network') # works on handwritten chars (assumed filename)
# net = nn.load('/img-chars74k.network') # works on images of chars
labels = ['n/a', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
for i in range(ord('A'), ord('Z') + 1): labels.append(chr(i))
for i in range(ord('a'), ord('z') + 1): labels.append(chr(i))
clock = time.clock() # Create a clock object to track the FPS.
while(True):
    clock.tick() # Update the FPS clock.
    img = sensor.snapshot() # Take a picture and return the image.
    #imgBlob = sensor.snapshot() # Picture of blob, to see the letter

    # Adjust the binary thresholds below if things aren't working - make sure characters are good.
    #img.find_edges(image.EDGE_CANNY, threshold=(100, 100))
    for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True):
        # These values depend on the blob not being circular - otherwise they will be shaky.
        if blob.elongation() > 0.5:
            img.draw_edges(blob.min_corners(), color=0)
            img.draw_line(blob.major_axis_line(), color=0)
            img.draw_line(blob.minor_axis_line(), color=0)
        # These values are stable all the time.
        img.draw_rectangle(blob.rect(), color=127)
        img.draw_cross(blob.cx(), blob.cy(), color=127)
        blobby = True
        # Note - the blob rotation is unique to 0-180 only.
        img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=40, color=127)

    if blobby:
        out = net.forward(img.binary([(100, 255)]), softmax=True)
        max_idx = out.index(max(out))
        score = int(out[max_idx]*100)
        if score < 50:
            score_str = "??:??%"
        else:
            score_str = "%s:%d%% " % (labels[max_idx], score)
        img.draw_string(0, 0, score_str, color=(0, 255, 0))
        print(score_str)
        blobby = False # reset the flag for the next frame

    print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
                       # to the IDE. The FPS should increase once disconnected.
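To classify only the region around a detected letter instead of the whole frame, net.forward() also accepts an roi argument, which can be set to the blob's rectangle: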
Code: Select all
net.forward(img.binary([(100, 255)]), softmax=True, roi=blob.rect())
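If the blob rectangle crops the letter too tightly, the ROI can first be grown a little, clamped to the image bounds: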
Code: Select all
# Next we run the network on an ROI that's a bit bigger than the blob.
w = min(max(int(blob.w() * 1.2), 10), 160) # Not too small, not too big.
h = min(max(int(blob.h() * 1.2), 10), 160) # Not too small, not too big.
x = min(max(int(blob.x() + (blob.w()/4) - (w * 0.1)), 0), img.width()-1)
y = min(max(int(blob.y() + (blob.h()/4) - (h * 0.1)), 0), img.height()-1)
net.forward(img.binary([(100, 255)]), softmax=True, roi=(x, y, w, h))
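Putting the pieces together, here is a rough, untested sketch that runs the expanded-ROI classification once per blob; it assumes the net, labels, thresholds and img variables from the script above:

Code: Select all
# Sketch: classify each blob separately using the expanded ROI from above.
for blob in img.find_blobs([thresholds], pixels_threshold=100, area_threshold=100, merge=True):
    # Grow the blob rectangle a little so the whole letter fits, clamped to the image.
    w = min(max(int(blob.w() * 1.2), 10), 160)
    h = min(max(int(blob.h() * 1.2), 10), 160)
    x = min(max(int(blob.x() + (blob.w()/4) - (w * 0.1)), 0), img.width()-1)
    y = min(max(int(blob.y() + (blob.h()/4) - (h * 0.1)), 0), img.height()-1)
    out = net.forward(img.binary([(100, 255)]), softmax=True, roi=(x, y, w, h))
    max_idx = out.index(max(out))
    score = int(out[max_idx] * 100)
    if score >= 50:
        print("%s: %d%%" % (labels[max_idx], score))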