Image processing for line tracing

Hi, I’m trying to make a line-tracing robot. My plan is to binarize the image and extract the outline of the line I want to track, so that lighting conditions won’t affect the quality of the tracing. Then I want to find a blob at the center of the line, follow the line, and send the resulting angle back to my robot. How can I achieve that? Here is the code I’m working with:

import sensor
import image
import time
import math
from pyb import UART

# Initialize the camera
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # grayscale is sufficient for line detection
# BUG FIX: the original used QQVGA (160x120), but the 320-wide ROIS below
# and the 160/240 centre constants in the main loop assume a QVGA
# (320x240) frame, so every ROI fell outside the image.  Use QVGA to match.
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)  # give the sensor time to settle after reconfiguration
sensor.set_auto_gain(False)  # turn off automatic gain control
sensor.set_auto_whitebal(False)  # turn off white balance

# UART 3 at 19200 baud; the steering value is written here each frame.
uart = UART(3, 19200, timeout_char=200)

# Track bright pixels (after Canny edge detection the edges are white on black).
GRAYSCALE_THRESHOLD = [(128, 255)]
ROIS = [  # [ROI, weight] -- each entry is (x, y, w, h, weight);
          # rows nearer the bottom of the frame get more weight.
    (0, 200, 320, 20, 0.6),
    (0, 150, 320, 20, 0.2),  # depending on how your robot is setup.
    (0, 100, 320, 20, 0.1),  # You'll need to tweak the weights for your app
    (0, 50, 320, 20, 0.05),  # depending on how your robot is setup.
    # (0, 0, 320, 20, 0.05)
]

class ImageProcessor:
    """Captures a frame and converts it in-place to a Canny edge map;
    LineDetect() then finds straight lines on that edge image and draws
    the ones that pass the length/angle filters."""

    def __init__(self):
        # Canny hysteresis thresholds.
        # NOTE(review): grayscale pixel values are 0-255, so 260/270 look
        # out of range -- confirm against the OpenMV find_edges() docs.
        self.low_threshold = 260
        self.high_threshold = 270
        self.line_threshold = 1000  # Adjust this threshold
        self.theta_margin = 100
        self.rho_margin = 100
        self.max_line_length = 100  # Set maximum line length
        self.min_line_length = 10  # Set minimum line length
        self.max_angle_diff = 20  # Set maximum angle difference for filtering

    def process(self):
        """Grab a fresh frame and replace it in-place with its Canny edge map."""
        self.image = sensor.snapshot()
        self.image.find_edges(image.EDGE_CANNY, threshold=(self.low_threshold, self.high_threshold))

    def LineDetect(self):
        """Find lines on the edge image and draw those near 90 degrees.

        NOTE(review): max_line_length / min_theta / max_theta are not
        obviously documented kwargs of image.find_lines() -- verify
        against the OpenMV API before relying on them.
        """
        for l in self.image.find_lines(
            threshold=self.line_threshold,
            theta_margin=self.theta_margin,
            rho_margin=self.rho_margin,
            max_line_length=self.max_line_length,
            min_theta=-90,
            max_theta=90
        ):
            if self.min_line_length < l.length() < self.max_line_length:
                # Check if line angle is within a certain range
                if abs(l.theta() - 90) < self.max_angle_diff:
                    self.image.draw_line(l.line(), color=(255))

img_processor = ImageProcessor()
clock = time.clock()

while True:
    clock.tick()
    img_processor.process()  # capture + Canny edge-detect one frame
    centroid_sum = 0       # weighted sum of blob x-offsets from the image centre
    blobs_num = 0          # number of ROIs that produced an accepted blob
    last_pos = [160, 240]  # fallback line position: bottom-centre of a 320x240 frame

    for r in ROIS:
        try:
            # NOTE(review): after find_edges() the image is an edge map, so
            # (128, 255) matches thin white edge pixels, not the filled
            # line body -- confirm blobs here are large enough to pass the
            # area check below.
            blobs = img_processor.image.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True)
        except Exception as e:
            print("Error occurred during blob detection:", e)
            blobs = None  # Set blobs to None to handle the error gracefully


        if blobs:
            # Find the blob with the most pixels.
            largest_blob = max(blobs, key=lambda b: b.pixels())
            if largest_blob.area() > 540:  # ignore small noise blobs
                blobs_num += 1

                # Draw a rect around the blob.
                img_processor.image.draw_rectangle(largest_blob.rect())
                img_processor.image.draw_cross(largest_blob.cx(), largest_blob.cy())

                last_pos = [largest_blob.cx(), largest_blob.cy()]

                centroid_sum += (largest_blob.cx() - 160) * r[4]  # r[4] is the roi weight.

    # NOTE(review): center_pos is computed but never sent -- the sin(angle)
    # below is what goes to the robot.
    center_pos = centroid_sum / sum(r[4] for r in ROIS)  # Determine center of line.
    # Angle from the bottom-centre (160, 240) to the last accepted blob.
    # NOTE(review): despite the name, atan2 returns radians, not degrees.
    degree = math.atan2(160 - last_pos[0], 240 - last_pos[1])
    if blobs_num:
        uart.write(f"{-1 * math.sin(degree)}\n")
    else:
        uart.write("0\n")  # no line found: command straight/stop per robot protocol

    if uart.any():
        print(uart.read())

Here is the outline (edge-detection) code on its own:

import sensor, image, time
from pyb import UART

# Initialize the camera
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # grayscale is sufficient for line detection
# BUG FIX: the original used QQVGA (160x120), but the 320-wide ROIS below
# assume a QVGA (320x240) frame, so every ROI fell outside the image.
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time = 2000)        # let the new settings take effect
sensor.set_auto_gain(False)            # turn off automatic gain control
sensor.set_auto_whitebal(False)        # turn off white balance

# UART 3 at 19200 baud to the robot controller.
uart = UART(3, 19200, timeout_char=200)

# Bright pixels -- after Canny the edges are white on black.
GRAYSCALE_THRESHOLD = [(128, 255)]
ROIS = [  # [ROI, weight] -- (x, y, w, h, weight)
    (0, 200, 320, 20, 0.6),
    (0, 150, 320, 20, 0.2),  # depending on how your robot is setup.
    (0, 100, 320, 20, 0.1),  # You'll need to tweak the weights for your app
    (0, 50, 320, 20, 0.05),  # depending on how your robot is setup.
#    (0, 0, 320, 20, 0.05)
]
class ImageProcessor:
    """Captures a frame, converts it in-place to a Canny edge map, and
    finds/draws straight lines that pass the length and angle filters."""

    def __init__(self):
        # Canny hysteresis thresholds.
        # NOTE(review): grayscale pixel values are 0-255, so 260/270 look
        # out of range -- confirm against the OpenMV find_edges() docs.
        self.low_threshold = 260
        self.high_threshold = 270
        self.line_threshold = 1000  # Hough accumulator threshold; adjust per scene
        self.theta_margin = 100     # merge lines whose angle differs by less than this
        self.rho_margin = 100       # merge lines whose rho differs by less than this
        self.max_line_length = 100  # Set maximum line length
        self.min_line_length = 10   # Set minimum line length
        self.max_angle_diff = 20    # Set maximum angle difference for filtering

    def process(self):
        """Grab a fresh frame and replace it in-place with its Canny edge map."""
        self.image = sensor.snapshot()
        self.image.find_edges(image.EDGE_CANNY, threshold=(self.low_threshold, self.high_threshold))

    def LineDetect(self):
        """Find lines on the edge image and draw those near 90 degrees.

        BUG FIX: the original iterated ``img.find_lines(...)``, but ``img``
        is undefined in this scope, so calling this method raised
        NameError.  It now uses the frame captured by process().
        """
        for l in self.image.find_lines(
            threshold=self.line_threshold,
            theta_margin=self.theta_margin,
            rho_margin=self.rho_margin,
            max_line_length=self.max_line_length,
            min_theta=-90,
            max_theta=90
        ):
            if self.min_line_length < l.length() < self.max_line_length:
                # Check if line angle is within a certain range
                if abs(l.theta() - 90) < self.max_angle_diff:
                    self.image.draw_line(l.line(), color=(255))



# Continuously grab frames, edge-detect them, and report the frame rate.
processor = ImageProcessor()
clock = time.clock()

while True:
    clock.tick()         # mark the start of this frame for FPS accounting
    processor.process()  # capture one frame and run Canny edge detection
    print(clock.fps())   # report the measured frame rate

Here is the line-tracing code:

import sensor
import time
import math
from pyb import UART

# UART 3 at 19200 baud; the steering value is written here each frame.
uart = UART(3, 19200, timeout_char=200)

# Tracks a black line. Use [(128, 255)] for tracking a white line.
GRAYSCALE_THRESHOLD = [(0, 64)]

# Each roi is (x, y, w, h). The line detection algorithm will try to find the
# centroid of the largest blob in each roi. The x position of the centroids
# will then be averaged with different weights where the most weight is assigned
# to the roi near the bottom of the image and less to the next roi and so on.
# NOTE: the 320-wide ROIs assume the QVGA (320x240) frame configured below.
ROIS = [  # [ROI, weight]
    (0, 200, 320, 25, 0.20),
    (0, 175, 320, 25, 0.20),
    (0, 150, 320, 25, 0.20),
    (0, 125, 320, 25, 0.15),
    (0, 100, 320, 25, 0.15),
    (0, 75, 320, 25, 0.1),
]

# Alternative column-wise ROIs, kept for experimentation.
#ROIS2 = [  # [ROI, weight]
#    (0, 120, 20, 240, 0.2),
#    (80, 120, 20, 240, 0.2),  # depending on how your robot is setup.
#    (160, 120, 20, 240, 0.2),  # You'll need to tweak the weights for your app
#    (240, 120, 20, 240, 0.2),  # depending on how your robot is setup.
#    (300, 120, 20, 240, 0.2)
#]

#LP = [0,0,0,0,0,]

def distance(pos1, pos2):
    """Return the Euclidean distance between two (x, y) points.

    Uses math.hypot, which is numerically more robust than the
    hand-rolled sqrt-of-sum-of-squares (it avoids intermediate
    overflow/underflow for extreme coordinates).
    """
    return math.hypot(pos1[0] - pos2[0], pos1[1] - pos2[1])

# Compute the weight divisor (we're computing this so you don't have to make weights add to 1).
weight_sum = sum(r[4] for r in ROIS)  # r[4] is the roi weight.

# Camera setup...
sensor.reset()  # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # use grayscale.
sensor.set_framesize(sensor.QVGA)  # 320x240 -- matches the 320-wide ROIS above.
sensor.skip_frames(time=2000)  # Let new settings take effect.
sensor.set_auto_gain(False)  # must be turned off for color tracking
sensor.set_auto_whitebal(False)  # must be turned off for color tracking
clock = time.clock()  # Tracks FPS.

while True:
    clock.tick()  # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    centroid_sum = 0       # weighted sum of blob x-offsets from the image centre
    blobs_num = 0          # number of ROIs that produced an accepted blob this frame
    last_pos = [160, 240]  # fallback line position: bottom-centre of the frame

#    for r in ROIS2:
#        blobs = img.find_blobs(
#            GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True
#        )  # r[0:4] is roi tuple.

#        if blobs:
#            # Find the blob with the most pixels.
#            largest_blob = max(blobs, key=lambda b: b.pixels())
#            blobs_num += 1

#            # Draw a rect around the blob.
#            img.draw_rectangle(largest_blob.rect())
#            img.draw_cross(largest_blob.cx(), largest_blob.cy())

#            centroid_sum += largest_blob.cx() * r[4]  # r[4] is the roi weight.

    for r in ROIS:
        blobs = img.find_blobs(
            GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True
        )  # r[0:4] is roi tuple.

        if blobs:
            # Prefer the blob closest to the previously accepted position
            # (ties broken by pixel count) so the tracker follows one line
            # through crossings instead of jumping to the biggest blob.
            largest_blob = max(blobs, key=lambda b: [-1 * distance(last_pos, [b.cx(), b.cy()]), b.pixels()])
            if (largest_blob.area() > 540):  # ignore small noise blobs
                blobs_num += 1

                # Draw a rect around the blob.
                img.draw_rectangle(largest_blob.rect())
                img.draw_cross(largest_blob.cx(), largest_blob.cy())

                last_pos = [largest_blob.cx(), largest_blob.cy()]

                centroid_sum += (largest_blob.cx() - 160) * r[4]  # r[4] is the roi weight.

    # NOTE(review): center_pos is computed but its uart write is commented
    # out -- the sin(angle) below is what's actually sent.
    center_pos = centroid_sum / weight_sum  # Determine center of line.
    # Angle from the bottom-centre (160, 240) to the last accepted blob.
    # NOTE(review): despite the name, atan2 returns radians, not degrees.
    degree = math.atan2(160 - last_pos[0], 240 - last_pos[1])
    if (blobs_num):
#        uart.write(f"{center_pos}\n")
        uart.write(f"{-1 * math.sin(degree)}\n")
#        print(math.sin(degree))
    else:
        uart.write("0\n")  # no line found this frame

    if uart.any():
        print(uart.read())
#   print(center_pos)

image_2024-05-22_233111888
image_2024-05-22_233140744

Hi, please see our code on the open projects repo. There’s an extremely capable line following robot listed there that you can use. The code works very well for any type of line.

Do you mean the donkey car project? I didn’t use steering for line tracing; I want the output to be an angle so I can send it to an Arduino for further processing. Also, the lines I’m tracking are more complex than the one in the example video, so could you please have a look at my code?

Yeah, it seems like you are doing what you want to do already with your code. So, I’m not sure how I’m supposed to help you.

Please understand that I can provide only limited support related to the API and how to make things work. It seems like you are already using the API well… so I’m not sure what you need help with.

It sounds like you are asking for help processing the data to do something and drive the robot. If so, again, I have the donkey car reference project where I did this. Anything beyond that is up to a user to develop.