MemoryError: memory allocation failed on H7 R2

# Hello World Example
#
# Welcome to the OpenMV IDE! Click on the green run arrow button below to run the script!

import sensor, image, time
import gc
sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Pixel format: 8-bit grayscale (one value per pixel).
sensor.set_framesize(sensor.QQQVGA) # Frame size: QQQVGA (80x60) — smallest, to limit RAM use.
sensor.skip_frames(time = 2000)     # Wait for settings take effect.
clock = time.clock()                # Create a clock object to track the FPS.
width=sensor.width()
height=sensor.height()

def scale_img3(img):
    """Return a new grayscale image upscaled 3x by nearest-neighbor replication.

    Each source pixel is copied into the corresponding 3x3 block of the
    output, so the result is img.width()*3 by img.height()*3.
    NOTE(review): this allocates a full 9x-size image on the MicroPython heap.
    """
    out = image.Image(img.width() * 3, img.height() * 3, sensor.GRAYSCALE)
    for y in range(img.height()):
        for x in range(img.width()):
            px = img.get_pixel(x, y)
            # Replicate this pixel into its 3x3 destination block
            # (row by row, left to right — same write order as before).
            for dy in range(3):
                for dx in range(3):
                    out.set_pixel(x * 3 + dx, y * 3 + dy, px)
    return out

def read_image(img):
    """Scan *img* and record, per grayscale value, the flat pixel indices that
    lie on a color boundary (the pixel differs from its right, lower, or
    diagonal neighbor inside each 2x2 window).

    Results accumulate into the module-level ``blob_dict`` as
    ``{color: [flat_index, ...]}`` where ``flat_index = row * width + col``.

    NOTE(review): values are built as sets and converted to lists in place at
    the end, so calling this twice without clearing ``blob_dict`` would break
    (lists have no ``.add``) — the main loop clears the dict every frame.
    """
    height = img.height()
    width = img.width()
    global blob_dict

    def _mark(color, index):
        # Record a boundary pixel for this color, creating the set on first use.
        blob_dict.setdefault(color, set()).add(index)

    for r in range(height - 1):
        for c in range(width - 1):
            # The 2x2 window anchored at (c, r).
            top_left = img.get_pixel(c, r)
            top_right = img.get_pixel(c + 1, r)
            bottom_right = img.get_pixel(c + 1, r + 1)
            bottom_left = img.get_pixel(c, r + 1)
            # Flat (row-major) indices of the four corners.
            tl_i = r * width + c
            tr_i = r * width + c + 1
            bl_i = (r + 1) * width + c
            br_i = (r + 1) * width + c + 1

            if top_left != top_right:       # left to right edge
                _mark(top_left, tl_i)
                _mark(top_right, tr_i)
            if top_left != bottom_left:     # top to bottom edge
                _mark(top_left, tl_i)
                _mark(bottom_left, bl_i)
            if top_left != bottom_right:    # main-diagonal edge
                _mark(top_left, tl_i)
                _mark(bottom_right, br_i)
            if bottom_left != top_right:    # anti-diagonal edge
                _mark(bottom_left, bl_i)
                _mark(top_right, tr_i)

    # Freeze each color's index set into a list. Only values are replaced,
    # never keys, so mutating while iterating is safe here.
    for color, indices in blob_dict.items():
        blob_dict[color] = list(indices)
    #print("unsorted dict created.")


# Global accumulator filled by read_image(): {color: [flat pixel indices]}.
blob_dict={}
while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    img_cond = scale_img3(img)      # 3x upscale: allocates a 9x-size heap image each frame.
    read_image(img_cond)            # Populate blob_dict from the scaled image.
    print("len(blob_dict)=",len(blob_dict),"fps:",clock.fps())
    blob_dict.clear()               # Reset per-frame state.
    del img_cond                    # Release the large scaled image...
    gc.collect()                    # ...and compact the heap before the next big allocation.

I am trying to run the above code and I get a MemoryError on QQQVGA and above. Is there anything I can do to fix the error?

Thanks,

Hi, why are you doing pixel level processing in the python API? You should be using the C API.

Right now you are running out of heap because you are creating an object per pixel. This is very slow and wasteful of RAM.

Okay. Where should I start. Can I program the cam in c within openmv ide? How? Should I build the firmware myself to do so?

Hi, have you looked into our API to see what functions we have? Do they not meet your needs?

I wish to get the pixels around the perimeter of a blob. Can I do that in C or MicroPython?

Find_Blobs in our API returns the perimeter in pixels of a blob.

Did you mean this upython api image — machine vision — MicroPython 1.19 documentation
I thought it only returned the count of pixels in the perimeter of the blob. Thanks. I will try it.

See the blob object: image — machine vision — MicroPython 1.19 documentation

import sensor, image, time
sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # Pixel format: 8-bit grayscale (one value per pixel).
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings take effect.
clock = time.clock()                # Create a clock object to track the FPS.
while(True):
    clock.tick()                    # Update the FPS clock.
    img = sensor.snapshot()         # Take a picture and return the image.
    blobs = img.find_blobs([(0,255)])   # NOTE: (0,255) matches every grayscale value, so connected regions merge into one blob.
    print("blobs=",len(blobs),"fps:",clock.fps())

I ran the above code and i am getting only one blob where i expect lot many blobs to be detected? I want blob of every grayscale color in the img to be detected.

Did you notice that your thresholds for blob detection are actually covering the whole gray-scale range?

This threshold (0,255) detects all pixels.

Adjust to (200,255)

(200,255) gives me more blobs, but I think find_blobs considers blobs differently from what I expected. If there are n unique shades of grey in the image, I considered a blob to be a group of same-colored pixels that are connected to one another. So if n1 is a shade of grey, all the pixels in the image which have the color n1 and which are connected / neighboring to one another become a blob. So there are at least n blobs in the image.

yeah ,
the problem with your script is that you don’t filter out any unwanted blobs.

image.find_blobs(thresholds[, invert=False[, roi[, x_stride=2[, y_stride=1[, area_threshold=10[, pixels_threshold=10[, merge=False[, margin=0[, threshold_cb=None[, merge_cb=None[, x_hist_bins_max=0[, y_hist_bins_max=0]]]]]]]]]]]])¶

look also for the whole class for further filtering:

https://docs.openmv.io/library/omv.image.html?highlight=find+blobs#image.image.Image.image.find_blobs