Hi, I want to write code for face detection with emotion classification. I am unsure how to modify my code, as I was using the tf module and I understand that module has now been removed. Below is my current code. Please advise.
# Face detection + emotion classification on an OpenMV camera.
# NOTE: the legacy `tf` module was removed from recent OpenMV firmware;
# this version uses its replacement, the `ml` module.
import sensor, image, time, ml

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)  # let auto gain / white balance settle
clock = time.clock()

# load_to_fb=True stores the model in the frame buffer, saving MicroPython heap.
net = ml.Model("trained.tflite", load_to_fb=True)
# Read one label per line; close the file promptly instead of leaking the handle.
with open("labels.txt") as label_file:
    labels = [line.rstrip('\n') for line in label_file]

while True:
    clock.tick()
    # Take a picture and brighten things up for the frontal face detector.
    img = sensor.snapshot().gamma_corr(contrast=1.5)
    # Returns a list of rects (x, y, w, h) where faces are.
    faces = img.find_features(image.HaarCascade("frontalface"))
    for f in faces:
        # Classify the face ROI and get the class-score list.
        # ml.Normalization(roi=...) replaces the old tf predict(img, roi=...) form.
        scores = net.predict([ml.Normalization(roi=f)(img)])[0].flatten().tolist()
        # Find the highest class score and look up the label for it.
        label = labels[scores.index(max(scores))]
        # Draw a box around the face.
        img.draw_rectangle(f)
        # Draw the label above the face.
        img.draw_string(f[0] + 3, f[1] - 1, label, mono_space=False)
    print(clock.fps())
# Face detection + emotion classification using the modern OpenMV `ml` module.
# Per-face inference is done through an ml.Normalization pre-processor, which
# carries per-input arguments (here the ROI) into Model.predict().
import sensor, image, time, ml

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)  # let auto gain / white balance settle
clock = time.clock()

# load_to_fb=True stores the model in the frame buffer, saving MicroPython heap.
net = ml.Model("trained.tflite", load_to_fb=True)
# Read one label per line; close the file promptly instead of leaking the handle.
with open("labels.txt") as label_file:
    labels = [line.rstrip('\n') for line in label_file]

while True:
    clock.tick()
    # Take a picture and brighten things up for the frontal face detector.
    img = sensor.snapshot().gamma_corr(contrast=1.5)
    # Returns a list of rects (x, y, w, h) where faces are.
    faces = img.find_features(image.HaarCascade("frontalface"))
    for f in faces:
        # The normalization object scopes this prediction to the face ROI.
        norm = ml.Normalization(roi=f)
        # Classify the face and get the class-score list.
        scores = net.predict([norm(img)])[0].flatten().tolist()
        # Find the highest class score and look up the label for it.
        label = labels[scores.index(max(scores))]
        # Draw a box around the face.
        img.draw_rectangle(f)
        # Draw the label above the face.
        img.draw_string(f[0] + 3, f[1] - 1, label, mono_space=False)
    print(clock.fps())
While the normalization object is a bit awkward, it’s the only way to allow for multi-input networks to have arguments per input.
In the future you will be able to have a network operating on camera data and distance-sensor data at the same time, and so on.
You could also do image crops too:
# Variant: instead of an ml.Normalization ROI, hand predict() an explicit
# cropped copy of each detected face.
import sensor, image, time, ml

sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)  # let auto gain / white balance settle
clock = time.clock()

# load_to_fb=True stores the model in the frame buffer, saving MicroPython heap.
net = ml.Model("trained.tflite", load_to_fb=True)
# Read one label per line; close the file promptly instead of leaking the handle.
with open("labels.txt") as label_file:
    labels = [line.rstrip('\n') for line in label_file]

while True:
    clock.tick()
    # Take a picture and brighten things up for the frontal face detector.
    img = sensor.snapshot().gamma_corr(contrast=1.5)
    # Returns a list of rects (x, y, w, h) where faces are.
    faces = img.find_features(image.HaarCascade("frontalface"))
    for f in faces:
        # copy=True so the crop is a new image and the full frame stays intact
        # for the remaining faces and the drawing calls below.
        scores = net.predict([img.crop(roi=f, copy=True)])[0].flatten().tolist()
        # Find the highest class score and look up the label for it.
        label = labels[scores.index(max(scores))]
        # Draw a box around the face.
        img.draw_rectangle(f)
        # Draw the label above the face.
        img.draw_string(f[0] + 3, f[1] - 1, label, mono_space=False)
    print(clock.fps())