Age and gender detection with OpenCV on the Raspberry Pi

Categories Deep Learning, Machine Learning, OpenCV, Raspberry Pi

OpenCV is pretty powerful in that it does not stop at computer vision related functionality. It has a very powerful DNN (Deep Neural Network) module that can parse and use pre-trained models from several popular deep learning toolkits.

In this video, I explain how we can take a popular pre-trained, deep-learning-based Caffe model and use it in OpenCV. While doing so, we will use OpenCV's Haar cascade module to detect faces and send the age and gender detection model just the cropped face from the full image captured by the Raspberry Pi's camera.

UPDATE: The age/gender model zip file used in the video no longer exists in the original location. You can download it from here  Age/Gender Model File (2708 downloads)

Here is the final code. Please watch the video to see a full and detailed explanation and walk-through of this code:

# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import imutils
import time
import cv2
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))

# BGR channel means of the training data (from mean.binaryproto); these are
# subtracted from each face crop before it is fed to the Caffe nets.
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
# Output labels, index-aligned with the softmax outputs of the two nets.
age_list = ['(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)', '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)']
gender_list = ['Male', 'Female']

# allow the camera to warmup (the comment was here, but the sleep was missing)
time.sleep(0.1)

def initialize_caffe_model():
    """Load the pre-trained Caffe age and gender classification networks.

    Returns:
        (age_net, gender_net): two cv2.dnn networks ready for inference.
    """
    print('Loading models...')
    # readNetFromCaffe needs both the architecture (.prototxt) and the
    # trained weights (.caffemodel) for each network; the original post
    # truncated these calls, leaving them without arguments.
    age_net = cv2.dnn.readNetFromCaffe(
        'deploy_age.prototxt', 'age_net.caffemodel')
    gender_net = cv2.dnn.readNetFromCaffe(
        'deploy_gender.prototxt', 'gender_net.caffemodel')

    return (age_net, gender_net)

def capture_loop(age_net, gender_net):
    """Grab frames from the Pi camera, detect faces, and overlay the
    predicted gender and age bucket on each face until `q` is pressed.

    Args:
        age_net: cv2.dnn network whose softmax output indexes age_list.
        gender_net: cv2.dnn network whose softmax output indexes gender_list.
    """
    # Load the face detector and font once, not on every frame.
    face_cascade = cv2.CascadeClassifier(
        '/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
    font = cv2.FONT_HERSHEY_SIMPLEX

    # capture frames from the camera
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        # grab the raw NumPy array representing the image
        image = frame.array
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.1, 5)
        print("Found "+str(len(faces))+" face(s)")

        # Classify and annotate every found face
        for (x, y, w, h) in faces:
            face_img = image[y:y+h, x:x+w].copy()
            blob = cv2.dnn.blobFromImage(face_img, 1, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
            # Predict gender — the blob must be fed to the net before forward()
            gender_net.setInput(blob)
            gender_preds = gender_net.forward()
            gender = gender_list[gender_preds[0].argmax()]
            # Predict age
            age_net.setInput(blob)
            age_preds = age_net.forward()
            age = age_list[age_preds[0].argmax()]
            overlay_text = "%s, %s" % (gender, age)
            cv2.putText(image, overlay_text, (x, y), font, 2, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow("Image", image)

        key = cv2.waitKey(1) & 0xFF
        # clear the stream in preparation for the next frame — without this,
        # capture_continuous raises on the second iteration
        rawCapture.truncate(0)
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

if __name__ == '__main__':
    # Load both nets once, then run the capture/annotation loop until quit.
    nets = initialize_caffe_model()
    capture_loop(*nets)

For those of you interested in the mean value generation Python code, here it is:

import caffe
import numpy as np

# Compute the per-channel (B, G, R) mean pixel values from a Caffe
# mean.binaryproto file — these are the MODEL_MEAN_VALUES used above.
blob = caffe.proto.caffe_pb2.BlobProto()
with open('mean.binaryproto', 'rb') as f:
    # The original snippet never parsed the file, so the blob stayed empty,
    # and it averaged the scalar dimension fields instead of the pixel data.
    blob.ParseFromString(f.read())
    data = np.array(blob.data).reshape(blob.channels, blob.height, blob.width)
    print(np.mean(data[0]), np.mean(data[1]), np.mean(data[2]))