OpenCV  5.0.0alpha
Open Source Computer Vision
Loading...
Searching...
No Matches
How to run deep networks in browser

Prev Tutorial: YOLO DNNs
Next Tutorial: Custom deep learning layers support

Original author Dmitry Kurtaev
Compatibility OpenCV >= 3.3.1

Introduction

This tutorial will show you how to run deep learning models using OpenCV.js right in a browser. The tutorial walks through a sample that pipelines face detection and face recognition models.

Face detection

The face detection network takes a BGR image as input and produces a set of bounding boxes that might contain faces. All we need to do is select the boxes with a strong confidence.

Face recognition

The network is called OpenFace (project https://github.com/cmusatyalab/openface). The face recognition model receives an RGB face image (note: the sample code below resizes faces to 112x112 before inference). It returns a 128-dimensional unit vector that represents the input face as a point on a multidimensional unit sphere, so the difference between two faces is the angle between their output vectors.

Sample

The whole sample is an HTML page with JavaScript code that uses OpenCV.js functionality. You can see this page embedded below. Press the Start button to begin the demo. Press Add a person to name a person that is currently recognized as unknown. Next we'll discuss the main parts of the code.

  1. Run face detection network to detect faces on input image.
    // Detect faces on a BGR image and return them as plain rectangle
    // objects. The detector emits 15 floats per detection:
    // [x, y, w, h, 5 landmark (x, y) pairs, confidence].
    function detectFaces(img) {
      netDet.setInputSize(new cv.Size(img.cols, img.rows));
      const detections = new cv.Mat();
      netDet.detect(img, detections);

      const data = detections.data32F;
      const maxX = img.cols - 1;
      const maxY = img.rows - 1;
      const clamp = (v, hi) => Math.min(Math.max(0, v), hi);

      const faces = [];
      for (let off = 0, n = data.length; off < n; off += 15) {
        // Clamp the box corners to the image so the ROI is always valid.
        const left = clamp(data[off], maxX);
        const top = clamp(data[off + 1], maxY);
        const right = clamp(data[off] + data[off + 2], maxX);
        const bottom = clamp(data[off + 1] + data[off + 3], maxY);

        // Skip degenerate boxes that collapsed after clamping.
        if (left >= right || top >= bottom) {
          continue;
        }

        const face = {
          x: left,
          y: top,
          width: right - left,
          height: bottom - top,
          confidence: data[off + 14]
        };
        // Five facial landmarks; points outside the image are flagged with -1.
        for (let p = 0; p < 5; p++) {
          const lx = data[off + 4 + 2 * p];
          const ly = data[off + 5 + 2 * p];
          face['x' + (p + 1)] = (lx < 0 || lx > maxX) ? -1 : lx;
          face['y' + (p + 1)] = (ly < 0 || ly > maxY) ? -1 : ly;
        }
        faces.push(face);
      }
      detections.delete();
      return faces;
    };
You may play with the input blob size to balance detection quality and efficiency: the bigger the input blob, the smaller the faces that can be detected.
  2. Run the face recognition network to obtain a 128-dimensional unit feature vector from the input face image.
    // Map a cropped BGR face image to its feature vector.
    // The face is resized to 112x112 and converted to RGB (swapRB = true,
    // crop = false) before being fed to the recognition network.
    function face2vec(face) {
      const inputBlob = cv.blobFromImage(face, 1.0, {width: 112, height: 112}, [0, 0, 0, 0], true, false);
      netRecogn.setInput(inputBlob);
      const features = netRecogn.forward();
      inputBlob.delete();
      return features;
    };
  3. Perform a recognition.
    // Match a face against every registered person's feature vector.
    // Returns the best-matching name, or 'unknown' if no similarity score
    // clears the threshold.
    function recognize(face) {
      const vec = face2vec(face);
      let bestMatchName = 'unknown';
      let bestMatchScore = 30; // Threshold for face recognition.
      // FIX: declare the loop variable. The original `for (name in persons)`
      // created an implicit global — in a browser that is `window.name`,
      // which silently coerces assigned values to strings.
      for (const name in persons) {
        const score = vec.dot(persons[name]); // similarity of the two feature vectors
        if (score > bestMatchScore) {
          bestMatchScore = score;
          bestMatchName = name;
        }
      }
      vec.delete();
      return bestMatchName;
    };
Match a new feature vector against the registered ones and return the name of the best-matched person.
  4. The main loop.
    // `var` (not `let`) so the flag stays a window property in case other
    // scripts toggle it via `window.isRunning`.
    var isRunning = false;
    const FPS = 30; // Target number of frames processed per second.

    // Grab one camera frame, detect + recognize all faces, draw the results,
    // then re-schedule itself while `isRunning` is true.
    function captureFrame() {
      const begin = Date.now();
      cap.read(frame); // Read a frame from camera (RGBA).
      cv.cvtColor(frame, frameBGR, cv.COLOR_RGBA2BGR); // Networks expect BGR.
      const faces = detectFaces(frameBGR);
      faces.forEach(function(rect) {
        // Bounding box plus the five landmarks (skipped when flagged as -1).
        cv.rectangle(frame, {x: rect.x, y: rect.y}, {x: rect.x + rect.width, y: rect.y + rect.height}, [0, 255, 0, 255]);
        if (rect.x1 > 0 && rect.y1 > 0)
          cv.circle(frame, {x: rect.x1, y: rect.y1}, 2, [255, 0, 0, 255], 2)
        if (rect.x2 > 0 && rect.y2 > 0)
          cv.circle(frame, {x: rect.x2, y: rect.y2}, 2, [0, 0, 255, 255], 2)
        if (rect.x3 > 0 && rect.y3 > 0)
          cv.circle(frame, {x: rect.x3, y: rect.y3}, 2, [0, 255, 0, 255], 2)
        if (rect.x4 > 0 && rect.y4 > 0)
          cv.circle(frame, {x: rect.x4, y: rect.y4}, 2, [255, 0, 255, 255], 2)
        if (rect.x5 > 0 && rect.y5 > 0)
          cv.circle(frame, {x: rect.x5, y: rect.y5}, 2, [0, 255, 255, 255], 2)
        const face = frameBGR.roi(rect);
        const name = recognize(face);
        // FIX: release the ROI Mat. OpenCV.js Mats live on the Emscripten
        // heap and are not garbage collected — the original leaked one Mat
        // per face per frame.
        face.delete();
        cv.putText(frame, name, {x: rect.x, y: rect.y}, cv.FONT_HERSHEY_SIMPLEX, 1.0, [0, 255, 0, 255]);
      });
      cv.imshow(output, frame);
      // Loop this function, compensating for the time this frame took.
      if (isRunning) {
        const delay = 1000 / FPS - (Date.now() - begin);
        setTimeout(captureFrame, delay); // negative delay is clamped to 0
      }
    };
The main loop of our application receives frames from a camera and recognizes every face detected on the frame. We start this function once OpenCV.js has been initialized and the deep learning models have been downloaded.