Skip to content

Commit

Permalink
Revert "chore(liveness): upgrade blazeface model (#4685)" (#4736)
Browse files Browse the repository at this point in the history
This reverts commit b289850.
  • Loading branch information
reesscot authored Nov 15, 2023
1 parent 7fc340f commit c1141da
Show file tree
Hide file tree
Showing 12 changed files with 184 additions and 223 deletions.
5 changes: 0 additions & 5 deletions .changeset/red-seas-attack.md

This file was deleted.

4 changes: 2 additions & 2 deletions canary/e2e/features/liveness/face-detect.feature
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,5 @@ Feature: Liveness Start Screen

@react
Scenario: Blazeface CDN is up
Then I request "https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow/tfjs-backend-wasm/4.11.0/tfjs-backend-wasm-simd.wasm" and get "200"
Then I request "https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow-models/blazeface/1.0.2/model/model.json" and get "200"
Then I request "https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow/tfjs-backend-wasm/3.11.0/tfjs-backend-wasm-simd.wasm" and get "200"
Then I request "https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow-models/blazeface/0.0.7/model/model.json" and get "200"
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@ import { CustomizationCdn } from './CustomizationCdn';
FaceLivenessDetector allows overriding the default hosted CDN and providing your own. These CDN-hosted files are loaded by the TensorFlow library at runtime. You can host your own CDN by following the instructions below:

1. Download the TFJS wasm and blazeface files:
- Download the wasm file for tfjs here: https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@4.11.0/dist/tfjs-backend-wasm-simd.wasm
- Download https://tfhub.dev/mediapipe/tfjs-model/face_detection/short/1 and unzip the tar file. It should contain a `model.json` file and a `group1-shard1of1.bin` file.
- Download the wasm file for tfjs here: https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@3.11.0/dist/tfjs-backend-wasm-simd.wasm
- Download https://tfhub.dev/tensorflow/tfjs-model/blazeface/1/default/1 and unzip the tar file. It should contain a `model.json` file and a `group1-shard1of1.bin` file.
1. Host all three files alongside your JS files on your own server
1. Update FaceLivenessDetector code:

Expand Down
13 changes: 6 additions & 7 deletions packages/react-liveness/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -55,12 +55,11 @@
"@smithy/eventstream-serde-browser": "^2.0.4",
"@smithy/fetch-http-handler": "^2.1.3",
"@smithy/protocol-http": "^3.0.3",
"@mediapipe/face_detection": "~0.4.0",
"@tensorflow-models/face-detection": "1.0.2",
"@tensorflow/tfjs-backend-cpu": "4.11.0",
"@tensorflow/tfjs-backend-wasm": "4.11.0",
"@tensorflow/tfjs-converter": "4.11.0",
"@tensorflow/tfjs-core": "4.11.0",
"@tensorflow-models/blazeface": "0.0.7",
"@tensorflow/tfjs-backend-cpu": "3.11.0",
"@tensorflow/tfjs-backend-wasm": "3.11.0",
"@tensorflow/tfjs-converter": "3.11.0",
"@tensorflow/tfjs-core": "3.11.0",
"@xstate/react": "^3.2.2",
"nanoid": "3.1.31",
"tslib": "^2.5.2",
Expand All @@ -81,7 +80,7 @@
"name": "FaceLivenessDetector",
"path": "dist/esm/index.mjs",
"import": "{ FaceLivenessDetector }",
"limit": "291 kB"
"limit": "275 kB"
}
]
}
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,6 @@ describe('Liveness Machine', () => {
leftEye: [200, 200],
mouth: [200, 200],
nose: [200, 200],
rightEar: [200, 200],
leftEar: [200, 200],
};
const mockOvalDetails: LivenessOvalDetails = {
height: 100,
Expand Down Expand Up @@ -593,7 +591,7 @@ describe('Liveness Machine', () => {
).toStrictEqual({
Height: 0,
Left: 0.6875,
Top: 0.625,
Top: 0.4166666666666667,
Width: 0,
})
);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,6 @@ export interface Face {
leftEye: Coordinate;
mouth: Coordinate;
nose: Coordinate;
rightEar: Coordinate;
leftEar: Coordinate;
}

export type Coordinate = [number, number];
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,6 @@ export const mockFace: Face = {
leftEye: [200, 200],
mouth: [200, 200],
nose: [200, 200],
rightEar: [200, 200],
leftEar: [200, 200],
};
export const mockOvalDetails: LivenessOvalDetails = {
height: 100,
Expand Down
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
/* eslint-disable */
import 'jest-canvas-mock';
import * as blazeface from '@tensorflow-models/face-detection';
import * as blazeface from '@tensorflow-models/blazeface';
import { isWebAssemblySupported } from '../support';

import { BlazeFaceFaceDetection } from '../blazefaceFaceDetection';

const mockEstimateFace = jest.fn();
jest.mock('@tensorflow-models/face-detection');
jest.mock('@tensorflow/tfjs-core');
jest.mock('@tensorflow-models/blazeface');
jest.mock('@tensorflow/tfjs-backend-wasm', () => {
return {
setWasmPaths: jest.fn(),
Expand All @@ -19,22 +20,15 @@ jest.mock('@aws-amplify/core/internals/utils', () => ({
}));
jest.mock('../support');

const MOCK_NORMALIZED_FACE: blazeface.Face = {
box: {
xMin: 0,
yMin: 0,
xMax: 100,
yMax: 100,
width: 100,
height: 100,
},
keypoints: [
{ x: 50, y: 50, name: 'rightEye' },
{ x: 50, y: 50, name: 'leftEye' },
{ x: 50, y: 50, name: 'noseTip' },
{ x: 50, y: 50, name: 'mouthCenter' },
{ x: 50, y: 50, name: 'leftEarTragion' },
{ x: 50, y: 50, name: 'rightEarTragion' },
const MOCK_NORMALIZED_FACE: blazeface.NormalizedFace = {
bottomRight: [100, 0],
topLeft: [0, 100],
probability: 90,
landmarks: [
[50, 50],
[50, 50],
[50, 50],
[50, 50],
],
};

Expand All @@ -46,7 +40,7 @@ describe('blazefaceFaceDetection', () => {
mockIsWebAssemblySupported
);
mockIsWebAssemblySupported.mockReturnValue(true);
mockEstimateFace.mockResolvedValue([MOCK_NORMALIZED_FACE]);
mockEstimateFace.mockResolvedValue([{}, MOCK_NORMALIZED_FACE]);
});

it('can be initialized', () => {
Expand Down Expand Up @@ -76,8 +70,8 @@ describe('blazefaceFaceDetection', () => {

expect(face.height).toBe(100);
expect(face.width).toBe(100);
expect(face.top).toBe(0);
expect(face.left).toBe(0);
expect(face.top).toBe(100);
expect(face.left).toBe(100);
expect(face.leftEye).toStrictEqual([50, 50]);
expect(face.rightEye).toStrictEqual([50, 50]);
expect(face.mouth).toStrictEqual([50, 50]);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,25 +58,23 @@ describe('Liveness Helper', () => {
expect(faceMatchPercentage).toBe(0);
});

// Note: if this test fails after a face-detection model update, log the detected `face` value, verify it looks correct, and copy the logged values into this test.
it('should return matched', () => {
const face: Face = {
top: 89.82275009155273,
left: 188.85473251342773,
width: 375.7287788391113,
height: 375.66087722778315,
timestampMs: 1696575105415,
rightEye: [291.9423294067383, 241.10103607177734],
leftEye: [435.9743881225586, 264.43485260009766],
mouth: [336.0434341430664, 417.43167877197266],
nose: [336.7517852783203, 387.286376953125],
rightEar: [236.0750961303711, 193.77853393554685],
leftEar: [520.6099700927734, 238.4494781494141],
top: 150.07504272460938,
left: 106.4375,
width: 432.6287078857422,
height: 324.4761657714844,
timestampMs: 1683674498221,
probability: 0.9998714923858643,
rightEye: [409.51867485046387, 253.7000298500061],
leftEye: [251.89466857910156, 246.67850017547607],
mouth: [320.84685707092285, 404.4503116607666],
nose: [326.97374725341797, 341.47182762622833],
};
const ovalDetails: LivenessOvalDetails = {
centerX: 320,
centerY: 240,
flippedCenterX: 320,
centerX: 305,
centerY: 237,
flippedCenterX: 335,
height: 512,
width: 316,
};
Expand Down Expand Up @@ -107,8 +105,6 @@ describe('Liveness Helper', () => {
leftEye: [318.5161700248718, 253.94269466400146],
mouth: [339.64158596098423, 298.5959941148758],
nose: [342.7122294306755, 277.0021167397499],
rightEar: [400.5564064979553, 258.19776356220245],
leftEar: [300.5161700248718, 253.94269466400146],
};
const ovalDetails: LivenessOvalDetails = {
flippedCenterX: 346,
Expand Down Expand Up @@ -239,8 +235,6 @@ describe('Liveness Helper', () => {
leftEye: [150, 100],
mouth: [100, 100],
nose: [100, 100],
rightEar: [0, 100],
leftEar: [150, 100],
};
mockBlazeFace.detectFaces.mockResolvedValue([mockCloseFace]);

Expand Down
Original file line number Diff line number Diff line change
@@ -1,12 +1,8 @@
import { ready, setBackend } from '@tensorflow/tfjs-core';
import {
FaceDetector,
Keypoint,
SupportedModels,
createDetector,
} from '@tensorflow-models/face-detection';
import { setWasmPaths, version_wasm } from '@tensorflow/tfjs-backend-wasm';
import * as tf from '@tensorflow/tfjs-core';
import * as blazeface from '@tensorflow-models/blazeface';

// TODO: Figure out if we should lazy load these or not.
import * as tfjsWasm from '@tensorflow/tfjs-backend-wasm';
import '@tensorflow/tfjs-backend-cpu';

import { jitteredExponentialRetry } from '@aws-amplify/core/internals/utils';
Expand All @@ -16,14 +12,14 @@ import { FaceDetection, Face, Coordinate } from '../types';

type BlazeFaceModelBackend = 'wasm' | 'cpu';

export const BLAZEFACE_VERSION = '1.0.2';
export const BLAZEFACE_VERSION = '0.0.7';

/**
* WARNING: When updating these links,
* also make sure to update documentation and the link in the canary/e2e test "canary/e2e/features/liveness/face-detect.feature"
*/
export const DEFAULT_BLAZEFACE_URL = `https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow-models/blazeface/${BLAZEFACE_VERSION}/model/model.json`;
export const DEFAULT_TFJS_WASM_URL = `https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow/tfjs-backend-wasm/${version_wasm}/`;
export const DEFAULT_TFJS_WASM_URL = `https://cdn.liveness.rekognition.amazonaws.com/face-detection/tensorflow/tfjs-backend-wasm/${tfjsWasm.version_wasm}/`;

/**
* The BlazeFace implementation of the FaceDetection interface.
Expand All @@ -32,7 +28,7 @@ export class BlazeFaceFaceDetection extends FaceDetection {
modelBackend!: BlazeFaceModelBackend;
faceModelUrl: string | undefined;
binaryPath: string;
private _model!: FaceDetector;
private _model!: blazeface.BlazeFaceModel;

constructor(binaryPath?: string, faceModelUrl?: string) {
super();
Expand All @@ -48,14 +44,12 @@ export class BlazeFaceFaceDetection extends FaceDetection {
}

try {
await ready();
this._model = await createDetector(
SupportedModels.MediaPipeFaceDetector,
await tf.ready();
this._model = await jitteredExponentialRetry(blazeface.load, [
{
runtime: 'tfjs',
detectorModelUrl: this.faceModelUrl,
}
);
modelUrl: this.faceModelUrl,
},
]);
} catch (e) {
throw new Error(
'There was an error loading the blazeface model. If you are using a custom blazeface model url ensure that it is a fully qualified url that returns a json file.'
Expand All @@ -64,53 +58,54 @@ export class BlazeFaceFaceDetection extends FaceDetection {
}

async detectFaces(videoEl: HTMLVideoElement): Promise<Face[]> {
const returnTensors = false;
const flipHorizontal = true;
const predictions = await this._model.estimateFaces(videoEl, {
const annotateBoxes = true;
const predictions = await this._model.estimateFaces(
videoEl,
returnTensors,
flipHorizontal,
});
annotateBoxes
);

const timestampMs = Date.now();

const faces: Face[] = predictions.map((prediction) => {
const { box, keypoints } = prediction;
const { xMin: left, yMin: top, width, height } = box;
const rightEye = this._getCoordinate(keypoints, 'rightEye');
const leftEye = this._getCoordinate(keypoints, 'leftEye');
const nose = this._getCoordinate(keypoints, 'noseTip');
const mouth = this._getCoordinate(keypoints, 'mouthCenter');
const rightEar = this._getCoordinate(keypoints, 'rightEarTragion');
const leftEar = this._getCoordinate(keypoints, 'leftEarTragion');
const probability = [90];

return {
top,
left,
width,
height,
timestampMs,
probability: (probability as unknown as [number])[0], // probability in reality is [number] but is typed as number | Tensor.1d
rightEye,
leftEye,
mouth,
nose,
rightEar,
leftEar,
};
});
const faces: Face[] = predictions
.filter((prediction) => !!prediction.landmarks)
.map((prediction) => {
const { topLeft, bottomRight, probability, landmarks } = prediction;

const [right, top] = topLeft as Coordinate; // right, top because the prediction is flipped
const [left, bottom] = bottomRight as Coordinate; // left, bottom because the prediction is flipped
const width = Math.abs(right - left);
const height = Math.abs(bottom - top);
const rightEye = (landmarks as Coordinate[])[0];
const leftEye = (landmarks as Coordinate[])[1];
const nose = (landmarks as Coordinate[])[2];
const mouth = (landmarks as Coordinate[])[3];

return {
top,
left,
width,
height,
timestampMs,
probability: (probability as unknown as [number])[0], // probability in reality is [number] but is typed as number | Tensor.1d
rightEye,
leftEye,
mouth,
nose,
};
});

return faces;
}

private _getCoordinate(keypoints: Keypoint[], name: string): Coordinate {
const keypoint = keypoints.find((k) => k.name === name)!;
return [keypoint.x, keypoint.y];
}

private async _loadWebAssemblyBackend() {
try {
setWasmPaths(this.binaryPath);
tfjsWasm.setWasmPaths(this.binaryPath);
await jitteredExponentialRetry(async () => {
const success = await setBackend('wasm');
const success = await tf.setBackend('wasm');
if (!success) {
throw new Error(`Initialization of backend wasm failed`);
}
Expand All @@ -124,7 +119,7 @@ export class BlazeFaceFaceDetection extends FaceDetection {
}

private async _loadCPUBackend() {
await setBackend('cpu');
await tf.setBackend('cpu');
this.modelBackend = 'cpu';
}
}
Loading

0 comments on commit c1141da

Please sign in to comment.