-
Notifications
You must be signed in to change notification settings - Fork 65
/
demo.py
76 lines (63 loc) · 2.37 KB
/
demo.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
# ---------------------------------------------------------------------
# Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# ---------------------------------------------------------------------
import cv2
import numpy as np
from PIL import Image
from skimage import io
from qai_hub_models.models.facemap_3dmm.app import FaceMap_3DMMApp
from qai_hub_models.models.facemap_3dmm.model import (
MODEL_ASSET_VERSION,
MODEL_ID,
FaceMap_3DMM,
)
from qai_hub_models.utils.args import (
demo_model_from_cli_args,
get_model_cli_parser,
get_on_device_demo_parser,
validate_on_device_demo_args,
)
from qai_hub_models.utils.asset_loaders import CachedWebModelAsset
from qai_hub_models.utils.display import display_or_save_image
# Default demo input: a sample face image fetched (and cached) from the
# QAI Hub Models asset store for this model/version.
INPUT_IMAGE_PATH = str(
    CachedWebModelAsset.from_asset_store(MODEL_ID, MODEL_ASSET_VERSION, "face_img.jpg")
)
# Run FaceMap_3DMM end-to-end on a sample image.
# The demo will display an image with the predicted landmarks overlaid.
def main(is_test: bool = False):
    """Run the FaceMap_3DMM landmark-prediction demo end-to-end on one image.

    Parses CLI arguments, builds the model (locally or on-device), predicts
    facial landmarks for the input image using a precomputed face bounding
    box, and saves/displays the annotated outputs.

    Args:
        is_test: When True, parse an empty argument list (all defaults) and
            skip writing the landmark file and displaying the output image.
    """
    # Demo parameters
    parser = get_model_cli_parser(FaceMap_3DMM)
    parser = get_on_device_demo_parser(parser, add_output_dir=True)
    parser.add_argument(
        "--image",
        type=str,
        default=INPUT_IMAGE_PATH,
        help="image file path or URL",
    )
    args = parser.parse_args([] if is_test else None)
    model = demo_model_from_cli_args(FaceMap_3DMM, MODEL_ID, args)
    validate_on_device_demo_args(args, MODEL_ID)

    # Load image
    image = io.imread(args.image)
    print("Model Loaded")

    app = FaceMap_3DMMApp(model)

    # Get face bounding box info (from file or face detector).
    # BUGFIX: derive the bbox sidecar file ("<name>_fbox.txt") from the
    # image actually selected via --image, instead of always reading the
    # default asset's bbox. Behavior for the default image is unchanged.
    fbox = np.loadtxt(args.image.replace(".jpg", "_fbox.txt"))
    x0, x1, y0, y1 = int(fbox[0]), int(fbox[1]), int(fbox[2]), int(fbox[3])

    lmk, output = app.landmark_prediction(image, x0, x1, y0, y1)

    if not is_test:
        # Predicted landmark coordinates, written as plain text.
        np.savetxt(
            "qai_hub_models/models/facemap_3dmm/demo_output_lmk.txt",
            lmk.detach().numpy(),
        )
        # Annotated image: the app returns BGR (OpenCV convention);
        # convert to RGB before handing it to PIL.
        display_or_save_image(
            Image.fromarray(cv2.cvtColor(output, cv2.COLOR_BGR2RGB)),
            "qai_hub_models/models/facemap_3dmm",
            "demo_output_img.png",
        )
# Script entry point: run the demo with arguments taken from the command line.
if __name__ == "__main__":
    main()