-
Notifications
You must be signed in to change notification settings - Fork 5
/
1_webcam-poses.html
61 lines (47 loc) · 1.92 KB
/
1_webcam-poses.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
<!DOCTYPE html>
<html>
<head>
<!-- Declare the encoding explicitly so browsers don't have to guess. -->
<meta charset="utf-8">
<!-- NOTE(review): the CDN version pins below appear mangled by scraping
     ("[email protected]" is email-protection residue) — restore the original
     @x.y.z pins for tfjs and posenet before using this page. -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/[email protected]"></script>
<script src="./drawingHelpers.js"></script>
<script src="../webcam.js"></script>
<meta name="viewport" content="width=device-width, initial-scale=1">
</head>
<body>
<div id="main">
<!-- Webcam feed is kept hidden and mirrored; frames are drawn onto the canvas instead. -->
<video id="webcam" autoplay playsinline width="640" height="480" style="transform: scaleX(-1); display: none;">
</video>
<canvas id="output" width="640" height="480"></canvas>
</div>
<script>
const flipHorizontal = true;
const imageScaleFactor = 0.5;
const maxPoseDetections = 6;
const minPoseConfidence = 0.2;
const outputStride = 16;
const minPartConfidence = 0.1;
const nmsRadius = 30.0;
window.onload = async () => {
const model = await posenet.load(0.75);
const video = await setupWebcam(document.getElementById('webcam'));
await detectPosesInRealTime(video, model);
};
/**
 * Continuously estimate poses from the webcam feed and draw them onto the
 * #output canvas. Never resolves: it loops forever, yielding control back to
 * the browser once per frame via tf.nextFrame() so the page stays responsive.
 * @param {HTMLVideoElement} video - live webcam element (set up by setupWebcam)
 * @param {Object} model - loaded PoseNet model
 */
async function detectPosesInRealTime(video, model) {
  const canvas = document.getElementById('output');
  const ctx = canvas.getContext('2d');
  while (true) {
    const poses = await model.estimateMultiplePoses(video, imageScaleFactor, flipHorizontal, outputStride,
      maxPoseDetections, minPartConfidence, nmsRadius);
    // Repaint the current video frame first, then overlay keypoints on top.
    renderVideoFeedToContext(ctx, video);
    for (const pose of poses) {  // const: the binding is never reassigned
      if (pose.score >= minPoseConfidence) {
        drawPoseToContext(pose.keypoints, minPartConfidence, ctx);
        // Optional exercise: uncomment to draw a hat on each detected pose.
        // drawHatToContext(pose.keypoints, minPartConfidence, ctx);
      }
    }
    await tf.nextFrame();  // yield to the browser before processing the next frame
  }
}
</script>
</body>
</html>