about-Leaflet / VRChat.html
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Face Mesh Detection</title>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/facemesh"></script>
    <link
        rel="stylesheet"
        href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free@5.6.3/css/all.min.css"
    />
</head>
<body>
    <p><button onclick="start()">Start</button></p>
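    <!-- Left: live camera preview. Right: canvas where the detected landmarks are drawn;
         the spinner overlays the canvas while the camera and model are loading. -->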
    <div>
        <video
            id="input"
            width="320"
            height="240"
            autoplay
            muted
            playsinline
            style="
                border: solid 1px black;
                box-sizing: content-box;
                display: inline-block;
            "
        ></video>
        <span style="position: relative; display: inline-block;">
            <span
                id="loadingicon"
                class="fa fa-spinner fa-spin"
                style="
                    position: absolute;
                    font-size: 100px;
                    margin: 70px 110px;
                    display: none;
                "
            ></span>
            <canvas
                id="output"
                width="320"
                height="240"
                style="border: solid 1px black; box-sizing: content-box"
            ></canvas>
        </span>
    </div>
 
    <script>
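    // Attach a 320x240 webcam stream to the <video> element and resolve
    // once the video metadata has loaded and playback has started.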
    async function setupCamera(videoElement) {
        const constraints = { video: { width: 320, height: 240 }, audio: false };
        const stream = await navigator.mediaDevices.getUserMedia(constraints);
        videoElement.srcObject = stream;
        return new Promise(resolve => {
            videoElement.onloadedmetadata = () => {
                videoElement.play();
                resolve();
            };
        });
    }
 
    // Run a continuous estimate-and-draw loop against the live video element.
    function startRender(input, output, model) {
        const ctx = output.getContext("2d");
        async function renderFrame() {
            // Estimate face landmarks for the current frame
            // (returnTensors = false, flipHorizontal = false).
            const faces = await model.estimateFaces(input, false, false);
            console.log(faces); // for debugging
            ctx.clearRect(0, 0, output.width, output.height);
            // Draw every scaledMesh keypoint ([x, y, z] in input-image pixels) as a small dot.
            faces.forEach(face => {
                face.scaledMesh.forEach(xy => {
                    ctx.beginPath();
                    ctx.arc(xy[0], xy[1], 1, 0, 2 * Math.PI);
                    ctx.fill();
                });
            });
            // Schedule the next frame only after this one finishes,
            // so estimateFaces calls never overlap.
            requestAnimationFrame(renderFrame);
        }
        renderFrame();
    }
 
    function loading(onoff) {
        document.getElementById("loadingicon").style.display = onoff ? "inline" : "none";
    }
 
    async function start() {
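        // Show the spinner while the camera opens and the facemesh model
        // downloads and initializes, then begin the render loop.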
        const input = document.getElementById("input");
        const output = document.getElementById("output");
        loading(true);
        await setupCamera(input);
        const model = await facemesh.load({ maxFaces: 1 });
        startRender(input, output, model);
        loading(false);
    }
    </script>
</body>
</html>