首页 > 解决方案 > TensorFlow JS Face Mesh Uncaught (in promise) TypeError: Cannot read property 'height' of null

问题描述

我正在尝试将 TensorFlow 面部网格(JS)用于简单的面部跟踪器。我已经使用了网站上的标准代码,但我收到了一个高度错误:未捕获(承诺中)TypeError:无法在 main 的 L.estimateFaces (facemesh:17) 的 facemesh:17 处读取 null 的属性“高度” ((指数):76)

我是 JS 的新手,但我不知道哪里出错了。任何帮助将非常感激。

TensorFlow 代码:https ://github.com/tensorflow/tfjs-models/tree/master/facemesh

我的代码:

<body>
    <div id="container">
      <video autoplay="true" id="videoElement">

        <script> //starts video stream
          var video = document.querySelector("#videoElement");

          if (navigator.mediaDevices.getUserMedia) {
            navigator.mediaDevices.getUserMedia({ video: true })
              .then(function (stream) {
                video.srcObject = stream;
              })
              .catch(function (err0r) {
                console.log("Something went wrong!");
              });
          }
        </script>

      </video>
    </div>

<script type="text/javascript">
//navigator.getUserMedia= navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia || navigator.oGetUserMedia;



    </script>

    </script>
    <script>
      const videoFeed = document.querySelector('stream');
      // checking the video has loaded in 
      video.onloadeddata = (event) => {
        console.log('video loaded');
      };
      window.onload = function () {


        async function main() {
          // Load the MediaPipe facemesh model.
          const model = await facemesh.load();

          // Pass in a video stream (or an image, canvas, or 3D tensor) to obtain an
          // array of detected faces from the MediaPipe graph.
          const predictions = await model.estimateFaces(document.querySelector('stream'));

          if (predictions.length > 0) {
            /*
            `predictions` is an array of objects describing each detected face, for example:
        
            [
              {
                faceInViewConfidence: 1, // The probability of a face being present.
                boundingBox: { // The bounding box surrounding the face.
                  topLeft: [232.28, 145.26],
                  bottomRight: [449.75, 308.36],
                },
                mesh: [ // The 3D coordinates of each facial landmark.
                  [92.07, 119.49, -17.54],
                  [91.97, 102.52, -30.54],
                  ...
                ],
                scaledMesh: [ // The 3D coordinates of each facial landmark, normalized.
                  [322.32, 297.58, -17.54],
                  [322.18, 263.95, -30.54]
                ],
                annotations: { // Semantic groupings of the `scaledMesh` coordinates.
                  silhouette: [
                    [326.19, 124.72, -3.82],
                    [351.06, 126.30, -3.00],
                    ...
                  ],
                  ...
                }
              }
            ]
            */

            for (let i = 0; i < predictions.length; i++) {
              const keypoints = predictions[i].scaledMesh;

              // Log facial keypoints.
              for (let i = 0; i < keypoints.length; i++) {
                const [x, y, z] = keypoints[i];

                console.log(`Keypoint ${i}: [${x}, ${y}, ${z}]`);
              }
            }
          }
        }

        main();

标签: javascripttensorflow

解决方案


在 main() 函数中声明视频元素

async function main() {
 const model = await facemesh.load();

 const videoFeed = document.getElementById('videoElement');
 video.onloadeddata = (event) => {
        console.log('video loaded');
      };
      window.onload = function ()
 const predictions = await model.estimateFaces(document.querySelector('stream'));

          if (predictions.length > 0) {
            /*
            `predictions` is an array of objects describing each detected face, for example:
        
            [
              {
                faceInViewConfidence: 1, // The probability of a face being present.
                boundingBox: { // The bounding box surrounding the face.
                  topLeft: [232.28, 145.26],
                  bottomRight: [449.75, 308.36],
                },
                mesh: [ // The 3D coordinates of each facial landmark.
                  [92.07, 119.49, -17.54],
                  [91.97, 102.52, -30.54],
                  ...
                ],
                scaledMesh: [ // The 3D coordinates of each facial landmark, normalized.
                  [322.32, 297.58, -17.54],
                  [322.18, 263.95, -30.54]
                ],
                annotations: { // Semantic groupings of the `scaledMesh` coordinates.
                  silhouette: [
                    [326.19, 124.72, -3.82],
                    [351.06, 126.30, -3.00],
                    ...
                  ],
                  ...
                }
              }
            ]
            */

            for (let i = 0; i < predictions.length; i++) {
              const keypoints = predictions[i].scaledMesh;

              // Log facial keypoints.
              for (let i = 0; i < keypoints.length; i++) {
                const [x, y, z] = keypoints[i];

                console.log(`Keypoint ${i}: [${x}, ${y}, ${z}]`);
              }
            }
          }
        }

        main();


推荐阅读