首页 > 解决方案 > 使用 react 访问移动网络摄像机

问题描述

我想访问我的移动设备摄像头,而不是使用笔记本电脑的网络摄像头。我尝试在 ref 属性中设置 IP 地址,但它没有显示任何内容;我也尝试将 localhost 地址设置为 ref,但仍然没有显示任何内容。此代码用于枪支检测,目前仅适用于网络摄像头模式,而我想使用移动 IP 摄像头代替网络摄像头。我将手机摄像头用作 IP 摄像头,并希望访问该特定摄像头,但每次显示的都是笔记本电脑的网络摄像头。我尝试将 this.videoRef.current.srcObject 的值设置为相机的 IP 地址,也尝试为其分配 localhost 地址,但仍然不起作用。

import React from 'react'
import * as tf from '@tensorflow/tfjs'
import './styles.css'

const LABELS =  '/model/labels.json'
const MODEL =  '/model/tensorflowjs_model.pb'
const WEIGHTS_URL =  '/model/weights_manifest.json'
// Module-level holder for the most recently detected label (written in
// renderPredictions, logged for debugging). `let` instead of `var` per
// modern JS practice; value starts as undefined until the first detection.
let obj;

// Wraps a loaded frozen model and exposes a single `detect(input)` method
// that runs object detection on an image/video element and resolves with
// plain JS detection objects of shape { bbox, class, score }.
const TFWrapper = model => {
  // For each candidate box, keep only its best-scoring class.
  // Returns [bestScores, bestClassIndexes], one entry per box.
  const calculateMaxScores = (scores, numBoxes, numClasses) => {
    const maxes = []
    const classes = []
    for (let box = 0; box < numBoxes; box++) {
      let bestScore = Number.MIN_VALUE
      let bestClass = -1
      for (let cls = 0; cls < numClasses; cls++) {
        const score = scores[box * numClasses + cls]
        if (score > bestScore) {
          bestScore = score
          bestClass = cls
        }
      }
      maxes[box] = bestScore
      classes[box] = bestClass
    }
    return [maxes, classes]
  }

  // Convert the normalized [minY, minX, maxY, maxX] boxes selected by
  // non-max suppression into pixel-space [x, y, width, height] objects.
  const buildDetectedObjects = (width, height, boxes, scores, indexes, classes) => {
    const objects = []
    for (let i = 0; i < indexes.length; i++) {
      const idx = indexes[i]
      const bbox = []
      for (let j = 0; j < 4; j++) {
        bbox[j] = boxes[idx * 4 + j]
      }
      const minY = bbox[0] * height
      const minX = bbox[1] * width
      const maxY = bbox[2] * height
      const maxX = bbox[3] * width
      bbox[0] = minX
      bbox[1] = minY
      bbox[2] = maxX - minX
      bbox[3] = maxY - minY
      objects.push({
        bbox,
        class: classes[idx],
        score: scores[idx]
      })
    }
    return objects
  }

  // Run one inference pass on `input` and resolve with filtered detections.
  const detect = input => {
    // Reshape the frame to a single-element batch for executeAsync.
    const batched = tf.tidy(() => tf.fromPixels(input).expandDims(0))

    const height = batched.shape[1]
    const width = batched.shape[2]

    return model.executeAsync(batched).then(result => {
      // Pull scores/boxes onto the CPU before releasing the WebGL tensors.
      const scores = result[0].dataSync()
      const boxes = result[1].dataSync()

      batched.dispose()
      tf.dispose(result)

      const [maxScores, classes] = calculateMaxScores(
        scores,
        result[0].shape[1],
        result[0].shape[2]
      )

      // Suppress heavily overlapping boxes, keeping at most 20 detections.
      const indexTensor = tf.tidy(() => {
        const boxTensor = tf.tensor2d(boxes, [
          result[1].shape[1],
          result[1].shape[3]
        ])
        return tf.image.nonMaxSuppression(
          boxTensor,
          maxScores,
          20,  // maxNumBoxes
          0.1, // iouThreshold (per tf.image.nonMaxSuppression signature)
          0.5  // scoreThreshold
        )
      })
      const indexes = indexTensor.dataSync()
      indexTensor.dispose()

      return buildDetectedObjects(width, height, boxes, maxScores, indexes, classes)
    })
  }

  return { detect }
}

class Detection extends React.Component {
  videoRef = React.createRef()
  canvasRef = React.createRef()
  gunDetected = false

  componentDidMount() {

    if (navigator.mediaDevices) {
      const webCamPromise = navigator.mediaDevices
        .getUserMedia({
          audio: false,
          video: {
            facingMode: 'user'
          }
        })
        .then(stream => {
          window.stream = stream
          this.videoRef.current.srcObject = stream
          return new Promise((resolve, _) => {
            this.videoRef.current.onloadedmetadata = () => {
              resolve()
            }
          })
        })
      const modelPromise = tf.loadFrozenModel(MODEL, WEIGHTS_URL)
      const labelsPromise = fetch(LABELS).then(data => data.json())
      Promise.all([modelPromise, labelsPromise, webCamPromise])
        .then(values => {
          const [model, labels] = values
          this.detectFrame(this.videoRef.current, model, labels)
        })
        .catch(error => {
          console.error(error)
        })
    }
  }

  setRef = webcam => {
    this.webcam = webcam;
  };

  detectFrame = (video, model, labels) => {
    TFWrapper(model)
      .detect(video)
      .then(predictions => {
        this.renderPredictions(predictions, labels)
        requestAnimationFrame(() => {
          this.detectFrame(video, model, labels)
        })
      })
  }

  renderPredictions = (predictions, labels) => {
    const ctx = this.canvasRef.current.getContext('2d')
    ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height)
    // Font options.
    const font = '20px sans-serif'
    ctx.font = font
    ctx.textBaseline = 'top'

    predictions.forEach(prediction => {
      const x = prediction.bbox[0]
      const y = prediction.bbox[1]
      const width = prediction.bbox[2]
      const height = prediction.bbox[3]

      const label = labels[parseInt(prediction.class)]


      obj=label;
      console.log(obj)        //returning GUN



      //Bounding Box Styling s
      ctx.strokeStyle = '#FF0000'
      ctx.lineWidth = 5
      ctx.strokeRect(x, y, width, height)

      //Label Background Styling
      ctx.fillStyle = '#FF0000'
      const textWidth = ctx.measureText(label).width
      const textHeight = parseInt(font, 0) 
      ctx.fillRect(x, y, textWidth, textHeight)
    })

    predictions.forEach(prediction => {
      const x = prediction.bbox[0]
      const y = prediction.bbox[1]
      const label = labels[parseInt(prediction.class)]
      // Draw the text last to ensure it's on top.
      ctx.fillStyle = '#000000'
      ctx.fillText(label, x, y)
    })

  }
  render() {
    return (

      <div>
        <video
          className="size"
          autoPlay
          playsInline
          muted
          ref={this.videoRef}
          width="800"
          height="500"
        />
        <canvas
          className="size"
          ref={this.canvasRef}
          width="800"
          height="500"
        />
      </div>

    )

  }
}

export default Detection;

标签: javascript, reactjs, webcam, ip-camera

解决方案


推荐阅读