firebase.firestore().onSnapshot fires twice

Problem description

I am using Firebase Hosting and want to implement this web app. (I am on Windows 10, Windows Subsystem for Linux with Debian 10.3, and the Google Chrome browser.)

  1. Press a button and record audio (index.html + main.js)
  2. Upload the audio file to Cloud Storage (main.js)
  3. Transcribe the audio file with the Cloud Speech-to-Text API (index.js: Cloud Function)
  4. Write the transcription to Cloud Firestore (index.js: Cloud Function)
  5. Fetch the transcription from Firestore with .onSnapshot and put the data into a textarea (main.js)

Steps 1 through 4 work, but I am stuck on step 5: when I open the web app, it displays transcription data before I have recorded any audio.

That data was generated the last time I visited the web app (screenshot).

When I go through steps 1 to 5, I also get a second textarea containing the text I actually want (screenshot). Could you tell me how to avoid the first textarea? Thanks in advance.

Here is the browser console output (screenshot).

Here is main.js (client side):

const startRecording = document.getElementById('start-recording');
const stopRecording = document.getElementById('stop-recording');
let recordAudio;
startRecording.disabled = false;

// on start button handler
startRecording.onclick = function() {
    // recording started
    startRecording.disabled = true;

    // make use of HTML 5/WebRTC, JavaScript getUserMedia()
    // to capture the browser microphone stream
    navigator.getUserMedia({
        audio: true
    }, function(stream) {
        recordAudio = RecordRTC(stream, {
            type: 'audio',
            mimeType: 'audio/webm',
            sampleRate: 44100, // this sampleRate should be the same in your server code

            // MediaStreamRecorder, StereoAudioRecorder, WebAssemblyRecorder
            // CanvasRecorder, GifRecorder, WhammyRecorder
            recorderType: StereoAudioRecorder,

            numberOfAudioChannels: 1,

            // get intervals based blobs
            // value in milliseconds
            // as you might not want to make detect calls every seconds
            timeSlice: 4000,

            // only for audio track
            // audioBitsPerSecond: 128000,

            // used by StereoAudioRecorder
            // the range 22050 to 96000.
            // let us force 16khz recording:
            desiredSampRate: 16000
        });

        recordAudio.startRecording();
        stopRecording.disabled = false;
    }, function(error) {
        console.error(JSON.stringify(error));
    });
};

// on stop button handler
stopRecording.onclick = function() {
    // recording stopped
    startRecording.disabled = false;
    stopRecording.disabled = true;

    // stop audio recorder
    recordAudio.stopRecording(function() {
      var blob = recordAudio.getBlob()

      // Create a root reference
      var storageRef = firebase.storage().ref();

      // Create the file metadata
      var metadata = {
        contentType: 'audio/wav'
      };

      // Upload the file and metadata to the object 'audio/speech3.wav'
      var uploadTask = storageRef.child('audio/speech3.wav').put(blob, metadata);

      // Listen for state changes, errors, and completion of the upload.
      uploadTask.on(firebase.storage.TaskEvent.STATE_CHANGED, // or 'state_changed'
        function(snapshot) {
          // Get task progress, including the number of bytes uploaded and the total number of bytes to be uploaded
          var progress = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
          console.log('Upload is ' + progress + '% done');
          switch (snapshot.state) {
            case firebase.storage.TaskState.PAUSED: // or 'paused'
              console.log('Upload is paused');
              break;
            case firebase.storage.TaskState.RUNNING: // or 'running'
              console.log('Upload is running');
              break;
          }
        }, function(error) {

        // A full list of error codes is available at
        // https://firebase.google.com/docs/storage/web/handle-errors
        switch (error.code) {
          case 'storage/unauthorized':
            // User doesn't have permission to access the object
            break;

          case 'storage/canceled':
            // User canceled the upload
            break;

          case 'storage/unknown':
            // Unknown error occurred, inspect error.serverResponse
            break;
        }
      }, function() {
        // Upload completed successfully, now we can get the download URL
        uploadTask.snapshot.ref.getDownloadURL().then(function(downloadURL) {
          console.log('File available at', downloadURL);
        });
      });
    });
};

firebase.firestore().collection('script').orderBy("timestamp", "desc").limit(1)
.onSnapshot(function(querySnapshot) {
  querySnapshot.forEach(function(doc) {
    console.log(doc.data().text);

    const textarea = document.createElement('textarea');
    textarea.value = doc.data().text;
    textarea.rows = '5';
    textarea.cols = '40';
    const parent = document.getElementById('textbox');
    parent.appendChild(textarea);
  });
});

Here is index.js (Cloud Functions):

// Import the Firebase SDK for Google Cloud Functions.
const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();
const speech = require('@google-cloud/speech');

exports.transcribeAudio = functions.runWith({memory: '2GB'}).storage.object().onFinalize(
    async (object) => {
        // Creates a client
        const client = new speech.SpeechClient();

        const gcsUri = `gs://${object.bucket}/${object.name}`;
        const encoding = 'LINEAR16';
        const sampleRateHertz = 16000;
        const languageCode = 'en-US';

        const config = {
            encoding: encoding,
            sampleRateHertz: sampleRateHertz,
            languageCode: languageCode,
            enableAutomaticPunctuation: true
        };
        const audio = {
            uri: gcsUri,
        };

        const request = {
            config: config,
            audio: audio,
        };

        // Detects speech in the audio file. This creates a recognition job that you
        // can wait for now, or get its result later.
        const [operation] = await client.longRunningRecognize(request);
        // Get a Promise representation of the final result of the job
        const [response] = await operation.promise();
        const transcription = response.results
            .map(result => result.alternatives[0].transcript)
            .join('\n');
        console.log(`Transcription: ${transcription}`);

        // Return the promise so the function does not exit before the
        // Firestore write has completed.
        return admin.firestore().collection('script').add({
            text: transcription,
            timestamp: admin.firestore.FieldValue.serverTimestamp()
        });
    });

Here is index.html:

<!doctype html>
<html lang="ja">
   <head>
      <meta name="robots" content="noindex">

      <title>音読アプリ アドバンス</title>
      <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css" integrity="sha384-Vkoo8x4CGsO3+Hhxv8T/Q5PaXtkKtu6ug5TOeNV6gBiFeWPGFN9MuhOf23Q9Ifjh" crossorigin="anonymous">
      <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
      <link href="https://fonts.googleapis.com/css?family=M+PLUS+Rounded+1c&display=swap" rel="stylesheet">
      <meta http-equiv="content-type" content="text/html; charset=utf-8" />

      <script src="https://www.WebRTC-Experiment.com/RecordRTC.js"></script>
   </head>

   <body>
      <div>
        <button id="start-recording" disabled>Start Recording</button>
        <button id="stop-recording" disabled>Stop Recording</button>
      </div>
      <div id="textbox"></div>

      <script src="https://code.jquery.com/jquery-3.4.1.slim.min.js" integrity="sha384-J6qa4849blE2+poT4WnyKhv5vZF5SrPo0iEjwBvKU7imGFAV0wwj1yYfoRSJoZ+n" crossorigin="anonymous"></script>
      <script src="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js" integrity="sha384-Q6E9RHvbIyZFJoft+2mJbHaEWldlvI9IOYy5n3zV9zzTtmI3UksdQRVvoxMfooAo" crossorigin="anonymous"></script>
      <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js" integrity="sha384-wfSDF2E50Y2D1uUdj0O3uMBJnjuUD4Ih7YwaYd1iqfktj0Uod8GCExl3Og8ifwB6" crossorigin="anonymous"></script>

      <!-- Import and configure the Firebase SDK -->
      <!-- These scripts are made available when the app is served or deployed on Firebase Hosting -->
      <!-- If you do not want to serve/host your project using Firebase Hosting see https://firebase.google.com/docs/web/setup -->
      <script src="/__/firebase/7.14.3/firebase-app.js"></script>
      <script src="/__/firebase/7.14.3/firebase-auth.js"></script>
      <script src="/__/firebase/7.14.3/firebase-storage.js"></script>
      <script src="/__/firebase/7.14.3/firebase-messaging.js"></script>
      <script src="/__/firebase/7.14.3/firebase-firestore.js"></script>
      <script src="/__/firebase/7.14.3/firebase-performance.js"></script>
      <script src="/__/firebase/7.14.3/firebase-functions.js"></script>
      <script src="/__/firebase/init.js"></script>

      <script src="scripts/main.js"></script>
   </body>
</html>

Tags: javascript, node.js, firebase, google-cloud-firestore, google-cloud-functions

Solution


This happens because, as explained in the documentation, there is always an initial call when you attach a listener: onSnapshot fires immediately with the current results of the query.

In your case, this initial call returns the last document created in your collection, because your query is defined with orderBy("timestamp", "desc").limit(1).
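
For illustration, here is a minimal sketch of that behavior (it simply reuses the 'script' collection and the query from your code). On the very first invocation, docChanges() reports the pre-existing document with a change type of 'added', as if it had just been written:

  // Minimal sketch of the initial call, reusing the 'script' collection
  // from the question. The listener fires once immediately, and
  // docChanges() reports the pre-existing document as 'added'.
  firebase
    .firestore()
    .collection('script')
    .orderBy('timestamp', 'desc')
    .limit(1)
    .onSnapshot(function (querySnapshot) {
      querySnapshot.docChanges().forEach(function (change) {
        // On the first invocation this logs "added <old transcription>"
        // before any new recording has been made.
        console.log(change.type, change.doc.data().text);
      });
    });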

You can maintain a counter to tell the initial call apart from subsequent ones. Roughly as follows:

  let nbrCalls = 0;
  firebase
    .firestore()
    .collection('script')
    .orderBy('timestamp', 'desc')
    .limit(1)
    .onSnapshot(function (querySnapshot) {
      nbrCalls++;
      if (nbrCalls > 1) {
        querySnapshot.forEach(function (doc) {
          console.log(doc.data().text);
        });
      }
    });
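
Inside the if block you would then re-create the textarea as in your original main.js. Alternatively, here is a sketch of another common pattern (my own suggestion, not part of the original answer): capture a timestamp at page load and filter the query so that only documents written afterwards reach the listener. The initial snapshot is then simply empty.

  // Alternative sketch (not from the original answer): listen only for
  // documents written after the page was loaded, by filtering on the
  // 'timestamp' field that the Cloud Function already sets.
  const pageLoadTime = firebase.firestore.Timestamp.now();
  firebase
    .firestore()
    .collection('script')
    .where('timestamp', '>', pageLoadTime)
    .orderBy('timestamp', 'desc')
    .limit(1)
    .onSnapshot(function (querySnapshot) {
      querySnapshot.forEach(function (doc) {
        // Only transcriptions created after page load arrive here.
        console.log(doc.data().text);
      });
    });

Note that combining a range filter and an orderBy on the same field ('timestamp') is allowed by Firestore, and that clock skew between the client (Timestamp.now()) and the server (serverTimestamp()) can matter at the margins.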
