首页 > 解决方案 > firebase.firestore().collection().onSnapshot() 不起作用

问题描述

我使用 Firebase 托管并希望实现这个网络应用程序。

(我使用 Windows 10、适用于 Linux 的 Windows 子系统、Debian 10.3 和 Google Chrome 浏览器。)

  1. 按下按钮并录制音频(index.html + main.js)
  2. 将音频文件上传到 Cloud Storage (main.js)
  3. 使用 Cloud Speech to text API 转录音频文件(index.js:云函数)
  4. 在 Cloud Firestore 上编写转录(index.js:云函数)
  5. 在浏览器的控制台(main.js)上编写转录

我通过了 1~4,但在 5 中有一个困难。我确定我在这段代码中犯了一个错误。

var query = firebase.firestore().collection('transcription').orderBy('timestamp', 'text').limit(3);
query.onSnapshot(function(doc) {
    console.log("Current data: ", doc && doc.data());
});

但是,我无法解决这个问题,因为浏览器的控制台没有给我任何错误消息。

这是浏览器的控制台。 快照

你能给我什么建议吗?先感谢您。


这是 index.html

<!doctype html>
<html lang="ja">
    <head>
        <meta http-equiv="content-type" content="text/html; charset=utf-8" />

        <script src="https://www.WebRTC-Experiment.com/RecordRTC.js"></script>
    </head>

    <body>
        <div>
        <button id="start-recording" disabled>Start Recording</button>
        <button id="stop-recording" disabled>Stop Recording</button>
        </div>

        <!-- Import and configure the Firebase SDK -->
        <!-- These scripts are made available when the app is served or deployed on Firebase Hosting -->
        <!-- If you do not want to serve/host your project using Firebase Hosting see https://firebase.google.com/docs/web/setup -->
        <script src="/__/firebase/7.14.3/firebase-app.js"></script>
        <script src="/__/firebase/7.14.3/firebase-auth.js"></script>
        <script src="/__/firebase/7.14.3/firebase-storage.js"></script>
        <script src="/__/firebase/7.14.3/firebase-messaging.js"></script>
        <script src="/__/firebase/7.14.3/firebase-firestore.js"></script>
        <script src="/__/firebase/7.14.3/firebase-performance.js"></script>
        <script src="/__/firebase/7.14.3/firebase-functions.js"></script>
        <script src="/__/firebase/init.js"></script>

        <script src="scripts/main.js"></script>
    </body>
</html>

这是 main.js

const startRecording = document.getElementById('start-recording');
const stopRecording = document.getElementById('stop-recording');
let recordAudio;
startRecording.disabled = false;

// on start button handler
startRecording.onclick = function() {
    // recording started
    startRecording.disabled = true;

    // make use of HTML 5/WebRTC, JavaScript getUserMedia()
    // to capture the browser microphone stream
    navigator.getUserMedia({
        audio: true
    }, function(stream) {
            recordAudio = RecordRTC(stream, {
            type: 'audio',
            mimeType: 'audio/webm',
            sampleRate: 44100, // this sampleRate should be the same in your server code

            // MediaStreamRecorder, StereoAudioRecorder, WebAssemblyRecorder
            // CanvasRecorder, GifRecorder, WhammyRecorder
            recorderType: StereoAudioRecorder,

            numberOfAudioChannels: 1,

            // get intervals based blobs
            // value in milliseconds
            // as you might not want to make detect calls every seconds
            timeSlice: 4000,

            // only for audio track
            // audioBitsPerSecond: 128000,

            // used by StereoAudioRecorder
            // the range 22050 to 96000.
            // let us force 16khz recording:
            desiredSampRate: 16000
        });

        recordAudio.startRecording();
        stopRecording.disabled = false;
    }, function(error) {
        console.error(JSON.stringify(error));
    });
};

// on stop button handler
stopRecording.onclick = function() {
    // recording stopped
    startRecording.disabled = false;
    stopRecording.disabled = true;

    // stop audio recorder
    recordAudio.stopRecording(function() {
        var blob = recordAudio.getBlob()

        // Create a root reference
        var storageRef = firebase.storage().ref();

        // Create the file metadata
        var metadata = {
        contentType: 'audio/wav'
        };

        // Upload file and metadata to the object 'images/mountains.jpg'
        var uploadTask = storageRef.child('audio/speech3.wav').put(blob, metadata);

        // Listen for state changes, errors, and completion of the upload.
        uploadTask.on(firebase.storage.TaskEvent.STATE_CHANGED, // or 'state_changed'
        function(snapshot) {
            // Get task progress, including the number of bytes uploaded and the total number of bytes to be uploaded
            var progress = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
            console.log('Upload is ' + progress + '% done');
            switch (snapshot.state) {
            case firebase.storage.TaskState.PAUSED: // or 'paused'
                console.log('Upload is paused');
                break;
            case firebase.storage.TaskState.RUNNING: // or 'running'
                console.log('Upload is running');
                break;
            }
        }, function(error) {

        // A full list of error codes is available at
        // https://firebase.google.com/docs/storage/web/handle-errors
        switch (error.code) {
            case 'storage/unauthorized':
            // User doesn't have permission to access the object
            break;

            case 'storage/canceled':
            // User canceled the upload
            break;

            case 'storage/unknown':
            // Unknown error occurred, inspect error.serverResponse
            break;
        }
        }, function() {
        // Upload completed successfully, now we can get the download URL
        uploadTask.snapshot.ref.getDownloadURL().then(function(downloadURL) {
            console.log('File available at', downloadURL);
        });
        });                 
    });
    };

var query = firebase.firestore().collection('transcription').orderBy('timestamp', 'text').limit(3);
query.onSnapshot(function(doc) {
    console.log("Current data: ", doc && doc.data());
});

这是 index.js(云函数)

// Import the Firebase SDK for Google Cloud Functions.
const functions = require('firebase-functions');
const admin = require('firebase-admin');
admin.initializeApp();
const speech = require('@google-cloud/speech');

exports.transcribeAudio = functions.runWith({memory: '2GB'}).storage.object().onFinalize(
    async (object) => {
        // Creates a client
        const client = new speech.SpeechClient();

        const gcsUri = `gs://${object.bucket}/${object.name}`;
        const encoding = 'LINEAR16';
        const sampleRateHertz = 16000;
        const languageCode = 'ja-JP';

        const config = {
        encoding: encoding,
        sampleRateHertz: sampleRateHertz,
        languageCode: languageCode,
        enableAutomaticPunctuation: true
        };
        const audio = {
        uri: gcsUri,
        };

        const request = {
        config: config,
        audio: audio,
        };

        // Detects speech in the audio file. This creates a recognition job that you
        // can wait for now, or get its result later.
        const [operation] = await client.longRunningRecognize(request);
        // Get a Promise representation of the final result of the job
        const [response] = await operation.promise();
        const transcription = response.results
            .map(result => result.alternatives[0].transcript)
            .join('\n');
        console.log(`Transcription: ${transcription}`);

        admin.firestore().collection('transcription').add({
            text: transcription,
            timestamp: admin.firestore.FieldValue.serverTimestamp()
        });

    });

标签: javascriptnode.jsfirebasegoogle-chromegoogle-cloud-firestore

解决方案


推荐阅读