OfflineAudioContext delay shift

Problem description

I'm using an OfflineAudioContext to download an input file with effects applied. The download works well and is very fast, but the problem I've run into is with gain: I use an analyser to indicate when the gain should be raised or lowered.

This works fine when playing the audio through a regular AudioContext, but the offline version produces a very noticeable shift in the timing of the gain changes: increases start late, and decreases start late, as if there were an overall delay.

Is there a way to counteract this shift? I'm perfectly happy for the rendering process to take longer.

var chunks = [];
var fileInput = document.getElementById("input");
var process = document.getElementById("process");

// Load and process the selected audio file on click
process.addEventListener(
  "click",
  function () {
    // Web Audio
    var audioCtx2 = new (window.AudioContext || window.webkitAudioContext)();
    // Reset buttons and log
    $("#log").empty();
    $("#download_link").addClass("d-none");
    $("#repeat_link").addClass("d-none");

    // Check for file
    if (fileInput.files[0] == undefined) {
      if ($("#upload_err").hasClass("d-none")) {
        $("#upload_err").removeClass("d-none");
      }
      return false;
    }

    var reader1 = new FileReader();

    reader1.onload = function (ev) {
      // console.log("Reader loaded.");

      var tempBuffer = audioCtx2.createBufferSource();

      // Decode audio
      audioCtx2.decodeAudioData(ev.target.result).then(function (buffer) {
        // console.log("Duration1 = " + buffer.duration);

        var offlineAudioCtx = new OfflineAudioContext({
          numberOfChannels: 2,
          length: Math.ceil(44100 * buffer.duration),
          sampleRate: 44100,
        });
        // console.log("test 1");
        // Audio Buffer Source
        var soundSource = offlineAudioCtx.createBufferSource();
        var analyser2d = offlineAudioCtx.createAnalyser();
        var dgate1 = offlineAudioCtx.createGain();
        var dhpf = offlineAudioCtx.createBiquadFilter();
        var dhum60 = offlineAudioCtx.createBiquadFilter();
        var dcompressor = offlineAudioCtx.createDynamicsCompressor();

        dhpf.type = "highpass";
        dhpf.Q.value = 0.5;

        dhum60.type = "notch";
        dhum60.Q.value = 130;

        dcompressor.knee.setValueAtTime(40, offlineAudioCtx.currentTime);
        dcompressor.attack.setValueAtTime(0.1, offlineAudioCtx.currentTime);
        dcompressor.release.setValueAtTime(0.2, offlineAudioCtx.currentTime);

        var reader2 = new FileReader();

        // console.log("Created Reader");

        reader2.onload = function (ev) {
          // console.log("Reading audio data to buffer...");
          $("#log").append("<p>Buffering...</p>");

          soundSource.buffer = buffer;

          let context = offlineAudioCtx;

          //Before Effects

          analyser2d = context.createAnalyser();

          analyser2d.fftSize = 2048;
          analyser2d.smoothingTimeConstant = 0.85;
          const sampleBuffer = new Float32Array(analyser2d.fftSize);

          function loop() {
            analyser2d.getFloatTimeDomainData(sampleBuffer);

            let sumOfSquares = 0;

            for (let i = 0; i < sampleBuffer.length; i++) {
              sumOfSquares += sampleBuffer[i] ** 2;
            }

            const avgPowerDecibels = Math.round(
              10 * Math.log10(sumOfSquares / sampleBuffer.length)
            );

            const gainset = avgPowerDecibels > -50 ? 1 : 0;

            //real-time effects choices start

            if (
              document.getElementById("gate").getAttribute("data-active") ===
              "true"
            ) {
              dgate1.gain.setTargetAtTime(
                gainset,
                offlineAudioCtx.currentTime,
                0.05
              );
            } else if (
              document.getElementById("gate").getAttribute("data-active") ===
              "false"
            ) {
              dgate1.gain.setTargetAtTime(
                1,
                offlineAudioCtx.currentTime,
                0.05
              );
            }

            if (
              document.getElementById("hpf").getAttribute("data-active") ===
              "true"
            ) {
              dhpf.frequency.value = 90;
            } else if (
              document.getElementById("hpf").getAttribute("data-active") ===
              "false"
            ) {
              dhpf.frequency.value = 0;
            }

            if (
              document.getElementById("hum").getAttribute("data-active") ===
              "true"
            ) {
              dhum60.frequency.value = 60;
            } else if (
              document.getElementById("hum").getAttribute("data-active") ===
              "false"
            ) {
              dhum60.frequency.value = 0;
            }

            if (
              document.getElementById("comp").getAttribute("data-active") ===
              "true"
            ) {
              dcompressor.threshold.setValueAtTime(
                -30,
                offlineAudioCtx.currentTime
              );

              dcompressor.ratio.setValueAtTime(
                3.5,
                offlineAudioCtx.currentTime
              );
            } else if (
              document.getElementById("comp").getAttribute("data-active") ===
              "false"
            ) {
              dcompressor.threshold.setValueAtTime(
                0,
                offlineAudioCtx.currentTime
              );

              dcompressor.ratio.setValueAtTime(1, offlineAudioCtx.currentTime);
            }

            // Display value.

            requestAnimationFrame(loop);
          }

          loop();
          soundSource
            .connect(analyser2d)
            .connect(dhpf)
            .connect(dhum60)
            .connect(dgate1)
            .connect(dcompressor);
          dcompressor.connect(offlineAudioCtx.destination);

          offlineAudioCtx
            .startRendering()
            .then(function (renderedBuffer) {
              // console.log('Rendering completed successfully.');
              $("#log").append("<p>Rendering new file...</p>");

              //var song = offlineAudioCtx.createBufferSource();

              console.log(
                "OfflineAudioContext.length = " + offlineAudioCtx.length
              );

              split(renderedBuffer, offlineAudioCtx.length);

              $("#log").append("<p>Finished!</p>");
            })
            .catch(function (err) {
              // console.log('Rendering failed: ' + err);
              $("#log").append("<p>Rendering failed.</p>");
            });

          soundSource.loop = false;
        };
        reader2.readAsArrayBuffer(fileInput.files[0]);
        soundSource.start(0);
      });
    };

    reader1.readAsArrayBuffer(fileInput.files[0]);
  },
  false
);

I've included what I believe are the relevant parts of the code. Let me know if more is needed. Thanks!

Tags: web-audio-api, latency

Solution

This doesn't work because an OfflineAudioContext runs faster than real time (sometimes hundreds of times faster). The loop that grabs the analyser data runs roughly every 16 ms. In a real-time system that is probably accurate enough, but an offline context can render much faster than real time, so by the time you grab the analyser data, far more audio time has already passed. (You can probably see this by printing the context's currentTime inside the loop; it will likely advance by much more than 16 ms per iteration.)
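
A quick way to see that mismatch, as a sketch against the question's loop() (the lastTime variable is added purely for the diagnostic):

// Illustration only: measure how much audio time elapses between
// requestAnimationFrame ticks while the offline context renders.
let lastTime = 0;
function loop() {
  const now = offlineAudioCtx.currentTime;
  // In a real-time context this prints roughly 0.016; in an offline
  // context it is usually much larger, and varies from tick to tick.
  console.log("audio time since last tick:", (now - lastTime).toFixed(3));
  lastTime = now;
  // ... analyser read and gain scheduling as in the question ...
  requestAnimationFrame(loop);
}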

The best approach is to use context.suspend(t) to pause the context at a known time so that you can grab the analyser data synchronously. Note that the suspend time is rounded, so it may not be exactly the time you want, but it may be close enough. Also note that, AFAIK, Firefox has not implemented this, and neither has Safari (but it will soon).

Here is a short snippet of how to use suspend. There's more than one way to do this, though. It's untested, but I think the general idea is right:

// Grab the data every 16 ms, roughly.  `suspend` rounds the time up to
// the nearest multiple of 128 frames (or 128/context.sampleRate time).
for (let t = 0; t < <length of buffer>; t += 0.016) {
  context.suspend(t)
    .then(() => {
      // Use analyser to grab the data and compute the values.
      // Use setValueAtTime and friends to adjust the gain and compressor
      // appropriately.  Use context.currentTime to know at what time
      // the context was suspended, and schedule the modifications to be
      // at least 128 samples in the future.  (I think.)
    })
    .then(() => context.resume());
}
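
To show how that might slot into the question's setup, here is a fuller sketch, equally untested, reusing the question's offlineAudioCtx, analyser2d, dgate1, and decoded buffer. Every suspend is scheduled before startRendering() is called, since an offline context cannot suspend at a time it has already rendered past, and the requestAnimationFrame loop disappears entirely:

// Sketch: schedule every suspend up front, then render.
var sampleBuffer = new Float32Array(analyser2d.fftSize);

for (let t = 0; t < buffer.duration; t += 0.016) {
  offlineAudioCtx.suspend(t).then(function () {
    // The context is now paused at (roughly) t, so this read is
    // synchronized with the audio timeline.
    analyser2d.getFloatTimeDomainData(sampleBuffer);

    var sumOfSquares = 0;
    for (var i = 0; i < sampleBuffer.length; i++) {
      sumOfSquares += sampleBuffer[i] ** 2;
    }
    var avgPowerDecibels = 10 * Math.log10(sumOfSquares / sampleBuffer.length);

    // Schedule the change at least one render quantum (128 frames)
    // past the exact suspend time, per the note above.
    var when = offlineAudioCtx.currentTime + 128 / offlineAudioCtx.sampleRate;
    dgate1.gain.setTargetAtTime(avgPowerDecibels > -50 ? 1 : 0, when, 0.05);

    return offlineAudioCtx.resume();
  });
}

offlineAudioCtx.startRendering().then(function (renderedBuffer) {
  // renderedBuffer now reflects gain changes applied at known offline times.
});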

An alternative is to create a real-time context and process things as you do now; when the buffer has finished playing, close the context. Of course, you would have to add something (a ScriptProcessorNode, an AudioWorkletNode, or a MediaRecorder) to capture the rendered data.
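
For the capture side of that alternative, here is a minimal sketch. It assumes a live AudioContext (audioCtx here) wired with the same node chain as the question, with dcompressor as the last effect and soundSource as the buffer source; the download-link handling is illustrative. Note that MediaRecorder hands back encoded audio (typically webm/opus), not raw PCM:

// Sketch: record the real-time graph's output with MediaRecorder.
var streamDest = audioCtx.createMediaStreamDestination();
dcompressor.connect(streamDest);

var recorder = new MediaRecorder(streamDest.stream);
var recordedChunks = [];
recorder.ondataavailable = function (e) {
  recordedChunks.push(e.data);
};
recorder.onstop = function () {
  var blob = new Blob(recordedChunks, { type: recorder.mimeType });
  // Hypothetical download handling; adapt to the page's own link logic.
  document.getElementById("download_link").href = URL.createObjectURL(blob);
};

recorder.start();
soundSource.onended = function () {
  recorder.stop(); // stop capturing when playback finishes
};
soundSource.start(0);

Since this runs in real time by definition, the capture takes exactly as long as the audio itself, which fits the question's willingness to let rendering take longer.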

If neither of those works for you, I'm not sure what other options there are.

