Record the processed audio from a buffer and place it in an audio element

Problem description

I have a JavaScript file that fetches an audio file, puts it into a buffer, and then lets the buffered audio be manipulated (by changing the playback rate and the loop points).

I would like to record the resulting audio, manipulations included, and place it in an <audio> element. I am trying to do this by integrating the code given in the first answer to this question:

Recording the sound of a Web Audio API audio context

into my JavaScript file. However, when I press the Play button (which should also start the recording), I get the following errors:

GET http://localhost:8000/[object%20AudioBufferSourceNode] [HTTP/1.0 404 File not found 1ms] 

HTTP load failed with status 404. Load of media resource http://localhost:8000/[object%20AudioBufferSourceNode] failed. index.html

Uncaught (in promise) DOMException: The media resource indicated by the src attribute or assigned media provider object was not suitable.
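In short, the flow I am aiming for is: create a MediaStreamAudioDestinationNode, record its stream with a MediaRecorder, and put the recorded blob into the <audio> element, roughly like this (a simplified sketch of my code below, not the exact code):

// Simplified sketch of the intended recording flow
// (uses the same <audio id="recording"> element as in the HTML below)
const audioCtx = new AudioContext();
const recordingstream = audioCtx.createMediaStreamDestination();
const recorder = new MediaRecorder(recordingstream.stream);

// ...connect the nodes to be captured to recordingstream,
// start playback, change the playback rate and loop points...
recorder.start();

recorder.addEventListener('dataavailable', function(e) {
  // e.data is a Blob containing the captured audio
  document.querySelector('#recording').src = URL.createObjectURL(e.data);
});
recorder.stop();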

I believe I have not connected the MediaRecorder correctly. Can anyone help?

Thanks,

My HTML is:

<!DOCTYPE html>
<html>
  <head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
    <meta name="viewport" content="width=device-width">

    <title>decodeAudioData example</title>

    <link rel="stylesheet" href="">
    <!--[if lt IE 9]>
      <script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script>
    <![endif]-->
  </head>

  <body>
    <h1>decodeAudioData example</h1>

    <button class="play">Play</button>
    <button class="stop">Stop</button>

    <h2>Set playback rate</h2>
    <input class="playback-rate-control" type="range" min="0.25" max="3" step="0.05" value="1">
    <span class="playback-rate-value">1.0</span>

    <h2>Set loop start and loop end</h2>
    <input class="loopstart-control" type="range" min="0" max="20" step="1" value="0">
    <span class="loopstart-value">0</span>

    <input class="loopend-control" type="range" min="0" max="20" step="1" value="0">
    <span class="loopend-value">0</span>

    <br>
    <br>

    <!--Audio recording destination element-->
    <audio id='recording' controls='true'></audio>

  </body>
</html>

My JavaScript is:

let audioCtx;
let source;
let songLength;

const pre = document.querySelector('pre');
const myScript = document.querySelector('script');
const play = document.querySelector('.play');
const stop = document.querySelector('.stop');

const playbackControl = document.querySelector('.playback-rate-control');
const playbackValue = document.querySelector('.playback-rate-value');
playbackControl.setAttribute('disabled', 'disabled');

const loopstartControl = document.querySelector('.loopstart-control');
const loopstartValue = document.querySelector('.loopstart-value');
loopstartControl.setAttribute('disabled', 'disabled');

const loopendControl = document.querySelector('.loopend-control');
const loopendValue = document.querySelector('.loopend-value');
loopendControl.setAttribute('disabled', 'disabled');

// Recording variables

var recorder=false;
var recordingstream=false;

// use XHR to load an audio track, and
// decodeAudioData to decode it and stick it in a buffer.
// Then we put the buffer into the source

function getData() {
  /*if(window.webkitAudioContext) {
    audioCtx = new window.webkitAudioContext();
  } else {
    audioCtx = new window.AudioContext();
  }*/
  audioCtx = new window.AudioContext();

  source = audioCtx.createBufferSource();
  request = new XMLHttpRequest();

  request.open('GET', 'precky.mp3', true);

  request.responseType = 'arraybuffer';


  request.onload = function() {
    let audioData = request.response;

    audioCtx.decodeAudioData(audioData, function(buffer) {
        myBuffer = buffer;
        songLength = buffer.duration;
        source.buffer = myBuffer;
        source.playbackRate.value = playbackControl.value;
        source.connect(audioCtx.destination);
        source.loop = true;

        loopstartControl.setAttribute('max', Math.floor(songLength));
        loopendControl.setAttribute('max', Math.floor(songLength));
      },

      function(e){"Error with decoding audio data" + e.error});

  }

  request.send();
}


// Create a MediaStream destination in the audio context and start
// recording its stream with a MediaRecorder
function startrecording(){
    recordingstream=audioCtx.createMediaStreamDestination();
    recorder=new MediaRecorder(recordingstream.stream);
    recorder.start();
}

function glue(){
    let a=new Audio(source);
    let mediasource=audioCtx.createMediaElementSource(a);
    mediasource.connect(recordingstream);//connects also to MediaRecorder
    a.play();
} 

// Stop the recorder and hand the captured blob to the <audio> element
function stoprecording(){
  recorder.addEventListener('dataavailable',function(e){
   document.querySelector('#recording').src=URL.createObjectURL(e.data);
   recorder=false;
   recordingstream=false;
  });
  recorder.stop();
}

// wire up buttons to stop and play audio, and range slider control

play.onclick = function() {
  getData();
  source.start(0);
  play.setAttribute('disabled', 'disabled');
  playbackControl.removeAttribute('disabled');
  loopstartControl.removeAttribute('disabled');
  loopendControl.removeAttribute('disabled');
  startrecording();
  glue(source);

}

stop.onclick = function() {
  source.stop(0);
  play.removeAttribute('disabled');
  playbackControl.setAttribute('disabled', 'disabled');
  loopstartControl.setAttribute('disabled', 'disabled');
  loopendControl.setAttribute('disabled', 'disabled');
  stoprecording();

}


playbackControl.oninput = function() {
  source.playbackRate.value = playbackControl.value;
  playbackValue.innerHTML = playbackControl.value;
}

loopstartControl.oninput = function() {
  source.loopStart = loopstartControl.value;
  loopstartValue.innerHTML = loopstartControl.value;
}

loopendControl.oninput = function() {
  source.loopEnd = loopendControl.value;
  loopendValue.innerHTML = loopendControl.value;
}

The original decodeAudioData example (without my recording attempt) comes from the Mozilla documentation here: https://mdn.github.io/webaudio-examples/decode-audio-data/ by chrisdavidmills

Tags: audio, html5-audio, web-audio-api, mediastream, webkitaudiocontext

Solution
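
The 404 for http://localhost:8000/[object%20AudioBufferSourceNode] indicates that the AudioBufferSourceNode is being turned into a string and used as a media URL: new Audio(source) expects a URL, not an audio node, so the <audio> element created in glue() tries to load a file literally named "[object AudioBufferSourceNode]" and fails with the "media resource ... not suitable" exception. One way to wire the recording instead, sketched below with the variable names from the question (an illustrative sketch, not a tested drop-in), is to connect the existing buffer source directly to the MediaStreamAudioDestinationNode and drop glue() and the extra Audio element entirely:

// Sketch only: reuses audioCtx, source, recorder and recordingstream
// from the question's code.
function startrecording(){
    recordingstream = audioCtx.createMediaStreamDestination();
    // Feed the processed buffer source into the recording stream;
    // it stays connected to audioCtx.destination for normal playback.
    source.connect(recordingstream);
    recorder = new MediaRecorder(recordingstream.stream);
    recorder.start();
}

function stoprecording(){
  recorder.addEventListener('dataavailable', function(e){
    // e.data holds the recorded audio, including the playback-rate
    // and loop changes made while the source was playing
    document.querySelector('#recording').src = URL.createObjectURL(e.data);
    recorder = false;
    recordingstream = false;
  });
  recorder.stop();
}

With this wiring the play handler would call getData(), source.start(0) and startrecording(), and the glue(source) call would no longer be needed.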

