Learning HTML5 Audio Recording

I have played with WebRTC before; with HTML5 it is easy to build voice and video chat, and there are plenty of examples online. Recently I wanted to find out whether HTML5 can record audio, and which APIs make that possible. Here are my study notes.

Browsers

To capture audio and video you need getUserMedia. On the desktop it is supported by Chrome, Firefox, Opera, and Edge. On mobile it is supported by Chrome, Firefox, and Opera, but only on Android, not on iOS. Microsoft seems to have abandoned IE and is fully backing Edge. Apple's HTML5 support has never been great; a while ago there was even news of a developer suing Apple over HTML5.

Getting the audio and video stream

There are many examples online; here is the simplest snippet, taken from Mozilla's documentation:

var p = navigator.mediaDevices.getUserMedia({ audio: true, video: true });

p.then(function(mediaStream) {
  var video = document.querySelector('video');
  video.src = window.URL.createObjectURL(mediaStream);
  video.onloadedmetadata = function(e) {
    // Do something with the video here.
    video.play();
  };
});

p.catch(function(err) { console.log(err.name); }); // always check for errors at the end.
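Note that window.URL.createObjectURL(mediaStream) has since been deprecated for MediaStream objects; in current browsers you would assign the stream directly with video.srcObject = mediaStream.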

Recording audio

Mozilla's HTML5 documentation describes MediaRecorder. This API is simple and convenient, but it is relatively new and browser support is limited.

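Where MediaRecorder is available, a minimal audio-only sketch might look like this (availability and the produced codec vary by browser):

navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
  var recorder = new MediaRecorder(stream);
  var chunks = [];

  recorder.ondataavailable = function(e) {
    chunks.push(e.data); // collect encoded audio chunks as they arrive
  };

  recorder.onstop = function() {
    // Combine the chunks into a single blob; the codec depends on the browser
    var blob = new Blob(chunks, { type: recorder.mimeType });
    console.log('Recorded ' + blob.size + ' bytes of audio');
  };

  recorder.start();
  setTimeout(function() { recorder.stop(); }, 5000); // record for five seconds
});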

There is an alternative, though: using AudioNodes. The basic steps are as follows:

1. Get the audio and video stream with getUserMedia.

2. Create a MediaStreamAudioSourceNode with createMediaStreamSource:

// Create an AudioContext to work with (missing from the original snippet)
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();

if (navigator.getUserMedia) {
   console.log('getUserMedia supported.');
   navigator.getUserMedia(
      // constraints: audio and video for this app
      {
         audio: true,
         video: true
      },

      // Success callback
      function(stream) {
         video.src = (window.URL && window.URL.createObjectURL(stream)) || stream;
         video.onloadedmetadata = function(e) {
            video.play();
            video.muted = true; // mute local playback to avoid feedback
         };

         // Create a MediaStreamAudioSourceNode
         // Feed the HTMLMediaElement into it
         var source = audioCtx.createMediaStreamSource(stream);
      },

      // Error callback
      function(err) {
         console.log('The following gUM error occurred: ' + err);
      }
   );
} else {
   console.log('getUserMedia not supported on your browser!');
}

3. Connect the AudioNodes. Create a ScriptProcessorNode and grab the audio data in its onaudioprocess handler:

var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);

scriptNode.onaudioprocess = function(audioProcessingEvent) {
  // The input buffer holds the live audio coming from the microphone stream
  var inputBuffer = audioProcessingEvent.inputBuffer;

  // Loop through the input channels (in this case there is only one)
  for (var channel = 0; channel < inputBuffer.numberOfChannels; channel++) {
    // inputData is a Float32Array of samples for this channel
    var inputData = inputBuffer.getChannelData(channel);
  }
};

source.connect(scriptNode);
scriptNode.connect(audioCtx.destination);

4. Send the blob data via XHR or WebSockets.
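As a rough sketch, assuming a hypothetical /upload endpoint on your own server, posting the encoded WAV blob with XHR could look like this:

var xhr = new XMLHttpRequest();
xhr.open('POST', '/upload', true); // hypothetical endpoint, adjust to your backend
xhr.setRequestHeader('Content-Type', 'audio/wav');
xhr.onload = function() {
  console.log('Upload finished with status ' + xhr.status);
};
xhr.send(audioBlob); // the blob produced by the WAV encoding step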

JavaScript libraries

The flow is that simple, but writing it all yourself is still fairly involved, so it helps to study other people's code. On GitHub you can find RecordRTC and Recorderjs. The former records both video and audio; if you only want to record audio, the latter is the simpler choice.

For RecordRTC there is a site where you can try out video and audio recording: online demo.

Now let's walk through the Recorderjs source.
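For context, typical usage of Recorderjs looks roughly like this (a sketch based on its README; option names may differ between forks):

var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var source = audioCtx.createMediaStreamSource(stream); // stream obtained from getUserMedia
var rec = new Recorder(source, { numChannels: 1 });

rec.record();            // start capturing
// ... some time later ...
rec.stop();              // stop capturing
rec.exportWAV(function(blob) {
  // blob is an audio/wav Blob, ready to save or upload
});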

Grabbing the audio buffers in onaudioprocess:

this.context = source.context;
this.node = (this.context.createScriptProcessor || this.context.createJavaScriptNode).call(
    this.context, this.config.bufferLen, this.config.numChannels, this.config.numChannels);

this.node.onaudioprocess = function (e) {
    if (!_this.recording) return;

    var buffer = [];
    for (var channel = 0; channel < _this.config.numChannels; channel++) {
        buffer.push(e.inputBuffer.getChannelData(channel));
    }
    _this.worker.postMessage({
        command: 'record',
        buffer: buffer
    });
};

source.connect(this.node);
this.node.connect(this.context.destination); // this should not be necessary

Accumulating the audio buffers in arrays (inside the worker):

function record(inputBuffer) {
    for (var channel = 0; channel < numChannels; channel++) {
        recBuffers[channel].push(inputBuffer[channel]);
    }
    recLength += inputBuffer[0].length;
}

Encoding to WAV format:

function encodeWAV(samples) {
    var buffer = new ArrayBuffer(44 + samples.length * 2);
    var view = new DataView(buffer);

    /* RIFF identifier */
    writeString(view, 0, 'RIFF');
    /* RIFF chunk length */
    view.setUint32(4, 36 + samples.length * 2, true);
    /* RIFF type */
    writeString(view, 8, 'WAVE');
    /* format chunk identifier */
    writeString(view, 12, 'fmt ');
    /* format chunk length */
    view.setUint32(16, 16, true);
    /* sample format (raw) */
    view.setUint16(20, 1, true);
    /* channel count */
    view.setUint16(22, numChannels, true);
    /* sample rate */
    view.setUint32(24, sampleRate, true);
    /* byte rate (sample rate * block align) */
    view.setUint32(28, sampleRate * 4, true);
    /* block align (channel count * bytes per sample) */
    view.setUint16(32, numChannels * 2, true);
    /* bits per sample */
    view.setUint16(34, 16, true);
    /* data chunk identifier */
    writeString(view, 36, 'data');
    /* data chunk length */
    view.setUint32(40, samples.length * 2, true);

    floatTo16BitPCM(view, 44, samples);

    return view;
}
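encodeWAV relies on two small helpers that are not shown above, writeString and floatTo16BitPCM. Paraphrased from the Recorderjs source, they look roughly like this:

function writeString(view, offset, string) {
    // Write an ASCII string into the DataView byte by byte
    for (var i = 0; i < string.length; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

function floatTo16BitPCM(output, offset, input) {
    for (var i = 0; i < input.length; i++, offset += 2) {
        // Clamp each float sample to [-1, 1] and scale it to a signed 16-bit integer
        var s = Math.max(-1, Math.min(1, input[i]));
        output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
}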

Merging all the buffers, encoding them as WAV, and finally converting the result into a blob:

function exportWAV(type) {
    var buffers = [];
    for (var channel = 0; channel < numChannels; channel++) {
        buffers.push(mergeBuffers(recBuffers[channel], recLength));
    }
    var interleaved = undefined;
    if (numChannels === 2) {
        interleaved = interleave(buffers[0], buffers[1]);
    } else {
        interleaved = buffers[0];
    }
    var dataview = encodeWAV(interleaved);
    var audioBlob = new Blob([dataview], { type: type });
}
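The mergeBuffers and interleave helpers used above are straightforward; again paraphrased from the source:

function mergeBuffers(recBuffers, recLength) {
    // Concatenate all per-callback Float32Arrays of one channel into a single array
    var result = new Float32Array(recLength);
    var offset = 0;
    for (var i = 0; i < recBuffers.length; i++) {
        result.set(recBuffers[i], offset);
        offset += recBuffers[i].length;
    }
    return result;
}

function interleave(inputL, inputR) {
    // Interleave left and right channels into a single array: L R L R ...
    var length = inputL.length + inputR.length;
    var result = new Float32Array(length);
    var index = 0, inputIndex = 0;
    while (index < length) {
        result[index++] = inputL[inputIndex];
        result[index++] = inputR[inputIndex];
        inputIndex++;
    }
    return result;
}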

With that, the recorded audio can be saved or sent.
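For instance, to let the user download the recording locally (a minimal sketch, reusing the audioBlob produced by exportWAV above):

var url = URL.createObjectURL(audioBlob);
var a = document.createElement('a');
a.href = url;
a.download = 'recording.wav';
a.click();
// Release the object URL once the download has started
setTimeout(function() { URL.revokeObjectURL(url); }, 1000);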
