<button onclick="record()">開始錄音</button>
<button onclick="stopRecord()">結束錄音</button>
<!-- <button onclick="resetRecord()">重置錄音</button> -->
<audio class="audio-node" id="audio" autoplay></audio>
<script>
/**
 * Recoder captures microphone input with the Web Audio API, downsamples the
 * raw PCM to `sampleRate`, and packages it as a mono 16-bit WAV Blob
 * (exposed as `this.source`, with an object URL in `this.blobUrl`).
 */
class Recoder {
  constructor (sampleRate) {
    this.leftDataList = []   // Float32Array chunks captured from channel 0
    this.rightDataList = []  // unused while recording mono; kept for API parity
    this.mediaPlayer = null  // active MediaStream from getUserMedia
    this.audioContext = null
    this.source = null       // WAV Blob produced by stopRecord()
    this.sampleRate = sampleRate || 44100 // sample rate written into the WAV file
  }

  /**
   * Request microphone access and begin capturing.
   * @returns {Promise<void>} resolves once recording starts; rejects when the
   *   user denies access, no device exists, etc. (err.name identifies which).
   */
  startRecord () {
    // Return the getUserMedia chain directly instead of wrapping it in a
    // redundant `new Promise` (explicit-construction anti-pattern).
    return window.navigator.mediaDevices.getUserMedia({
      audio: {
        sampleRate: 8000,  // requested capture rate (the browser may ignore it)
        channelCount: 1,   // mono
        audioBitsPerSecond: 64,
        volume: 1.0,
        autoGainControl: true
      }
    }).then(mediaStream => {
      console.log(mediaStream, 'mediaStream')
      this.mediaPlayer = mediaStream
      this.beginRecord(mediaStream)
    }).catch(err => {
      // Permission denied, no microphone, hardware failure, ...
      console.error(err)
      throw err
    })
  }

  /**
   * Wire the MediaStream through a ScriptProcessorNode so onAudioProcess
   * receives the raw PCM buffers.
   */
  beginRecord (mediaStream) {
    const audioContext = new (window.AudioContext || window.webkitAudioContext)()
    const mediaNode = audioContext.createMediaStreamSource(mediaStream)
    const jsNode = this.createJSNode(audioContext)
    // The processor must be connected to a destination for onaudioprocess to
    // fire; since we never write to outputBuffer the speakers stay silent.
    jsNode.connect(audioContext.destination)
    jsNode.onaudioprocess = this.onAudioProcess.bind(this)
    mediaNode.connect(jsNode)
    this.audioContext = audioContext
  }

  /** Collect each captured mono buffer (cloned — the browser reuses it). */
  onAudioProcess (event) {
    const leftChannelData = event.inputBuffer.getChannelData(0)
    this.leftDataList.push(leftChannelData.slice(0))
  }

  /** Create a ScriptProcessorNode (falling back to the deprecated createJavaScriptNode). */
  createJSNode (audioContext) {
    const BUFFER_SIZE = 4096
    const INPUT_CHANNEL_COUNT = 1
    const OUTPUT_CHANNEL_COUNT = 1
    let creator = audioContext.createScriptProcessor || audioContext.createJavaScriptNode
    creator = creator.bind(audioContext)
    return creator(BUFFER_SIZE, INPUT_CHANNEL_COUNT, OUTPUT_CHANNEL_COUNT)
  }

  /**
   * Wrap the finished WAV ArrayBuffer in a Blob and remember both the Blob
   * and its object URL.
   * @param {ArrayBuffer} arrayBuffer complete WAV file bytes
   * @returns {string} blob URL suitable for an <audio> src
   */
  playRecord (arrayBuffer) {
    // BUG FIX: the buffer holds WAV data, so label it audio/wav (was audio/mp3).
    const blob = new Blob([arrayBuffer], { type: 'audio/wav' })
    const blobUrl = URL.createObjectURL(blob)
    this.source = blob
    this.blobUrl = blobUrl
    return blobUrl
  }

  /**
   * Stop recording: merge the captured chunks, downsample, build the WAV
   * file, release the microphone, and return the blob URL.
   */
  stopRecord () {
    const leftData = this.mergeArray(this.leftDataList)
    const allData = this.interSingleData(leftData)
    const wavBuffer = this.createWavFile(allData)
    const source = this.playRecord(wavBuffer)
    this.resetRecord()
    return source
  }

  /** Base64-encode an ArrayBuffer. */
  transformArrayBufferToBase64 (buffer) {
    let binary = ''
    const bytes = new Uint8Array(buffer)
    for (let i = 0, len = bytes.byteLength; i < len; i++) {
      binary += String.fromCharCode(bytes[i])
    }
    return window.btoa(binary)
  }

  /** Release the audio context and microphone tracks and clear the buffers. */
  resetRecord () {
    this.leftDataList = []
    this.rightDataList = []
    if (this.audioContext) {
      this.audioContext.close()
    }
    if (this.mediaPlayer) {
      this.mediaPlayer.getAudioTracks().forEach(track => {
        track.stop()
        this.mediaPlayer.removeTrack(track)
      })
    }
  }

  /**
   * Serialize Float32 samples (range [-1, 1]) into a mono 16-bit PCM WAV file.
   * @param {Float32Array} audioData downsampled samples
   * @returns {ArrayBuffer} complete WAV file bytes (44-byte header + data)
   */
  createWavFile (audioData) {
    const channelCount = 1
    const bytesPerSample = 2 // 16-bit PCM
    const WAV_HEAD_SIZE = 44
    const sampleRate = this.sampleRate
    const dataLength = audioData.length * bytesPerSample
    const buffer = new ArrayBuffer(dataLength + WAV_HEAD_SIZE)
    const view = new DataView(buffer)
    // ---- RIFF chunk descriptor ----
    this.writeUTFBytes(view, 0, 'RIFF')
    // BUG FIX: RIFF size = file size minus the 8-byte RIFF header
    // (36 remaining header bytes + data bytes); was `44 + sampleCount`.
    view.setUint32(4, 36 + dataLength, true)
    this.writeUTFBytes(view, 8, 'WAVE')
    // ---- fmt sub-chunk ----
    this.writeUTFBytes(view, 12, 'fmt ')
    view.setUint32(16, 16, true)            // fmt chunk length
    view.setUint16(20, 1, true)             // audio format: PCM
    view.setUint16(22, channelCount, true)  // mono
    view.setUint32(24, sampleRate, true)
    // byte rate = sampleRate * blockAlign
    view.setUint32(28, sampleRate * channelCount * bytesPerSample, true)
    // BUG FIX: block align is channelCount * bytesPerSample (2 for mono
    // 16-bit); it was hard-coded to 4, which describes a stereo stream.
    view.setUint16(32, channelCount * bytesPerSample, true)
    view.setUint16(34, 16, true)            // bits per sample
    // ---- data sub-chunk ----
    this.writeUTFBytes(view, 36, 'data')
    view.setUint32(40, dataLength, true)
    // Convert floats to signed 16-bit, clamping to avoid integer wrap-around
    // on samples that stray outside [-1, 1].
    let offset = WAV_HEAD_SIZE
    for (let i = 0; i < audioData.length; i++) {
      const s = Math.max(-1, Math.min(1, audioData[i]))
      view.setInt16(offset, s * 0x7FFF, true)
      offset += bytesPerSample
    }
    return buffer
  }

  /** Write an ASCII string into the DataView starting at the byte offset. */
  writeUTFBytes (view, offset, string) {
    for (let i = 0; i < string.length; i++) {
      view.setUint8(offset + i, string.charCodeAt(i))
    }
  }

  /**
   * Nearest-neighbour downsample from the context's capture rate to the
   * configured output rate.
   * @param {Float32Array} left mono samples at this.audioContext.sampleRate
   * @returns {Float32Array} samples at this.sampleRate
   */
  interSingleData (left) {
    const inputRate = this.audioContext.sampleRate
    const outputRate = this.sampleRate
    const step = inputRate / outputRate
    const outLength = Math.ceil(left.length * outputRate / inputRate)
    const result = new Float32Array(outLength)
    let pos = 0
    for (let i = 0; i < outLength; i++) {
      result[i] = left[Math.floor(pos)]
      pos += step
    }
    return result
  }

  /** Interleave left/right channels as L0 R0 L1 R1 ... (kept for stereo use). */
  interleaveLeftAndRight (left, right) {
    const data = new Float32Array(left.length + right.length)
    for (let i = 0; i < left.length; i++) {
      data[2 * i] = left[i]
      data[2 * i + 1] = right[i]
    }
    return data
  }

  /** Concatenate a list of Float32Array chunks into one contiguous array. */
  mergeArray (list) {
    // BUG FIX: guard the empty case (stop pressed before any buffer arrived —
    // `list[0].length` threw) and sum actual chunk lengths instead of
    // assuming every chunk matches the first one's size.
    let total = 0
    for (const chunk of list) {
      total += chunk.length
    }
    const data = new Float32Array(total)
    let offset = 0
    for (const chunk of list) {
      data.set(chunk, offset)
      offset += chunk.length
    }
    return data
  }

  /** Play a file picked in an <input type="file"> (expects `this` = the input element). */
  playMusic () {
    if (!this.value) {
      return
    }
    // A blob URL can be generated straight from the File object.
    document.querySelector('.audio-node').src = URL.createObjectURL(this.files[0])
  }

  /** Decode an encoded audio ArrayBuffer and play it through a fresh context. */
  play (arrayBuffer) {
    // BUG FIX: `this` here is the Recoder instance, so `this.AudioContext`
    // was always undefined and `new AudioContext()` threw. Resolve the
    // constructor from window (webkit prefix for older Safari).
    const Ctx = window.AudioContext || window.webkitAudioContext
    const audioContext = new Ctx()
    const audioNode = audioContext.createBufferSource()
    // Callback form of decodeAudioData keeps compatibility with older Safari,
    // which lacks the Promise-returning overload.
    audioContext.decodeAudioData(arrayBuffer, function (audioBuffer) {
      audioNode.buffer = audioBuffer
      audioNode.connect(audioContext.destination)
      audioNode.start(0) // play from the beginning
    })
  }
}

const recoder = new Recoder(8000)

function record () {
  recoder.startRecord()
}

function stopRecord () {
  recoder.stopRecord()
  const formData = new FormData()
  formData.append('audio', recoder.source)
  document.getElementById('audio').src = recoder.blobUrl
}
</script>
參考自掘金
1.網上不少都沒有停止功能,新增停止功能
2.這裏用的8000採樣率和單聲道,音頻體積約爲原來的 1/12 (我電腦設備的採樣率是44K)
3.詳細解釋請看 H5音頻分析