Recording is something you usually see on mobile, but what if you need a hold-to-talk feature on the PC web as well? The project requirement: hold a button to talk, for no more than 60 seconds, then generate an audio file and upload it. I used recorder.js for this.
// compatibility shims
window.URL = window.URL || window.webkitURL
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia
let HZRecorder = function (stream, config) {
  config = config || {}
  config.sampleBits = config.sampleBits || 8 // sample size: 8 or 16 bits
  config.sampleRate = config.sampleRate || (44100 / 6) // sample rate (1/6 of 44100)
  let context = new (window.webkitAudioContext || window.AudioContext)()
  let audioInput = context.createMediaStreamSource(stream)
  let createScript = context.createScriptProcessor || context.createJavaScriptNode
  let recorder = createScript.apply(context, [4096, 1, 1])
  let audioData = {
    size: 0, // total number of recorded samples
    buffer: [], // recording buffer
    inputSampleRate: context.sampleRate, // input sample rate
    inputSampleBits: 16, // input sample size: 8 or 16 bits
    outputSampleRate: config.sampleRate, // output sample rate
    oututSampleBits: config.sampleBits, // output sample size: 8 or 16 bits
    input: function (data) {
      this.buffer.push(new Float32Array(data))
      this.size += data.length
    },
    compress: function () { // merge and downsample
      // merge all buffered chunks into one Float32Array
      let data = new Float32Array(this.size)
      let offset = 0
      for (let i = 0; i < this.buffer.length; i++) {
        data.set(this.buffer[i], offset)
        offset += this.buffer[i].length
      }
      // downsample by keeping every n-th sample
      let compression = parseInt(this.inputSampleRate / this.outputSampleRate)
      let length = data.length / compression
      let result = new Float32Array(length)
      let index = 0; let j = 0
      while (index < length) {
        result[index] = data[j]
        j += compression
        index++
      }
      return result
    },
    encodeWAV: function () {
      let sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate)
      let sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits)
      let bytes = this.compress()
      let dataLength = bytes.length * (sampleBits / 8)
      let buffer = new ArrayBuffer(44 + dataLength)
      let data = new DataView(buffer)
      let channelCount = 1 // mono
      let offset = 0
      let writeString = function (str) {
        for (let i = 0; i < str.length; i++) {
          data.setUint8(offset + i, str.charCodeAt(i))
        }
      }
      // RIFF chunk identifier
      writeString('RIFF'); offset += 4
      // number of bytes from the next address to the end of the file, i.e. file size - 8
      data.setUint32(offset, 36 + dataLength, true); offset += 4
      // WAV file marker
      writeString('WAVE'); offset += 4
      // format chunk identifier
      writeString('fmt '); offset += 4
      // format chunk length, normally 0x10 = 16
      data.setUint32(offset, 16, true); offset += 4
      // audio format (1 = PCM)
      data.setUint16(offset, 1, true); offset += 2
      // number of channels
      data.setUint16(offset, channelCount, true); offset += 2
      // sample rate: samples per second per channel
      data.setUint32(offset, sampleRate, true); offset += 4
      // byte rate: channels x sample rate x bits per sample / 8
      data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4
      // block align: bytes per sample frame, channels x bits per sample / 8
      data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2
      // bits per sample
      data.setUint16(offset, sampleBits, true); offset += 2
      // data chunk identifier
      writeString('data'); offset += 4
      // length of the sample data, i.e. total size - 44
      data.setUint32(offset, dataLength, true); offset += 4
      // write the sample data
      if (sampleBits === 8) {
        for (let i = 0; i < bytes.length; i++, offset++) {
          let s = Math.max(-1, Math.min(1, bytes[i]))
          let val = s < 0 ? s * 0x8000 : s * 0x7FFF
          val = parseInt(255 / (65535 / (val + 32768)))
          data.setInt8(offset, val)
        }
      } else {
        for (let i = 0; i < bytes.length; i++, offset += 2) {
          let s = Math.max(-1, Math.min(1, bytes[i]))
          data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
        }
      }
      // note: the blob actually contains PCM WAV data even though it is labelled audio/mp3
      return new Blob([data], { type: 'audio/mp3' })
    }
  }
  // start recording
  this.start = function () {
    audioInput.connect(recorder)
    recorder.connect(context.destination)
  }
  // stop recording
  this.stop = function () {
    recorder.disconnect()
  }
  // get the recorded audio as a Blob
  this.getBlob = function () {
    this.stop()
    return audioData.encodeWAV()
  }
  // playback
  this.play = function (audio) {
    let downRec = document.getElementById('downloadRec')
    downRec.href = window.URL.createObjectURL(this.getBlob())
    downRec.download = new Date().toLocaleString() + '.mp3'
    audio.src = window.URL.createObjectURL(this.getBlob())
  }
  // upload
  this.upload = function (url, callback) {
    let fd = new FormData()
    fd.append('audioData', this.getBlob())
    let xhr = new XMLHttpRequest()
    /* eslint-disable */
    if (callback) {
      xhr.upload.addEventListener('progress', function (e) {
        callback('uploading', e)
      }, false)
      xhr.addEventListener('load', function (e) {
        callback('ok', e)
      }, false)
      xhr.addEventListener('error', function (e) {
        callback('error', e)
      }, false)
      xhr.addEventListener('abort', function (e) {
        callback('cancel', e)
      }, false)
    }
    /* eslint-enable */
    xhr.open('POST', url)
    xhr.send(fd)
  }
  // audio capture: push every processed buffer into audioData
  recorder.onaudioprocess = function (e) {
    audioData.input(e.inputBuffer.getChannelData(0))
    // record(e.inputBuffer.getChannelData(0));
  }
}
// throw an error
HZRecorder.throwError = function (message) {
  alert(message)
  throw new function () { this.toString = function () { return message } }()
}
// whether recording is supported
HZRecorder.canRecording = (navigator.getUserMedia != null)
// get a recorder instance
HZRecorder.get = function (callback, config) {
  if (callback) {
    if (navigator.getUserMedia) {
      navigator.getUserMedia(
        { audio: true } // audio only
        , function (stream) {
          let rec = new HZRecorder(stream, config)
          callback(rec)
        }
        , function (error) {
          switch (error.code || error.name) {
            case 'PERMISSION_DENIED':
            case 'PermissionDeniedError':
              HZRecorder.throwError('The user refused to grant microphone access.')
              break
            case 'NOT_SUPPORTED_ERROR':
            case 'NotSupportedError':
              HZRecorder.throwError('The browser does not support the hardware device.')
              break
            case 'MANDATORY_UNSATISFIED_ERROR':
            case 'MandatoryUnsatisfiedError':
              HZRecorder.throwError('The specified hardware device could not be found.')
              break
            default:
              HZRecorder.throwError('Unable to open the microphone. Error: ' + (error.code || error.name))
              break
          }
        })
    } else {
      HZRecorder.throwError('The current browser does not support recording.'); return
    }
  }
}
export default HZRecorder
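Outside a framework, the module can be driven directly from DOM events. The sketch below is only for orientation: the element ids and the /api/upload-audio endpoint are placeholder assumptions, not part of recorder.js.

// minimal usage sketch; the element ids and '/api/upload-audio' are placeholders
import HZRecorder from '@/js/recorder/recorder.js'

let recorder = null
document.getElementById('btn-start').onclick = function () {
  HZRecorder.get(function (rec) {
    recorder = rec
    rec.start()
  })
}
document.getElementById('btn-stop').onclick = function () {
  if (recorder) {
    recorder.stop()
    recorder.upload('/api/upload-audio', function (state, e) {
      console.log('upload state:', state)
    })
  }
}

In the Vue component below, the same instance is obtained through recording.get() and driven by mousedown/mouseup instead.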
<template>
  <div class="wrap">
    <el-form v-model="form">
      <el-form-item>
        <input type="button" class="btn-record-voice" @mousedown.prevent="mouseStart" @mouseup.prevent="mouseEnd" v-model="form.time"/>
        <audio v-if="form.audioUrl" :src="form.audioUrl" controls="controls" class="content-audio" style="display: block;">Audio</audio>
      </el-form-item>
    </el-form>
  </div>
</template>
<script>
// import recorder.js
import recording from '@/js/recorder/recorder.js'
export default {
  data() {
    return {
      form: {
        time: 'Hold to talk (60s)',
        audioUrl: ''
      },
      num: 60, // remaining hold-to-talk time in seconds
      recorder: null,
      interval: '',
      audioFileList: [], // list of uploaded audio files
      startTime: '', // recording start time
      endTime: '', // recording end time
    }
  },
  mounted() {
    // ask for microphone permission up front
    this.$nextTick(() => {
      recording.get(rec => {
        this.recorder = rec
      })
    })
  },
  methods: {
    // clear the countdown timer
    clearTimer () {
      if (this.interval) {
        this.num = 60
        clearInterval(this.interval)
      }
    },
    // press and hold to talk
    mouseStart () {
      this.clearTimer()
      this.startTime = new Date().getTime()
      recording.get((rec) => {
        // on the first press the browser asks for microphone permission,
        // so only proceed once a recorder instance is actually available
        if (rec) {
          this.recorder = rec
          this.recorder.start()
          this.interval = setInterval(() => {
            if (this.num <= 0) {
              this.recorder.stop()
              this.num = 60
              this.clearTimer()
            } else {
              this.num--
              this.form.time = 'Release to finish (' + this.num + 's)'
            }
          }, 1000)
        }
      })
    },
    // upload the recording on release
    mouseEnd () {
      this.clearTimer()
      this.endTime = new Date().getTime()
      if (this.recorder) {
        this.recorder.stop()
        // reset the hold-to-talk state
        this.num = 60
        this.form.time = 'Hold to talk (' + this.num + 's)'
        // get the recording as a binary Blob
        let blob = this.recorder.getBlob()
        // wrap the Blob in a File so it can be sent as a file field
        let files = new File([blob], 'test.mp3', { type: 'audio/mp3', lastModified: Date.now() })
        let fd = new FormData()
        fd.append('file', files)
        fd.append('tenantId', 3) // extra parameter, fill in as needed
        // call the upload API and use the returned path as the audio URL
        this.uploadFile(fd)
      }
    }
  }
}
</script>
<style scoped>
</style>
The path returned by the upload interface is assigned to this.form.audioUrl. One thing to watch with the duration: because the countdown timer only ticks after a one-second delay, subtract one second from the measured duration. That covers the hold-to-talk feature; next is playback, which comes in two forms: files returned in mp3 format, played through an audio element, and files returned in amr format.
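The component calls this.uploadFile(fd) without showing it; below is a rough sketch of what such a method could look like. The this.$axios helper, the /api/file/upload endpoint and the res.data.url response field are assumptions and need to be adapted to the actual backend.

// hypothetical upload helper; the request client, endpoint and response shape are assumptions
uploadFile (fd) {
  this.$axios.post('/api/file/upload', fd, {
    headers: { 'Content-Type': 'multipart/form-data' }
  }).then(res => {
    // use the path returned by the API as the playable audio URL
    this.form.audioUrl = res.data.url
    // duration in seconds, minus one second to compensate for the timer's initial delay
    let duration = Math.round((this.endTime - this.startTime) / 1000) - 1
    console.log('audio duration:', duration)
  })
}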
通常來講,這種格式的播放不須要格外的轉換,接口通常會返回語音的url以及播放時長,經過audio標籤展現便可html5
Since the HTML5 audio tag cannot play amr files, they have to be handled separately. The steps are as follows.
Install the package: npm install benz-amr-recorder -S
With the package imported into the file, do the following:
// requires: import BenzAMRRecorder from 'benz-amr-recorder'
// playUrl is the button's click handler; url is the address of the amr file
playUrl (url) {
  let amr = new BenzAMRRecorder()
  amr.initWithUrl(url).then(function () {
    amr.play()
  })
}
If you need other operations on amr files, see https://www.npmjs.com/package/benz-amr-recorder
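For instance, a common follow-up is toggling playback when the same button is clicked again. A sketch along those lines is below; verify isPlaying(), stop() and onEnded() against the package documentation before relying on them.

// play/stop toggle sketch; confirm isPlaying(), stop() and onEnded() in the package docs
togglePlay (url) {
  if (this.amr && this.amr.isPlaying()) {
    this.amr.stop()
    return
  }
  this.amr = new BenzAMRRecorder()
  this.amr.initWithUrl(url).then(() => {
    this.amr.play()
  })
  this.amr.onEnded(() => {
    console.log('playback finished')
  })
}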
A lot of this I only learned while working on the project, so I am writing it down while it is still fresh. If anything above is wrong, corrections are welcome.