需求是分析音頻,用圖形化展現。
思路:
一、回想當年使用的播放器,如XX靜聽 通常就2種圖形化展現 一個是條形柱 一個是波紋
二、分析數據轉化成圖像 這個是canvas經常使用的,以前作過的canvas分析圖像數據,作濾鏡作變形都是一把好手,這裏固然 圖形化也交給canvas了。
三、既然是分析音頻,那固然要將音頻轉化成數據,才能夠進行分析,而關於音頻的HTML API 就有 audio標籤 ,而麥克風訪問就有getUserMedia了。什麼?你問我咋知道這個api的?我只能告訴你 去查MDN 、W3C 這類網站...
首先咱們要獲得音頻數據,這裏咱們用了2個途徑獲得,一個是音頻流,一個是麥克風
一、瀏覽器 API 兼容處理
// Normalise vendor-prefixed constructors so the rest of the code can use
// the standard names regardless of browser.
window.AudioContext = (window.AudioContext || window.webkitAudioContext || window.mozAudioContext);
window.requestAnimationFrame = window.requestAnimationFrame || window.webkitRequestAnimationFrame;

// Feature detection: constructing an AudioContext throws on browsers
// without Web Audio support, so wrap it in try/catch.
try {
    audioCtx = new AudioContext();
    console.log('瀏覽器支持AudioContext');
} catch (e) {
    console.log('瀏覽器不支持AudioContext', e);
}
二、獲得麥克風數據
// Start listening on the microphone.
// Prefer the modern promise-based MediaDevices API; fall back to the
// deprecated callback-style navigator.getUserMedia for older browsers.
// Only audio is requested here — adding { video: true } would also grab
// the camera.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    console.log('瀏覽器支持getUserMedia');
    apiMedia.className = "checked";
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(onSccess) // NOTE: "onSccess" (sic) is the callback name defined elsewhere in this file
        .catch(onErr);
} else if (navigator.getUserMedia) {
    console.log('瀏覽器支持getUserMedia');
    apiMedia.className = "checked";
    navigator.getUserMedia({ audio: true }, onSccess, onErr);
} else {
    console.log('瀏覽器不支持getUserMedia');
    apiMedia.className = "false";
}
經過 onSccess 回調來獲得數據,介入分析
// getUserMedia success callback: wrap the microphone stream as a Web Audio
// source node, wire it through the processing graph, then start drawing.
// Graph: source -> analyser -> distortion -> biquadFilter -> convolver
//        -> gainNode -> destination
function onSccess(stream) {
    source = audioCtx.createMediaStreamSource(stream);

    // Connect each node in the chain to the next one.
    var chain = [source, analyser, distortion, biquadFilter, convolver, gainNode];
    for (var idx = 0; idx < chain.length - 1; idx++) {
        chain[idx].connect(chain[idx + 1]);
    }
    gainNode.connect(audioCtx.destination);

    visualize(); // analyse the audio and start drawing
}
一樣,文件音頻的處理也是同樣的
// File playback path: same idea as the microphone path, but the source is
// an <audio> element. NOTE(review): this chain skips biquadFilter/convolver,
// unlike the microphone chain — presumably intentional; confirm.
if (mediaSetting == "file") {
    loadFile();
    // The <audio> element becomes the graph's source node.
    source = audioCtx.createMediaElementSource(audio);
    // source -> analyser -> distortion -> gainNode -> destination
    var fileChain = [source, analyser, distortion, gainNode];
    for (var k = 0; k < fileChain.length - 1; k++) {
        fileChain[k].connect(fileChain[k + 1]);
    }
    gainNode.connect(audioCtx.destination);
}
visualize();// analyse the audio and start the visualisation loop
獲得了數據,接下來就交給 canvas 君吧
// "wave1" visualisation: plot the time-domain samples as a single green
// oscilloscope-style trace on a black background.
if (visualSetting == "wave1") {
    analyser.fftSize = 2048;
    // For time-domain data the buffer holds fftSize samples.
    var bufferLength = analyser.fftSize;
    console.log(bufferLength);
    var dataArray = new Uint8Array(bufferLength);

    canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);

    draw = function() {
        drawVisual = requestAnimationFrame(draw);
        analyser.getByteTimeDomainData(dataArray);

        // Repaint the background every frame.
        canvasCtx.fillStyle = '#000';
        canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);

        canvasCtx.lineWidth = 2;
        canvasCtx.strokeStyle = '#4aeb46';
        canvasCtx.beginPath();

        // Each sample occupies an equal horizontal slice of the canvas.
        var sliceStep = WIDTH * 1.0 / bufferLength;
        var xPos = 0;
        for (var idx = 0; idx < bufferLength; idx++) {
            // Samples are unsigned bytes centred on 128; map to canvas y.
            var sample = dataArray[idx] / 128.0;
            var yPos = sample * HEIGHT / 2;
            if (idx === 0) {
                canvasCtx.moveTo(xPos, yPos);
            } else {
                canvasCtx.lineTo(xPos, yPos);
            }
            xPos += sliceStep;
        }
        // Close the trace at the vertical midline on the right edge.
        canvasCtx.lineTo(canvas.width, canvas.height / 2);
        canvasCtx.stroke();
    };
    draw();
}
//circle else if (visualSetting == "circle") { // analyser.fftSize = 1024; // var bufferLength = analyser.fftSize; // var dataArray = new Uint8Array(bufferLength); analyser.fftSize = 128; var frequencyData = new Uint8Array(analyser.frequencyBinCount); var count = analyser.frequencyBinCount; var circles = []; var circleMaxWidth = (HEIGHT*0.66) >> 0; canvasCtx.clearRect(0, 0, WIDTH, HEIGHT); canvasCtx.lineWidth = 1; for(var i = 0; i < count; i++ ){ circles.push(i/count*circleMaxWidth) } draw = function() { canvasCtx.clearRect(0, 0, WIDTH, HEIGHT); analyser.getByteFrequencyData(frequencyData); drawVisual = requestAnimationFrame(draw); for(var i = 0; i < circles.length; i++) { var v = frequencyData[i] / 128.0; var y = v * HEIGHT / 2; var circle = circles[i]; canvasCtx.beginPath(); canvasCtx.arc(WIDTH/2,HEIGHT/2, y/2, Math.PI * 2, false); canvasCtx.stroke() } }; draw(); }
//柱形條 else if (visualSetting == "bar") { analyser.fftSize = 256; var bufferLength = analyser.frequencyBinCount; console.log(bufferLength); var dataArray = new Uint8Array(bufferLength); canvasCtx.clearRect(0, 0, WIDTH, HEIGHT); var gradient = canvasCtx.createLinearGradient(0, 0, 0, 200); gradient.addColorStop(1, '#0f0'); gradient.addColorStop(0.5, '#ff0'); gradient.addColorStop(0, '#f00'); var barWidth = 10; var gap = 2; //間距 var capHeight = 2;//頂部高度 var capStyle = '#fff'; var barNum = WIDTH / (barWidth + gap); //bar個數 var capYPositionArray = []; var step = Math.round(dataArray.length / barNum); draw = function() { drawVisual = requestAnimationFrame(draw); analyser.getByteFrequencyData(dataArray); canvasCtx.clearRect(0, 0, WIDTH, HEIGHT); for (var i = 0; i < barNum; i++) { var value = dataArray[i * step]; if (capYPositionArray.length < Math.round(barNum)) { capYPositionArray.push(value); }; canvasCtx.fillStyle = capStyle; //頂端帽子 if (value < capYPositionArray[i]) { canvasCtx.fillRect(i * 12, HEIGHT - (--capYPositionArray[i]), barWidth, capHeight); } else { canvasCtx.fillRect(i * 12, HEIGHT - value, barWidth, capHeight); capYPositionArray[i] = value; }; canvasCtx.fillStyle = gradient;//漸變 canvasCtx.fillRect(i * 12 , HEIGHT - value + capHeight, barWidth, HEIGHT-2); //繪製bar } }; draw(); }
完整代碼見 GitHub