國內做語音識別領先的有科大訊飛和BAT這幾家公司。鑑於使用科大訊飛的接口需要付費,騰訊雲的語音識別申請了幾天也沒有通過,比較了一下阿里和百度的,個人覺得阿里雲的好用一些,這篇博客來講講怎麼將阿里雲的語音識別應用到項目中。
首先是一些相關鏈接:
阿里雲語音識別官網:https://help.aliyun.com/document_detail/30416.html
語音識別demo下載:http://download.taobaocdn.com/freedom/33762/compress/RealtimeDemo.zip?spm=a2c4g.11186623.2.6.5F8mxh&file=RealtimeDemo.zip
要實現的功能:前端瀏覽器錄製用戶說話的語音,然後轉換爲文字顯示在瀏覽器。例如回答問題後,回答的答案顯示在下方。
流程分析:實現這個功能的流程很簡單:瀏覽器收集到用戶的語音輸入流後發送給後臺,後臺再將數據發送到阿里雲的服務器端進行語音轉文字,完成後將返回的結果進行處理,再返回到前臺。
本例中主要分爲兩個模塊:(1)前臺獲得麥克風的權限進行錄音;(2)錄製完成後將數據發送到後臺進行語音轉文字的處理。
前臺的功能是,點擊錄音,獲取瀏覽器麥克風權限後,開始錄音。點擊轉換按鈕,中止錄音,將數據發送到後臺進行轉換,轉換後的結果顯示在下方的文本域中,同時出現audio元素標籤和文件下載連接,可回放錄音和保存文件到本地。
錄音相關的js文件來源其餘大神,本人將其代碼進行部分修改以知足需求。數據庫
JSP頁面的代碼:
<%@ page language="java" contentType="text/html; charset=utf-8" pageEncoding="utf-8"%>
<% String path = request.getContextPath(); String basePath = request.getScheme() + "://" + request.getServerName() + ":" + request.getServerPort() + path + "/"; %>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Insert title here</title>
</head>
<body>
<!-- Each question carries a hidden input holding the question text, so that
     question and answer are serialized and submitted together by save(). -->
<form id="questions">
<div><h1>回答問題</h1></div>
<input type="hidden" name="records[0].question" value="AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA">
<div><h3>問題一:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA</h3></div>
<div>
<!-- FIX: a <button> inside a <form> defaults to type="submit"; without an
     explicit type="button", clicking 錄音/轉換 would submit the form and
     reload the page, discarding the in-progress recording. -->
<button type="button" onclick="startRecording(this)" >錄音</button>
<button type="button" onclick="uploadAudio(this,1)" disabled>轉換</button>
<div id="recordingslist1"></div>
</div>
<textarea id="audioText1" name="records[0].answer" rows="3" cols="50" style="font-size:18px"></textarea>
<input type="hidden" name="records[1].question" value="BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB">
<div><h3>問題二:BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB</h3></div>
<div>
<button type="button" onclick="startRecording(this)" >錄音</button>
<button type="button" onclick="uploadAudio(this,2)" disabled>轉換</button>
<div id="recordingslist2"></div>
</div>
<textarea id="audioText2" name="records[1].answer" rows="3" cols="50" style="font-size:18px"></textarea>
<input type="hidden" name="records[2].question" value="CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC">
<div><h3>問題三:CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC</h3></div>
<div>
<button type="button" onclick="startRecording(this)" >錄音</button>
<button type="button" onclick="uploadAudio(this,3)" disabled>轉換</button>
<div id="recordingslist3"></div>
</div>
<textarea id="audioText3" name="records[2].answer" rows="3" cols="50" style="font-size:18px"></textarea>
<br>
<input type="button" onclick="save()" value="保存錄音"/>
</form>
<a href="<%=path %>/audio/getAllRecord">查看記錄詳情</a>
<!-- Manual upload form: posts an audio file straight to the recognition endpoint. -->
<form action="<%=path %>/audio/getaudio" method="post" enctype="multipart/form-data">
<h2>文件上傳</h2> 文件:<input type="file" name="audioData"/><br/><br/>
<input type="submit" value="上傳"/>
</form>
<script type="text/javascript" src="resources/js/HZRecorder.js"></script>
<script type="text/javascript" src="resources/js/jquery-1.11.1.js"></script>
<script>
// Serialize the whole question/answer form and persist it via AJAX.
function save() {
    $.ajax({
        type: "POST",
        dataType: "json",
        url: "<%=path %>/audio/saveRecord",
        data: $('#questions').serialize(),
        success: function (result) {
            if (result) {
                alert("添加成功");
            } else {
                alert("添加失敗");
            }
        },
        error: function () {
            alert("異常!");
        }
    });
}

// Shared recorder instance for whichever question is currently recording.
var recorder;

// Start recording: disable this button and enable its sibling 轉換 button,
// then ask HZRecorder for microphone access.
function startRecording(button) {
    button.disabled = true;
    button.nextElementSibling.disabled = false;
    HZRecorder.get(function (rec) {
        recorder = rec;
        recorder.start();
    });
}

// Play back the current recording.
// FIX: the page contains no <audio> element at load time, so the previous
// one-time `document.querySelector('audio')` at startup was always null;
// query lazily so the player appended by createDownloadLink() is found.
function playRecording() {
    recorder.play(document.querySelector('audio'));
}

// Stop recording, upload the audio for recognition (result lands in
// #audioText{num}), and append a playback/download link.
function uploadAudio(button, num) {
    button.disabled = true;
    button.previousElementSibling.disabled = false;
    recorder.stop();
    recorder.upload("<%=path %>/audio/getaudio", num);
    createDownloadLink(num);
}

// Append an <audio> player and a .wav download link under question `num`.
function createDownloadLink(num) {
    var blob = recorder.getBlob();
    var url = URL.createObjectURL(blob);
    var div = document.createElement('div');
    var au = document.createElement('audio');
    var hf = document.createElement('a');
    var record = "recordingslist" + num;
    au.controls = true;
    au.src = url;
    hf.href = url;
    hf.download = new Date().toISOString() + '.wav';
    hf.innerHTML = hf.download;
    div.appendChild(au);
    div.appendChild(hf);
    document.getElementById(record).appendChild(div);
}
</script>
</body>
</html>
引用的js文件 HZRecorder.js
(function (window) {
    // Compatibility shims for prefixed browser APIs.
    window.URL = window.URL || window.webkitURL;
    navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia
        || navigator.mozGetUserMedia || navigator.msGetUserMedia;

    /**
     * Records microphone input from a MediaStream and encodes it as mono WAV.
     * @param {MediaStream} stream  stream obtained from getUserMedia
     * @param {{sampleBits?: number, sampleRate?: number}} [config]
     *        sampleBits: 8 or 16 (default 16); sampleRate: output rate in Hz (default 16000)
     */
    var HZRecorder = function (stream, config) {
        config = config || {};
        config.sampleBits = config.sampleBits || 16;       // sample size: 8 or 16 bits
        config.sampleRate = config.sampleRate || (16000);  // output sample rate

        var context = new (window.webkitAudioContext || window.AudioContext)();
        var audioInput = context.createMediaStreamSource(stream);
        var createScript = context.createScriptProcessor || context.createJavaScriptNode;
        var recorder = createScript.apply(context, [4096, 1, 1]);

        var audioData = {
            size: 0,                               // total number of captured samples
            buffer: [],                            // captured Float32Array chunks
            inputSampleRate: context.sampleRate,   // rate the hardware delivers
            inputSampleBits: 16,                   // input sample size
            outputSampleRate: config.sampleRate,   // target output rate
            oututSampleBits: config.sampleBits,    // target sample size (original key name kept)
            // Accumulate one ScriptProcessor buffer of samples.
            input: function (data) {
                this.buffer.push(new Float32Array(data));
                this.size += data.length;
            },
            // Merge all chunks into one array, then downsample by decimation
            // (keep every `compression`-th sample).
            compress: function () {
                var data = new Float32Array(this.size);
                var offset = 0;
                for (var i = 0; i < this.buffer.length; i++) {
                    data.set(this.buffer[i], offset);
                    offset += this.buffer[i].length;
                }
                var compression = parseInt(this.inputSampleRate / this.outputSampleRate);
                var length = data.length / compression;
                var result = new Float32Array(length);
                var index = 0, j = 0;
                while (index < length) {
                    result[index] = data[j];
                    j += compression;
                    index++;
                }
                return result;
            },
            // Build a RIFF/WAVE container: 44-byte header + PCM payload.
            encodeWAV: function () {
                var sampleRate = Math.min(this.inputSampleRate, this.outputSampleRate);
                var sampleBits = Math.min(this.inputSampleBits, this.oututSampleBits);
                var bytes = this.compress();
                var dataLength = bytes.length * (sampleBits / 8);
                var buffer = new ArrayBuffer(44 + dataLength);
                var data = new DataView(buffer);
                var channelCount = 1; // mono
                var offset = 0;
                var writeString = function (str) {
                    for (var i = 0; i < str.length; i++) {
                        data.setUint8(offset + i, str.charCodeAt(i));
                    }
                };
                writeString('RIFF'); offset += 4;                            // RIFF chunk id
                data.setUint32(offset, 36 + dataLength, true); offset += 4;  // file size - 8
                writeString('WAVE'); offset += 4;                            // RIFF type
                writeString('fmt '); offset += 4;                            // fmt sub-chunk id
                data.setUint32(offset, 16, true); offset += 4;               // fmt chunk size (16 for PCM)
                data.setUint16(offset, 1, true); offset += 2;                // audio format: 1 = PCM
                data.setUint16(offset, channelCount, true); offset += 2;     // channel count
                data.setUint32(offset, sampleRate, true); offset += 4;       // sample rate
                data.setUint32(offset, channelCount * sampleRate * (sampleBits / 8), true); offset += 4; // byte rate
                data.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2;              // block align
                data.setUint16(offset, sampleBits, true); offset += 2;       // bits per sample
                writeString('data'); offset += 4;                            // data chunk id
                data.setUint32(offset, dataLength, true); offset += 4;       // payload byte count
                // Write samples: clamp to [-1, 1] and scale to integer PCM.
                if (sampleBits === 8) {
                    for (var i = 0; i < bytes.length; i++, offset++) {
                        var s = Math.max(-1, Math.min(1, bytes[i]));
                        var val = s < 0 ? s * 0x8000 : s * 0x7FFF;
                        val = parseInt(255 / (65535 / (val + 32768)));
                        // FIX: 8-bit WAV samples are unsigned (0..255); the
                        // original setInt8 only worked via modulo wraparound.
                        data.setUint8(offset, val);
                    }
                } else {
                    for (var i = 0; i < bytes.length; i++, offset += 2) {
                        var s = Math.max(-1, Math.min(1, bytes[i]));
                        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
                    }
                }
                return new Blob([data], { type: 'audio/wav' });
            }
        };

        // Start capturing: route the mic through the ScriptProcessor node.
        this.start = function () {
            audioInput.connect(recorder);
            recorder.connect(context.destination);
        };
        // Stop capturing.
        this.stop = function () {
            recorder.disconnect();
        };
        // Stop and return the recording as a WAV Blob.
        this.getBlob = function () {
            this.stop();
            return audioData.encodeWAV();
        };
        // Play the recording through the given <audio> element.
        this.play = function (audio) {
            audio.src = window.URL.createObjectURL(this.getBlob());
        };
        // POST the WAV blob to `url`; append the recognized text to #audioText{num}.
        this.upload = function (url, num) {
            var id = "audioText" + num;
            var fd = new FormData();
            fd.append("audioData", this.getBlob());
            var xhr = new XMLHttpRequest();
            xhr.open("POST", url);
            // Attach the handler before send() so a fast response cannot be missed.
            xhr.onreadystatechange = function () {
                if (xhr.readyState == 4 && xhr.status == 200) {
                    document.getElementById(id).value += xhr.responseText;
                }
            };
            xhr.send(fd);
        };
        // Audio capture callback: collect mono channel 0.
        recorder.onaudioprocess = function (e) {
            audioData.input(e.inputBuffer.getChannelData(0));
        };
    };

    // Alert the user and throw an object whose toString() is the message.
    HZRecorder.throwError = function (message) {
        alert(message);
        throw new function () { this.toString = function () { return message; }; };
    };
    // Whether this browser exposes any getUserMedia implementation.
    HZRecorder.canRecording = (navigator.getUserMedia != null);
    // Request microphone access and hand a ready HZRecorder to `callback`.
    HZRecorder.get = function (callback, config) {
        if (callback) {
            if (navigator.getUserMedia) {
                navigator.getUserMedia(
                    { audio: true }, // audio only
                    function (stream) {
                        var rec = new HZRecorder(stream, config);
                        callback(rec);
                    },
                    function (error) {
                        switch (error.code || error.name) {
                            case 'PERMISSION_DENIED':
                            case 'PermissionDeniedError':
                                HZRecorder.throwError('用戶拒絕提供信息。');
                                break;
                            case 'NOT_SUPPORTED_ERROR':
                            case 'NotSupportedError':
                                HZRecorder.throwError('瀏覽器不支持硬件設備。');
                                break;
                            case 'MANDATORY_UNSATISFIED_ERROR':
                            case 'MandatoryUnsatisfiedError':
                                HZRecorder.throwError('沒法發現指定的硬件設備。');
                                break;
                            default:
                                HZRecorder.throwError('沒法打開麥克風。異常信息:' + (error.name));
                                break;
                        }
                    });
            } else {
                // BUG FIX: was HZRecorder.throwErr (undefined), which raised a
                // TypeError instead of showing the intended message.
                HZRecorder.throwError('當前瀏覽器不支持錄音功能。');
                return;
            }
        }
    };
    window.HZRecorder = HZRecorder;
})(window);
頁面中也沒有多少要注意的問題。需要注意的是每個問題上方都有一個隱藏域,裏面的值是問題的內容,這樣做是爲了將問題和答案一起存放在數據庫中,因爲form只能提交input中的內容,所以想出了這個辦法,不知道還有沒有其他方式。
錄音文件流以文件上傳的方式傳到後臺(這裏沒必要將文件流轉換成音頻文件,由於阿里雲的實時語音識別Demo中是將文件轉化爲InputStream,再進行轉文字,可直接得到MultipartFile的InputStream傳給語音轉換)
錄音文件時長超過13分鐘左右,在轉換的過程當中,通訊會被關閉(即錄音20分鐘,只會轉換10分鐘的內容,本人目前不清楚具體的緣由)暫時的解決辦法是將上傳的錄音文件分割成兩部分,分別執行轉換的方法。
RecordController.java
package cn.com.sysystem.controller;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.util.List;

import javax.annotation.Resource;
import javax.servlet.http.HttpServletRequest;

import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.multipart.MultipartFile;

import cn.com.sysystem.base.util.RealtimeAsr;
import cn.com.sysystem.entity.RecordEntity;
import cn.com.sysystem.model.RecordModel;
import cn.com.sysystem.service.RecordService;

/**
 * Receives recorded audio from the browser, forwards it to Aliyun realtime ASR
 * and returns the recognized text; also persists and lists question/answer records.
 */
@Controller
@RequestMapping("/audio")
public class RecordController {

    @Resource
    RecordService recordService;

    /**
     * Converts an uploaded audio file to text.
     * <p>
     * {@code produces = "application/json; charset=utf-8"} keeps Chinese text in the
     * response body from being garbled.
     *
     * @param audioData uploaded WAV/PCM file from the recorder or the upload form
     * @return recognized text, or "文件上傳失敗" when no file arrived
     */
    @ResponseBody
    @RequestMapping(value = "/getaudio", produces = "application/json; charset=utf-8")
    public String getaudio(MultipartFile audioData, HttpServletRequest request) throws Exception {
        if (audioData == null) {
            return "文件上傳失敗";
        }
        // StringBuilder: the buffer is method-local, so no synchronization needed;
        // pre-sizing avoids repeated growth for long transcripts.
        StringBuilder sb = new StringBuilder(2000);
        byte[] bytes = audioData.getBytes();
        if (bytes.length < 20000000) {
            sb.append(getText(audioData.getInputStream()));
        } else {
            // Recordings longer than ~13 minutes get cut off by the service, so
            // split large uploads in half and transcribe each part separately.
            // NOTE(review): the second half carries raw sample bytes with no WAV
            // header; the request uses pcm format so this appears to work — verify.
            byte[] tmp1 = new byte[bytes.length / 2];
            byte[] tmp2 = new byte[bytes.length - tmp1.length];
            System.arraycopy(bytes, 0, tmp1, 0, tmp1.length);
            System.arraycopy(bytes, tmp1.length, tmp2, 0, tmp2.length);
            sb.append(getText(new ByteArrayInputStream(tmp1)));
            sb.append(getText(new ByteArrayInputStream(tmp2)));
        }
        return sb.toString();
    }

    /**
     * Persists every question/answer pair submitted by the form.
     *
     * @return true when every row was saved according to the current check
     */
    @ResponseBody
    @RequestMapping(value = "/saveRecord")
    public boolean saveRecord(RecordModel recordlist) throws Exception {
        boolean flag = true;
        for (RecordEntity recordEntity : recordlist.getRecords()) {
            int row = recordService.saveRecord(recordEntity);
            // NOTE(review): most DAO insert calls return the affected-row count
            // (0 on failure, never negative); if so this should be `row <= 0` —
            // confirm against RecordService before changing.
            if (row < 0) {
                flag = false;
            }
        }
        return flag;
    }

    /** Loads all saved records into the request and renders the listing view. */
    @RequestMapping(value = "/getAllRecord")
    public String getAllRecord(HttpServletRequest request) throws Exception {
        List<RecordEntity> allRecord = recordService.getAllRecord();
        request.setAttribute("recordList", allRecord);
        return "showrecord";
    }

    /**
     * Sends an audio stream to Aliyun ASR and extracts the recognized text.
     * <p>
     * synchronized: RealtimeAsr accumulates results in a static list, so
     * concurrent conversions would interleave their sentences; serializing the
     * calls keeps each request's transcript separate.
     *
     * @param input audio stream (consumed, not closed here)
     * @return concatenated recognized text of all completed sentences
     */
    private synchronized String getText(InputStream input) {
        StringBuilder finaltext = new StringBuilder(2000);
        RealtimeAsr realtimeAsr = new RealtimeAsr();
        List<String> results = realtimeAsr.AliAudio2Text(input);
        // Keep only final results; status_code = 1 entries are intermediate
        // hypotheses for a sentence still being recognized.
        results.removeIf(p -> !p.contains("\"status_code\":0"));
        for (String str : results) {
            // Each entry is a JSON object whose last field is e.g. "text":"A B C D"};
            // strip the `"text":"` prefix and the trailing `"}`.
            // NOTE(review): this naive split breaks if the recognized text itself
            // contains a comma; a JSON parser would be safer.
            String[] split = str.split(",");
            String text = split[split.length - 1];
            finaltext.append(text.substring(8, text.length() - 2));
        }
        // RealtimeAsr.results is a shared static list — empty it for the next call.
        results.clear();
        return finaltext.toString();
    }
}
Controller中getText方法,建立RealtimeAsr類的對象,調用AliAudio2Text方法得到轉換結果,RealtimeAsr類以下:
package cn.com.sysystem.base.util;

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONPath;
import com.alibaba.idst.nls.realtime.NlsClient;
import com.alibaba.idst.nls.realtime.NlsFuture;
import com.alibaba.idst.nls.realtime.event.NlsEvent;
import com.alibaba.idst.nls.realtime.event.NlsListener;
import com.alibaba.idst.nls.realtime.protocol.NlsRequest;
import com.alibaba.idst.nls.realtime.protocol.NlsResponse;

/**
 * Realtime ASR client: streams PCM audio to the Aliyun NLS service and
 * collects the recognition responses via the NlsListener callbacks.
 */
public class RealtimeAsr implements NlsListener {

    protected NlsClient client = new NlsClient();
    protected static final String asrSC = "pcm"; // audio format sent to the service
    static Logger logger = LoggerFactory.getLogger(RealtimeAsr.class);

    public String filePath = "";
    //public String appKey = "nls-service-shurufa16khz"; // social chat domain
    public String appKey = "nls-service-multi-domain";   // short video, live, education, entertainment, literature, law, finance, ...
    //public String appKey = "nls-service-en";           // English

    // Aliyun AccessKeyID / AccessKeySecret — register your own account; not provided here.
    protected String ak_id = "";
    protected String ak_secret = "";
    protected String url = "https://nlsapi.aliyun.com/asr/custom/vocabs";

    // NOTE(review): static shared state — every instance appends recognition
    // results here, so concurrent conversions would interleave. The caller
    // (RecordController.getText) serializes access with `synchronized` and
    // clears this list after each use; kept static for API compatibility.
    public static List<String> results = new ArrayList<String>(5000);

    public RealtimeAsr() {
    }

    /** Closes the underlying NLS client. */
    public void shutDown() {
        logger.debug("close NLS client manually!");
        client.close();
        logger.debug("demo done");
    }

    /** Initializes the underlying NLS client; call before process(). */
    public void start() {
        logger.debug("init Nls client...");
        client.init();
    }

    /** Transcribes the file at {@link #filePath}. */
    public void process() {
        logger.debug("open audio file...");
        FileInputStream fis = null;
        try {
            File file = new File(filePath);
            fis = new FileInputStream(file);
        } catch (Exception e) {
            logger.error("fail to open file", e);
        }
        if (fis != null) {
            logger.debug("create NLS future");
            process(fis);
            logger.debug("calling NLS service end");
        }
    }

    /**
     * Streams the given audio to the service in 5000-byte chunks, then waits
     * (up to 100 s) for the recognition to finish. Results arrive through
     * {@link #onMessageReceived} and accumulate in {@link #results}.
     */
    public void process(InputStream ins) {
        try {
            NlsRequest req = buildRequest();
            NlsFuture future = client.createNlsFuture(req, this);
            logger.debug("call NLS service");
            byte[] b = new byte[5000];
            int len = 0;
            while ((len = ins.read(b)) > 0) {
                future.sendVoice(b, 0, len);
            }
            logger.debug("send finish signal!");
            future.sendFinishSignal();
            logger.debug("main thread enter waiting .");
            future.await(100000);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds the recognition request: streaming mode, 16 kHz PCM, plus a
     * custom vocabulary (created then updated via the vocab REST API) to bias
     * recognition toward domain terms.
     */
    protected NlsRequest buildRequest() {
        NlsRequest req = new NlsRequest();
        req.setAppkey(appKey);
        req.setFormat(asrSC);
        req.setResponseMode("streaming");
        req.setSampleRate(16000);
        String body = "{\n"
                + " \"global_weight\": 1,\n"
                + " \"words\": [\n"
                + " \"SpringMVC\",\n"
                + " \"Mybatis\",\n"
                + " \"Hibernate\"\n"
                + " ],\n"
                + " \"word_weights\": {\n"
                + " \"spring\": 2\n"
                + " }\n"
                + " }";
        // Create the vocabulary, then push the same body as an update.
        String result = HttpUtil.sendPost(url, body, ak_id, ak_secret);
        String vocabId = (String) JSONPath.read(result, "vocabulary_id");
        result = HttpUtil.sendPut(url + "/" + vocabId, body, ak_id, ak_secret);
        req.setVocabularyId(vocabId); // custom vocabulary ID for this request
        req.authorize(ak_id, ak_secret);
        return req;
    }

    /** Collects every recognition response (intermediate and final) into {@link #results}. */
    @Override
    public void onMessageReceived(NlsEvent e) {
        NlsResponse response = e.getResponse();
        response.getFinish(); // NOTE(review): return value unused; kept as in the original — presumably a harmless getter
        if (response.result != null) {
            String tmptext = response.getResult().toString();
            results.add(tmptext);
            if (response.getQuality() != null) {
                logger.info("Sentence {} is over. Get ended sentence recognize result: {}, voice quality is {}",
                        response.result.getSentence_id(), response.getResult(),
                        JSON.toJSONString(response.getQuality()));
            }
        } else {
            logger.info(JSON.toJSONString(response));
        }
    }

    @Override
    public void onOperationFailed(NlsEvent e) {
        logger.error("status code is {}, on operation failed: {}",
                e.getResponse().getStatusCode(), e.getErrorMessage());
    }

    @Override
    public void onChannelClosed(NlsEvent e) {
        logger.debug("on websocket closed.");
    }

    /**
     * Converts an audio stream to text: init client, stream audio, close client.
     *
     * FIX: the original allocated a second RealtimeAsr here and drove that one,
     * leaving this instance's NlsClient (created in the field initializer)
     * unused and never closed — a per-call resource leak. Drive this instance
     * directly instead; the accumulated results are identical.
     *
     * @param inputStream audio stream to transcribe
     * @return the shared results list (cleared by the caller after use)
     */
    public List<String> AliAudio2Text(InputStream inputStream) {
        start();
        process(inputStream);
        shutDown();
        return results;
    }
}
注意的地方
一、@RequestMapping(value = "/getaudio" ,produces = "application/json; charset=utf-8")
加上 produces = "application/json; charset=utf-8" 可以解決Controller在return中文時出現亂碼的問題。
二、StringBuffer sb = new StringBuffer(2000);
由於要常常拼接字符串,因此StringBuffer的效率會比String高些,另外還有一個小竅門,就是在new StringBuffer時指定大小,若不指定且內容較長時,會頻繁的擴容,影響性能(具體也不知道能提升多少,提升一點是一點吧,同時集合中的list和map也是同樣的道理)
三、synchronized
轉換的方法中加入synchronized關鍵字保證線程安全的目的是,當上一段錄音時長較長時,轉換須要必定的時間(20分鐘的音頻,轉換過程3分鐘左右),若當即開始第二段錄音,且時間較短,若不加鎖,第二段轉換的文本中顯示的是第一段的內容。
四、results.removeIf(p -> p.indexOf("\"status_code\":0") == -1);
這裏用到了Java8的Lambda表達式,不明白的同窗能夠自行了解一下,很好用。
五、
語音轉換收集到的信息以下:(例如說ABCD)
{"sentence_id":1,"begin_time":280,"current_time":1670,"end_time":-1,"status_code":1,"text":"A"}
{"sentence_id":1,"begin_time":280,"current_time":1670,"end_time":1793,"status_code":0,"text":"A B C D"}
其中status_code = 1 表示的是轉換的中間狀態,status_code = 0表示語音轉換完成。因此咱們要從集合中篩選出status_code = 0的全部字符串,並截取text的值。