First of all, thanks to several great authors:
http://blog.csdn.net/qq_15267341/article/details/52074225
http://blog.csdn.net/weixin_36303734/article/details/54898166
http://www.jianshu.com/p/5459aa19456a
There are plenty of blog tutorials online about integrating the iFlytek SDK into Unity3D; this post pulls them together into one summary, written specifically for absolute beginners.
Step 1: Download the speech SDK from the iFlytek website
Refer to this post: http://blog.csdn.net/qq_15267341/article/details/52074225
Obtain the SDK:
Step 2: Build the Android jar
Refer to this post: http://blog.csdn.net/weixin_36303734/article/details/54898166
The Android API level used here is 24; adjust it as needed.
The workflow is as follows:
1. Create a new project in Android Studio with an Empty Activity; this spares some manual AndroidManifest setup. You can also delete the layout files in the layout folder, since they are not used once imported into Unity.
2. File → New → New Module, create a new Android Library; the name is arbitrary, here speechrecognizer2.
3. Copy MSC.jar from the iFlytek SDK folder into the libs folder. Create a new folder named jniLibs under main and copy the .so files from the SDK into it, as shown:
4. Find classes.jar in the Unity installation directory and likewise copy it into the libs folder. Path:
\Unity\Editor\Data\PlaybackEngines\AndroidPlayer\Variations\mono\Release\Classes
5. Now associate the jars with the module: File → Project Structure, select the library created in step 2 on the left, click the + on the right, choose File dependency, and link the jars added in steps 3 and 4 to the module, as shown:
6. Set the Android SDK and Java JDK locations in Android Studio: File → Project Structure → SDK Location.
7. With the environment set up, start writing the code.
The code in MainActivity is much the same as other examples online, so here is the source directly:
package com.ssm.ssm.speechrecognizer;
import android.os.Bundle;
import android.util.Log;
import android.widget.Toast;
import com.iflytek.cloud.InitListener;
import com.iflytek.cloud.RecognizerListener;
import com.iflytek.cloud.RecognizerResult;
import com.iflytek.cloud.SpeechConstant;
import com.iflytek.cloud.SpeechError;
import com.iflytek.cloud.SpeechSynthesizer;
import com.iflytek.cloud.SpeechUtility;
import com.iflytek.cloud.SpeechRecognizer;
import com.iflytek.cloud.SynthesizerListener;
import com.unity3d.player.UnityPlayer;
import com.unity3d.player.UnityPlayerActivity;
import org.json.JSONArray;
import org.json.JSONObject;
import org.json.JSONTokener;
public class MainActivity extends UnityPlayerActivity {

    public SpeechRecognizer speechRecognizer;
    public SpeechSynthesizer speechSynthesizer;

    private String ttsSpeakerName = "yefang";
    private String ttsSpeakerPitch = "50";

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        // Note: the appid here must be your own SDK appid
        SpeechUtility.createUtility(getApplicationContext(), "appid=59dccd45");
        initRecognizer();
    }

    // Initialization
    private void initRecognizer() {
        speechRecognizer = SpeechRecognizer.createRecognizer(getApplicationContext(), mInitListener);
        speechSynthesizer = SpeechSynthesizer.createSynthesizer(getApplicationContext(), mInitListener);
    }

    public InitListener mInitListener = new InitListener() {
        @Override
        public void onInit(int i) {
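            // Assumption: per the iFlytek InitListener contract, i == ErrorCode.SUCCESS (0)
            // indicates success; a stricter implementation would check i before reporting success.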
UnityPlayer.UnitySendMessage("Manager", "Result", "init success!");
}
};
public void setTTSSpeaker(String targetName) {
ttsSpeakerName = targetName;
}
public void setTTSPitch(String targetPitch) {
ttsSpeakerPitch = targetPitch;
}
public void doTTS(String ttsStr){
UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "doTTS");
//設置發音人
speechSynthesizer.setParameter(SpeechConstant.VOICE_NAME,ttsSpeakerName);
//設置音調
speechSynthesizer.setParameter(SpeechConstant.PITCH,ttsSpeakerPitch);
//設置音量
speechSynthesizer.setParameter(SpeechConstant.VOLUME,"50");
int code = speechSynthesizer.startSpeaking(ttsStr, mTTSListener);
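        // A non-zero return code indicates startSpeaking failed (0 means success in the
        // iFlytek SDK); checking it and reporting the error back to Unity is advisable.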
    }

    private SynthesizerListener mTTSListener = new SynthesizerListener() {
        @Override
        public void onSpeakBegin() {
            // UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "onSpeakBegin ");
        }

        @Override
        public void onBufferProgress(int i, int i1, int i2, String s) {
            // UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "onBufferProgress ");
        }

        @Override
        public void onSpeakPaused() {
            // UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "onSpeakPaused ");
        }

        @Override
        public void onSpeakResumed() {
            // UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "onSpeakResumed ");
        }

        @Override
        public void onSpeakProgress(int i, int i1, int i2) {
            // UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "onSpeakProgress ");
        }

        @Override
        public void onCompleted(SpeechError speechError) {
            // UnityPlayer.UnitySendMessage("Manager", "IsSpeaking", "onCompleted ");
        }

        @Override
        public void onEvent(int i, int i1, int i2, Bundle bundle) {
        }
    };
    // Start dictation
    public void startSpeechListener() {
        UnityPlayer.UnitySendMessage("Manager", "Result", "");
        speechRecognizer.setParameter(SpeechConstant.DOMAIN, "iat");
        speechRecognizer.setParameter(SpeechConstant.LANGUAGE, "zh_cn");
        speechRecognizer.setParameter(SpeechConstant.ACCENT, "mandarin");
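        // Assumption: the iat domain returns JSON results by default; if not, request
        // them explicitly so that printResult below can parse them:
        // speechRecognizer.setParameter(SpeechConstant.RESULT_TYPE, "json");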
        speechRecognizer.startListening(mRecognizerListener);
    }

    public RecognizerListener mRecognizerListener = new RecognizerListener() {
        @Override
        public void onBeginOfSpeech() {
            // UnityPlayer.UnitySendMessage("Manager", "Result", "onBeginOfSpeech ");
        }

        @Override
        public void onEndOfSpeech() {
            // UnityPlayer.UnitySendMessage("Manager", "Result", "onEndOfSpeech ");
            // startSpeechListener();
            // UnityPlayer.UnitySendMessage("Manager", "SpeechEnd", "");
        }

        @Override
        public void onError(SpeechError arg0) {
            // UnityPlayer.UnitySendMessage("Manager", "Result", "onError ");
        }

        @Override
        public void onEvent(int arg0, int arg1, int arg2, Bundle arg3) {
            // UnityPlayer.UnitySendMessage("Manager", "Result", "onEvent ");
        }

        @Override
        public void onResult(RecognizerResult recognizerResult, boolean isLast) {
            // UnityPlayer.UnitySendMessage("Manager", "Result", "listener ");
            printResult(recognizerResult);
            // if (isLast)
            //     startSpeechListener();
        }

        @Override
        public void onVolumeChanged(int arg0, byte[] arg1) {
            // UnityPlayer.UnitySendMessage("Manager", "Result", "onVolumeChanged ");
        }
    };
    // Parse the recognition result
    private void printResult(RecognizerResult results) {
        String json = results.getResultString();
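        // The iat result JSON looks roughly like this (abbreviated sketch, inferred
        // from the parsing below): {"ws":[{"cw":[{"w":"你好"}]},{"cw":[{"w":"世界"}]}]}
        // Each "ws" entry is one word with a candidate list "cw"; we take the first candidate.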
        StringBuffer ret = new StringBuffer();
        try {
            JSONTokener tokener = new JSONTokener(json);
            JSONObject joResult = new JSONObject(tokener);
            JSONArray words = joResult.getJSONArray("ws");
            for (int i = 0; i < words.length(); i++) {
                // Transcribed word; use the first candidate by default
                JSONArray items = words.getJSONObject(i).getJSONArray("cw");
                JSONObject obj = items.getJSONObject(0);
                ret.append(obj.getString("w"));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        // Send the parsed result to the "Result" function on the GameObject named "Manager"
        UnityPlayer.UnitySendMessage("Manager", "Result", ret.toString());
    }
    public void ShowToast(final String mStr2Show) {
        UnityPlayer.UnitySendMessage("Manager", "Result", "toast");
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                Toast.makeText(getApplicationContext(), mStr2Show, Toast.LENGTH_LONG).show();
            }
        });
    }
}
Pay special attention to a few things:
(1) The package name here must match the Bundle Identifier used when building the APK in Unity.
(2) For the parsed speech output to be delivered, the Unity scene must contain a GameObject named Manager whose script has a Result function.
Add the permissions to AndroidManifest; again, this is much the same as in other tutorials. Source:
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.ssm.ssm.speechrecognizer">
<application
android:allowBackup="true"
android:label="@string/app_name"
android:supportsRtl="true">
<activity android:name=".MainActivity"
android:label="@string/app_name">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
<meta-data android:name="unityplayer.UnityActivity" android:value="true" />
</activity>
</application>
<!--鏈接網絡權限,用於執行雲端語音能力 -->
<uses-permission android:name="android.permission.INTERNET"/>
<!--獲取手機錄音機使用權限,聽寫、識別、語義理解須要用到此權限 -->
<uses-permission android:name="android.permission.RECORD_AUDIO"/>
<!--讀取網絡信息狀態 -->
<uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
<!--獲取當前wifi狀態 -->
<uses-permission android:name="android.permission.ACCESS_WIFI_STATE"/>
<!--容許程序改變網絡鏈接狀態 -->
<uses-permission android:name="android.permission.CHANGE_NETWORK_STATE"/>
<!--讀取手機信息權限 -->
<uses-permission android:name="android.permission.READ_PHONE_STATE"/>
<!--讀取聯繫人權限,上傳聯繫人須要用到此權限 -->
<uses-permission android:name="android.permission.READ_CONTACTS"/>
<!--外存儲寫權限,構建語法須要用到此權限 -->
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<!--外存儲讀權限,構建語法須要用到此權限 -->
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
<!--配置權限,用來記錄應用配置信息 -->
<uses-permission android:name="android.permission.WRITE_SETTINGS"/>
<!--攝相頭權限,拍照須要用到 -->
<uses-permission android:name="android.permission.CAMERA" />
</manifest>
These permissions are needed for the Unity interop; if they are not added, warnings will appear when Unity builds the APK (as shown below).
Now, back in AS, to export the jar, add the following two snippets at the bottom of the library module's build.gradle:
task makeJar(type: Copy) {
    delete 'build/libs/speechrecognizer.jar'
    from('build/intermediates/bundles/release/')
    into('build/libs/')
    include('classes.jar')
    rename('classes.jar', 'speechrecognizer.jar')
}
makeJar.dependsOn(build)
// Run in the terminal to generate the jar:
// gradlew makeJar

dependencies {
    compile fileTree(include: ['*.jar'], dir: 'libs')
    compile files('libs/classes.jar')
}
8. At this point the jar code is complete. Now export the jar: type gradlew makeJar in the Terminal (the task name is case-sensitive), as shown:
9. Once it finishes, the jar file appears under build/libs. (Note: on newer Android Gradle Plugin versions the build/intermediates/bundles/release/ directory may have moved, in which case the from(...) path in makeJar needs adjusting.)
Step 3: Create a Unity project and build the APK
10. Create a new Unity project and set up the following structure under the Assets folder:
11. As in step 9, copy the jar exported from AS into the bin folder. Copy MSC.jar and the .so files from the SDK into the libs folder; note that devices running Android 5.0 and above need the armeabi-v7a .so files, otherwise you will get error 21002.
Finally, copy the AndroidManifest file into the Android folder (see the layout sketch below).
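For reference, the resulting plugin layout should look roughly like this (a sketch; the exact .so file names depend on the SDK version, libmsc.so being the usual name in the iFlytek package):
Assets/Plugins/Android/
    AndroidManifest.xml
    bin/
        speechrecognizer.jar
    libs/
        MSC.jar
        armeabi-v7a/
            libmsc.so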
12. Create the Manager script, named XunFeiTest. Source:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.UI;

public class XunFeiTest : MonoBehaviour {

    private string showResult = "";
    public Text shurukuang;

    // Use this for initialization
    void Start () {
    }

    // Update is called once per frame
    void Update () {
    }

    // Called by the UI button: asks the Android plugin to start listening
    public void Kaishi ()
    {
        AndroidJavaClass jc = new AndroidJavaClass ("com.unity3d.player.UnityPlayer");
        AndroidJavaObject jo = jc.GetStatic<AndroidJavaObject> ("currentActivity");
        jo.Call ("startSpeechListener");
    }

    // Called from Java via UnitySendMessage with the recognized text
    public void Result (string recognizerResult)
    {
        showResult += recognizerResult;
        showResult += '\n';
        shurukuang.text = showResult;
    }
}
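The MainActivity above also exposes doTTS, setTTSSpeaker, setTTSPitch, and ShowToast, which can be called from Unity the same way as startSpeechListener. Here is a minimal sketch (the wrapper class and the Speak method name are illustrative, not part of the tutorial's scene setup):
using UnityEngine;

public class XunFeiTTSExample : MonoBehaviour {

    // Ask the Android plugin to speak the given text.
    public void Speak (string text)
    {
        AndroidJavaClass jc = new AndroidJavaClass ("com.unity3d.player.UnityPlayer");
        AndroidJavaObject jo = jc.GetStatic<AndroidJavaObject> ("currentActivity");
        // Optional: pick the voice and pitch first (defaults are set in MainActivity)
        jo.Call ("setTTSSpeaker", "yefang");
        jo.Call ("setTTSPitch", "50");
        jo.Call ("doTTS", text);
    }
}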
13. Create a Button to start recording and a Text to display the result. Rename the Main Camera to Manager (it must match the GameObject name used in UnitySendMessage) and drag the script above onto the Manager object.
Link the Result Text in the scene to the Text field of the script.
Wire the Button's OnClick to the script's Kaishi function (or do it in code, as sketched below).
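If you prefer wiring the button in code rather than through the OnClick list, a minimal sketch (ButtonBinder is an illustrative name; the Button and XunFeiTest references are assigned in the Inspector):
using UnityEngine;
using UnityEngine.UI;

public class ButtonBinder : MonoBehaviour {

    public Button startButton;   // the start-recording button
    public XunFeiTest manager;   // the XunFeiTest component on the Manager object

    void Awake ()
    {
        // Equivalent to setting OnClick in the Inspector
        startButton.onClick.AddListener (manager.Kaishi);
    }
}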
14. The scene and script are now configured; it is time to build the APK: File → Build Settings.
15. Click Build, choose the output location and file name, and the install file is generated. Screenshots from the phone after deployment: