```kotlin
private val sampleRate = mediaFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE)
private val channelCount = mediaFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT)
private val minBufferSize = AudioRecord.getMinBufferSize(
    sampleRate,
    if (channelCount == 1) CHANNEL_IN_MONO else CHANNEL_IN_STEREO,
    AudioFormat.ENCODING_PCM_16BIT
)

runInBackground {
    audioRecord = AudioRecord(
        MediaRecorder.AudioSource.MIC,
        sampleRate,
        if (channelCount == 1) CHANNEL_IN_MONO else CHANNEL_IN_STEREO,
        AudioFormat.ENCODING_PCM_16BIT,
        2 * minBufferSize
    )
    audioRecord.startRecording()
}
```
When capturing audio we need to set the capture parameters, and they must match the parameters used to create the MediaCodec.
```kotlin
override fun onInputBufferAvailable(codec: MediaCodec, index: Int) {
    try {
        codec.getInputBuffer(index)?.let { bb ->
            val startTime = System.currentTimeMillis()
            var readSize = audioRecord.read(bb, bb.capacity())
            log { "read time ${System.currentTimeMillis() - startTime} read size $readSize" }
            if (readSize < 0) {
                readSize = 0
            }
            codec.queueInputBuffer(index, 0, readSize, System.nanoTime() / 1000, 0)
        }
    } catch (e: Exception) {
        e.printStackTrace()
    }
}
```
Data is captured here in blocking mode, so AudioRecord delivers data at the configured sample rate and we can simply use the current time as the recording timestamp.
```kotlin
val mediaFormat = MediaFormat.createAudioFormat(
    MediaFormat.MIMETYPE_AUDIO_AAC, audioSampleRate, audioChannelCount
)
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, audioBitRate)
mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC)
mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, audioMaxBufferSize)
```
Create a MediaFormat for the MediaCodec and set its parameters; the audio parameters set here must match those of the AudioRecord.
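One simple way to keep the two in sync is to define the parameters once and derive both the AudioRecord and the MediaFormat from them. A minimal sketch: the field names are the ones used throughout this article, but the bitrate and buffer-size values are placeholders, not the author's actual configuration.

```kotlin
// Shared audio parameters: the AudioRecord capture code and the MediaFormat
// above both read from these, so they cannot drift apart.
private val audioSampleRate = 44100       // matches the 44100 in the SDP below
private val audioChannelCount = 1         // mono, i.e. CHANNEL_IN_MONO
private val audioBitRate = 64 * 1024      // placeholder encoder bitrate
private val audioMaxBufferSize = 4 * 1024 // placeholder KEY_MAX_INPUT_SIZE
```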
```kotlin
override fun onInputBufferAvailable(codec: MediaCodec, index: Int) {
    try {
        codec.getInputBuffer(index)?.let { bb ->
            val startTime = System.currentTimeMillis()
            var readSize = audioRecord.read(bb, bb.capacity())
            log { "read time ${System.currentTimeMillis() - startTime} read size $readSize" }
            if (readSize < 0) {
                readSize = 0
            }
            codec.queueInputBuffer(index, 0, readSize, System.nanoTime() / 1000, 0)
        }
    } catch (e: Exception) {
        e.printStackTrace()
    }
}
```
The timestamp passed to MediaCodec with each buffer is the current system time. Since we send the data in real time over RTP, the flags never need the end-of-stream marker.
```kotlin
audioCodec = object : AudioEncodeCodec(mediaFormat) {
    override fun onOutputBufferAvailable(
        codec: MediaCodec, index: Int, info: MediaCodec.BufferInfo
    ) {
        try {
            val buffer = codec.getOutputBuffer(index) ?: return
            if (lastSendAudioTime == 0L) {
                lastSendAudioTime = info.presentationTimeUs
            }
            // Interval since the last packet, converted from microseconds
            // to ticks of the sample-rate clock.
            val increase =
                (info.presentationTimeUs - lastSendAudioTime) * audioSampleRate / 1000 / 1000
            if (hasAuHeader) {
                // Leave 4 bytes at the front for AU-headers-length + AU header.
                buffer.position(info.offset)
                buffer.get(bufferArray, 4, info.size)
                auHeaderLength.apply {
                    bufferArray[0] = this[0]
                    bufferArray[1] = this[1]
                }
                auHeader(info.size).apply {
                    bufferArray[2] = this[0]
                    bufferArray[3] = this[1]
                }
                audioRtpWrapper?.sendData(bufferArray, info.size + 4, 97, true, increase.toInt())
            } else {
                buffer.position(info.offset)
                buffer.get(bufferArray, 0, info.size)
                audioRtpWrapper?.sendData(bufferArray, info.size, 97, true, increase.toInt())
            }
            lastSendAudioTime = info.presentationTimeUs
            codec.releaseOutputBuffer(index, false)
        } catch (e: Exception) {
            e.printStackTrace()
        }
    }
}
```
MediaCodec outputs raw AAC frames, and we can decide per use case whether to prepend an AU header before sending. Both variants are implemented here: without an AU header, the data read from MediaCodec is sent over RTP as-is; with an AU header, four header bytes are prepended to the raw AAC data. Whether an AU header is used must agree with the SDP that VLC plays from; the SDP configuration is covered in detail below.
```kotlin
// AU-headers-length field: 0x0010 = 16 bits, i.e. one two-byte AU header follows.
private val auHeaderLength = ByteArray(2).apply {
    this[0] = 0
    this[1] = 0x10
}

// AU header: 13 bits of AU size followed by a 3-bit AU index (0 here).
private fun auHeader(len: Int): ByteArray {
    return ByteArray(2).apply {
        this[0] = (len and 0x1fe0 shr 5).toByte() // top 8 bits of the 13-bit size
        this[1] = (len and 0x1f shl 3).toByte()   // low 5 bits of size + 3-bit index
    }
}
```
We use the jrtplib library to send the data; it is given a thin native wrapper exposed to Java as the RtpWrapper class.
```java
public class RtpWrapper {
    private long nativeObject = 0;
    private IDataCallback callback;

    public RtpWrapper() {
        init();
    }

    @Override
    protected void finalize() throws Throwable {
        release();
        super.finalize();
    }

    public void setCallback(IDataCallback callback) {
        this.callback = callback;
    }

    void receivedData(byte[] buffer, int len) {
        if (this.callback != null)
            this.callback.onReceivedData(buffer, len);
    }

    public interface IDataCallback {
        void onReceivedData(byte[] buffer, int len);
    }

    static {
        try {
            System.loadLibrary("rtp-lib");
            initLib();
        } catch (Throwable e) {
            e.printStackTrace();
        }
    }

    private native static void initLib();
    private native boolean init();
    private native boolean release();

    public native boolean open(int port, int payloadType, int sampleRate);
    public native boolean close();

    /**
     * @param ip e.g. "192.168.1.1"
     */
    public native boolean addDestinationIp(String ip);

    public native int sendData(byte[] buffer, int len, int payloadType, boolean mark, int increase);
}
```
The open method specifies the port used for sending, payloadType sets the payload type, and sampleRate is the sample rate.
addDestinationIp adds a receiver IP address, in the form "192.168.1.1".
sendData sends one packet; increase is the time elapsed since the previous packet, measured in units of 1/sampleRate seconds (i.e. in samples).
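Putting the calls together, a minimal usage sketch of the wrapper: the port, payload type and sample rate follow the values used in this article, while the destination address and the packet buffer are placeholders.

```kotlin
val rtp = RtpWrapper()
rtp.open(40020, 97, 44100)            // local port, payload type, sample-rate clock
rtp.addDestinationIp("192.168.1.100") // placeholder receiver address

val packet = ByteArray(512)           // stand-in for one encoded AAC frame
// one AAC frame (1024 samples) has elapsed since the previous packet
rtp.sendData(packet, packet.size, 97, true, 1024)

rtp.close()
```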
```kotlin
override fun onOutputFormatChanged(codec: MediaCodec, format: MediaFormat) {
    audioRtpWrapper = RtpWrapper()
    audioRtpWrapper?.open(audioRtpPort, audioPayloadType, audioSampleRate)
    audioRtpWrapper?.addDestinationIp(ip)
}
```
When MediaCodec reports its output format, we create the RTP session and set the destination address.
```kotlin
try {
    val buffer = codec.getOutputBuffer(index) ?: return
    if (lastSendAudioTime == 0L) {
        lastSendAudioTime = info.presentationTimeUs
    }
    val increase =
        (info.presentationTimeUs - lastSendAudioTime) * audioSampleRate / 1000 / 1000
    if (hasAuHeader) {
        buffer.position(info.offset)
        buffer.get(bufferArray, 4, info.size)
        auHeaderLength.apply {
            bufferArray[0] = this[0]
            bufferArray[1] = this[1]
        }
        auHeader(info.size).apply {
            bufferArray[2] = this[0]
            bufferArray[3] = this[1]
        }
        audioRtpWrapper?.sendData(bufferArray, info.size + 4, 97, true, increase.toInt())
    } else {
        buffer.position(info.offset)
        buffer.get(bufferArray, 0, info.size)
        audioRtpWrapper?.sendData(bufferArray, info.size, 97, true, increase.toInt())
    }
    lastSendAudioTime = info.presentationTimeUs
    codec.releaseOutputBuffer(index, false)
} catch (e: Exception) {
    e.printStackTrace()
}
```
When sending data we must supply the payloadType and the interval since the last packet, among other things.
(info.presentationTimeUs - lastSendAudioTime) computes the interval in microseconds.
(info.presentationTimeUs - lastSendAudioTime) * audioSampleRate / 1000 / 1000 converts that interval into units of 1/sampleRate seconds.
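As a sanity check on the conversion: one AAC frame holds 1024 samples, so at 44100 Hz consecutive frames arrive about 23220 µs apart, and the formula should yield roughly 1024 ticks.

```kotlin
val deltaUs = 23_220L                        // ≈ one AAC frame (1024 samples) at 44100 Hz
val increase = deltaUs * 44100 / 1000 / 1000 // microseconds -> sample-rate ticks
println(increase)                            // prints 1024
```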
The payload type used to send the AAC data over RTP is 97, a value from the dynamic range (96–127) that must match the a=rtpmap entry in the SDP.
To play the RTP audio stream, VLC needs an SDP file; from it VLC learns the RTP receive port, the payload type, the audio format and so on, which it uses to receive, decode and play the stream. Two configurations are shown here: the first is for the AU-header case, the second for streams without an AU header.
```
m=audio 40020 RTP/AVP 97
a=rtpmap:97 mpeg4-generic/44100/1
a=fmtp:97 streamtype=5;config=1208;sizeLength=13;indexLength=3
```
```
m=audio 40020 RTP/AVP 97
a=rtpmap:97 mpeg4-generic/44100/1
a=fmtp:97 streamtype=5;config=1208
```
The SDP configures port 40020, RTP payload type 97, a 44100 Hz sample rate, and one channel.
config describes the audio stream; it is generated as follows:
```kotlin
private val audioChannelCount = 1
private val audioProfile = 2 // AAC LC

/**
 * Sampling frequency index table:
 * 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050,
 * 16000, 12000, 11025, 8000, 7350, 0, 0, 0
 */
private val audioIndex = 4

private val audioSpecificConfig = ByteArray(2).apply {
    this[0] = ((audioProfile).shl(3).and(0xff)).or(audioIndex.ushr(1).and(0xff)).toByte()
    this[1] = ((audioIndex.shl(7).and(0xff)).or(audioChannelCount.shl(3).and(0xff))).toByte()
}
```
audioProfile must match the profile given to MediaCodec; we set AACObjectLC (2), so audioProfile is 2.
audioChannelCount is 1, a single channel.
audioIndex comes from the sample-rate table in the comment: the index for 44100 Hz is 4.
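Plugging these values into the code above shows where config=1208 in the SDP comes from. This is a worked check, not part of the original code:

```kotlin
val audioProfile = 2      // AAC LC
val audioIndex = 4        // 44100 Hz
val audioChannelCount = 1 // mono

val byte0 = (audioProfile shl 3) or (audioIndex ushr 1)                // 0x12
val byte1 = ((audioIndex shl 7) and 0xff) or (audioChannelCount shl 3) // 0x08
println("%02x%02x".format(byte0, byte1)) // prints "1208" -> config=1208
```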
Comparing the two SDP versions, the only difference is whether sizeLength and indexLength are configured.
The AU header used here is two bytes: sizeLength=13 means the AU size occupies 13 bits, and indexLength=3 means the AU index occupies the remaining 3 bits. With that in mind, the code that prepends the AU header when sending is easy to follow:
```kotlin
private fun auHeader(len: Int): ByteArray {
    return ByteArray(2).apply {
        this[0] = (len and 0x1fe0 shr 5).toByte()
        this[1] = (len and 0x1f shl 3).toByte()
    }
}
```
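For example, with a 256-byte AAC frame the four bytes prepended to the packet work out as follows (a worked check of the bit layout above):

```kotlin
val len = 256                   // AAC frame size in bytes
val b0 = (len and 0x1fe0) shr 5 // 0x08: top 8 bits of the 13-bit size
val b1 = (len and 0x1f) shl 3   // 0x00: low 5 bits of size, 3-bit index = 0
// full 4-byte prefix: 0x00 0x10 (AU-headers-length = 16 bits) + 0x08 0x00 (AU header)
println("%02x %02x".format(b0, b1)) // prints "08 00"
```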