最近有需求從藍牙接收音頻數據進行播放,以前沒作過,就各類百度啊,谷歌,看官方文檔,而後順帶說一下,這裏用的是Audio Queue Services,只能用於PCM數據,其餘壓縮的音頻文件要配合AudioFileStream或者AudioFile解析後播放。
在個人這篇文章中有一些音頻的介紹(主要是使用Speex這個庫),適合萌新觀看。
注意點:
上述的解決辦法是往播放隊列中插入空數據(感受效果很差),或者是先暫停後,等數據來了再播放。
具體能夠看 碼農人生這個博客,講的很是詳細。
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

NS_ASSUME_NONNULL_BEGIN

/// Streams raw PCM audio through Audio Queue Services.
/// Feed chunks of PCM bytes with -playWithData: as they arrive (e.g. from
/// Bluetooth); call -resetPlay to discard anything still queued.
/// NOTE(review): only uncompressed PCM is supported — compressed formats need
/// AudioFileStream/AudioFile parsing first.
@interface AudioQueuePlay : NSObject

/// Enqueues one chunk of PCM data for playback.
/// @param data Raw PCM bytes matching the configured stream format.
- (void)playWithData:(NSData *)data;

/// Resets the underlying audio queue, discarding buffered audio.
- (void)resetPlay;

@end

NS_ASSUME_NONNULL_END
#import "AudioQueuePlay.h"

#define MIN_SIZE_PER_FRAME 2000   // capacity of each queue buffer, in bytes
#define QUEUE_BUFFER_SIZE 3       // number of reusable buffers in the queue

@interface AudioQueuePlay () {
    AudioQueueRef audioQueue;                                  // playback queue
    AudioStreamBasicDescription _audioDescription;             // PCM stream format
    AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE];  // reusable audio buffers
    BOOL audioQueueBufferUsed[QUEUE_BUFFER_SIZE];              // YES while the buffer is enqueued
    NSLock *sysnLock;                                          // guards buffer bookkeeping in -playWithData:
    NSMutableData *tempData;                                   // scratch copy of the incoming chunk
    OSStatus osState;                                          // last AudioToolbox result code
}
@end

@implementation AudioQueuePlay

/// Configures an 8 kHz / 16-bit signed / mono PCM format, creates the output
/// queue, pre-allocates the reusable buffers and starts playback.
- (instancetype)init {
    self = [super init];
    if (self) {
        sysnLock = [[NSLock alloc] init];

        // Describe the PCM stream: uncompressed, so one frame per packet.
        if (_audioDescription.mSampleRate <= 0) {
            _audioDescription.mSampleRate = 8000.0;                    // sample rate
            _audioDescription.mFormatID = kAudioFormatLinearPCM;
            // Packed, signed-integer samples (native endianness).
            _audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
            _audioDescription.mChannelsPerFrame = 1;                   // 1 = mono, 2 = stereo
            _audioDescription.mFramesPerPacket = 1;                    // uncompressed PCM: 1 frame/packet
            _audioDescription.mBitsPerChannel = 16;                    // bits per sample
            _audioDescription.mBytesPerFrame =
                (_audioDescription.mBitsPerChannel / 8) * _audioDescription.mChannelsPerFrame;
            _audioDescription.mBytesPerPacket =
                _audioDescription.mBytesPerFrame * _audioDescription.mFramesPerPacket;
        }

        // Create the output queue; callbacks run on the queue's internal thread
        // (no run loop / mode supplied).
        osState = AudioQueueNewOutput(&_audioDescription, AudioPlayerAQInputCallback,
                                      (__bridge void * _Nullable)(self), nil, 0, 0, &audioQueue);
        if (osState != noErr) {
            // Previously this result was ignored; a NULL queue would crash later.
            printf("AudioQueueNewOutput Error %d", (int)osState);
            return self;
        }

        // Full volume.
        AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, 1.0);

        // Pre-allocate the reusable buffers and mark them all free.
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
            audioQueueBufferUsed[i] = false;
            osState = AudioQueueAllocateBuffer(audioQueue, MIN_SIZE_PER_FRAME, &audioQueueBuffers[i]);
            printf("第 %d 個AudioQueueAllocateBuffer 初始化結果 %d (0表示成功)", i + 1, osState);
        }

        osState = AudioQueueStart(audioQueue, NULL);
        if (osState != noErr) {
            printf("AudioQueueStart Error");
        }
    }
    return self;
}

/// Resets the queue, discarding any audio that has not played yet.
- (void)resetPlay {
    if (audioQueue != nil) {
        AudioQueueReset(audioQueue);
    }
}

// Playback
/// Copies one chunk of PCM bytes into a free queue buffer and enqueues it.
/// Spins (holding the lock) until a buffer is free; the output callback
/// releases buffers as they finish playing.
- (void)playWithData:(NSData *)data {
    [sysnLock lock];

    tempData = [NSMutableData new];
    [tempData appendData:data];

    NSUInteger len = tempData.length;
    // Each buffer was allocated with MIN_SIZE_PER_FRAME bytes; clamp so the
    // memcpy below cannot write past mAudioData (previously unchecked).
    if (len > MIN_SIZE_PER_FRAME) {
        len = MIN_SIZE_PER_FRAME;
    }

    // Round-robin scan for a free buffer.
    int i = 0;
    while (true) {
        if (!audioQueueBufferUsed[i]) {
            audioQueueBufferUsed[i] = true;
            break;
        } else {
            i++;
            if (i >= QUEUE_BUFFER_SIZE) {
                i = 0;
            }
        }
    }

    audioQueueBuffers[i]->mAudioDataByteSize = (unsigned int)len;
    // Copy straight from the NSData backing store. The original malloc'd an
    // intermediate copy whose free() was commented out — a leak per call.
    memcpy(audioQueueBuffers[i]->mAudioData, tempData.bytes, len);
    AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffers[i], 0, NULL);

    printf("本次播放數據大小: %lu", len);

    [sysnLock unlock];
}

// ************************** callback **********************************
/// Invoked on the audio queue's thread when a buffer finishes playing;
/// hands the buffer back so it can be reused.
static void AudioPlayerAQInputCallback(void *inUserData, AudioQueueRef audioQueueRef, AudioQueueBufferRef audioQueueBufferRef) {
    AudioQueuePlay *player = (__bridge AudioQueuePlay *)inUserData;
    [player resetBufferState:audioQueueRef and:audioQueueBufferRef];
}

/// Marks the finished buffer as free again.
/// NOTE(review): the used[] flags are written here without sysnLock (racing
/// -playWithData:). Taking the lock here would deadlock against the spin
/// loop above, which holds it while waiting — a condition variable would be
/// the proper fix; left as-is to preserve behavior.
- (void)resetBufferState:(AudioQueueRef)audioQueueRef and:(AudioQueueBufferRef)audioQueueBufferRef {
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        if (audioQueueBufferRef == audioQueueBuffers[i]) {
            audioQueueBufferUsed[i] = false;
        }
    }
}

// ************************** teardown **********************************
- (void)dealloc {
    if (audioQueue != nil) {
        AudioQueueStop(audioQueue, true);
        // AudioQueueStop alone does not release the queue or its buffers.
        AudioQueueDispose(audioQueue, true);
    }
    audioQueue = nil;
    sysnLock = nil;
}

@end
import UIKit
import AudioToolbox

class PCMPlayerConstant: NSObject {
    // Number of reusable queue buffers.
    static let BUFF_NUM = 3
    // Capacity of each buffer, in bytes.
    static let ONCE_PLAY_SIZE: UInt32 = 2000
}

/// Plays raw PCM data (8 kHz, 16-bit signed integer, mono) through
/// Audio Queue Services. Append incoming chunks with playWithData(data:).
class PCMPlayer: NSObject {

    fileprivate var audioQueueRef: AudioQueueRef?
    fileprivate var audioQueueBuffer: [AudioQueueBufferRef?]!
    fileprivate var audioDescription: AudioStreamBasicDescription!
    fileprivate var audioQueueBufferUsed: [Bool]!     // true while the buffer is enqueued
    fileprivate var syncLock: NSLock!
    fileprivate var playData: NSMutableData!          // accumulates bytes until the play threshold
    fileprivate var oSStatus: OSStatus!

    override init() {
        super.init()
        self.playData = NSMutableData()
        self.syncLock = NSLock()
        oSStatus = OSStatus()
        audioQueueBufferUsed = []
        self.audioQueueBuffer = []

        // PCM stream format: uncompressed, so one frame per packet.
        audioDescription = AudioStreamBasicDescription()
        audioDescription.mSampleRate = 8000.0                 // sample rate
        audioDescription.mFormatID = kAudioFormatLinearPCM
        // Packed, signed-integer samples (native endianness).
        audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
        audioDescription.mChannelsPerFrame = 1                // 1 = mono, 2 = stereo
        audioDescription.mFramesPerPacket = 1                 // uncompressed PCM: 1 frame/packet
        audioDescription.mBitsPerChannel = 16                 // bits per sample
        audioDescription.mBytesPerFrame = (audioDescription.mBitsPerChannel / 8) * audioDescription.mChannelsPerFrame
        audioDescription.mBytesPerPacket = audioDescription.mBytesPerFrame * audioDescription.mFramesPerPacket

        self.initPlay()
    }

    /// Creates the output queue, allocates the reusable buffers and starts playback.
    fileprivate func initPlay() -> Void {
        let selfPointer = unsafeBitCast(self, to: UnsafeMutableRawPointer.self)

        // Build the queue from audioDescription; callbacks fire on the current run loop.
        oSStatus = AudioQueueNewOutput(&self.audioDescription!, MyAudioQueueOutputCallback, selfPointer,
                                       CFRunLoopGetCurrent(), nil, 0, &self.audioQueueRef)
        if oSStatus != noErr {
            print("AudioQueueNewOutput Error")
            return
        }

        // Full volume.
        AudioQueueSetParameter(self.audioQueueRef!, kAudioQueueParam_Volume, 1.0)

        for index in 0..<PCMPlayerConstant.BUFF_NUM {
            var audioBuffer: AudioQueueBufferRef? = nil
            oSStatus = AudioQueueAllocateBuffer(self.audioQueueRef!, PCMPlayerConstant.ONCE_PLAY_SIZE, &audioBuffer)
            if oSStatus != noErr {
                print("AudioQueueAllocateBuffer Error \(index)")
                return
            } else {
                self.audioQueueBuffer.append(audioBuffer)
                // Mark the new buffer as free.
                self.audioQueueBufferUsed.append(false)
                print("第 \(index + 1) 個AudioQueueAllocateBuffer 初始化結果 \(oSStatus) (0表示成功)")
            }
        }

        AudioQueueStart(self.audioQueueRef!, nil)
    }

    /// Buffers incoming PCM bytes and, once more than 980 bytes have
    /// accumulated (threshold is tunable), copies them into a free queue
    /// buffer and enqueues it for playback.
    func playWithData(data: Data) -> Void {
        syncLock.lock()

        playData.append(data)

        if playData.length > 980 {
            // Each buffer holds ONCE_PLAY_SIZE bytes; clamp so copyBytes
            // below cannot overflow the buffer (previously unchecked).
            let playDataLength = min(playData.length, Int(PCMPlayerConstant.ONCE_PLAY_SIZE))
            var i = 0
            // Round-robin scan for a free buffer; the output callback marks
            // buffers free again after they finish playing.
            while true {
                if !self.audioQueueBufferUsed[i] {
                    // Claim it.
                    self.audioQueueBufferUsed[i] = true
                    break
                } else {
                    i += 1
                    // Wrap around and keep scanning.
                    if i >= PCMPlayerConstant.BUFF_NUM {
                        i = 0
                    }
                }
            }

            let p = self.audioQueueBuffer[i]
            let selfPointer = unsafeBitCast(self, to: UnsafeMutableRawPointer.self)
            p?.pointee.mUserData = selfPointer
            p?.pointee.mAudioDataByteSize = UInt32(playDataLength)
            p?.pointee.mAudioData.copyBytes(from: playData.bytes, count: playDataLength)

            // Hand the buffer to the audio queue.
            AudioQueueEnqueueBuffer(self.audioQueueRef!, self.audioQueueBuffer[i]!, 0, nil)

            playData = NSMutableData()
            print("play length \(playDataLength)")
        }

        syncLock.unlock()
    }
}

/// Output callback: invoked when a buffer finishes playing; marks it free
/// so playWithData(data:) can reuse it.
func MyAudioQueueOutputCallback(clientData: UnsafeMutableRawPointer?, AQ: AudioQueueRef, buffer: AudioQueueBufferRef) {
    let my = Unmanaged<PCMPlayer>.fromOpaque(UnsafeRawPointer(clientData)!).takeUnretainedValue()
    for index in 0..<PCMPlayerConstant.BUFF_NUM {
        if my.audioQueueBuffer[index] == buffer {
            my.audioQueueBufferUsed[index] = false
        }
    }
}