播放網絡音頻,能夠先下載好,獲得音頻文件
使用 AVAudioPlayer 播放
蘋果封裝下,AVAudioPlayer 處理本地文件,方便
直接拿到一個文件地址 url,播放
便於音頻的傳輸,通常使用音頻壓縮文件,mp3 等。文件壓縮後的體積小,方便網絡傳輸
聲卡是播放 PCM 緩衝的
蘋果幫開發把壓縮格式,轉換爲未壓縮的原始文件 PCM
還幫開發作播放音頻的資源調度,從 PCM 文件中拿出一段段的緩衝 buffer,交給聲卡消費掉
( 實際不會分兩步,過程固然是並行的 )
如今手動
接收到網絡上的音頻數據包,就去播放。
分四步:
下載音頻文件的二進制數據
URLSession 的 task, 去獲取網絡文件
拿到一個數據包 Data,就處理一個
本例子,一個數據包 Data,對應一個音頻包 packet, 對應一個音頻緩衝 buffer
這一步,比較容易,
建個 URLSessionDataTask ,去下載
要作的,都在網絡代理方法裏
extension Downloader: URLSessionDataDelegate {
    // Download started: record the total size of the remote file so progress
    // can be computed as chunks arrive.
    public func urlSession(_ session: URLSession, dataTask: URLSessionDataTask, didReceive response: URLResponse, completionHandler: @escaping (URLSession.ResponseDisposition) -> Void) {
        totalBytesCount = response.expectedContentLength
        completionHandler(.allow)
    }

    // A chunk of the file arrived: update totals, compute progress, and hand
    // the raw bytes to the delegate (which feeds them to the parser).
    public func urlSession(_ session: URLSession, dataTask: URLSessionDataTask, didReceive data: Data) {
        totalBytesReceived += Int64(data.count)
        // expectedContentLength is -1 (NSURLSessionTransferSizeUnknown) when
        // the server sends no Content-Length; guard so progress never becomes
        // negative or the result of a division by zero.
        if totalBytesCount > 0 {
            progress = Float(totalBytesReceived) / Float(totalBytesCount)
        }
        delegate?.download(self, didReceiveData: data, progress: progress)
    }

    // Download finished; `error` is nil on success.
    public func urlSession(_ session: URLSession, task: URLSessionTask, didCompleteWithError error: Error?) {
        state = .completed
        delegate?.download(self, completedWithError: error)
    }
}
複製代碼
音頻文件,分爲封裝格式(文件格式),和編碼格式
數據緩衝 buffer , 裝音頻包 packet,
音頻包 packet,裝音頻幀 frame
固定碼率 CBR, 平均採樣,對應原始文件,pcm ( 未壓縮文件 )
可變碼率 VBR,對應壓縮文件,例如: mp3
Core Audio 支持 VBR,通常經過可變幀率格式 VFR
VFR 是指:每一個包 packet 的體積相等, 包 packet 裏面的幀 frame 的數量不一, 幀 frame 含有的音頻數據有大有小
固定碼率用 ASBD 描述,AudioStreamBasicDescription
ASBD 的描述, 就是指一些配置信息,包含通道數、採樣率、位深...
可變碼率中 VFR,用 ASPD 描述,AudioStreamPacketDescription
壓縮音頻數據中 VFR,對應 ASPD
每個包 Packet,都有其 ASPD
ASPD 裏面有,包 packet 的位置信息 mStartOffset,包 packet 的幀 frame 的個數,mVariableFramesInPacket
拿 Audio Queue Services ,處理上一步獲取的音頻二進制數據 data,解析爲音頻數據包 packet
public init() throws {
    // Opaque pointer carrying `self`, recovered in the C callbacks via
    // Unmanaged.fromOpaque. Prefer Unmanaged.passUnretained(...).toOpaque()
    // over unsafeBitCast: it is the documented, type-checked way to round-trip
    // a class instance through a C context pointer, and it mirrors the
    // takeUnretainedValue() calls in ParserPropertyChangeCallback /
    // ParserPacketCallback. passUnretained adds no retain, so the parser must
    // outlive the open stream (same lifetime contract as the original code).
    let context = Unmanaged.passUnretained(self).toOpaque()
    // Open a live audio file stream parser and store its ID; property and
    // packet events are delivered to the two callbacks.
    guard AudioFileStreamOpen(context, ParserPropertyChangeCallback, ParserPacketCallback, kAudioFileMP3Type, &streamID) == noErr else {
        throw ParserError.streamCouldNotOpen
    }
}
複製代碼
/// Feeds one chunk of downloaded bytes into the Core Audio stream parser.
/// Parsed packets arrive asynchronously through the parser callbacks.
/// - Throws: `ParserError.failedToParseBytes` when the parser rejects the bytes.
public func parse(data: Data) throws {
    let streamID = self.streamID!
    let byteCount = data.count
    try data.withUnsafeBytes { (raw: UnsafeRawBufferPointer) in
        // Nothing to do for an empty chunk / absent base address.
        guard let base = raw.bindMemory(to: UInt8.self).baseAddress else {
            return
        }
        // Hand the bytes to the parser identified by streamID.
        let status = AudioFileStreamParseBytes(streamID, UInt32(byteCount), base, [])
        guard status == noErr else {
            throw ParserError.failedToParseBytes(status)
        }
    }
}
複製代碼
// Property callback for the audio file stream: fired when the parser
// discovers a property of the stream. Only the data format and the total
// packet count are of interest here; everything else is ignored.
func ParserPropertyChangeCallback(_ context: UnsafeMutableRawPointer, _ streamID: AudioFileStreamID, _ propertyID: AudioFileStreamPropertyID, _ flags: UnsafeMutablePointer<AudioFileStreamPropertyFlags>) {
    // Recover the Parser instance stashed in the opaque context pointer.
    let parser = Unmanaged<Parser>.fromOpaque(context).takeUnretainedValue()
    if propertyID == kAudioFileStreamProperty_DataFormat {
        // Source format description (sample rate, channels, bit depth, ...).
        var descriptor = AudioStreamBasicDescription()
        GetPropertyValue(&descriptor, streamID, propertyID)
        parser.dataFormat = AVAudioFormat(streamDescription: &descriptor)
    } else if propertyID == kAudioFileStreamProperty_AudioDataPacketCount {
        // Number of packets the stream reports for its audio data section.
        GetPropertyValue(&parser.packetCount, streamID, propertyID)
    }
}
// 套路就是,先拿內存大小 propSize, 再拿關心的屬性的值 value
// Fetches a stream property into `value` using the standard two-step
// Core Audio pattern: query the byte size first, then read the value.
// Failures are silently ignored and `value` is left untouched (best effort).
func GetPropertyValue<T>(_ value: inout T, _ streamID: AudioFileStreamID, _ propertyID: AudioFileStreamPropertyID) {
    var size: UInt32 = 0
    let infoStatus = AudioFileStreamGetPropertyInfo(streamID, propertyID, &size, nil)
    guard infoStatus == noErr else { return }
    _ = AudioFileStreamGetProperty(streamID, propertyID, &size, &value)
}
複製代碼
// Packet callback for the audio file stream: invoked each time the parser has
// split incoming bytes into audio packets. Appends (data, description) pairs
// to parser.packets for the Reader to consume later.
// - context: opaque pointer carrying the Parser instance.
// - byteCount / packetCount: size and number of packets in `data`.
// - packetDescriptions: per-packet ASPDs (present for compressed formats).
func ParserPacketCallback(_ context: UnsafeMutableRawPointer, _ byteCount: UInt32, _ packetCount: UInt32, _ data: UnsafeRawPointer, _ packetDescriptions: UnsafeMutablePointer<AudioStreamPacketDescription>) {
// Recover self (the Parser) from the opaque context pointer.
let parser = Unmanaged<Parser>.fromOpaque(context).takeUnretainedValue()
let packetDescriptionsOrNil: UnsafeMutablePointer<AudioStreamPacketDescription>? = packetDescriptions
// If per-packet descriptions (ASPDs) are present, the data is compressed;
// uncompressed PCM is fully described by the single ASBD instead.
// NOTE(review): the parameter is declared non-optional, so this nil check
// depends on the C side passing NULL through it — confirm against the
// AudioFileStream_PacketsProc signature.
let isCompressed = packetDescriptionsOrNil != nil
// Packet sizes cannot be computed until the data format is known.
guard let dataFormat = parser.dataFormat else {
return
}
if isCompressed {
for i in 0 ..< Int(packetCount) {
// Compressed data: each packet's own ASPD gives its offset and size.
let packetDescription = packetDescriptions[i]
let packetStart = Int(packetDescription.mStartOffset)
let packetSize = Int(packetDescription.mDataByteSize)
let packetData = Data(bytes: data.advanced(by: packetStart), count: packetSize)
parser.packets.append((packetData, packetDescription))
}
} else {
// Raw PCM: constant bytes-per-packet, so offsets are simple multiples.
let format = dataFormat.streamDescription.pointee
let bytesPerPacket = Int(format.mBytesPerPacket)
for i in 0 ..< Int(packetCount) {
let packetStart = i * bytesPerPacket
let packetSize = bytesPerPacket
let packetData = Data(bytes: data.advanced(by: packetStart), count: packetSize)
parser.packets.append((packetData, nil))
}
}
}
複製代碼
/// Creates a Reader that converts the parser's packets into `readFormat`.
/// - Parameters:
///   - parser: supplies the source audio packets and their format.
///   - readFormat: the output PCM format the converter should produce.
/// - Throws: `ReaderError.parserMissingDataFormat` when the parser has not yet
///   discovered the source format; `ReaderError.unableToCreateConverter` when
///   AudioConverterNew fails.
public required init(parser: Parsing, readFormat: AVAudioFormat) throws {
    self.parser = parser
    // The source format comes from what the parser discovered in the stream.
    guard let sourceFormat = parser.dataFormat?.streamDescription else {
        throw ReaderError.parserMissingDataFormat
    }
    // Build the format converter: source (parsed) -> output (caller-chosen).
    let status = AudioConverterNew(sourceFormat, readFormat.streamDescription, &converter)
    guard status == noErr else {
        throw ReaderError.unableToCreateConverter(status)
    }
    self.readFormat = readFormat
}
複製代碼
開發指定的輸出格式
public var readFormat: AVAudioFormat {
    // Output format for conversion: 32-bit float PCM, 44.1 kHz (standard CD
    // sample rate), stereo, non-interleaved (separate left/right buffers).
    // This standard-PCM initializer cannot fail for these arguments, hence `!`.
    AVAudioFormat(commonFormat: .pcmFormatFloat32,
                  sampleRate: 44100,
                  channels: 2,
                  interleaved: false)!
}
// 位深,採用 Float32
// 採樣率 44100 Hz, 標準 CD 音質
// 分左右聲道
複製代碼
上一步解析出音頻包 packet,讀取音頻緩衝 buffer
// Converts parsed packets into a freshly allocated PCM buffer holding
// `frames` frames in the reader's output format, ready for AVAudioEngine.
// - Throws: ReaderError variants mapped from the converter callback's
//   custom status codes, or failedToCreatePCMBuffer on allocation failure.
public func read(_ frames: AVAudioFrameCount) throws -> AVAudioPCMBuffer {
let framesPerPacket = readFormat.streamDescription.pointee.mFramesPerPacket
// Number of packets needed to cover the requested frame count.
var packets = frames / framesPerPacket
// Allocate an empty buffer with the requested format and capacity.
guard let buffer = AVAudioPCMBuffer(pcmFormat: readFormat, frameCapacity: frames) else {
throw ReaderError.failedToCreatePCMBuffer
}
buffer.frameLength = frames
// Serialize conversion on the queue (seek(_:) mutates currentPacket on the
// same queue). ReaderConverterCallback pulls source packets and the
// converter fills buffer.mutableAudioBufferList.
try queue.sync {
let context = unsafeBitCast(self, to: UnsafeMutableRawPointer.self)
let status = AudioConverterFillComplexBuffer(converter!, ReaderConverterCallback, context, &packets, buffer.mutableAudioBufferList, nil)
// Map the callback's custom OSStatus codes onto typed Reader errors.
guard status == noErr else {
switch status {
case ReaderMissingSourceFormatError:
throw ReaderError.parserMissingDataFormat
case ReaderReachedEndOfDataError:
throw ReaderError.reachedEndOfFile
case ReaderNotEnoughDataError:
throw ReaderError.notEnoughData
default:
throw ReaderError.converterFailed(status)
}
}
}
return buffer
}
複製代碼
AudioConverterFillComplexBuffer(格式轉換器,回調函數,自定義參數指針,包的個數指針,接收轉換後數據的指針,接收 ASPD 的指針)
AudioConverterFillComplexBuffer(converter!, ReaderConverterCallback, context, &packets, buffer.mutableAudioBufferList, nil)
複製代碼
回調函數(格式轉換器, 包的個數指針,接收轉換後數據的指針, 接收 ASPD 的指針, 自定義參數指針 )
可看出,傳遞給 AudioConverterFillComplexBuffer 的 6 個參數,
除了其回調參數自己,其餘 5 個參數,其回調函數都有用到
轉換 buffer 的回調函數,以前建立了空白的音頻緩衝 buffer,現往 buffer 裏面,填數據
// AudioConverter input callback: supplies the converter with one source
// packet per invocation, taken from the parser's packet list at the reader's
// current position. Returns custom OSStatus codes (missing format / end of
// data / not enough data) that read(_:) maps onto ReaderError cases.
func ReaderConverterCallback(_ converter: AudioConverterRef,
_ packetCount: UnsafeMutablePointer<UInt32>,
_ ioData: UnsafeMutablePointer<AudioBufferList>,
_ outPacketDescriptions: UnsafeMutablePointer<UnsafeMutablePointer<AudioStreamPacketDescription>?>?,
_ context: UnsafeMutableRawPointer?) -> OSStatus {
// Recover self (the Reader) from the opaque context pointer.
let reader = Unmanaged<Reader>.fromOpaque(context!).takeUnretainedValue()
// The source format must be known before anything can be converted.
guard let sourceFormat = reader.parser.dataFormat else {
return ReaderMissingSourceFormatError
}
// currentPacket is the read offset into the parsed packet list.
// Hitting the tail splits into two cases:
// 1. parsing (== download) finished -> genuine end of data;
// 2. still downloading -> not enough data yet, caller should retry later.
let packetIndex = Int(reader.currentPacket)
let packets = reader.parser.packets
// NOTE(review): `packets.count - 1` looks off by one — the last parsed
// packet is never handed to the converter; verify whether
// `packetIndex >= packets.count` was intended.
let isEndOfData = packetIndex >= packets.count - 1
if isEndOfData {
if reader.parser.isParsingComplete {
packetCount.pointee = 0
return ReaderReachedEndOfDataError
} else {
return ReaderNotEnoughDataError
}
}
// Exactly one packet is delivered per callback (see packetCount below).
let packet = packets[packetIndex]
var data = packet.0
let dataCount = data.count
ioData.pointee.mNumberBuffers = 1
// Copy the packet bytes into a freshly allocated buffer for the converter.
// NOTE(review): alignment 0 is passed, but the API documents alignment as a
// power of two — confirm. The allocation is also never explicitly freed in
// this function; verify who releases it, this looks like a per-call leak.
ioData.pointee.mBuffers.mData = UnsafeMutableRawPointer.allocate(byteCount: dataCount, alignment: 0)
_ = data.withUnsafeMutableBytes { (rawMutableBufferPointer) in
let bufferPointer = rawMutableBufferPointer.bindMemory(to: UInt8.self)
if let address = bufferPointer.baseAddress{
memcpy((ioData.pointee.mBuffers.mData?.assumingMemoryBound(to: UInt8.self))!, address, dataCount)
}
}
ioData.pointee.mBuffers.mDataByteSize = UInt32(dataCount)
// Compressed sources (MP3, AAC, ...) additionally need a packet description;
// linear PCM does not.
let sourceFormatDescription = sourceFormat.streamDescription.pointee
if sourceFormatDescription.mFormatID != kAudioFormatLinearPCM {
if outPacketDescriptions?.pointee == nil {
outPacketDescriptions?.pointee = UnsafeMutablePointer<AudioStreamPacketDescription>.allocate(capacity: 1)
}
outPacketDescriptions?.pointee?.pointee.mDataByteSize = UInt32(dataCount)
outPacketDescriptions?.pointee?.pointee.mStartOffset = 0
outPacketDescriptions?.pointee?.pointee.mVariableFramesInPacket = 0
}
packetCount.pointee = 1
// Advance the read position for the next invocation.
reader.currentPacket = reader.currentPacket + 1
return noErr;
}
複製代碼
AVAudioEngine 能夠作實時的音效處理,用 Effect Unit 加效果
設置 AudioEngine,添加節點,鏈接節點
// Configures the AVAudioEngine graph and starts the timer that drives the
// push-model scheduling of audio buffers.
func setupAudioEngine(){
// Attach all nodes to the engine.
attachNodes()
// Wire the nodes together.
connectNodes()
// Preallocate engine resources before start().
engine.prepare()
// AVAudioEngine uses a push model: a repeating timer schedules the next
// buffer. `interval` is the seconds of audio one read buffer holds
// (readBufferSize frames at readFormat.sampleRate).
let interval = 1 / (readFormat.sampleRate / Double(readBufferSize))
// Fire at twice the buffer rate so playback never starves.
let timer = Timer(timeInterval: interval / 2, repeats: true) {
[weak self] _ in
// Stop scheduling once playback has been stopped.
guard self?.state != .stopped else {
return
}
// Schedule the next buffer, then propagate time/state updates.
self?.scheduleNextBuffer()
self?.handleTimeUpdate()
self?.notifyTimeUpdated()
}
RunLoop.current.add(timer, forMode: .common)
}
// Attaches the playback node to the engine. Subclasses override to attach
// additional (effect) nodes.
open func attachNodes() {
engine.attach(playerNode)
}
// Connects the player node to the engine's main mixer (and thus the output),
// using the reader's output format. Subclasses override to insert effects.
open func connectNodes() {
engine.connect(playerNode, to: engine.mainMixerNode, format: readFormat)
}
複製代碼
調度播放資源,將數據 ( 上步建立的音頻緩衝 buffer )交給 AudioEngine 的播放節點 playerNode
// Pulls the next PCM buffer from the reader and hands it to the player node.
// Called repeatedly by the scheduling timer.
func scheduleNextBuffer(){
    // No reader yet — nothing to schedule.
    guard let reader = reader else {
        return
    }
    // Once the whole file has been scheduled, stop feeding the player —
    // unless looping (`repeats`) is enabled.
    if isFileSchedulingComplete && !repeats {
        return
    }
    do {
        // Read the next converted buffer and queue it on the player node.
        let upcoming = try reader.read(readBufferSize)
        playerNode.scheduleBuffer(upcoming)
    } catch ReaderError.reachedEndOfFile {
        // End of file: remember it so scheduling stops (unless repeating).
        isFileSchedulingComplete = true
    } catch {
        // Other read errors (e.g. data not downloaded yet) are ignored;
        // the timer retries on its next tick.
    }
}
複製代碼
開啓播放
// Starts playback, ramping the volume up from silence to avoid an audible pop.
public func play() {
    // Already playing — nothing to do.
    if playerNode.isPlaying {
        return
    }
    // The engine must be running before the player node can play.
    if !engine.isRunning {
        do {
            try engine.start()
        } catch { }
    }
    // Start silent, then swell to the previous target volume for a smoother
    // listening experience.
    let targetVolume = volumeRampTargetValue ?? volume
    volume = 0
    playerNode.play()
    // Restores normal volume shortly after playback begins (~250 ms).
    swellVolume(to: targetVolume)
    // Reflect the new playback state.
    state = .playing
}
複製代碼
添加實時的音高、播放速度效果
// 使用 AVAudioUnitTimePitch 單元,調節播放速度和音高效果
// Effect unit: AVAudioUnitTimePitch adjusts playback rate and pitch in real time.
let timePitchNode = AVAudioUnitTimePitch()
override func attachNodes() {
// Attach the player node (superclass behavior).
super.attachNodes()
// Attach the effect node as well.
engine.attach(timePitchNode)
}
// Insert the effect node between the player and the output:
// player -> timePitch -> mainMixer.
override func connectNodes() {
engine.connect(playerNode, to: timePitchNode, format: readFormat)
engine.connect(timePitchNode, to: engine.mainMixerNode, format: readFormat)
}
複製代碼
先拿到包的個數, 下載的數據,解析完成後,加出來的
1 首 2 分 34 秒的 mp3, 可分爲 5925 個包
// Total number of audio packets, or nil until the stream format is known.
// Uses whichever is larger: the count reported by the stream property
// (packetCount) or the number of packets actually parsed so far
// (packets.count, appended by ParserPacketCallback as data downloads).
public var totalPacketCount: AVAudioPacketCount? {
    guard dataFormat != nil else {
        return nil
    }
    let reported = AVAudioPacketCount(packetCount)
    let parsed = AVAudioPacketCount(packets.count)
    return max(reported, parsed)
}
複製代碼
去拿音頻幀 frame 的總數
// Total number of audio frames, or nil while format / packet count are unknown.
// frames = total packets × frames per packet.
public var totalFrameCount: AVAudioFrameCount? {
    guard let perPacket = dataFormat?.streamDescription.pointee.mFramesPerPacket,
          let packetTotal = totalPacketCount else {
        return nil
    }
    return AVAudioFrameCount(packetTotal) * AVAudioFrameCount(perPacket)
}
複製代碼
計算出音頻持續時間
// Duration of the audio in seconds, or nil while format / frame count are
// unknown. seconds = total frames / sample rate (frames per second).
public var duration: TimeInterval? {
    guard let rate = dataFormat?.sampleRate,
          let frames = totalFrameCount else {
        return nil
    }
    return TimeInterval(frames) / TimeInterval(rate)
}
複製代碼
// Seeks playback to `time` (seconds): maps time -> frame -> packet,
// repositions the reader, then restores the previous play state and volume.
public func seek(to time: TimeInterval) throws {
// Both the parser (packets) and the reader (buffers) must exist to seek.
guard let parser = parser, let reader = reader else {
return
}
// Convert the target time to a frame offset, then to a packet offset.
guard let frameOffset = parser.frameOffset(forTime: time),
let packetOffset = parser.packetOffset(forFrame: frameOffset) else {
return
}
// Record the new position and allow buffer scheduling to resume.
currentTimeOffset = time
isFileSchedulingComplete = false
// Remember the current state so it can be restored after the jump.
let isPlaying = playerNode.isPlaying
let lastVolume = volumeRampTargetValue ?? volume
// Stop and mute first to avoid audible glitches while repositioning.
playerNode.stop()
volume = 0
// Move the reader's packet position; bail out silently on failure.
do {
try reader.seek(packetOffset)
} catch {
return
}
// Restore the play state captured above.
if isPlaying {
playerNode.play()
}
// Notify the delegate so the UI reflects the new time.
delegate?.streamer(self, updatedCurrentTime: time)
// Ramp the volume back up to its previous level.
swellVolume(to: lastVolume)
}
複製代碼
算出當前時間的,幀偏移
// Maps a playback time (seconds) to an absolute frame position.
// Returns nil while the stream format / totals are unknown, or before any
// audio has been parsed (duration == 0).
public func frameOffset(forTime time: TimeInterval) -> AVAudioFramePosition? {
    guard dataFormat != nil,
          let frameCount = totalFrameCount,
          let duration = duration,
          // Guard duration > 0: dividing by a zero duration would make the
          // ratio NaN/infinite, and converting that to AVAudioFramePosition
          // traps at runtime.
          duration > 0 else {
        return nil
    }
    // Fraction of the file elapsed × total frames.
    let ratio = time / duration
    return AVAudioFramePosition(Double(frameCount) * ratio)
}
複製代碼
算出當前幀,對應的包的位置
// Maps an absolute frame position to the packet index containing it.
// Returns nil while the format is unknown, when the format reports zero
// frames per packet (would divide by zero), or for a negative frame
// (AVAudioPacketCount is unsigned; converting a negative value traps).
public func packetOffset(forFrame frame: AVAudioFramePosition) -> AVAudioPacketCount? {
    guard let framesPerPacket = dataFormat?.streamDescription.pointee.mFramesPerPacket,
          framesPerPacket > 0,
          frame >= 0 else {
        return nil
    }
    // packet index = frame index / frames per packet
    return AVAudioPacketCount(frame) / AVAudioPacketCount(framesPerPacket)
}
複製代碼
// Moves the reader's consumption position to the given packet index.
// Runs on the serial queue so the update is ordered with read(_:), which
// uses currentPacket inside the same queue.
public func seek(_ packet: AVAudioPacketCount) throws {
queue.sync {
// The next read() starts converting from this packet.
currentPacket = packet
}
}
複製代碼
記錄的位置 currentPacket,這樣做用
步驟三的回調 ReaderConverterCallback 裏
// ...
// 本例子中,一個音頻包 packet, 對應一個音頻緩衝 buffer
let packet = packets[packetIndex]
var data = packet.0
// ...
_ = data.withUnsafeMutableBytes { (rawMutableBufferPointer) in // ...
}
// ...
複製代碼
分三個事件處理:手指按下,手指拖動,手指擡起
// Touch down on the slider: suspend delegate-driven progress updates while
// the user is interacting (see the isSeeking check in the delegate method).
@IBAction func progressSliderTouchedDown(_ sender: UISlider) {
isSeeking = true
}
// Dragging: update the time label from the gesture's value, not the delegate.
@IBAction func progressSliderValueChanged(_ sender: UISlider) {
let currentTime = TimeInterval(progressSlider.value)
currentTimeLabel.text = currentTime.toMMSS()
}
// Touch up: perform the actual seek, then resume delegate-driven updates.
@IBAction func progressSliderTouchedUp(_ sender: UISlider) {
seek(sender)
isSeeking = false
}
複製代碼
相關代理方法,根據播放進度,更新當前事件和進度條的 UI
正在拖動,就屏蔽掉
// Delegate callback: refreshes the progress slider and time label as
// playback advances. Skipped while the user is dragging the slider, so the
// gesture owns the UI until the drag ends.
func streamer(_ streamer: Streaming, updatedCurrentTime currentTime: TimeInterval) {
    guard !isSeeking else { return }
    progressSlider.value = Float(currentTime)
    currentTimeLabel.text = currentTime.toMMSS()
}
複製代碼
步驟 4 播放中,分發播放資源,是走計時器的
管理下里面的兩個方法的邏輯
( 調度音頻緩衝,和播放完了改狀態 )
let timer = Timer(timeInterval: interval / 2, repeats: true) {
[weak self] _ in
// ...
self?.scheduleNextBuffer()
self?.handleTimeUpdate()
// ...
}
複製代碼
調度音頻緩衝 buffer,
// Abridged repeat of the scheduling routine, shown to highlight the guards.
func scheduleNextBuffer(){
guard let reader = reader else {
return
}
// With `repeats` set, keep scheduling even after a full pass of the file.
guard !isFileSchedulingComplete || repeats else {
return
}
// ... below: the player node is handed the next buffer to play
}
複製代碼
根據播放狀況,處理相關狀態
// Checks playback position each timer tick: when the current time passes the
// duration, one pass of the file is done — rewind, and pause unless looping.
func handleTimeUpdate(){
    guard let currentTime = currentTime, let duration = duration else {
        return
    }
    // Not finished yet — nothing to do.
    if currentTime < duration {
        return
    }
    // One pass complete: rewind to the start...
    try? seek(to: 0)
    // ...and pause unless repeat playback was requested.
    if !repeats{
        pause()
    }
}
複製代碼