This post builds directly on the previous one, Audio/Video Learning - H264 Encoding: instead of storing the SPS/PPS and the encoded video bitstream, we decode them on the spot. Modify the didCompressH264 function from the previous post's source:
Extract the SPS and PPS from the H264 parameter sets:
const Byte startCode[] = "\x00\x00\x00\x01";
if (statusCode == noErr)
{
    // Wrap the raw parameter sets in NSData
    NSData *spsData = [NSData dataWithBytes:sparameterSet length:sparameterSetSize];
    NSData *ppsData = [NSData dataWithBytes:pparameterSet length:pparameterSetSize];
    // Prefix each parameter set with the 4-byte Annex-B start code
    NSMutableData *sps = [NSMutableData dataWithCapacity:4 + sparameterSetSize];
    [sps appendBytes:startCode length:4];
    [sps appendData:spsData];
    NSMutableData *pps = [NSMutableData dataWithCapacity:4 + pparameterSetSize];
    [pps appendBytes:startCode length:4];
    [pps appendData:ppsData];
}
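The snippet above assumes that statusCode, sparameterSet/pparameterSet, and their sizes have already been pulled out of the encoder's output. A minimal sketch of that step, assuming sampleBuffer is the encoder callback's CMSampleBufferRef and using the same variable names (index 0 is the SPS, index 1 is the PPS):

CMFormatDescriptionRef format = CMSampleBufferGetFormatDescription(sampleBuffer);
size_t sparameterSetSize, sparameterSetCount;
const uint8_t *sparameterSet;
// Fetch the SPS from the format description
OSStatus statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 0, &sparameterSet, &sparameterSetSize, &sparameterSetCount, NULL);
if (statusCode == noErr) {
    size_t pparameterSetSize, pparameterSetCount;
    const uint8_t *pparameterSet;
    // Fetch the PPS the same way
    statusCode = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(format, 1, &pparameterSet, &pparameterSetSize, &pparameterSetCount, NULL);
}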
Extract the NALU data:
const int lengthInfoSize = 4;
// Loop over every NALU in the block buffer
while (bufferOffset < totalLength - AVCCHeaderLength) {
    uint32_t NALUnitLength = 0;
    // Read this NALU's 4-byte length prefix
    memcpy(&NALUnitLength, dataPointer + bufferOffset, lengthInfoSize);
    // Convert from big-endian to host byte order
    NALUnitLength = CFSwapInt32BigToHost(NALUnitLength);
    // Replace the AVCC length prefix with an Annex-B start code
    NSMutableData *data = [NSMutableData dataWithCapacity:lengthInfoSize + NALUnitLength];
    [data appendBytes:startCode length:lengthInfoSize];
    [data appendBytes:dataPointer + bufferOffset + lengthInfoSize length:NALUnitLength];
    bufferOffset += lengthInfoSize + NALUnitLength;
}
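The loop assumes dataPointer, totalLength, and AVCCHeaderLength are already in scope. A sketch of how they are typically obtained, again assuming sampleBuffer is the encoder callback's output (names match the loop above):

// Get a pointer to the raw AVCC data inside the sample buffer
size_t AVCCHeaderLength = 4; // size of each NALU's big-endian length prefix
size_t bufferOffset = 0;
size_t totalLength;
char *dataPointer;
CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
CMBlockBufferGetDataPointer(blockBuffer, 0, NULL, &totalLength, &dataPointer);

Each NSMutableData built by the loop is one Annex-B NALU; per the intro, it is handed straight to the decoder (see the usage sketch further below) instead of being written to a file.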
Use VTDecompressionSessionCreate to create the decoder. Among its parameters it needs a CMVideoFormatDescriptionRef describing the video's basic properties, so we first prepare the data the session requires; only then can we decode the video.
/** Initialize the decoder */
- (BOOL)initDecoder {
    if (_decodeSesion) return true;
    const uint8_t * const parameterSetPointers[2] = {_sps, _pps};
    const size_t parameterSetSizes[2] = {_spsSize, _ppsSize};
    int naluHeaderLen = 4;
    /**
     Build the format description from the SPS/PPS
     param kCFAllocatorDefault   allocator
     param 2                     number of parameter sets
     param parameterSetPointers  pointers to the parameter sets
     param parameterSetSizes     sizes of the parameter sets
     param naluHeaderLen         size of the NALUnitLength prefix, 4 bytes here
     param _decodeDesc           receives the format description
     return status
     */
    OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(kCFAllocatorDefault, 2, parameterSetPointers, parameterSetSizes, naluHeaderLen, &_decodeDesc);
    if (status != noErr) {
        NSLog(@"Video hard DecodeSession create H264ParameterSets(sps, pps) failed status= %d", (int)status);
        return false;
    }
    /*
     Decoding parameters:
     * kCVPixelBufferPixelFormatTypeKey: output pixel format.
       Values verified to work:
         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, i.e. 420v
         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, i.e. 420f
         kCVPixelFormatType_32BGRA, where iOS converts YUV to BGRA internally
       YUV420 is generally used for SD video and YUV422 for HD, so this restriction is surprising;
       on the other hand, under the same conditions YUV420 costs less to compute and transmit than YUV422.
     * kCVPixelBufferWidthKey/kCVPixelBufferHeightKey: the source resolution, width * height
     * kCVPixelBufferOpenGLCompatibilityKey: allows decoded images to be drawn directly in an
       OpenGL context instead of being copied between the bus and the CPU. This is sometimes
       called a zero-copy path, because no decoded image is copied during drawing.
     */
    NSDictionary *destinationPixBufferAttrs =
    @{
      (id)kCVPixelBufferPixelFormatTypeKey: [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange], // NV12 (UVUV interleaved) on iOS, not NV21 (VUVU)
      (id)kCVPixelBufferWidthKey: [NSNumber numberWithInteger:_config.width],
      (id)kCVPixelBufferHeightKey: [NSNumber numberWithInteger:_config.height],
      (id)kCVPixelBufferOpenGLCompatibilityKey: [NSNumber numberWithBool:true]
      };
    //Set up the decode callback
    /*
     VTDecompressionOutputCallbackRecord is a simple struct with a pointer (decompressionOutputCallback)
     to the method called once a frame has been decompressed. You supply the instance through which that
     callback can be reached (decompressionOutputRefCon). The VTDecompressionOutputCallback takes seven parameters:
     param 1: the callback's reference value
     param 2: the frame's reference value
     param 3: a status flag (with undefined codes)
     param 4: flags indicating synchronous/asynchronous decoding, or whether the decoder chose to drop the frame
     param 5: the actual image buffer
     param 6: the presentation timestamp
     param 7: the presentation duration
     */
    VTDecompressionOutputCallbackRecord callbackRecord;
    callbackRecord.decompressionOutputCallback = videoDecompressionOutputCallback;
    callbackRecord.decompressionOutputRefCon = (__bridge void * _Nullable)(self);
    //Create the session
    /*!
     @function VTDecompressionSessionCreate
     @abstract Creates a session for decompressing video frames.
     @discussion Decompressed frames are emitted through calls to the output callback.
     @param allocator Session memory allocator; pass kCFAllocatorDefault for the default.
     @param videoFormatDescription Describes the source video frames.
     @param videoDecoderSpecification Specifies a particular video decoder that must be used; NULL here.
     @param destinationImageBufferAttributes Requirements for the emitted pixel buffers; may be NULL.
     @param outputCallback The callback invoked with decompressed frames.
     @param decompressionSessionOut Points to a variable that receives the new decompression session.
     */
    status = VTDecompressionSessionCreate(kCFAllocatorDefault, _decodeDesc, NULL, (__bridge CFDictionaryRef _Nullable)(destinationPixBufferAttrs), &callbackRecord, &_decodeSesion);
    //Check the status
    if (status != noErr) {
        NSLog(@"Video hard DecodeSession create failed status= %d", (int)status);
        return false;
    }
    //Set session properties (real-time decoding)
    status = VTSessionSetProperty(_decodeSesion, kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
    NSLog(@"Video hard decodeSession set property RealTime status = %d", (int)status);
    return true;
}
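The post never shows videoDecompressionOutputCallback itself. A minimal sketch, assuming (as the decode: method below does) that the address of a local CVPixelBufferRef is passed in as sourceFrameRefCon:

static void videoDecompressionOutputCallback(void *decompressionOutputRefCon,
                                             void *sourceFrameRefCon,
                                             OSStatus status,
                                             VTDecodeInfoFlags infoFlags,
                                             CVImageBufferRef imageBuffer,
                                             CMTime presentationTimeStamp,
                                             CMTime presentationDuration) {
    if (status != noErr || imageBuffer == NULL) {
        NSLog(@"Video hard decode callback failed status=%d", (int)status);
        return;
    }
    // Hand the decoded pixel buffer back through sourceFrameRefCon;
    // the caller is responsible for releasing the retained buffer.
    CVPixelBufferRef *outputPixelBuffer = (CVPixelBufferRef *)sourceFrameRefCon;
    *outputPixelBuffer = CVPixelBufferRetain(imageBuffer);
}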
Once we have the data, we process it. The first four bytes carry the length in big-endian order, and the fifth byte carries the NALU type: masking its low five bits and converting to decimal, 5 is a keyframe (IDR), 7 is SPS, and 8 is PPS:
- (void)decodeNaluData:(NSData *)frame {
    //Run the decode on an asynchronous queue.
    dispatch_async(_decodeQueue, ^{
        //Get the frame's raw bytes
        uint8_t *nalu = (uint8_t *)frame.bytes;
        //Call the NALU decode method. param 1: data, param 2: data length
        [self decodeNaluData:nalu size:(uint32_t)frame.length];
    });
}

- (void)decodeNaluData:(uint8_t *)frame size:(uint32_t)size {
    int type = (frame[4] & 0x1F);
    // Overwrite the NALU's start code with its 4-byte big-endian length
    uint32_t naluSize = size - 4;
    uint8_t *pNaluSize = (uint8_t *)(&naluSize);
    CVPixelBufferRef pixelBuffer = NULL;
    frame[0] = *(pNaluSize + 3);
    frame[1] = *(pNaluSize + 2);
    frame[2] = *(pNaluSize + 1);
    frame[3] = *(pNaluSize);
    //On the first pass: initialize the decoder via initDecoder
    switch (type) {
        case 0x05: //keyframe (IDR)
            if ([self initDecoder]) {
                pixelBuffer = [self decode:frame withSize:size];
            }
            break;
        case 0x06:
            //NSLog(@"SEI"); //supplemental enhancement information
            break;
        case 0x07: //SPS
            _spsSize = naluSize;
            _sps = malloc(_spsSize);
            memcpy(_sps, &frame[4], _spsSize);
            break;
        case 0x08: //PPS
            _ppsSize = naluSize;
            _pps = malloc(_ppsSize);
            memcpy(_pps, &frame[4], _ppsSize);
            break;
        default: //other slice types (1-5)
            if ([self initDecoder]) {
                pixelBuffer = [self decode:frame withSize:size];
            }
            break;
    }
}
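To tie the two halves together, a hedged usage sketch: every NSData passed to decodeNaluData: is one Annex-B NALU with its start code, i.e. exactly the sps, pps, and data objects built in the encoder callback above (decoder here is an assumed instance of this class):

// Hypothetical call sites inside didCompressH264 for a keyframe:
[decoder decodeNaluData:sps];   // type 7, cached for initDecoder
[decoder decodeNaluData:pps];   // type 8, cached for initDecoder
[decoder decodeNaluData:data];  // types 1/5, actually decoded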
First, a look at CMSampleBuffer's structure: a CMSampleBuffer wraps a CMBlockBuffer holding the raw bytes, together with a CMVideoFormatDescription and timing information.
The end goal is a decoded object of type CVPixelBufferRef: first create a CMBlockBuffer, then build a CMSampleBuffer from it, and finally pass the sample buffer to VTDecompressionSessionDecodeFrame to obtain the decoded CVPixelBufferRef.
- (CVPixelBufferRef)decode:(uint8_t *)frame withSize:(uint32_t)frameSize {
    CVPixelBufferRef outputPixelBuffer = NULL;
    CMBlockBufferRef blockBuffer = NULL;
    CMBlockBufferFlags flag0 = 0;
    //Create the blockBuffer
    /*!
     param 1: structureAllocator, kCFAllocatorDefault
     param 2: memoryBlock, frame
     param 3: block length, frameSize
     param 4: blockAllocator, kCFAllocatorNull (the frame memory is neither copied nor freed)
     param 5: customBlockSource, pass NULL
     param 6: offsetToData, the data offset
     param 7: dataLength, the data length
     param 8: flags, feature and control flags
     param 9: newBBufOut, receives the blockBuffer; must not be NULL
     */
    OSStatus status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault, frame, frameSize, kCFAllocatorNull, NULL, 0, frameSize, flag0, &blockBuffer);
    if (status != kCMBlockBufferNoErr) {
        NSLog(@"Video hard decode create blockBuffer error code=%d", (int)status);
        return outputPixelBuffer;
    }
    CMSampleBufferRef sampleBuffer = NULL;
    const size_t sampleSizeArray[] = {frameSize};
    //Create the sampleBuffer
    /*
     param 1: allocator, the default allocator kCFAllocatorDefault
     param 2: blockBuffer, the data to decode; must not be NULL
     param 3: formatDescription, the video format description
     param 4: numSamples, the number of samples in the CMSampleBuffer
     param 5: numSampleTimingEntries, must be 0, 1, or numSamples
     param 6: sampleTimingArray, the timing array; NULL here
     param 7: numSampleSizeEntries, 1 by default
     param 8: sampleSizeArray
     param 9: receives the sampleBuffer object
     */
    status = CMSampleBufferCreateReady(kCFAllocatorDefault, blockBuffer, _decodeDesc, 1, 0, NULL, 1, sampleSizeArray, &sampleBuffer);
    if (status != noErr || !sampleBuffer) {
        NSLog(@"Video hard decode create sampleBuffer failed status=%d", (int)status);
        CFRelease(blockBuffer);
        return outputPixelBuffer;
    }
    //Decode
    //Hint to the video decoder that low-power mode is acceptable (1x real-time playback)
    VTDecodeFrameFlags flag1 = kVTDecodeFrame_1xRealTimePlayback;
    //Receives information about the decode, e.g. whether it ran asynchronously
    VTDecodeInfoFlags infoFlag = kVTDecodeInfo_Asynchronous;
    //Decode the data
    /*
     param 1: the decode session
     param 2: the source data, a CMSampleBuffer containing one or more video frames
     param 3: the decode flags
     param 4: sourceFrameRefCon, here the address of outputPixelBuffer that the callback fills in
     param 5: receives the synchronous/asynchronous decode flags
     */
    status = VTDecompressionSessionDecodeFrame(_decodeSesion, sampleBuffer, flag1, &outputPixelBuffer, &infoFlag);
    if (status == kVTInvalidSessionErr) {
        NSLog(@"Video hard decode InvalidSessionErr status =%d", (int)status);
    } else if (status == kVTVideoDecoderBadDataErr) {
        NSLog(@"Video hard decode BadData status =%d", (int)status);
    } else if (status != noErr) {
        NSLog(@"Video hard decode failed status =%d", (int)status);
    }
    CFRelease(sampleBuffer);
    CFRelease(blockBuffer);
    return outputPixelBuffer;
}
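The post shows no cleanup. A minimal teardown sketch, under the assumption that _sps/_pps were malloc'd as above and that callers release the CVPixelBufferRef retained in the output callback:

- (void)dealloc {
    if (_decodeSesion) {
        // Drain any in-flight frames, then invalidate and release the session
        VTDecompressionSessionWaitForAsynchronousFrames(_decodeSesion);
        VTDecompressionSessionInvalidate(_decodeSesion);
        CFRelease(_decodeSesion);
        _decodeSesion = NULL;
    }
    if (_sps) { free(_sps); _sps = NULL; }
    if (_pps) { free(_pps); _pps = NULL; }
}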
Displaying the result is essentially displaying a texture: convert the resulting CVPixelBufferRef into a texture object and render it with a CAEAGLLayer. The actual rendering code will be implemented in the next post.