The simplest iOS live-streaming code: video capture, software encoding (faac, x264), hardware encoding (aac, h264), beauty filter, flv muxing, and the rtmp protocol. Code walkthroughs are being published piece by piece. Everything you want to learn is here, so if you want to understand live-streaming technology, come take a look!
Source code: https://github.com/hardman/AWLive
Previous posts covered how to get audio and video data (pcm, NV12) from the hardware.
But the formats we need are aac and h264.
This post explains how to encode pcm into aac and NV12 into h264.
Encoding comes in two flavors: software and hardware.
Hardware encoding is provided by the system: dedicated hardware built into the device handles the audio/video encoding, so the heavy computation runs on that hardware. Hardware encoding is fast and light on the CPU, but inflexible; only a fixed set of features is available.
Software encoding does the computation in code, so the main work runs on the CPU. Software encoding is flexible, versatile, feature-rich, and extensible, but uses considerably more CPU.
In the code, encoders are obtained through AWEncoderManager.
AWEncoderManager is a factory; audioEncoderType and videoEncoderType select the encoder types.
Encoders fall into two groups: audio encoders (AWAudioEncoder) and video encoders (AWVideoEncoder).
Each comes in a hardware version (in the HW directory) and a software version (in the SW directory).
So the encoding part consists mainly of 4 files: hardware H264 (AWHWH264Encoder), hardware AAC (AWHWAACEncoder), software AAC (AWSWFaacEncoder), and software H264 (AWSWX264Encoder).
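For orientation, obtaining the encoders probably looks something like the sketch below. Apart from AWEncoderManager, audioEncoderType, and videoEncoderType, which the text above names, every identifier here is a hypothetical stand-in, not taken from the repo:
// Hypothetical usage sketch; the enum values and the way encoders are read back are assumptions.
AWEncoderManager *manager = [[AWEncoderManager alloc] init];
manager.audioEncoderType = AWAudioEncoderTypeHWAACLC; // assumed name for the hardware AAC choice
manager.videoEncoderType = AWVideoEncoderTypeHWH264;  // assumed name for the hardware H264 choice
// The factory then hands back the matching AWAudioEncoder / AWVideoEncoder
// instances, here an AWHWAACEncoder and an AWHWH264Encoder.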
Step 1: open the hardware encoder
-(void)open{
    // Create the video encode session.
    // Pass in the video width/height and the codec type: kCMVideoCodecType_H264.
    // Encoding callback: vtCompressionSessionCallback. It receives the encoding results; successfully encoded data is delivered to it.
    // (__bridge void * _Nullable)(self): this parameter is passed through to vtCompressionSessionCallback untouched; it is the callback's only channel back to the outside world.
    // &_vEnSession: C can assign through an out parameter. The function allocates and initializes _vEnSession internally.
    OSStatus status = VTCompressionSessionCreate(NULL, (int32_t)(self.videoConfig.pushStreamWidth), (int32_t)self.videoConfig.pushStreamHeight, kCMVideoCodecType_H264, NULL, NULL, NULL, vtCompressionSessionCallback, (__bridge void * _Nullable)(self), &_vEnSession);
    if (status == noErr) {
        // Set properties.
        // ProfileLevel: the h264 profile/level; different resolutions use different profile levels.
        VTSessionSetProperty(_vEnSession, kVTCompressionPropertyKey_ProfileLevel, kVTProfileLevel_H264_Main_AutoLevel);
        // Set the bitrate.
        VTSessionSetProperty(_vEnSession, kVTCompressionPropertyKey_AverageBitRate, (__bridge CFTypeRef)@(self.videoConfig.bitrate));
        // Enable real-time encoding.
        VTSessionSetProperty(_vEnSession, kVTCompressionPropertyKey_RealTime, kCFBooleanTrue);
        // Disable frame reordering. With B-frames (bidirectionally predicted frames, computed from the frames before and after), encode order can differ from display order. This property turns B-frames off.
        VTSessionSetProperty(_vEnSession, kVTCompressionPropertyKey_AllowFrameReordering, kCFBooleanFalse);
        // Maximum keyframe interval; keyframes are I-frames. Here the maximum interval is 2s.
        VTSessionSetProperty(_vEnSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, (__bridge CFTypeRef)@(self.videoConfig.fps * 2));
        // For B-frames, P-frames, and I-frames, see: http://blog.csdn.net/abcjennifer/article/details/6577934
        // Properties are set; prepare to start. Initialization is complete, and data can be encoded whenever it arrives.
        status = VTCompressionSessionPrepareToEncodeFrames(_vEnSession);
        if (status != noErr) {
            [self onErrorWithCode:AWEncoderErrorCodeVTSessionPrepareFailed des:@"hardware encode vtsession prepare failed"];
        }
    }else{
        [self onErrorWithCode:AWEncoderErrorCodeVTSessionCreateFailed des:@"hardware encode vtsession create failed"];
    }
}
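One property the open method above does not set is kVTCompressionPropertyKey_ExpectedFrameRate, which hints the nominal frame rate to the encoder's rate control. A one-line optional extra, not part of the original code:
// Optional: hint the expected frame rate (not set in the original open method).
VTSessionSetProperty(_vEnSession, kVTCompressionPropertyKey_ExpectedFrameRate, (__bridge CFTypeRef)@(self.videoConfig.fps));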
Step 2: feed data into the encoder:
//The yuvData parameter here is the NV12 data captured from the camera.
-(aw_flv_video_tag *)encodeYUVDataToFlvTag:(NSData *)yuvData{
    if (!_vEnSession) {
        return NULL;
    }
    //Turn the yuv data into a CVPixelBufferRef.
    OSStatus status = noErr;
    //Video width
    size_t pixelWidth = self.videoConfig.pushStreamWidth;
    //Video height
    size_t pixelHeight = self.videoConfig.pushStreamHeight;
    //Now put the NV12 data into a CVPixelBufferRef. Hardware encoding mainly calls VTCompressionSessionEncodeFrame, which does not accept raw yuv data but does accept CVPixelBufferRef.
    CVPixelBufferRef pixelBuf = NULL;
    //Initialize pixelBuf; the pixel format kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange matches the NV12 layout.
    CVPixelBufferCreate(NULL, pixelWidth, pixelHeight, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, NULL, &pixelBuf);
    // Lock the base address; presumably this guards against reentrant access from multiple threads.
    if(CVPixelBufferLockBaseAddress(pixelBuf, 0) != kCVReturnSuccess){
        [self onErrorWithCode:AWEncoderErrorCodeLockSampleBaseAddressFailed des:@"encode video lock base address failed"];
        return NULL;
    }
    //Copy the yuv data into the CVPixelBufferRef.
    size_t y_size = pixelWidth * pixelHeight;
    size_t uv_size = y_size / 4;
    uint8_t *yuv_frame = (uint8_t *)yuvData.bytes;
    //Copy the y plane
    uint8_t *y_frame = CVPixelBufferGetBaseAddressOfPlane(pixelBuf, 0);
    memcpy(y_frame, yuv_frame, y_size);
    //Copy the interleaved uv plane
    uint8_t *uv_frame = CVPixelBufferGetBaseAddressOfPlane(pixelBuf, 1);
    memcpy(uv_frame, yuv_frame + y_size, uv_size * 2);
    //Hardware-encode into a CMSampleBufferRef.
    //Timestamp
    uint32_t ptsMs = self.manager.timestamp + 1; //self.vFrameCount++ * 1000.f / self.videoConfig.fps;
    CMTime pts = CMTimeMake(ptsMs, 1000);
    //This one call is the heart of hardware encoding: send the PixelBuf carrying the NV12 data into the hardware encoder.
    status = VTCompressionSessionEncodeFrame(_vEnSession, pixelBuf, pts, kCMTimeInvalid, NULL, pixelBuf, NULL);
    ... ...
}
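One caveat about the two memcpy calls above: they assume the pixel buffer's planes are tightly packed, but CoreVideo may pad each row, in which case CVPixelBufferGetBytesPerRowOfPlane returns more than the pixel width and a single memcpy lands rows in the wrong places. A stride-aware sketch (not from the original repo) that could replace those two calls, reusing the locals from the method above:
// Copy the y plane row by row, honoring the plane's stride.
size_t yStride = CVPixelBufferGetBytesPerRowOfPlane(pixelBuf, 0);
uint8_t *yDst = CVPixelBufferGetBaseAddressOfPlane(pixelBuf, 0);
for (size_t row = 0; row < pixelHeight; row++) {
    memcpy(yDst + row * yStride, yuv_frame + row * pixelWidth, pixelWidth);
}
// NV12's second plane holds interleaved uv at half height; each row is pixelWidth bytes.
size_t uvStride = CVPixelBufferGetBytesPerRowOfPlane(pixelBuf, 1);
uint8_t *uvDst = CVPixelBufferGetBaseAddressOfPlane(pixelBuf, 1);
const uint8_t *uvSrc = yuv_frame + y_size;
for (size_t row = 0; row < pixelHeight / 2; row++) {
    memcpy(uvDst + row * uvStride, uvSrc + row * pixelWidth, pixelWidth);
}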
Step 3: get the h264 data from the hardware-encoding callback
static void vtCompressionSessionCallback (void * CM_NULLABLE outputCallbackRefCon,
                                          void * CM_NULLABLE sourceFrameRefCon,
                                          OSStatus status,
                                          VTEncodeInfoFlags infoFlags,
                                          CM_NULLABLE CMSampleBufferRef sampleBuffer ){
    //Recover the AWHWH264Encoder object pointer from outputCallbackRefCon so the encoded h264 data can be passed out.
    AWHWH264Encoder *encoder = (__bridge AWHWH264Encoder *)(outputCallbackRefCon);
    //Check whether encoding succeeded
    if (status != noErr) {
        dispatch_semaphore_signal(encoder.vSemaphore);
        [encoder onErrorWithCode:AWEncoderErrorCodeEncodeVideoFrameFailed des:@"encode video frame error 1"];
        return;
    }
    //Check whether the data is complete
    if (!CMSampleBufferDataIsReady(sampleBuffer)) {
        dispatch_semaphore_signal(encoder.vSemaphore);
        [encoder onErrorWithCode:AWEncoderErrorCodeEncodeVideoFrameFailed des:@"encode video frame error 2"];
        return;
    }
    //Is this a keyframe? Keyframes and non-keyframes must be kept distinct, and must be marked when streaming.
    BOOL isKeyFrame = !CFDictionaryContainsKey((CFDictionaryRef)(CFArrayGetValueAtIndex(CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, true), 0)), kCMSampleAttachmentKey_NotSync);
    //First grab the sps and pps.
    //sps and pps are part of h264 too; think of them as special h264 frames that carry metadata the video stream needs.
    //Without them the h264 stream is very hard to decode.
    //When handling the data, the sps/pps block can be treated as an ordinary h264 frame placed at the very front of the h264 stream.
    BOOL needSpsPps = NO;
    if (!encoder.spsPpsData) {
        if (isKeyFrame) {
            //Fetch avcC: this is the sps/pps data we want.
            //To save to a file, prefix it with the 4 bytes [0 0 0 1] and write it at the very beginning of the h264 file.
            //To stream, just place it in the flv data section.
            CMFormatDescriptionRef sampleBufFormat = CMSampleBufferGetFormatDescription(sampleBuffer);
            NSDictionary *dict = (__bridge NSDictionary *)CMFormatDescriptionGetExtensions(sampleBufFormat);
            encoder.spsPpsData = dict[@"SampleDescriptionExtensionAtoms"][@"avcC"];
        }
        needSpsPps = YES;
    }
    //Fetch the actual video frame data
    CMBlockBufferRef blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer);
    size_t blockDataLen;
    uint8_t *blockData;
    status = CMBlockBufferGetDataPointer(blockBuffer, 0, NULL, &blockDataLen, (char **)&blockData);
    if (status == noErr) {
        size_t currReadPos = 0;
        //Usually there is only 1 frame; at the very start of encoding there are 2, and we keep the last one.
        while (currReadPos < blockDataLen - 4) {
            uint32_t naluLen = 0;
            memcpy(&naluLen, blockData + currReadPos, 4);
            naluLen = CFSwapInt32BigToHost(naluLen);
            //naluData is one frame of h264 data.
            //To save to a file, prefix it with the 4 bytes [0 0 0 1] and append it to the h264 file in order.
            //To stream, prefix it with a 4-byte length field, converted to big-endian byte order.
            //For big-endian vs little-endian, see: http://blog.csdn.net/hackbuteer1/article/details/7722667
            encoder.naluData = [NSData dataWithBytes:blockData + currReadPos + 4 length:naluLen];
            currReadPos += 4 + naluLen;
            encoder.isKeyFrame = isKeyFrame;
        }
    }else{
        [encoder onErrorWithCode:AWEncoderErrorCodeEncodeGetH264DataFailed des:@"got h264 data failed"];
    }
    ... ...
}
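The comments above mention dumping the stream to a raw .h264 file by prefixing the parameter sets and every NALU with the Annex-B start code [0 0 0 1]. Here is a minimal sketch of that idea (not part of the original repo; assumes <CoreMedia/CoreMedia.h> is imported). Instead of parsing the avcC atom by hand, it uses CMVideoFormatDescriptionGetH264ParameterSetAtIndex, which hands back the sps/pps directly:
static const uint8_t aw_start_code[4] = {0, 0, 0, 1};

// Write sps/pps (on the first call) plus one NALU to an Annex-B .h264 file.
static void aw_dump_annexb(NSFileHandle *fh, CMSampleBufferRef sampleBuffer, NSData *naluData, BOOL writeParamSets) {
    if (writeParamSets) {
        CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
        size_t paramSetCount = 0;
        CMVideoFormatDescriptionGetH264ParameterSetAtIndex(fmt, 0, NULL, NULL, &paramSetCount, NULL);
        for (size_t i = 0; i < paramSetCount; i++) {
            const uint8_t *paramSet = NULL;
            size_t paramSetSize = 0;
            CMVideoFormatDescriptionGetH264ParameterSetAtIndex(fmt, i, &paramSet, &paramSetSize, NULL, NULL);
            [fh writeData:[NSData dataWithBytes:aw_start_code length:4]];
            [fh writeData:[NSData dataWithBytes:paramSet length:paramSetSize]];
        }
    }
    [fh writeData:[NSData dataWithBytes:aw_start_code length:4]];
    [fh writeData:naluData];
}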
Step 4: at this point hardware encoding is actually finished. This step has nothing to do with encoding itself; it hands the resulting h264 data to the pusher.
-(aw_flv_video_tag *)encodeYUVDataToFlvTag:(NSData *)yuvData{
    ... ...
    if (status == noErr) {
        dispatch_semaphore_wait(self.vSemaphore, DISPATCH_TIME_FOREVER);
        if (_naluData) {
            //Hardware encoding succeeded; _naluData now holds one h264 video frame.
            //Since we are streaming, take the frame length, convert it to big-endian byte order, and put it at the very front of the data.
            uint32_t naluLen = (uint32_t)_naluData.length;
            //Little-endian to big-endian. Computers are generally little-endian internally, while networks and files are generally big-endian. Converting either way is the same operation: reverse the byte order.
            uint8_t naluLenArr[4] = {naluLen >> 24 & 0xff, naluLen >> 16 & 0xff, naluLen >> 8 & 0xff, naluLen & 0xff};
            //Stitch the pieces together
            NSMutableData *mutableData = [NSMutableData dataWithBytes:naluLenArr length:4];
            [mutableData appendData:_naluData];
            //Wrap the h264 data into an flv tag; once wrapped it can be sent straight to the server. More on this later.
            aw_flv_video_tag *video_tag = aw_encoder_create_video_tag((int8_t *)mutableData.bytes, mutableData.length, ptsMs, 0, self.isKeyFrame);
            //Encoding is done; reset state.
            _naluData = nil;
            _isKeyFrame = NO;
            CVPixelBufferUnlockBaseAddress(pixelBuf, 0);
            CFRelease(pixelBuf);
            return video_tag;
        }
    }else{
        [self onErrorWithCode:AWEncoderErrorCodeEncodeVideoFrameFailed des:@"encode video frame error"];
    }
    CVPixelBufferUnlockBaseAddress(pixelBuf, 0);
    CFRelease(pixelBuf);
    return NULL;
}
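Two notes on this step. First, the semaphore: VTCompressionSessionEncodeFrame delivers its result asynchronously in vtCompressionSessionCallback, so encodeYUVDataToFlvTag blocks on vSemaphore until the callback signals it, which turns the whole round trip into a synchronous call. Second, the manual byte shuffling for the length prefix can also be written with CoreFoundation's byte-swap helper; an equivalent two-liner:
// Equivalent big-endian length prefix using CFSwapInt32HostToBig:
uint32_t bigEndianLen = CFSwapInt32HostToBig((uint32_t)_naluData.length);
NSMutableData *mutableData = [NSMutableData dataWithBytes:&bigEndianLen length:4];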
Step 5: close the encoder
//Never forget to close the encoder and release its resources.
-(void)close{
    dispatch_semaphore_signal(self.vSemaphore);
    VTCompressionSessionInvalidate(_vEnSession);
    _vEnSession = nil;
    self.naluData = nil;
    self.isKeyFrame = NO;
    self.spsPpsData = nil;
}
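One possible refinement here: VideoToolbox provides VTCompressionSessionCompleteFrames(_vEnSession, kCMTimeInvalid), which blocks until all pending frames have been emitted. Calling it before VTCompressionSessionInvalidate would flush any frame still in flight rather than dropping it.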
Hardware AAC encoding follows much the same logic as H264. Step 1: open the encoder.
-(void)open{
    //Create the audio encode converter, i.e., the AAC encoder.
    //Initialize the input format description.
    AudioStreamBasicDescription inputAudioDes = {
        .mFormatID = kAudioFormatLinearPCM,
        .mSampleRate = self.audioConfig.sampleRate,
        .mBitsPerChannel = (uint32_t)self.audioConfig.sampleSize,
        .mFramesPerPacket = 1,//1 frame per packet
        .mBytesPerFrame = 2,//2 bytes per frame
        .mBytesPerPacket = 2,//1 frame per packet, so also 2 bytes per packet
        .mChannelsPerFrame = (uint32_t)self.audioConfig.channelCount,//channel count; streaming usually uses mono
        //For these flags, see: http://www.mamicode.com/info-detail-986202.html
        .mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsNonInterleaved,
        .mReserved = 0
    };
    //Set the output format and channel count.
    AudioStreamBasicDescription outputAudioDes = {
        .mChannelsPerFrame = (uint32_t)self.audioConfig.channelCount,
        .mFormatID = kAudioFormatMPEG4AAC,
        0
    };
    //Initialize _aConverter.
    uint32_t outDesSize = sizeof(outputAudioDes);
    AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &outDesSize, &outputAudioDes);
    OSStatus status = AudioConverterNew(&inputAudioDes, &outputAudioDes, &_aConverter);
    if (status != noErr) {
        [self onErrorWithCode:AWEncoderErrorCodeCreateAudioConverterFailed des:@"failed to create hardware AAC encoder"];
    }
    //Set the bitrate.
    uint32_t aBitrate = (uint32_t)self.audioConfig.bitrate;
    uint32_t aBitrateSize = sizeof(aBitrate);
    status = AudioConverterSetProperty(_aConverter, kAudioConverterEncodeBitRate, aBitrateSize, &aBitrate);
    //Query the maximum output packet size.
    uint32_t aMaxOutput = 0;
    uint32_t aMaxOutputSize = sizeof(aMaxOutput);
    AudioConverterGetProperty(_aConverter, kAudioConverterPropertyMaximumOutputPacketSize, &aMaxOutputSize, &aMaxOutput);
    self.aMaxOutputFrameSize = aMaxOutput;
    if (aMaxOutput == 0) {
        [self onErrorWithCode:AWEncoderErrorCodeAudioConverterGetMaxFrameSizeFailed des:@"AAC: failed to get max frame size"];
    }
}
Step 2: get the audio specific config. This is a special flv tag that stores some key parameters of the aac in use and is the basis for parsing the audio frames. In rtmp, this tag must be sent before all audio frames.
-(aw_flv_audio_tag *)createAudioSpecificConfigFlvTag{
    //profile: the AAC object type in use
    uint8_t profile = kMPEG4Object_AAC_LC;
    //sample rate index: 4 means 44100 Hz
    uint8_t sampleRate = 4;
    //channel configuration: 1 means mono
    uint8_t chanCfg = 1;
    //Pack the 3 values above into 2 bytes
    uint8_t config1 = (profile << 3) | ((sampleRate & 0xe) >> 1);
    uint8_t config2 = ((sampleRate & 0x1) << 7) | (chanCfg << 3);
    //Wrap the bytes in an aw_data
    aw_data *config_data = NULL;
    data_writer.write_uint8(&config_data, config1);
    data_writer.write_uint8(&config_data, config2);
    //Wrap into an flv tag
    aw_flv_audio_tag *audio_specific_config_tag = aw_encoder_create_audio_specific_config_tag(config_data, &_faacConfig);
    free_aw_data(&config_data);
    //Return it to the caller, ready to send
    return audio_specific_config_tag;
}
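To make the packing concrete: with profile = 2 (kMPEG4Object_AAC_LC), sampleRate = 4 (44100 Hz), and chanCfg = 1 (mono), config1 = (2 << 3) | (4 >> 1) = 0x12 and config2 = (0 << 7) | (1 << 3) = 0x08, so the AudioSpecificConfig is the two bytes 0x12 0x08.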
Step 3: when audio data arrives from the microphone, hand it to the AAC encoder for encoding.
-(aw_flv_audio_tag *)encodePCMDataToFlvTag:(NSData *)pcmData{
    self.curFramePcmData = pcmData;
    //Build the output structure the encoder needs
    AudioBufferList outAudioBufferList = {0};
    outAudioBufferList.mNumberBuffers = 1;
    outAudioBufferList.mBuffers[0].mNumberChannels = (uint32_t)self.audioConfig.channelCount;
    outAudioBufferList.mBuffers[0].mDataByteSize = self.aMaxOutputFrameSize;
    outAudioBufferList.mBuffers[0].mData = malloc(self.aMaxOutputFrameSize);
    uint32_t outputDataPacketSize = 1;
    //Run the encoder. The callback aacEncodeInputDataProc is passed in; it supplies the pcm data synchronously.
    OSStatus status = AudioConverterFillComplexBuffer(_aConverter, aacEncodeInputDataProc, (__bridge void * _Nullable)(self), &outputDataPacketSize, &outAudioBufferList, NULL);
    if (status == noErr) {
        //Encoding succeeded; grab the data
        NSData *rawAAC = [NSData dataWithBytes: outAudioBufferList.mBuffers[0].mData length:outAudioBufferList.mBuffers[0].mDataByteSize];
        //Each AAC frame holds 1024 pcm samples, so the timestamp advances by 1024 * 1000 / sampleRate ms per frame
        self.manager.timestamp += 1024 * 1000 / self.audioConfig.sampleRate;
        //Wrap the aac data into an flv audio tag and send it to the server.
        return aw_encoder_create_audio_tag((int8_t *)rawAAC.bytes, rawAAC.length, (uint32_t)self.manager.timestamp, &_faacConfig);
    }else{
        //Encoding failed
        [self onErrorWithCode:AWEncoderErrorCodeAudioEncoderFailed des:@"aac encode error"];
    }
    return NULL;
}
//Callback function with the system-specified signature
static OSStatus aacEncodeInputDataProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData){
    AWHWAACEncoder *hwAacEncoder = (__bridge AWHWAACEncoder *)inUserData;
    //Hand the pcm data to the encoder
    if (hwAacEncoder.curFramePcmData) {
        ioData->mBuffers[0].mData = (void *)hwAacEncoder.curFramePcmData.bytes;
        ioData->mBuffers[0].mDataByteSize = (uint32_t)hwAacEncoder.curFramePcmData.length;
        ioData->mNumberBuffers = 1;
        ioData->mBuffers[0].mNumberChannels = (uint32_t)hwAacEncoder.audioConfig.channelCount;
        return noErr;
    }
    return -1;
}
Step 4: close the encoder and release its resources
-(void)close{
AudioConverterDispose(_aConverter);
_aConverter = nil;
self.curFramePcmData = nil;
self.aMaxOutputFrameSize = 0;
}
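Putting the AAC pieces together, the expected calling sequence based on the method names above would be roughly the following (aw_send_flv_tag is a placeholder for the rtmp sender, which a later post covers):
AWHWAACEncoder *aacEncoder = ...; // obtained via AWEncoderManager
[aacEncoder open];
// rtmp requires the AudioSpecificConfig tag before any audio frames.
aw_send_flv_tag([aacEncoder createAudioSpecificConfigFlvTag]); // placeholder sender
// Then, for every pcm buffer the microphone delivers:
aw_flv_audio_tag *tag = [aacEncoder encodePCMDataToFlvTag:pcmData];
if (tag) {
    aw_send_flv_tag(tag); // placeholder sender
}
[aacEncoder close]; // when the stream ends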