首先,測試是使用live555的testRTSPClient來進行網絡實時流獲取。在DummySink::afterGettingFrame接口處,進行音視頻判斷:
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
    //----------------------------------------------------------------------------------
    // Callback invoked by live555 for every complete frame received on this
    // subsession. Video (H.264) frames get an Annex-B start code prepended and
    // are muxed into the TS file; audio (PCM) frames are encoded to AAC via
    // faac before muxing.
    if (0 == strcmp(fSubsession.mediumName(), "video"))
    {
        static const unsigned char start_code[4] = { 0x00, 0x00, 0x00, 0x01 };

        if (!bInitHead)
        {
            // First video frame: extract SPS/PPS from the SDP
            // "sprop-parameter-sets" attribute and write each parameter set
            // as its own Annex-B NAL unit before any frame data.
            unsigned int num = 0;
            SPropRecord* sps = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), num);
            if (sps != NULL)
            {
                // Iterate over however many parameter sets were advertised
                // (typically SPS + PPS) instead of assuming exactly two.
                for (unsigned int i = 0; i < num; ++i)
                {
                    memset(m_recvBuf, 0, DUMMY_SINK_RECEIVE_BUFFER_SIZE + 4);
                    memcpy(m_recvBuf, start_code, 4);
                    memcpy(&m_recvBuf[4], sps[i].sPropBytes, sps[i].sPropLength);
                    WriteBuf2TsFile(25, 1, m_recvBuf, sps[i].sPropLength + 4, 0);
                    fwrite(m_recvBuf, 1, sps[i].sPropLength + 4, pVideo_H264_File);
                }
                delete[] sps;
            }
            bInitHead = true;
        }

        // Prepend the 0x00000001 start code to the raw NAL unit and mux it.
        // (If the stream already carries start codes this step is unnecessary.)
        memset(m_recvBuf, 0, DUMMY_SINK_RECEIVE_BUFFER_SIZE + 4);
        memcpy(m_recvBuf, start_code, 4);
        memcpy(&m_recvBuf[4], fReceiveBuffer, frameSize);
        WriteBuf2TsFile(25, 1, m_recvBuf, frameSize + 4, 0);
        fwrite(m_recvBuf, 1, frameSize + 4, pVideo_H264_File);
    }
    if (0 == strcmp(fSubsession.mediumName(), "audio"))
    {
        BYTE* pbAACBuffer = new BYTE[nMaxOutputBytes];
        // Convert the byte count to a sample count for the encoder.
        frameSize = frameSize / (nPCMBitSize / 8);
        int nRet = faacEncEncode(aac_Handle, (int*)fReceiveBuffer, frameSize, pbAACBuffer, nMaxOutputBytes);
        // faacEncEncode may return 0 while its internal buffer fills; only
        // emit data when it actually produced encoded bytes.
        if (nRet > 0)
        {
            WriteBuf2TsFile(fSubsession.scale(), 0, pbAACBuffer, nRet, fSubsession.getNormalPlayTime(presentationTime));
            // BUG FIX: write only the nRet encoded bytes, not the whole
            // nMaxOutputBytes buffer (the tail is uninitialized garbage).
            fwrite(pbAACBuffer, 1, nRet, pAudio_Aac_File);
        }
        // BUG FIX: buffer was allocated with new[], so it must be released
        // with delete[] (plain delete is undefined behavior).
        delete[] pbAACBuffer;
        pbAACBuffer = NULL;
    }
    //----------------------------------------------------------------------------------
    // Then continue, to request the next frame of data:
    continuePlaying();
}
此處視頻處理須要注意2點,第一是,首次須要寫入sps數據信息;第二是,若是傳入的H264裸流數據沒有0x00000001解析頭的,須要本身添加解析頭,若是傳輸數據已經存在則沒必要添加了。另外,就是音頻方面,這裏是進行音頻轉換了的轉爲aac格式,使用faac開源庫處理。
下面是WriteBuf2TsFile裏面的相關內容:
/*實時流寫入ts文件*/
int WriteBuf2TsFile(unsigned int framerate, int iStreamType, unsigned char *pData, int iDataSize, unsigned long lTimeStamp)
{
unsigned int audiosamplerate = 8000; //音頻採樣率
unsigned int videoframetype = 0; //視頻幀類型
Ts_Adaptation_field ts_adaptation_field_Head;
Ts_Adaptation_field ts_adaptation_field_Tail;
unsigned int WritePacketNum;調試
if (0 == iStreamType)
{
Take_Out_Pes(&m_audio_tspes, Timestamp_audio, 0x01, NULL,pData,iDataSize);
if (m_audio_tspes.Pes_Packet_Length_Beyond != 0)
{
printf("PES_AUDIO : SIZE = %d\n", m_audio_tspes.Pes_Packet_Length_Beyond);
//填寫自適應段標誌
WriteAdaptive_flags_Tail(&ts_adaptation_field_Head); //填寫自適應段標誌 ,這裏注意 音頻類型不要算pcr 因此都用幀尾代替就行
WriteAdaptive_flags_Tail(&ts_adaptation_field_Tail); //填寫自適應段標誌幀尾
PES2TS(&m_audio_tspes, TS_AAC_PID, &ts_adaptation_field_Head, &ts_adaptation_field_Tail, Timestamp_video, Timestamp_audio);
Timestamp_audio += 1024 * 1000 * 90 / 8000;
//計算一幀音頻所用時間
}
}
else if (1 == iStreamType)
{
Take_Out_Pes(&m_video_tspes, Timestamp_video, 0x00, &videoframetype,pData,iDataSize);
if (m_video_tspes.Pes_Packet_Length_Beyond != 0)
{
printf("PES_VIDEO : SIZE = %d\n", m_video_tspes.Pes_Packet_Length_Beyond);
if (videoframetype == FRAME_I || videoframetype == FRAME_P || videoframetype == FRAME_B)
{
//填寫自適應段標誌
WriteAdaptive_flags_Head(&ts_adaptation_field_Head, Timestamp_video); //填寫自適應段標誌幀頭
WriteAdaptive_flags_Tail(&ts_adaptation_field_Tail); //填寫自適應段標誌幀尾
//計算一幀視頻所用時間
PES2TS(&m_video_tspes, TS_H264_PID, &ts_adaptation_field_Head, &ts_adaptation_field_Tail, Timestamp_video, Timestamp_audio);
Timestamp_video += 1000 * 90 / framerate;
}
else
{
//填寫自適應段標誌
WriteAdaptive_flags_Tail(&ts_adaptation_field_Head); //填寫自適應段標誌 ,這裏注意 其它幀類型不要算pcr 因此都用幀尾代替就行
WriteAdaptive_flags_Tail(&ts_adaptation_field_Tail); //填寫自適應段標誌幀尾
PES2TS(&m_video_tspes, TS_H264_PID, &ts_adaptation_field_Head, &ts_adaptation_field_Tail, Timestamp_video, Timestamp_audio);
}
}
}
return 1;
}code
這裏須要注意的是,幀率:25,音頻採樣率:8000,由於是測試因此本身寫死了的,須要特別注意的是:時間戳問題,若是搞錯就只能播放一幀視頻了(這個問題困擾一段時間,可能因爲經驗問題)。
下面附上鍊接地址:
H264+AAC文件方式封裝ts:http://download.csdn.net/detail/zhuweigangzwg/5605869
實時流封裝Demo地址:
https://github.com/Jsnails/MUX_TS
因爲空間關係,live555,faac等開源庫須要本身去下載編譯,上傳文件絕對沒有作任何處理,那2個開源庫編譯好,工程配置好,絕對能夠編譯調試。