A video has three streams: a video stream, an audio stream, and a subtitle stream. Here I take the video stream out of video A and the audio stream out of music B, and merge them into a new video.
As usual, let's get straight into how the C code works.
Register the components, then open the MP4 and MP3 files and retrieve their stream information:
av_register_all();

//Input
if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) { //open the input video file
    LOGE("Could not open input file.");
    goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) { //retrieve the video file's stream info
    LOGE("Failed to retrieve input stream information");
    goto end;
}
if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) { //open the input audio file
    LOGE("Could not open input file.");
    goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) { //retrieve the audio file's stream info
    LOGE("Failed to retrieve input stream information");
    goto end;
}
Next, allocate the output file's format context:
//Output
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); //initialize the output AVFormatContext
if (!ofmt_ctx) {
    LOGE("Could not create output context\n");
    ret = AVERROR_UNKNOWN;
    return -1;
}
ofmt = ofmt_ctx->oformat;
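With the format and format-name arguments left NULL, avformat_alloc_output_context2() guesses the output container from the extension of out_filename, so naming the output file something.mp4 is what selects the MP4 muxer here.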
Grab the MP4's video stream and the MP3's audio stream, creating a matching output stream for each:
//Create an output out_stream for each input AVStream we want to keep
for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
    //Create output AVStream according to input AVStream
    if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        AVStream *in_stream = ifmt_ctx_v->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); //create the output AVStream
        videoindex_v = i;
        if (!out_stream) {
            LOGE("Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            break;
        }
        videoindex_out = out_stream->index;
        //Copy the settings of AVCodecContext
        if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
            LOGE("Failed to copy context from input to output stream codec context\n");
            break;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        break;
    }
}
for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
    //Create output AVStream according to input AVStream
    if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        AVStream *in_stream = ifmt_ctx_a->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        audioindex_a = i;
        if (!out_stream) {
            LOGE("Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        audioindex_out = out_stream->index;
        //Copy the settings of AVCodecContext
        if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
            LOGE("Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        break;
    }
}
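Two details in this copy step matter. Zeroing codec_tag lets the output muxer pick a tag that is valid for its own container instead of inheriting one from the input container, which can otherwise make header writing fail for some format combinations. And when the output format sets AVFMT_GLOBALHEADER, the CODEC_FLAG_GLOBAL_HEADER flag marks the stream as carrying its codec extradata (e.g. H.264 SPS/PPS) in the container header rather than repeated inside the bitstream.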
Dump the output file's information, open the output file, and write its header:
LOGE("==========Output Information==========\n"); av_dump_format(ofmt_ctx, 0, out_filename, 1); LOGE("======================================\n"); //Open output file if (!(ofmt->flags & AVFMT_NOFILE)) { if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {//打開輸出文件。 LOGE( "Could not open output file '%s'", out_filename); return -1; } } //Write file header if (avformat_write_header(ofmt_ctx, NULL) < 0) { LOGE( "Error occurred when opening output file\n"); return -1; }
Next comes the main loop. Although the comments talk about "decoding", nothing is actually decoded or re-encoded here: we only read (demux) compressed packets, alternating between the video stream and the audio stream, and the two streams must advance at the same pace. Which stream to read next is decided by comparing the two streams' current timestamps; each packet read is then written straight into the output file.
//Get an AVPacket. av_compare_ts() compares two timestamps that may have different
//time bases; its result decides whether to write a video or an audio packet next.
if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base,
                  cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)
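As a quick aside, here is a minimal standalone sketch (not part of the original program) showing what av_compare_ts() does: it rescales both timestamps to a common base before comparing, so the raw PTS values do not need to share a time base.

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void) {
    AVRational tb_v = {1, 90000}; //a typical video time base
    AVRational tb_a = {1, 44100}; //a typical audio time base
    //90000 * 1/90000 = 1.0 s and 44100 * 1/44100 = 1.0 s: the same instant
    printf("%d\n", av_compare_ts(90000, tb_v, 44100, tb_a)); //prints 0
    //88200 * 1/44100 = 2.0 s, which is after the video's 1.0 s
    printf("%d\n", av_compare_ts(90000, tb_v, 88200, tb_a)); //prints -1
    return 0;
}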
The video branch runs first: read a video packet and record its timestamp in cur_pts_v.
//At this point ifmt_ctx has been pointed at ifmt_ctx_v and stream_index
//set to videoindex_out (that assignment is not shown in this excerpt)
if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
    do {
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[stream_index];
        if (pkt.stream_index == videoindex_v) {
            //FIX: No PTS (Example: Raw H.264). A raw H.264 stream carries no PTS,
            //so one must be written by hand
            //Simple Write PTS
            if (pkt.pts == AV_NOPTS_VALUE) {
                //Write PTS
                AVRational time_base1 = in_stream->time_base;
                //Duration between 2 frames (us)
                int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                //Parameters
                pkt.pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
                pkt.dts = pkt.pts;
                pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
                frame_index++;
            }
            cur_pts_v = pkt.pts;
            break;
        }
    } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
}
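To make the PTS arithmetic concrete, take a hypothetical 25 fps raw H.264 stream with a time base of 1/90000 (numbers chosen purely for illustration): calc_duration = 1000000 / 25 = 40000 us between frames, pkt.duration = 40000 / (av_q2d((AVRational){1, 90000}) * 1000000) = 3600 ticks, and frame n gets pts = n * 3600.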
Then the audio branch: read an audio packet and record its timestamp in cur_pts_a.
//Here ifmt_ctx has been pointed at ifmt_ctx_a and stream_index
//set to audioindex_out (again not shown in this excerpt)
if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
    do {
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[stream_index];
        if (pkt.stream_index == audioindex_a) {
            //FIX: No PTS
            //Simple Write PTS
            if (pkt.pts == AV_NOPTS_VALUE) {
                //Write PTS
                AVRational time_base1 = in_stream->time_base;
                //Duration between 2 frames (us)
                int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                //Parameters
                pkt.pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
                pkt.dts = pkt.pts;
                pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
                frame_index++;
            }
            cur_pts_a = pkt.pts;
            break;
        }
    } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
} else {
    break;
}
Whichever branch ran, the packet just read is then rescaled, written into the output file, and released:
//Convert PTS/DTS from the input stream's time base to the output stream's
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                           (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                           (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
pkt.stream_index = stream_index;
LOGE("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
//Write the AVPacket (compressed audio or video data)
if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
    LOGE("Error muxing packet\n");
    break;
}
av_free_packet(&pkt); //deprecated in current FFmpeg; av_packet_unref() is the modern replacement
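Two small points here. The AV_ROUND_PASS_MINMAX flag makes av_rescale_q_rnd() pass AV_NOPTS_VALUE through unchanged instead of rescaling it into a bogus timestamp. And av_interleaved_write_frame() buffers packets internally so the muxer can emit them in correct DTS order even if our feeding order is slightly off.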
The loop keeps cycling through these steps: read a video packet or an audio packet, whichever is behind, then write it into the output file. Repeating until one input runs out completes the merged video.
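To make the control flow explicit, here is a condensed sketch of how the fragments above fit together (my own reconstruction, omitting the raw-stream PTS fix; the variables are the ones defined earlier):

while (1) {
    AVFormatContext *ifmt_ctx;
    int stream_index, expected_index;

    //Read next from whichever stream currently has the smaller timestamp
    if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base,
                      cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
        ifmt_ctx = ifmt_ctx_v;
        stream_index = videoindex_out;
        expected_index = videoindex_v;
    } else {
        ifmt_ctx = ifmt_ctx_a;
        stream_index = audioindex_out;
        expected_index = audioindex_a;
    }
    if (av_read_frame(ifmt_ctx, &pkt) < 0)
        break; //one input is exhausted: stop muxing
    if (pkt.stream_index != expected_index) { //skip streams we are not copying
        av_free_packet(&pkt);
        continue;
    }

    in_stream  = ifmt_ctx->streams[pkt.stream_index];
    out_stream = ofmt_ctx->streams[stream_index];

    //Remember how far each stream has advanced, for the next comparison
    if (ifmt_ctx == ifmt_ctx_v) cur_pts_v = pkt.pts;
    else                        cur_pts_a = pkt.pts;

    //Rescale timestamps and write, exactly as in the fragment above
    pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                               (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                               (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
    pkt.pos = -1;
    pkt.stream_index = stream_index;
    if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0)
        break;
    av_free_packet(&pkt);
}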
Finally, finish the output file and free everything:
//Write file trailer
av_write_trailer(ofmt_ctx);
#if USE_H264BSF
av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
av_bitstream_filter_close(aacbsfc);
#endif
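The two av_bitstream_filter_close() calls imply that the filters were opened earlier; that initialization is not part of this excerpt. With the old bitstream-filter API used here, it would presumably look like this (a sketch; the filter names are the standard ones for these conversions):

#if USE_H264BSF
//Converts H.264 from the MP4 AVCC layout to Annex B (needed e.g. when muxing into MPEG-TS)
AVBitStreamFilterContext *h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
//Converts AAC from ADTS framing to the MP4-style raw layout (needed when muxing into MP4)
AVBitStreamFilterContext *aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif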
end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        LOGE("Error occurred.\n");
        return -1;
    }
Next time will again have to do with audio.