The complete project is published at https://github.com/ximikang/ffmpegThumbnail.
// Read the media file and read the header information from the container format
AVFormatContext* pFormatContext = avformat_alloc_context();
if (!pFormatContext) {
    logging("ERROR could not allocate memory for format context");
    return -1;
}
if (avformat_open_input(&pFormatContext, inputFilePath.string().c_str(), NULL, NULL) != 0) {
    logging("ERROR could not open media file");
    return -1;
}
logging("format %s, duration %lld us, bit_rate %lld",
        pFormatContext->iformat->name, pFormatContext->duration, pFormatContext->bit_rate);
cout << "Video duration: " << pFormatContext->duration / 1000.0 / 1000.0 << "s" << endl;

int64_t video_duration = pFormatContext->duration;
int sum_count = rowNums * colNums;
// Interval between seek points, in ms
int64_t time_step = video_duration / sum_count / 1000;
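The snippets below also use video_stream_index, pCodecContext, pPacket and pFrame, whose setup is not shown in this excerpt. A minimal sketch of that setup, assuming the usual FFmpeg decode workflow (the exact code in the repository may differ):

// Sketch only: locate the video stream and open its decoder (assumed setup, not from the excerpt above)
if (avformat_find_stream_info(pFormatContext, NULL) < 0) {
    logging("ERROR could not get stream info");
    return -1;
}
int video_stream_index = av_find_best_stream(pFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (video_stream_index < 0) {
    logging("ERROR could not find a video stream");
    return -1;
}
AVCodecParameters* pCodecParameters = pFormatContext->streams[video_stream_index]->codecpar;
const AVCodec* pCodec = avcodec_find_decoder(pCodecParameters->codec_id);

AVCodecContext* pCodecContext = avcodec_alloc_context3(pCodec);
avcodec_parameters_to_context(pCodecContext, pCodecParameters);
if (avcodec_open2(pCodecContext, pCodec, NULL) < 0) {
    logging("ERROR could not open codec");
    return -1;
}

// Reusable packet/frame buffers for the extraction loop
AVPacket* pPacket = av_packet_alloc();
AVFrame* pFrame = av_frame_alloc();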
for (int i = 0; i < sum_count; ++i) {
    cv::Mat tempImage;
    // Read one image at each equally spaced time point and store it in vImage
    while (av_read_frame(pFormatContext, pPacket) >= 0) {
        if (pPacket->stream_index == video_stream_index) {
            response = decode_packet_2mat(pPacket, pCodecContext, pFrame, tempImage);
        }
        av_packet_unref(pPacket);
        if (response == 0)  // successfully decoded one frame
            break;
        if (response < 0)   // decoder needs more packets, keep reading
            continue;
    }
    vImage.push_back(tempImage);
    // Seek to the next time point
    av_seek_frame(pFormatContext, -1,
                  ((double)time_step / (double)1000) * AV_TIME_BASE * (double)(i + 1)
                      + (double)pFormatContext->start_time,
                  AVSEEK_FLAG_BACKWARD);
}
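One detail worth noting: after av_seek_frame the decoder may still hold frames buffered from the previous position, so it is common practice to flush it right after seeking. Whether the repository does this is not shown in the excerpt above, so the line below is an assumed addition:

// Assumed addition: drop frames buffered before the seek,
// typically placed immediately after av_seek_frame
avcodec_flush_buffers(pCodecContext);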
3. Getting a Frame
At a fixed time point, the Packet read at the current position may not produce a corresponding Frame, so each Packet that is read has to be checked; if no Frame is obtained, the next Packet should be read, and so on until a Frame is decoded.
static int decode_packet_2mat(AVPacket* pPacket, AVCodecContext* pCodecContext, AVFrame* pFrame, cv::Mat& image)
{
    // Send the compressed packet to the decoder
    int response = avcodec_send_packet(pCodecContext, pPacket);
    if (response < 0) {
        logging("Error while sending a packet to the decoder");
        return response;
    }
    while (response >= 0) {
        // Get decoded output data (a frame) from the decoder
        response = avcodec_receive_frame(pCodecContext, pFrame);
        if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
            // Decoder needs more input (or reached EOF); let the caller read the next packet
            logging("averror averror_eof");
            break;
        } else if (response < 0) {
            logging("Error while receiving frame");
            return response;
        }
        // A frame was decoded; convert it to a cv::Mat
        image = frame2Mat(pFrame, pCodecContext->pix_fmt);
        return 0;
    }
    // A negative value tells the caller to keep feeding packets
    return response;
}
The frames obtained from the video stream are in YUV format, and since OpenCV is used for the later processing, a pixel-format conversion is needed.
First, FFmpeg's SwsContext is used to convert the extracted frame from YUV to BGR; then the raw data is taken from the BGR frame's buffer and converted into an OpenCV Mat.
cv::Mat frame2Mat(AVFrame* pFrame, AVPixelFormat pPixFormat)
{
    // Allocate a destination frame and a BGR24 buffer for it
    AVFrame* pRGBFrame = av_frame_alloc();
    uint8_t* out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_BGR24, pFrame->width, pFrame->height)];
    avpicture_fill((AVPicture*)pRGBFrame, out_buffer, AV_PIX_FMT_BGR24, pFrame->width, pFrame->height);

    // Conversion context: source pixel format -> BGR24, same width and height
    SwsContext* rgbSwsContext = sws_getContext(pFrame->width, pFrame->height, pPixFormat,
                                               pFrame->width, pFrame->height, AV_PIX_FMT_BGR24,
                                               SWS_BICUBIC, NULL, NULL, NULL);
    if (!rgbSwsContext) {
        logging("Error could not create frame to rgbframe sws context");
        exit(-1);
    }
    if (sws_scale(rgbSwsContext, pFrame->data, pFrame->linesize, 0, pFrame->height,
                  pRGBFrame->data, pRGBFrame->linesize) < 0) {
        logging("Error could not sws to rgb frame");
        exit(-1);
    }

    // Wrap the BGR buffer in a Mat header, then clone so the returned Mat owns its own copy
    // (note: do not use (uchar*)pRGBFrame->data here, which is the uint8_t* array, not the pixel buffer)
    cv::Mat mBGR(pFrame->height, pFrame->width, CV_8UC3, pRGBFrame->data[0], pRGBFrame->linesize[0]);
    cv::Mat result = mBGR.clone();

    av_frame_free(&pRGBFrame);
    delete[] out_buffer;
    sws_freeContext(rgbSwsContext);
    return result;
}
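avpicture_get_size and avpicture_fill are deprecated in current FFmpeg releases. When building against a newer FFmpeg, the libavutil/imgutils.h equivalents can be swapped in with the same effect; a sketch (alignment 1 matches the deprecated calls):

// Sketch: equivalent allocation with the non-deprecated imgutils API
#include <libavutil/imgutils.h>

int bufSize = av_image_get_buffer_size(AV_PIX_FMT_BGR24, pFrame->width, pFrame->height, 1);
uint8_t* out_buffer = new uint8_t[bufSize];
av_image_fill_arrays(pRGBFrame->data, pRGBFrame->linesize, out_buffer,
                     AV_PIX_FMT_BGR24, pFrame->width, pFrame->height, 1);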
Based on the required canvas size, a white canvas is created and then filled with the extracted frames.
cv::Mat makeThumbnail(vector<cv::Mat> vImage, const unsigned int rowNums, const unsigned int colNums)
{
    // Check that the number of images matches the requested grid
    if (vImage.size() != rowNums * colNums) {
        logging("Error image size not equal input size");
        logging("vImage length: %d, rowNums: %d, col number: %d", (int)vImage.size(), rowNums, colNums);
        exit(-1);
    }

    // White canvas large enough for the grid plus a margin around every cell
    int interval = 100;
    int height = vImage[0].size().height * rowNums + interval * (rowNums + 1);
    int width = vImage[0].size().width * colNums + interval * (colNums + 1);
    logging("thumbnail size: %d * %d", height, width);
    cv::Mat thumbnail(cv::Size(width, height), CV_8UC3);
    thumbnail.setTo(255);

    // Copy each extracted frame into its cell
    for (int i = 0; i < rowNums; ++i) {
        for (int j = 0; j < colNums; ++j) {
            int no = i * colNums + j;   // row-major index into vImage
            int widthOffset = (vImage[0].size().width + interval) * j + interval;
            int heightOffset = (vImage[0].size().height + interval) * i + interval;
            vImage[no].copyTo(thumbnail(cv::Rect(widthOffset, heightOffset,
                                                 vImage[0].size().width, vImage[0].size().height)));
        }
    }
    return thumbnail;
}
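Putting the pieces together, the overall flow is: open the file, decode rowNums * colNums frames at equal intervals into vImage, stitch them with makeThumbnail, and write the result out. An illustrative usage sketch (the output file name is an assumption, not taken from the repository):

// Illustrative usage only; the actual entry point in the repository may differ
int rowNums = 3, colNums = 3;
vector<cv::Mat> vImage;            // filled by the extraction loop shown earlier
// ... open the file, set up the decoder, run the extraction loop ...
cv::Mat thumbnail = makeThumbnail(vImage, rowNums, colNums);
cv::imwrite("thumbnail.jpg", thumbnail);   // assumed output path

// Release FFmpeg resources
av_packet_free(&pPacket);
av_frame_free(&pFrame);
avcodec_free_context(&pCodecContext);
avformat_close_input(&pFormatContext);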