Having previously implemented online detection of images with YOLO, this time the goal is detection on a remote video stream. The pipeline consists of three main parts: pushing the stream, pulling the stream, and detection plus display.
First, pushing the stream. The local camera is pushed with the ffmpeg command line. To get the first frame on screen almost instantly, -g is used to set a small GOP size, and -b lowers the bitrate to reduce the network load and keep playback smooth.
Linux:
ffmpeg -r 30 -i /dev/video0 -vcodec h264 -max_delay 100 -f flv -g 5 -b 700000 rtmp://219.216.87.170/live/test1
Windows:
ffmpeg -r 30 -f vfwcap -i 0 -vcodec h264 -max_delay 100 -f flv -g 5 -b 700000 rtmp://219.216.87.170/live/test1
With DirectShow, list the available capture devices first and then push from the camera by name:
ffmpeg -list_devices true -f dshow -i dummy
ffmpeg -r 30 -f dshow -i video="1.3M HD WebCam" -vcodec h264 -max_delay 100 -f flv -g 5 -b 700000 rtmp://219.216.87.170/live/test1
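To verify that the published stream is reachable and that the first frame really does come up quickly, it can be played back with ffplay; the -fflags nobuffer option here is only a suggested low-latency setting, mirroring the one used on the pull side below:
ffplay -fflags nobuffer rtmp://219.216.87.170/live/test1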
Next, pulling the stream. At first the pull side had a large delay of roughly 5 seconds; after optimization the latency is now kept within 1 second, which is acceptable. Here is the pull-side program with those optimizations:
AVFormatContext *pFormatCtx;
int i, videoindex;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame, *pFrameRGB;
uint8_t *out_buffer;
AVPacket *packet;
//int y_size;
int ret, got_picture;
struct SwsContext *img_convert_ctx;
// Input stream URL
// char filepath[] = "rtmp://219.216.87.170/vod/test.flv";
char filepath[] = "rtmp://219.216.87.170/live/test1";
int frame_cnt;

printf("wait for playing %s\n", filepath);
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
printf("size %ld\tduration %ld\n", pFormatCtx->probesize, pFormatCtx->max_analyze_duration);
pFormatCtx->probesize = 20000000;
pFormatCtx->max_analyze_duration = 2000;
// pFormatCtx->interrupt_callback.callback = timout_callback;
// pFormatCtx->interrupt_callback.opaque = pFormatCtx;
// pFormatCtx->flags |= AVFMT_FLAG_NONBLOCK;

// Low-latency demuxer options
AVDictionary* options = NULL;
av_dict_set(&options, "fflags", "nobuffer", 0);
// av_dict_set(&options, "max_delay", "100000", 0);
// av_dict_set(&options, "rtmp_transport", "tcp", 0);
// av_dict_set(&options, "stimeout", "6", 0);

printf("wating for opening file\n");
if (avformat_open_input(&pFormatCtx, filepath, NULL, &options) != 0) {
    printf("Couldn't open input stream.\n");
    return -1;
}
av_dict_free(&options);
printf("wating for finding stream\n");
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    printf("Couldn't find stream information.\n");
    return -1;
}
videoindex = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        videoindex = i;
        break;
    }
if (videoindex == -1) {
    printf("Didn't find a video stream.\n");
    return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
    printf("Codec not found.\n");
    return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    printf("Could not open codec.\n");
    return -1;
}
/*
 * Code that dumps the video information (taken from pFormatCtx,
 * e.g. with fprintf()) can be added here.
 */
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
out_buffer = (uint8_t *) av_malloc(
        avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height));
avpicture_fill((AVPicture *) pFrameRGB, out_buffer, AV_PIX_FMT_BGR24,
        pCodecCtx->width, pCodecCtx->height);
packet = (AVPacket *) av_malloc(sizeof(AVPacket));

//Output Info-----------------------------
printf("--------------- File Information ----------------\n");
av_dump_format(pFormatCtx, 0, filepath, 0);
printf("-------------------------------------------------\n");

img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
        pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
        AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);

// Map the decoded BGR buffer directly into an IplImage header (no copy)
CvSize imagesize;
imagesize.width = pCodecCtx->width;
imagesize.height = pCodecCtx->height;
IplImage *image = cvCreateImageHeader(imagesize, IPL_DEPTH_8U, 3);
cvSetData(image, out_buffer, imagesize.width * 3);
cvNamedWindow(filepath, CV_WINDOW_AUTOSIZE);

frame_cnt = 0;
int num = 0;
while (av_read_frame(pFormatCtx, packet) >= 0) {
    if (packet->stream_index == videoindex) {
        /*
         * Code that writes out the raw H264 stream (taken from packet,
         * e.g. with fwrite()) can be added here.
         */
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0) {
            printf("Decode Error.\n");
            return -1;
        }
        if (got_picture) {
            sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height,
                    pFrameRGB->data, pFrameRGB->linesize);
            printf("Decoded frame index: %d\n", frame_cnt);
            /*
             * Code that writes out the YUV data (taken from the decoded frame,
             * e.g. with fwrite()) can be added here.
             */
            frame_cnt++;
            cvShowImage(filepath, image);
            cvWaitKey(30);
        }
    }
    av_free_packet(packet);
}

sws_freeContext(img_convert_ctx);
av_frame_free(&pFrameRGB);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
The decoded BGR data buffer is mapped directly onto the data area of an OpenCV IplImage (cvCreateImageHeader plus cvSetData), so OpenCV can display each frame without an extra copy.
For the detection part, the IplImage is bridged to the image format used inside YOLO; the image conversion was partially optimized to drop a few unnecessary steps. A dedicated thread then receives the ffmpeg stream, while the main loop runs detection and displays the result. Thread synchronization is required so that detection only runs once a new frame has actually arrived; a sketch of this producer/consumer setup follows.
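A minimal sketch of that synchronization, assuming pthreads, darknet's image struct with its make_image/free_image helpers, and a hypothetical run_yolo_detection() wrapper standing in for the actual network call; publish_frame() would be invoked from the receiver right after sws_scale() in the pull program above:

#include <pthread.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include "darknet.h"                       // assumed to provide image, make_image, free_image

extern void run_yolo_detection(image im);  // hypothetical wrapper around the YOLO network

// State shared between the ffmpeg receiver thread and the detection loop.
static pthread_mutex_t frame_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  frame_ready = PTHREAD_COND_INITIALIZER;
static IplImage *shared_frame = NULL;      // latest decoded BGR frame
static int new_frame = 0;                  // set by the receiver, cleared by the detector

// Called by the receiver thread right after sws_scale() has filled the IplImage.
void publish_frame(IplImage *decoded) {
    pthread_mutex_lock(&frame_lock);
    if (!shared_frame)
        shared_frame = cvCloneImage(decoded);
    else
        cvCopy(decoded, shared_frame, NULL);
    new_frame = 1;
    pthread_cond_signal(&frame_ready);
    pthread_mutex_unlock(&frame_lock);
}

// Convert the interleaved BGR IplImage into a planar float image in [0,1],
// following darknet's data[c*h*w + y*w + x] layout.
image ipl_to_yolo_image(IplImage *src) {
    int w = src->width, h = src->height, c = src->nChannels;
    int step = src->widthStep;
    unsigned char *data = (unsigned char *) src->imageData;
    image out = make_image(w, h, c);
    for (int k = 0; k < c; ++k)
        for (int y = 0; y < h; ++y)
            for (int x = 0; x < w; ++x)
                out.data[k * w * h + y * w + x] = data[y * step + x * c + k] / 255.0f;
    return out;
}

// Main loop: block until the receiver signals a fresh frame, then convert,
// detect and display. Detection never runs on a stale frame.
void detection_loop(void) {
    while (1) {
        pthread_mutex_lock(&frame_lock);
        while (!new_frame)
            pthread_cond_wait(&frame_ready, &frame_lock);
        IplImage *frame = cvCloneImage(shared_frame);
        new_frame = 0;
        pthread_mutex_unlock(&frame_lock);

        image yolo_in = ipl_to_yolo_image(frame);
        run_yolo_detection(yolo_in);
        cvShowImage("detections", frame);
        cvWaitKey(1);

        free_image(yolo_in);
        cvReleaseImage(&frame);
    }
}

Copying the frame out of the shared buffer keeps the lock held only briefly, so decoding in the receiver thread is never blocked by a slow detection pass.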