Receiving an H.264 (YUV) Video Stream over RTP, Decoding with ffmpeg, and Playing in Real Time with SDL

Foreword

The goal is to write a simple player that receives a video stream over RTP and plays it in real time. Initially, ffplay or VLC could receive and play the stream given an SDP description file, but after the stream was interrupted and restarted, packets were no longer depacketized correctly, and the following error message appeared frequently.

[sdp @ 0x7ffb35034e00] RTP: dropping old packet received too late
    Last message repeated 15 times


Playing the UDP video stream with ffplay:

➜  view-show ffplay -protocol_whitelist "file,http,https,rtp,udp,tcp,tls" test.sdp
➜  view-show cat test.sdp
m=video 6665 RTP/AVP 96
a=rtpmap:96 H264/90000
c=IN IP4 0.0.0.0
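
Here the m= line declares a video stream on UDP port 6665, carried as RTP/AVP with dynamic payload type 96, and the a=rtpmap attribute maps payload type 96 to H.264 with a 90 kHz clock.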

Analyzing the ffmpeg source code, in rtpdec.c, the cause of this error is that when ffplay receives an RTP stream and a packet arrives whose sequence number falls behind that of the previously emitted packet, the current RTP packet is dropped.

static int rtp_parse_one_packet(RTPDemuxContext *s, AVPacket *pkt,
                                uint8_t **bufptr, int len)
{
...
    if ((s->seq == 0 && !s->queue) || s->queue_size <= 1) {
        /* First packet, or no reordering */
        return rtp_parse_packet_internal(s, pkt, buf, len);
    } else {
        uint16_t seq = AV_RB16(buf + 2);
        int16_t diff = seq - s->seq;
        if (diff < 0) {
            /* Note this line: packet older than the previously emitted one, drop */

            av_log(s->ic, AV_LOG_WARNING,
                   "RTP: dropping old packet received too late\n");
            return -1;
        } else if (diff <= 1) {
            /* Correct packet */
            rv = rtp_parse_packet_internal(s, pkt, buf, len);
            return rv;
        } else {
            /* Still missing some packet, enqueue this one. */
            rv = enqueue_packet(s, buf, len);
            if (rv < 0)
                return rv;
            *bufptr = NULL;
            /* Return the first enqueued packet if the queue is full,
             * even if we're missing something */
            if (s->queue_len >= s->queue_size) {
                av_log(s->ic, AV_LOG_WARNING, "jitter buffer full\n");
                return rtp_parse_queued_packet(s, pkt);
            }
            return -1;
        }
    }
}


In real-world use, however, a large video file is split according to the MTU (1500 bytes on Ethernet) into a great many RTP packets of about 1400 bytes each, and the total packet count for a multi-frame video far exceeds the RTP sequence-number space of 65536, so the 16-bit counter wraps around.
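
For a rough sense of scale: assuming around 60 KB per encoded 720p frame, each frame splits into roughly 44 packets of 1400 bytes, so the 16-bit sequence counter wraps after about 65536 / 44 ≈ 1490 frames, i.e. about one minute of 25 fps video.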

To work around this, the plan was to build a simple video player from scratch: like ffplay, it receives UDP packets and then plays them.

 

Overall Video Transmission Pipeline

A brief note on the libraries used:

Library   Description and role
opencv    reading from a camera or video file; image processing
x264      H.264 video encoding
ffmpeg    H.264 video decoding
SDL2      video playback
RTP       packetizing the video stream into RTP packets

Detailed Analysis

 

OpenCV-x264-RTP Video Encoding Pipeline

 

Test code for reading a video file with OpenCV:

void handleVideo(const char* pFileName) {
    Mat frame;
    cv::VideoCapture capture(pFileName);
    if (!capture.isOpened()) {
        return;  // could not open the video file
    }

    while (true) {
        capture >> frame;      // grab the next (BGR) frame
        if (frame.empty()) {   // end of stream
            break;
        }

        STREAM_PUSH_INS->push(&frame);  // hand the frame to the push pipeline
    }
}
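
Note that cv::VideoCapture delivers BGR frames, while the x264 encoder below consumes I420 planes, so a conversion has to happen somewhere on the push path. A minimal sketch of that step (where exactly it sits inside push() is an assumption, it is not shown in the original code):

// Hypothetical helper: convert a BGR frame from VideoCapture into a single
// contiguous I420 (YUV420p) Mat of size (height * 3 / 2) x width, CV_8UC1 --
// the Y plane followed by the U and V planes, as EncodeOneBuf expects.
cv::Mat bgrToI420(const cv::Mat &bgr) {
    cv::Mat yuv;
    cv::cvtColor(bgr, yuv, cv::COLOR_BGR2YUV_I420);
    return yuv;
}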

Code for encoding a video frame with x264:

bool X264Encoder::EncodeOneBuf(cv::Mat *yuvMat, Str *resStr) {
    TimeMeasurer tm;
    memset(yuv_buffer_, 0, m_width * m_height * 3);

    uint8_t* yuv_buffer = (uint8_t*) yuvMat->data;

    // Copy the three I420 planes (Y, then U, then V) into the encoder's
    // input picture.
    memcpy(picture_in_.img.plane[0], yuv_buffer, m_width * m_height);
    yuv_buffer += m_width * m_height;
    memcpy(picture_in_.img.plane[1], yuv_buffer, m_width * m_height / 4);
    yuv_buffer += m_width * m_height / 4;
    memcpy(picture_in_.img.plane[2], yuv_buffer, m_width * m_height / 4);
    // Force every frame to be an IDR frame so the receiver can start
    // decoding at any point in the stream.
    picture_in_.i_type = X264_TYPE_IDR;

    // static so the presentation timestamp stays monotonic across calls
    // (a local counter would reset to 0 on every frame).
    static int64_t i_pts = 0;
    picture_in_.i_pts = i_pts++;

    x264_nal_t *nals;
    int nnal;

    int h264size = 0;

    x264_picture_t pic_out;
    x264_picture_init(&pic_out);
    if (x264_encoder_encode(x264_encoder_, &nals, &nnal, &picture_in_, &pic_out) < 0) {
        return false;
    }
    // Concatenate all NAL units of this frame into one output buffer.
    x264_nal_t *nal;
    for (nal = nals; nal < nals + nnal; nal++) {
        memcpy((char*)resStr->data + h264size, nal->p_payload, nal->i_payload);
        h264size += nal->i_payload;
    }

    resStr->size = h264size;

    LOG_INFO("x264.encode.cost: %lu", tm.Elapsed());
    return true;
}
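
EncodeOneBuf assumes that x264_encoder_ and picture_in_ were already initialized elsewhere. A minimal initialization sketch, with illustrative parameter choices rather than the project's actual settings:

bool X264Encoder::Init(int width, int height, int fps) {
    x264_param_t param;
    // "ultrafast" + "zerolatency" keeps encoder latency low for streaming.
    x264_param_default_preset(&param, "ultrafast", "zerolatency");
    param.i_width = width;
    param.i_height = height;
    param.i_csp = X264_CSP_I420;
    param.i_fps_num = fps;
    param.i_fps_den = 1;
    x264_param_apply_profile(&param, "baseline");

    x264_encoder_ = x264_encoder_open(&param);
    if (!x264_encoder_) {
        return false;
    }
    // Allocate the I420 input picture whose planes EncodeOneBuf fills.
    return x264_picture_alloc(&picture_in_, X264_CSP_I420, width, height) == 0;
}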


YUV Format Analysis

The encoder uses IYUV, also known as YUV420p or I420.

The YUV420p data layout is as follows.

 

YUV420p stores three separate planes: Y, U, and V.

The U plane immediately follows the Y plane, and the V plane follows the U plane (i.e., Y, then U, then V).

For a 4x2-pixel image, the byte layout is therefore YYYY YYYY UU VV.
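
In code terms, the three plane pointers inside one contiguous I420 buffer sit at fixed offsets (a small illustrative helper, not from the original project):

// Plane offsets inside a contiguous I420 buffer of w * h * 3 / 2 bytes.
void i420_planes(uint8_t *buf, int w, int h,
                 uint8_t **y, uint8_t **u, uint8_t **v) {
    *y = buf;                       /* w x h luma samples */
    *u = buf + w * h;               /* (w/2) x (h/2) U samples */
    *v = buf + w * h + w * h / 4;   /* (w/2) x (h/2) V samples */
}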

 

RTP Header Definition

typedef struct rtp_header {
    /* little-endian */
    /* byte 0 */
    uint8_t csrc_len:       4;  /* bit: 0~3 */
    uint8_t extension:      1;  /* bit: 4 */
    uint8_t padding:        1;  /* bit: 5*/
    uint8_t version:        2;  /* bit: 6~7 */
    /* byte 1 */
    uint8_t payload_type:   7;  /* bit: 0~6 */
    uint8_t marker:         1;  /* bit: 7 */
    /* bytes 2, 3 */
    uint16_t seq_no;            /* network byte order */
    /* bytes 4-7 */
    uint32_t timestamp;         /* network byte order */
    /* bytes 8-11 */
    uint32_t ssrc;
} __attribute__ ((packed)) rtp_header_t; /* 12 bytes */
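
As a sketch, filling this header for one H.264 packet might look like the following; payload type 96 matches the SDP above, and the multi-byte fields need htons/htonl because they travel in network byte order (the function and its parameters are illustrative, not from the original project):

#include <string.h>
#include <arpa/inet.h>

void fill_rtp_header(rtp_header_t *h, uint16_t seq, uint32_t ts,
                     uint32_t ssrc, int last_packet_of_frame) {
    memset(h, 0, sizeof(*h));
    h->version = 2;                   /* always 2 for RTP */
    h->payload_type = 96;             /* dynamic type, mapped to H264 in SDP */
    h->marker = last_packet_of_frame; /* set on a frame's final packet */
    h->seq_no = htons(seq);
    h->timestamp = htonl(ts);         /* 90 kHz clock per a=rtpmap */
    h->ssrc = htonl(ssrc);
}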


H.264 Video Decoding with ffmpeg

 

ffmpeg H.264 decoder initialization code:

AVCodec *gCodec = NULL;
AVCodecContext *gCodec_ctx = NULL;
AVCodecParserContext *gParser = NULL;
AVFrame *gAVFrame = NULL;

void doAVCodecInit() {
    /* Register the H.264 decoder and parser individually (instead of
     * calling avcodec_register_all()). */
    avcodec_register(&ff_h264_decoder);
    av_register_codec_parser(&ff_h264_parser);

    gCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!gCodec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    gCodec_ctx = avcodec_alloc_context3(gCodec);
    if (!gCodec_ctx) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if (avcodec_open2(gCodec_ctx, gCodec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    gParser = av_parser_init(AV_CODEC_ID_H264);
    if (!gParser) {
        fprintf(stderr, "Could not create H264 parser\n");
        exit(1);
    }

    gAVFrame = av_frame_alloc();
    if (!gAVFrame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    initPacket();
}
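
initPacket() itself is not shown in the post; a plausible minimal version simply prepares a reusable global AVPacket (this body is an assumption, not the author's actual code):

AVPacket gAVPacket;

void initPacket(void) {
    av_init_packet(&gAVPacket);  /* assumption: reusable packet for decoding */
    gAVPacket.data = NULL;
    gAVPacket.size = 0;
}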


ffmpeg H.264 decode-loop code:

static int doDecodeFrame(AVPacket *pkt, unsigned int frame_index) {
    int got_frame = 0;
    do {
        int len = avcodec_decode_video2(gCodec_ctx, gAVFrame, &got_frame, pkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding frame %u\n", frame_index);
            return len;
        }
        if (got_frame) {
            /* Hand the decoded YUV planes straight to the SDL renderer. */
            yuv_show(gAVFrame->data, gAVFrame->linesize, gAVFrame->width, gAVFrame->height);
        }
    } while (0);
    return 0;
}

int doPackDecode(struct ImagePacket *packetPtr) {
    uint8_t *data = NULL;
    int size = 0;
    /* Let the parser reassemble a complete access unit from the buffer. */
    int bytes_used = av_parser_parse2(gParser, gCodec_ctx, &data, &size,
                                      packetPtr->buf_, packetPtr->len_, 0, 0,
                                      AV_NOPTS_VALUE);
    (void) bytes_used;
    if (size == 0) {
        /* The parser needs more data before it can emit a packet. */
        return -1;
    }

    /* We have the data of one packet; decode it (or whatever is left at the end). */
    AVPacket packet;
    av_init_packet(&packet);
    packet.data = data;
    packet.size = size;
    int ret = doDecodeFrame(&packet, packetPtr->frame_index_);
    if (ret < 0) {
        return -1;
    }
    return 0;
}
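
One detail worth noting: the decoder may buffer frames internally, so at end of stream it should be drained by feeding it empty packets until no more frames come out. A minimal flush sketch (an addition, using the same deprecated avcodec_decode_video2 API as the code above):

static void doDecodeFlush(void) {
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;   /* a NULL/empty packet signals end of stream */
    pkt.size = 0;
    int got_frame = 0;
    do {
        if (avcodec_decode_video2(gCodec_ctx, gAVFrame, &got_frame, &pkt) < 0)
            break;
        if (got_frame)
            yuv_show(gAVFrame->data, gAVFrame->linesize,
                     gAVFrame->width, gAVFrame->height);
    } while (got_frame);
}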


SDL Video Rendering Pipeline

 

SDL initialization code:

#define LOAD_YUV420P 0

#define HAS_BORDER     1

const int bpp = 12;  // bits per pixel for YUV420

const int screen_w = 1434, screen_h = 806;
const int pixel_w = 1434, pixel_h = 806;

//const int screen_w=1920,screen_h=1080;
//const int pixel_w=1920,pixel_h=1080;

SDL_Window *gScreen = NULL;
SDL_Renderer *gSdlRenderer = NULL;
SDL_Texture *gSdlTexture = NULL;
SDL_Rect sdlRect;

//Refresh Event
#define REFRESH_EVENT  (SDL_USEREVENT + 1)

int thread_exit = 0;

// Timer thread: push a REFRESH_EVENT roughly every 10 ms to drive rendering.
int refresh_video(void *opaque) {
    while (thread_exit == 0) {
        SDL_Event event;
        event.type = REFRESH_EVENT;
        SDL_PushEvent(&event);
        SDL_Delay(10);
    }
    return 0;
}

int doSDLInit() {
    if (SDL_Init(SDL_INIT_EVERYTHING)) {
        printf("Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }

    //SDL 2.0 Support for multiple windows
    gScreen = SDL_CreateWindow("Video-View", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                               screen_w, screen_h, SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE);
    if (!gScreen) {
        printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
        return -1;
    }
    gSdlRenderer = SDL_CreateRenderer(gScreen, -1, 0);
    int pixformat = SDL_PIXELFORMAT_IYUV;

    gSdlTexture = SDL_CreateTexture(gSdlRenderer, pixformat, SDL_TEXTUREACCESS_STREAMING, pixel_w, pixel_h);

    int border = 0;


    sdlRect.x = 0 + border;
    sdlRect.y = 0 + border;
    sdlRect.w = screen_w - border * 2;
    sdlRect.h = screen_h - border * 2;

    SDL_Thread *refresh_thread = SDL_CreateThread(refresh_video, "refresh", NULL);
    if (!refresh_thread) {
        printf("SDL: could not create refresh thread - %s\n", SDL_GetError());
        return -1;
    }
    return 0;
}


SDL render-loop code (this runs inside the main event loop):

    SDL_Event event;
    SDL_WaitEvent(&event);
    if (event.type == REFRESH_EVENT) {
        // Upload the latest decoded frame and redraw.
        SDL_UpdateTexture(gSdlTexture, NULL, gImagePacket->buf_, pixel_w);
        SDL_RenderClear(gSdlRenderer);
        SDL_RenderCopy(gSdlRenderer, gSdlTexture, NULL, &sdlRect);
        SDL_RenderPresent(gSdlRenderer);
        SDL_Delay(1);
    } else if (event.type == SDL_QUIT) {
        _exit(0);
    }
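
A caveat with SDL_UpdateTexture on an IYUV texture: the single pitch argument (pixel_w here) must match the decoded frame's actual line size, which ffmpeg may pad for alignment. If the planes come straight from an AVFrame, SDL_UpdateYUVTexture takes each plane's own pitch explicitly (a sketch, using gAVFrame from the ffmpeg section above):

SDL_UpdateYUVTexture(gSdlTexture, NULL,
                     gAVFrame->data[0], gAVFrame->linesize[0],
                     gAVFrame->data[1], gAVFrame->linesize[1],
                     gAVFrame->data[2], gAVFrame->linesize[2]);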


Reference: https://blog.csdn.net/leixiaohua1020/article/details/40525591/

 

The overall flow of sending and receiving is shown in the figure below.

For the full source code, see my GitHub projects.

Starting the receiver:

./viewer 6665

Starting the sender:

/bin/video-pusher 127.0.0.1 6665 ./test-video/d001.mp4

 

The tests used a 1280*720 video file; any 1280*720 clip can be used for verification.

Video reading, encoding, and sending:

  https://github.com/gityf/img-video/tree/master/video/opencv-x264-rtp-pusher

Video receiving, decoding, and playback:

  https://github.com/gityf/img-video/tree/master/video/ffmpeg-h264-sdl-view

That's all. Have fun!
