9、 一個簡單的播放器(各自同步)

[TOC]

開始前的BB

開始準備搞播放器了,還不知道怎麼跟大佬們講,頭疼

頭髮都掉了
想來想去,我感受先實現一個簡單的視頻播放器,視頻和音頻自同步,來讓各位大佬們先來體驗一下,有個大致的脈絡

老夫擼碼就是一把梭

咱們先粗暴的分爲兩個線程,一個負責音頻的播放,一個負責視頻的播放,根據以前的咱們寫過的東西,咱們來改一改

chapter_09/中新建兩個類 VideoThread 和 AudioThread,一個負責視頻的解碼,一個負責音頻的解碼,渲染的話咱們新建一個AVRender,專門負責渲染以及窗口事件的管理

千言萬語註釋中

AVRender 渲染以及事件處理

AVRender.h

//
// Created by MirsFang on 2019-03-25.
//

#ifndef LEARNFFMPEG_AVRENDER_H
#define LEARNFFMPEG_AVRENDER_H

#define WINDOW_WIDTH 1080
#define WINDOW_HEIGHT 720

#include <iostream>

extern "C" {
#include <SDL2/SDL.h>
#include <libavcodec/avcodec.h>
}

/** 音視頻渲染器 **/
/** Audio/video renderer: owns the SDL window, renderer, texture and audio device. **/
class AVRender {
public:
    AVRender();

    ~AVRender();

    /**
     * Open the audio device with the requested format and start playback.
     *
     * @param sample_rate sample rate in Hz
     * @param channel     number of output channels
     * @param samples     samples per callback (size of one audio frame)
     * @param userdata    opaque pointer handed back to the callback
     * @param fill_audio  callback SDL invokes when it needs more PCM data
     */
    void openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
                   void (*fill_audio)(void *codecContext, Uint8 *stream, int len));

    /** Pump SDL events in a loop; returns when SDL_QUIT is received. **/
    void loopEvent();

    /** Render one video frame.
     *
     * @param frame    decoded YUV420P frame
     * @param duration how long the frame stays on screen, in milliseconds
     */
    void renderVideo(AVFrame *frame,Uint32 duration);

private:
    /** SDL window **/
    SDL_Window *window;
    /** SDL renderer **/
    SDL_Renderer *render;
    /** Streaming texture the YUV planes are uploaded into **/
    SDL_Texture *texture;
    /** Destination rectangle of the display area **/
    SDL_Rect rect;

    /** Desired audio output format (SDL may adjust it on open) **/
    SDL_AudioSpec wantSpec;

};


#endif //LEARNFFMPEG_AVRENDER_H
複製代碼

AVRender.cpp

//
// Created by MirsFang on 2019-03-25.
//

#include "AVRender.h"


AVRender::AVRender() {
    // Initialize the SDL video/audio/event subsystems.
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_EVENTS)) {
        std::cout << "[error] SDL Init error !" << std::endl;
        return;
    }

    // Create the output window.
    window = SDL_CreateWindow("LearnFFmpeg", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, WINDOW_WIDTH,
                              WINDOW_HEIGHT, SDL_WINDOW_OPENGL);
    if (!window) {
        std::cout << "[error] SDL Create window error!" << std::endl;
        return;
    }

    // Create the renderer.
    // BUGFIX: the original ignored creation failures here; a null
    // renderer/texture would crash later in renderVideo().
    render = SDL_CreateRenderer(window, -1, 0);
    if (!render) {
        std::cout << "[error] SDL Create renderer error!" << std::endl;
        return;
    }

    // Create the streaming YUV texture frames are uploaded into.
    texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, WINDOW_WIDTH, WINDOW_HEIGHT);
    if (!texture) {
        std::cout << "[error] SDL Create texture error!" << std::endl;
        return;
    }

    // Display area covers the whole window.
    rect.x = 0;
    rect.y = 0;
    rect.w = WINDOW_WIDTH;
    rect.h = WINDOW_HEIGHT;
}

AVRender::~AVRender() {
    // BUGFIX: the original called SDL_Quit() before destroying the
    // texture/renderer/window. SDL objects must be destroyed while SDL
    // is still initialized, in reverse order of creation.
    SDL_CloseAudio();
    if (texture) SDL_DestroyTexture(texture);
    if (render) SDL_DestroyRenderer(render);
    if (window) SDL_DestroyWindow(window);
    SDL_Quit();
}

void AVRender::loopEvent() {
    SDL_Event event;
    for (;;) {
        SDL_PollEvent(&event);
        switch (event.type) {
            case SDL_KEYDOWN:
                switch (event.key.keysym.sym) {

                }
                break;
            case SDL_QUIT:
                return;
            default:
                break;
        }
    }
}


void AVRender::renderVideo(AVFrame *frame, Uint32 duration) {
    if (frame == nullptr)return;
    // Upload the three YUV planes into the streaming texture.
    SDL_UpdateYUVTexture(texture, &rect,
                         frame->data[0], frame->linesize[0],
                         frame->data[1], frame->linesize[1],
                         frame->data[2], frame->linesize[2]
    );

    // Clear, copy the texture to the backbuffer, and present it.
    SDL_RenderClear(render);
    SDL_RenderCopy(render, texture, NULL, &rect);
    SDL_RenderPresent(render);
    // Block the calling (video decode) thread for the frame's display
    // duration — this is what "self-synchronizes" video in this demo.
    SDL_Delay(duration);
}

void AVRender::openAudio(int sample_rate, Uint8 channel, Uint16 samples, void *userdata,
                         void (*fill_audio)(void *, Uint8 *, int)) {

    // Fill in the desired output format (signed 16-bit, native byte order).
    wantSpec.freq = sample_rate;
    wantSpec.format = AUDIO_S16SYS;
    wantSpec.channels = channel;
    wantSpec.silence = 0;
    wantSpec.samples = samples;
    wantSpec.callback = fill_audio;
    wantSpec.userdata = userdata;

    // SDL may adjust wantSpec to the device's actual parameters on open.
    if (SDL_OpenAudio(&wantSpec, NULL) < 0) {
        std::cout << "[error] open audio error" << std::endl;
        return;
    }

    // Unpause: SDL starts pulling data through the callback immediately.
    SDL_PauseAudio(0);
}
複製代碼

VideoThread 視頻解碼

視頻解碼類 VideoThread.h

//
// Created by MirsFang on 2019-03-25.
//

#ifndef LEARNFFMPEG_VIDEOTHREAD_H
#define LEARNFFMPEG_VIDEOTHREAD_H

#include <pthread.h>
#include <iostream>
#include "AVRender.h"

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
};

/** 視頻線程 **/
/** Video thread: demuxes and decodes the video stream, handing frames to AVRender. **/
class VideoThread {
public:
    VideoThread();

    ~VideoThread();

    /** Set the media path/URL (the string must outlive this object). **/
    void setUrl(const char *url);

    /** Set the renderer (borrowed pointer, not owned). **/
    void setRender(AVRender *render);

    /** Prepare the decoder and start the decoding thread. **/
    void start();


private:
    AVFormatContext *format_context;
    AVCodecContext *codec_context;
    AVCodec *codec;
    AVPacket *packet;
    AVFrame *frame;

    const char *url;
    int video_index;

    pthread_t pid;
    pthread_mutex_t mutex;

    AVRender *avRender;

    double last_pts = 0;
    /** true → sync by pts interval between frames; false → by average frame rate **/
    bool is_interval_sync = true;

    static void *start_thread(void *arg);

    void run();

    /** Initialize demuxer and decoder (runs before the thread starts). **/
    void prepare_codec();

    /** Decode loop: read packets, decode frames, render them. **/
    void decodec_frame();

    /**
     * Display duration derived from the average frame rate.
     * @param frame_rate frames per second
     * @return display duration in milliseconds
     */
    Uint32 sync_frame_rate(double frame_rate);

    /**
     * Display duration derived from the pts gap to the previous frame.
     * NOTE(review): callers pass AVFrame::pts (int64_t) into this int
     * parameter — truncation risk on long streams; confirm and widen
     * together with the definition in VideoThread.cpp.
     * @param timebase stream time base
     * @param pts frame pts in timebase units
     * @return display duration in seconds
     */
    double sync_frame_interval(AVRational timebase, int pts);
};


#endif //LEARNFFMPEG_VIDEOTHREAD_H

複製代碼

VideoThread.cpp

//
// Created by MirsFang on 2019-03-25.
//

#include "VideoThread.h"

VideoThread::VideoThread() {
    // BUGFIX: the members are raw pointers that the destructor tests
    // against nullptr, but the original never initialized them — if
    // prepare_codec() fails early (or is never called) the destructor
    // frees wild pointers. Initialize everything to a safe state.
    format_context = nullptr;
    codec_context = nullptr;
    codec = nullptr;
    packet = nullptr;
    frame = nullptr;
    avRender = nullptr;
    url = nullptr;
    video_index = -1;
}

VideoThread::~VideoThread() {
    // Release the FFmpeg resources acquired in prepare_codec(),
    // in reverse order of allocation.
    if (frame != nullptr) {
        av_frame_free(&frame);
    }
    if (packet != nullptr) {
        av_packet_free(&packet);
    }
    if (codec_context != nullptr) {
        avcodec_free_context(&codec_context);
    }
    if (format_context != nullptr) {
        avformat_close_input(&format_context);
    }
}

void VideoThread::start() {
    // Build the demux/decode chain on the calling thread, then hand the
    // decode loop off to a worker thread.
    prepare_codec();
    const int rc = pthread_create(&pid, nullptr, start_thread, this);
    if (rc != 0) {
        std::cout << "初始化視頻線程失敗!" << std::endl;
    }
}

void *VideoThread::start_thread(void *arg) {
    VideoThread *audioThread = (VideoThread *) arg;
    audioThread->run();
    return nullptr;
}

void VideoThread::run() {
    // Thread body: announce start-up, then decode until the stream ends.
    std::cout << "視頻線程運行中..." << std::endl;
    decodec_frame();
}

void VideoThread::setRender(AVRender *render) {
    // Borrowed pointer; the renderer is owned by the caller.
    avRender = render;
}

void VideoThread::setUrl(const char *url) {
    // Store the media path/URL; the string must outlive this object.
    this->url = url;
}

void VideoThread::prepare_codec() {
    // Build the demux/decode chain: format context → stream info →
    // codec context → decoder. Logs and returns early on any failure.
    int retcode;
    format_context = avformat_alloc_context();
    if (!format_context) {
        std::cout << "[error] alloc format context error!" << std::endl;
        return;
    }

    // Open the input stream.
    retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open input error!" << std::endl;
        return;
    }

    // Probe the container for stream information.
    // BUGFIX: avformat_find_stream_info() returns >= 0 on success, so
    // the original `!= 0` check treated success as failure.
    retcode = avformat_find_stream_info(format_context, NULL);
    if (retcode < 0) {
        std::cout << "[error] find stream error!" << std::endl;
        return;
    }

    // Allocate the codec context.
    codec_context = avcodec_alloc_context3(NULL);
    if (!codec_context) {
        std::cout << "[error] alloc codec context error!" << std::endl;
        return;
    }

    // Locate the video stream.
    // BUGFIX: av_find_best_stream() returns a negative error code when
    // there is no video stream; the original indexed streams[] with it
    // unchecked.
    video_index = av_find_best_stream(format_context, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (video_index < 0) {
        std::cout << "[error] no video stream found!" << std::endl;
        return;
    }
    // Copy the stream's codec parameters into the codec context
    // (avcodec_parameters_to_context() also returns >= 0 on success).
    retcode = avcodec_parameters_to_context(codec_context, format_context->streams[video_index]->codecpar);
    if (retcode < 0) {
        std::cout << "[error] parameters to context error!" << std::endl;
        return;
    }

    // Find a decoder for the stream's codec id.
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == nullptr) {
        std::cout << "[error] find decoder error!" << std::endl;
        return;
    }

    // Open the decoder.
    retcode = avcodec_open2(codec_context, codec, nullptr);
    if (retcode < 0) {
        std::cout << "[error] open decodec error!" << std::endl;
        return;
    }

    // Reusable packet/frame for the decode loop.
    packet = av_packet_alloc();
    frame = av_frame_alloc();
}

void VideoThread::decodec_frame() {
    int sendcode = 0;

    //計算幀率
    double frameRate = av_q2d(format_context->streams[video_index]->avg_frame_rate);
    //計算顯示的時間
    Uint32 display_time_ms = 0;

    if (!is_interval_sync) {
        display_time_ms = sync_frame_rate(frameRate);
    }

    //記錄幀間延遲
    clock_t start = 0, finish = 0;
    //讀取包
    while (av_read_frame(format_context, packet) == 0) {
        if (packet->stream_index != video_index)continue;
        //接受解碼後的幀數據
        while (avcodec_receive_frame(codec_context, frame) == 0) {
            /**
             * 若是開啓幀間隔同步模式,那麼是根據
             *
             *  顯示時長 = 當前幀 - 上一幀 - 單幀解碼耗時
             *
             *  可得出當前幀真正要顯示的時間
             *
             * **/
            if (is_interval_sync) {
                //計算上一幀與當前幀的延時
                display_time_ms = (Uint32) (
                        sync_frame_interval(format_context->streams[video_index]->time_base, frame->pts) * 1000);
                //幀解碼結束時間
                finish = clock();
                double diff_time = (finish - start) / 1000;

                //減去幀間解碼時差 幀解碼開始時間 - 幀解碼結束時間
                if (display_time_ms > diff_time)display_time_ms = display_time_ms - (Uint32) diff_time;
            }
            //繪製圖像
            if (avRender)avRender->renderVideo(frame, display_time_ms);

            av_frame_unref(frame);
            //幀解碼開始時間
            start = clock();
        }
        //發送解碼前的包數據
        sendcode = avcodec_send_packet(codec_context, packet);
        //根據發送的返回值判斷狀態
        if (sendcode == 0) {
//            std::cout << "[debug] " << "SUCCESS" << std::endl;
        } else if (sendcode == AVERROR_EOF) {
            std::cout << "[debug] " << "EOF" << std::endl;
        } else if (sendcode == AVERROR(EAGAIN)) {
            std::cout << "[debug] " << "EAGAIN" << std::endl;
        } else {
            std::cout << "[debug] " << av_err2str(AVERROR(sendcode)) << std::endl;
        }
        av_packet_unref(packet);
    }
}

Uint32 VideoThread::sync_frame_rate(double frame_rate) {
    // Display duration (ms) of one frame at a constant frame rate.
    // BUGFIX: guard against streams reporting a zero/invalid frame rate;
    // the original divided blindly (inf → Uint32 conversion).
    if (frame_rate <= 0) return 0;
    return (Uint32) (1000.0 / frame_rate);
}

double VideoThread::sync_frame_interval(AVRational timebase, int pts) {
    // Display duration (seconds) = pts gap to the previous frame,
    // converted to seconds via the stream time base. Also advances
    // last_pts, so this must be called exactly once per frame, in order.
    // NOTE(review): `pts` is declared int but callers pass AVFrame::pts
    // (int64_t) — long streams / fine time bases will truncate. Fixing
    // this requires widening the parameter here AND in VideoThread.h.
    double display = (pts - last_pts) * av_q2d(timebase);
    last_pts = pts;
    std::cout << "pts : " << pts * av_q2d(timebase) << " -- display :" << display << std::endl;
    return display;
}

複製代碼

AudioThread 音頻解碼

AudioThread.h

//
// Created by MirsFang on 2019-03-25.
//

#ifndef LEARNFFMPEG_AUDIOTHREAD_H
#define LEARNFFMPEG_AUDIOTHREAD_H

#include <pthread.h>
#include <iostream>

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
};

#include "AVRender.h"


/**
 * 音頻線程
 */
class AudioThread {
public:
    AudioThread();

    ~AudioThread();

    void setUrl(const char *url);

    /** 開啓線程 **/
    void start();

    /** 設置渲染器 **/
    void setRender(AVRender *render);

private:
    /** 重採樣上下文 **/
    SwrContext *convert_context;
    AVFormatContext *format_context;
    AVCodecContext *codec_context;
    AVCodec *codec;
    AVPacket *packet;
    AVFrame *frame;
    int audioIndex = -1;

    uint64_t out_chn_layout = AV_CH_LAYOUT_STEREO;  //輸出的通道佈局 雙聲道
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16; //輸出的聲音格式
    int out_sample_rate = 44100;   //輸出的採樣率
    int out_nb_samples = -1;        //輸出的音頻採樣
    int out_channels = -1;        //輸出的通道數
    int out_buffer_size = -1;   //輸出buff大小
    unsigned char *outBuff = NULL;//輸出的Buffer數據
    uint64_t in_chn_layout = -1;  //輸入的通道佈局

    pthread_t pid;
    pthread_mutex_t mutex;
    AVRender *av_render;

    const char *url;

    static void *start_thread(void *arg);

    void run();

    /** 初始化解碼器 **/
    void prepare_codec();
};


#endif //LEARNFFMPEG_AUDIOTHREAD_H

複製代碼

AudioThread.cpp

//
// Created by MirsFang on 2019-03-25.
//

#include "AudioThread.h"


#define MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio 48000 * (32/8)

// Bytes of PCM remaining to play in the current chunk.
// NOTE(review): these globals are shared between the decode thread and
// the SDL audio callback with no explicit synchronization — this
// presumably relies on SDL's internal audio-device lock; verify.
unsigned int audioLen = 0;
unsigned char *audioChunk = nullptr;
// Current read position inside the chunk.
unsigned char *audioPos = nullptr;

/** 被SDL2調用的回調函數 當須要獲取數據喂入硬件播放的時候調用 **/
void fill_audio(void *codecContext, Uint8 *stream, int len) {
    //SDL2中必須首先使用SDL_memset()將stream中的數據設置爲0
    SDL_memset(stream, 0, len);
    if (audioLen == 0)
        return;

    len = (len > audioLen ? audioLen : len);
    //將數據合併到 stream 裏
    SDL_MixAudio(stream, audioPos, len, SDL_MIX_MAXVOLUME);

    //一幀的數據控制
    audioPos += len;
    audioLen -= len;
}

AudioThread::AudioThread() {
    // BUGFIX: the destructor null-checks these raw pointers, but the
    // original never initialized them — if prepare_codec() fails early
    // (or never runs), the destructor calls swr_free()/av_*_free() on
    // garbage. Initialize everything to a safe state.
    convert_context = nullptr;
    format_context = nullptr;
    codec_context = nullptr;
    codec = nullptr;
    packet = nullptr;
    frame = nullptr;
    av_render = nullptr;
    url = nullptr;
}


AudioThread::~AudioThread() {
    // Release FFmpeg resources acquired in prepare_codec().
    if (format_context != nullptr) avformat_close_input(&format_context);
    if (codec_context != nullptr) avcodec_free_context(&codec_context);
    if (packet != nullptr) av_packet_free(&packet);
    if (frame != nullptr) av_frame_free(&frame);
    if (convert_context != nullptr) swr_free(&convert_context);
    // BUGFIX: the resample buffer allocated with av_malloc() in
    // prepare_codec() was never released — memory leak.
    if (outBuff != NULL) {
        av_free(outBuff);
        outBuff = NULL;
    }
}


void AudioThread::start() {
    // Build the demux/decode/resample chain on the calling thread,
    // then hand the decode loop off to a worker thread.
    prepare_codec();
    const int rc = pthread_create(&pid, nullptr, start_thread, this);
    if (rc != 0) {
        std::cout << "初始化音頻線程失敗!" << std::endl;
    }
}

void *AudioThread::start_thread(void *arg) {
    AudioThread *audioThread = (AudioThread *) arg;
    audioThread->run();
    return nullptr;
}

void AudioThread::run() {
    // Thread body: read packets, decode, resample, and hand PCM chunks
    // to the SDL audio callback via the shared globals.
    std::cout << "音頻線程已啓動" << std::endl;

    int sendcode = 0;
    while (av_read_frame(format_context, packet) >= 0) {
        if (packet->stream_index != audioIndex) {
            // BUGFIX: the packet must be unreferenced before skipping,
            // otherwise every non-audio packet leaks its payload.
            av_packet_unref(packet);
            continue;
        }
        // Drain all frames the decoder has ready.
        while (avcodec_receive_frame(codec_context, frame) == 0) {
            // Resample into the S16 stereo layout SDL was opened with.
            swr_convert(convert_context, &outBuff, MAX_AUDIO_FRAME_SIZE, (const uint8_t **) frame->data,
                        frame->nb_samples);
            // Busy-wait (1 ms steps) until the callback drains the
            // previous chunk — this is the audio self-sync of this demo.
            while (audioLen > 0)
                SDL_Delay(1);
            // Publish the new chunk to the callback.
            audioChunk = (unsigned char *) outBuff;
            audioPos = audioChunk;
            audioLen = out_buffer_size;
            av_frame_unref(frame);
        }
        // Feed the compressed packet to the decoder.
        sendcode = avcodec_send_packet(codec_context, packet);
        // Interpret the send status.
        if (sendcode == 0) {
//            std::cout << "[debug] " << "SUCCESS" << std::endl;
        } else if (sendcode == AVERROR_EOF) {
            std::cout << "[debug] " << "EOF" << std::endl;
        } else if (sendcode == AVERROR(EAGAIN)) {
            std::cout << "[debug] " << "EAGAIN" << std::endl;
        } else {
            std::cout << "[debug] " << av_err2str(AVERROR(sendcode)) << std::endl;
        }

        av_packet_unref(packet);
    }

}

void AudioThread::setRender(AVRender *render) {
    // Borrowed pointer; the renderer is owned by the caller.
    av_render = render;
}

void AudioThread::prepare_codec() {
    // Build the demux/decode/resample chain: format context → stream
    // info → codec context → decoder → SwrContext. Logs and returns
    // early on any failure.
    int retcode;
    format_context = avformat_alloc_context();
    if (!format_context) {
        std::cout << "[error] alloc format context error!" << std::endl;
        return;
    }

    // Open the input stream.
    retcode = avformat_open_input(&format_context, url, nullptr, nullptr);
    if (retcode != 0) {
        std::cout << "[error] open input error!" << std::endl;
        return;
    }

    // Probe the container for stream information.
    // BUGFIX: avformat_find_stream_info() returns >= 0 on success, so
    // the original `!= 0` check treated success as failure.
    retcode = avformat_find_stream_info(format_context, NULL);
    if (retcode < 0) {
        std::cout << "[error] find stream error!" << std::endl;
        return;
    }

    // Allocate the codec context.
    codec_context = avcodec_alloc_context3(NULL);
    if (!codec_context) {
        std::cout << "[error] alloc codec context error!" << std::endl;
        return;
    }

    // Locate the audio stream.
    // BUGFIX: av_find_best_stream() returns a negative error code when
    // there is no audio stream; the original indexed streams[] with it
    // unchecked.
    audioIndex = av_find_best_stream(format_context, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audioIndex < 0) {
        std::cout << "[error] no audio stream found!" << std::endl;
        return;
    }
    // Copy the stream's codec parameters into the codec context
    // (avcodec_parameters_to_context() also returns >= 0 on success).
    retcode = avcodec_parameters_to_context(codec_context, format_context->streams[audioIndex]->codecpar);
    if (retcode < 0) {
        std::cout << "[error] parameters to context error!" << std::endl;
        return;
    }

    // Find a decoder for the stream's codec id.
    codec = avcodec_find_decoder(codec_context->codec_id);
    if (codec == nullptr) {
        std::cout << "[error] find decoder error!" << std::endl;
        return;
    }

    // Open the decoder.
    retcode = avcodec_open2(codec_context, codec, nullptr);
    if (retcode < 0) {
        std::cout << "[error] open decodec error!" << std::endl;
        return;
    }

    // Reusable packet/frame for the decode loop.
    packet = av_packet_alloc();
    frame = av_frame_alloc();


    /** ########## derive the actual output audio parameters ##########**/
    // Samples per channel in one decoded frame.
    out_nb_samples = codec_context->frame_size;
    // Output channel count from the fixed stereo layout.
    out_channels = av_get_channel_layout_nb_channels(out_chn_layout);
    // Input channel layout inferred from the decoder's channel count.
    in_chn_layout = av_get_default_channel_layout(codec_context->channels);

    /** Compute the resampled chunk size and allocate the buffer **/
    out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
    // BUGFIX: av_samples_get_buffer_size() returns a negative error code
    // on invalid parameters; the original stored it unchecked.
    if (out_buffer_size < 0) {
        std::cout << "[error] get samples buffer size error!" << std::endl;
        return;
    }
    outBuff = (unsigned char *) av_malloc(MAX_AUDIO_FRAME_SIZE * 2); // stereo

    // Open the SDL audio device with our desired parameters.
    if (av_render)av_render->openAudio(out_sample_rate, out_channels, out_nb_samples, codec_context, fill_audio);

    convert_context = swr_alloc_set_opts(NULL, out_chn_layout, out_sample_fmt, out_sample_rate,
                                          in_chn_layout, codec_context->sample_fmt, codec_context->sample_rate, 0,
                                          NULL);
    // BUGFIX: both allocation and initialization of the SwrContext can
    // fail; the original ignored both results.
    if (!convert_context || swr_init(convert_context) < 0) {
        std::cout << "[error] init swr context error!" << std::endl;
        return;
    }

}

void AudioThread::setUrl(const char *url) {
    // Store the media path/URL; the string must outlive this object.
    this->url = url;
}

複製代碼

咱們在Main方法中

#ifdef chapter_09

    //實例化渲染器
    AVRender* render = new AVRender();

    //初始化視頻線程
    VideoThread *videoThread = new VideoThread();
    videoThread->setRender(render);
    videoThread->setUrl(url);


    //初始化音頻線程
    AudioThread *audioThread = new AudioThread();
    audioThread->setRender(render);
    audioThread->setUrl(url);

    //開啓音視頻線程
    videoThread->start();
    audioThread->start();

    //事件循環
    render->loopEvent();

#endif
複製代碼

若是沒錯,那麼就應該正常的播放視頻了。。。

祝各位大佬們好運

未完持續 ...

相關文章
相關標籤/搜索