OpenGL全景視頻

    全景視頻其實在實現上和通常的視頻播放基本差很少,解碼能夠用ffmpeg,只是對解碼後的圖片在繪製的時候要繪製在一個球上(我這裏是球,好像有說有的格式要繪製在四面體上的,未作深刻研究),而不是畫在一個表面上。因此這裏應該要用紋理。

1.計算球的頂點座標和紋理座標

    球的頂點座標和紋理座標的計算能夠說是全景的關鍵。這裏參考android opengl播放全景視頻。

int cap_H = 1;//must be > 0, and cap_H should equal cap_W (latitude step in degrees)
int cap_W = 1;//angular increment (degrees) used when tessellating the sphere

float* verticals;     // sphere vertex positions (x,y,z per vertex), allocated by getPointMatrix
float* UV_TEX_VERTEX; // matching texture coordinates (u,v per vertex), allocated by getPointMatrix

........................

/*
 * Tessellate a UV sphere of the given radius (centred at the origin) into
 * (180/cap_H) * (360/cap_W) quads, each emitted as two triangles (6 vertices).
 * Results are written to the global arrays `verticals` (3 floats/vertex) and
 * `UV_TEX_VERTEX` (2 floats/vertex), sized for use with glDrawArrays(GL_TRIANGLES).
 *
 * Fix: the original leaked both buffers when called more than once; we now
 * release any previous tessellation first (delete[] on null is a no-op).
 */
void getPointMatrix(GLfloat radius)
{
    delete[] verticals;
    delete[] UV_TEX_VERTEX;

    verticals = new float[(180 / cap_H) * (360 / cap_W) * 6 * 3];
    UV_TEX_VERTEX = new float[(180 / cap_H) * (360 / cap_W) * 6 * 2];

    float x = 0;    // sphere centre
    float y = 0;
    float z = 0;

    int index = 0;   // write cursor into verticals
    int index1 = 0;  // write cursor into UV_TEX_VERTEX
    float r = radius;            // sphere radius
    double d = cap_H * PI / 180; // angular step in radians

    for (int i = 0; i < 180; i += cap_H) {
        double d1 = i * PI / 180;  // current latitude angle (radians)
        // Hoist latitude-dependent terms out of the inner longitude loop.
        double sinLat  = sin(d1),     cosLat  = cos(d1);
        double sinLat2 = sin(d1 + d), cosLat2 = cos(d1 + d);
        for (int j = 0; j < 360; j += cap_W) {
            double d2 = j * PI / 180;  // current longitude angle (radians)
            double sinLon  = sin(d2),     cosLon  = cos(d2);
            double sinLon2 = sin(d2 + d), cosLon2 = cos(d2 + d);

            // Quad corner positions on the sphere; each quad becomes two
            // triangles, hence six vertices per (i, j) cell.

            // Triangle 1: (i+d, j+d), (i, j), (i, j+d)
            verticals[index++] = (float)(x + r * sinLat2 * cosLon2);
            verticals[index++] = (float)(y + r * cosLat2);
            verticals[index++] = (float)(z + r * sinLat2 * sinLon2);
            UV_TEX_VERTEX[index1++] = (j + cap_W) * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = (i + cap_H) * 1.0f / 180;

            verticals[index++] = (float)(x + r * sinLat * cosLon);
            verticals[index++] = (float)(y + r * cosLat);
            verticals[index++] = (float)(z + r * sinLat * sinLon);
            UV_TEX_VERTEX[index1++] = j * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = i * 1.0f / 180;

            verticals[index++] = (float)(x + r * sinLat * cosLon2);
            verticals[index++] = (float)(y + r * cosLat);
            verticals[index++] = (float)(z + r * sinLat * sinLon2);
            UV_TEX_VERTEX[index1++] = (j + cap_W) * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = i * 1.0f / 180;

            // Triangle 2: (i+d, j+d), (i+d, j), (i, j)
            verticals[index++] = (float)(x + r * sinLat2 * cosLon2);
            verticals[index++] = (float)(y + r * cosLat2);
            verticals[index++] = (float)(z + r * sinLat2 * sinLon2);
            UV_TEX_VERTEX[index1++] = (j + cap_W) * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = (i + cap_H) * 1.0f / 180;

            verticals[index++] = (float)(x + r * sinLat2 * cosLon);
            verticals[index++] = (float)(y + r * cosLat2);
            verticals[index++] = (float)(z + r * sinLat2 * sinLon);
            UV_TEX_VERTEX[index1++] = j * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = (i + cap_H) * 1.0f / 180;

            verticals[index++] = (float)(x + r * sinLat * cosLon);
            verticals[index++] = (float)(y + r * cosLat);
            verticals[index++] = (float)(z + r * sinLat * sinLon);
            UV_TEX_VERTEX[index1++] = j * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = i * 1.0f / 180;
        }
    }
}

2.文件解碼

    我這裏用ffmpeg來作文件的解碼,用了一個最簡單的單線程循環來作,沒有作過多複雜的考慮。解出來的圖像數據放到一個循環隊列中。

/*
 * Decoder thread entry point.
 * Opens the video file, finds the first video stream, then decodes frames
 * with FFmpeg, converts each to BGR24 and pushes it into the shared circular
 * queue `frame_queue` consumed by the render thread.
 *
 * Returns 0 on success, (DWORD)-1 on any setup failure.
 *
 * Fixes vs. original:
 *  - early error paths now close the demuxer instead of leaking it;
 *  - a decode error no longer leaks the packet (the `continue` used to skip
 *    av_free_packet);
 *  - removed a redundant second av_free(vp->buffer);
 *  - avpicture_fill now uses the frame's own dimensions, consistent with the
 *    buffer size computed from pFrame;
 *  - pFrame and packet are released on exit; unused pFrameBGR removed.
 */
DWORD WINAPI ThreadFunc(LPVOID n)
{
    AVFormatContext    *pFormatCtx;
    int                i, videoindex;
    AVCodec            *pCodec;
    AVCodecContext    *pCodecCtx = NULL;

    char filepath[] = "H:\\F-5飛行.mp4";

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    // Note: on failure avformat_open_input frees the user-supplied context.
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
        printf("Couldn't open input stream.(沒法打開輸入流)\n");
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
    {
        printf("Couldn't find stream information.(沒法獲取流信息)\n");
        avformat_close_input(&pFormatCtx);  // fix: don't leak the demuxer
        return -1;
    }

    // Locate the first video stream.
    videoindex = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
    {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1)
    {
        printf("Didn't find a video stream.(沒有找到視頻流)\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        printf("Codec not found.(沒有找到解碼器)\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("Could not open codec.(沒法打開解碼器)\n");
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    AVFrame *pFrame = av_frame_alloc();
    int ret, got_picture;
    AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    struct SwsContext *img_convert_ctx;

    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        if (packet->stream_index == videoindex)
        {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0)
            {
                printf("Decode Error.(解碼錯誤)\n");
                av_free_packet(packet);  // fix: `continue` used to leak the packet
                continue;
            }
            if (got_picture)
            {
                // Block (polling) while the circular queue is full.
                while (frame_queue.size >= MAXSIZE)
                {
                    printf("size = %d   I'm WAITING ... \n", frame_queue.size);
                    Sleep(10);
                }

                EnterCriticalSection(&frame_queue.cs);

                Vid_Frame *vp = &frame_queue.queue[frame_queue.rear];

                vp->frame->pts = pFrame->pts;

                /* (Re)allocate the BGR buffer when the frame size changes. */
                if (vp->buffer == NULL || vp->width != pFrame->width || vp->height != pFrame->height)
                {
                    av_free(vp->buffer);  // av_free(NULL) is a no-op
                    vp->buffer = NULL;

                    int iSize = avpicture_get_size(AV_PIX_FMT_BGR24, pFrame->width, pFrame->height);
                    vp->buffer = (uint8_t *)av_mallocz(iSize);

                    vp->width = pFrame->width;
                    vp->height = pFrame->height;
                }

                // fix: use the frame's own dimensions, matching the buffer
                // size computed above (was pCodecCtx->width/height).
                avpicture_fill((AVPicture *)vp->frame, vp->buffer, AV_PIX_FMT_BGR24, vp->width, vp->height);

                if (vp->buffer)
                {
                    // Convert from the decoder's pixel format to BGR24.
                    img_convert_ctx = sws_getContext(vp->width, vp->height, (AVPixelFormat)pFrame->format,
                        vp->width, vp->height, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
                    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, vp->height,
                        vp->frame->data, vp->frame->linesize);
                    sws_freeContext(img_convert_ctx);

                    vp->pts = pFrame->pts;
                }

                frame_queue.size++;
                frame_queue.rear = (frame_queue.rear + 1) % MAXSIZE;

                LeaveCriticalSection(&frame_queue.cs);
            }
        }
        av_free_packet(packet);
    }

    // fix: release decode resources the original leaked.
    av_frame_free(&pFrame);
    av_free(packet);

    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}

其中frame_queue是一個循環隊列,解碼的時候入隊,渲染的時候出隊。雖然沒有實際測,但我試用的幾個視頻文件都是4K的,因此解碼時間估計有些長,解碼這裏若是能用硬解應該效果會更好。而後我這裏沒有考慮音頻。

3.渲染

(1)初始化

/*
 * One-time application / OpenGL setup:
 *  - initialise the shared decode queue,
 *  - create the texture object that will hold each decoded video frame,
 *  - set global GL state (smooth shading, depth test, texturing),
 *  - disable face culling so the camera can sit INSIDE the sphere,
 *  - tessellate the projection sphere (radius 500).
 * Lighting is deliberately left disabled so the video is evenly lit.
 */
void init(void)
{
    initQueue(&frame_queue);

    glGenTextures(1, &texturesArr);    // create the video texture object

    glBindTexture(GL_TEXTURE_2D, texturesArr);

    //Mat image = imread("1.jpg");
    //glTexImage2D(GL_TEXTURE_2D, 0, 3, image.cols, image.rows, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, image.data);

    //IplImage *image = cvLoadImage("4.png", 1);
    //IplImage *image = cvLoadImage("5.png", 1);
    //glTexImage2D(GL_TEXTURE_2D, 0, 3, image->width, image->height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, image->imageData);
    //printf("nChannels is %d \n", image->nChannels);
    //cvNamedWindow("1");
    //cvShowImage("1", image);
    //cvWaitKey(0);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);    // linear filtering
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);    // linear filtering


    glClearColor(0.0, 0.0, 0.0, 0.0);
    glClearDepth(1);
    glShadeModel(GL_SMOOTH);
    //GLfloat _ambient[] = { 1.0, 1.0, 1.0, 1.0 };
    //GLfloat _diffuse[] = { 1.0, 1.0, 1.0, 1.0 };
    //GLfloat _specular[] = { 1.0, 1.0, 1.0, 1.0 };
    //GLfloat _position[] = { 255, 255, 255, 0 };
    //glLightfv(GL_LIGHT0, GL_AMBIENT, _ambient);
    //glLightfv(GL_LIGHT0, GL_DIFFUSE, _diffuse);
    //glLightfv(GL_LIGHT0, GL_SPECULAR, _specular);
    //glLightfv(GL_LIGHT0, GL_POSITION, _position);
    //glEnable(GL_LIGHTING);
    //glEnable(GL_LIGHT0);

    glEnable(GL_TEXTURE_2D);
    glEnable(GL_DEPTH_TEST);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);

    glDisable(GL_CULL_FACE);    // disable culling: the viewer is inside the sphere

    getPointMatrix(500);    // build sphere geometry, radius 500
}

初始化中包含隊列的初始化,建立紋理,計算球的頂點座標和紋理座標,各類參數設置。其中注意禁用光源,不然圖片各處的明暗會依據光源位置的設置而有不一樣;其次是禁用剪裁,不然沒法進入到球體內部,由於全景視頻是在球體內部看的。

(2)設置投影矩陣

/*
 * GLUT reshape callback: reset the viewport and the perspective projection
 * whenever the window is resized.  A perspective projection (45° FOV) is
 * used so the camera can move inside the sphere.
 */
void reshape(int w, int h)
{
    if (h == 0)    // fix: guard against a zero-height window (aspect division below)
        h = 1;
    glViewport(0, 0, (GLsizei)w, (GLsizei)h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45, (GLfloat)w / h, 1.0f, 1000.0f);    // set the projection matrix
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

投影採用透視投影,這樣能夠進入球體內部。這裏角度設置成45,能夠自行設置,但不宜過大,過大效果不是很好。

(3)渲染線程

/*
 * GLUT display callback (render path).
 * Pops one decoded frame from the shared queue (if available), uploads it
 * as the sphere texture, then draws the textured sphere with the current
 * rotation angles (xangle/yangle/zangle) and eye distance.
 */
void display(void)
{
    glLoadIdentity();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Eye on the Z axis at `distance`, looking toward the sphere centre line.
    gluLookAt(0, 0, distance, 0, 0, 500.0, 0, 1, 0);
    printf("distance: %f \n", distance);
    glRotatef(xangle, 1.0f, 0.0f, 0.0f);    // rotate about the X axis
    glRotatef(yangle, 0.0f, 1.0f, 0.0f);    // rotate about the Y axis
    glRotatef(zangle, 0.0f, 0.0f, 1.0f);    // rotate about the Z axis

    EnterCriticalSection(&frame_queue.cs);

    printf("display size = %d \n", frame_queue.size);
    if (frame_queue.size > 0)
    {
        // Dequeue the oldest frame and upload it as the sphere texture.
        Vid_Frame *vp = &frame_queue.queue[frame_queue.front];

        glBindTexture(GL_TEXTURE_2D, texturesArr);
        glTexImage2D(GL_TEXTURE_2D, 0, 3, vp->width, vp->height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, vp->buffer);

        frame_queue.size--;
        frame_queue.front = (frame_queue.front + 1) % MAXSIZE;
    }

    LeaveCriticalSection(&frame_queue.cs);

    //glColor3f(1.0, 0.0, 0.0);
    // Draw the tessellated sphere from the vertex / texcoord arrays built
    // by getPointMatrix().
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, verticals);
    glTexCoordPointer(2, GL_FLOAT, 0, UV_TEX_VERTEX);
    glPushMatrix();
    glDrawArrays(GL_TRIANGLES, 0, (180 / cap_H) * (360 / cap_W) * 6);
    glPopMatrix();
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);  // disable vertex arrays

    glFlush();

    // Crude pacing: sleep ~25 ms per frame (~40 fps upper bound).
    av_usleep(25000);
}

渲染時把解出來的數據從隊列中取出生成新的紋理。渲染採用glDrawArrays函數,使用的GL_TRIANGLES參數,使用這個參數對於計算球的頂點座標和紋理座標來講不須要考慮不少,比較方便,就是點數過多的時候可能會影響渲染的效率。

(5)畫面更新與重繪

// Timer callback that drives playback: re-arm the timer for the next tick,
// then ask GLUT to repaint, so the scene refreshes every `interval_ms` ms.
void reDraw(int interval_ms)
{
    glutTimerFunc(interval_ms, reDraw, interval_ms);
    glutPostRedisplay();
}

這裏用OpenGL的定時器來對畫面作一個定時的更新,從而實現視頻播放的效果。

4.一些控制操做

(1)鍵盤控制

/*
 * Keyboard handler: x/X, y/Y, z/Z spin the sphere about the corresponding
 * axis (1 degree per press); a/A move the eye away from / toward the sphere
 * (10 units per press).  Unhandled keys are ignored and trigger no redraw.
 */
void keyboard(unsigned char key, int x, int y)
{
    if (key == 'x')      xangle += 1.0f;
    else if (key == 'X') xangle -= 1.0f;
    else if (key == 'y') yangle += 1.0f;
    else if (key == 'Y') yangle -= 1.0f;
    else if (key == 'z') zangle += 1.0f;
    else if (key == 'Z') zangle -= 1.0f;
    else if (key == 'a') distance += 10.0f;
    else if (key == 'A') distance -= 10.0f;
    else return;    // unhandled key: skip the redraw

    glutPostRedisplay();    // request a repaint with the new angles/distance
}

用鍵盤來實現球體繞x,y,z軸的旋轉,以及觀察球體的距離。

(2)鼠標控制

//處理鼠標點擊
void Mouse(int button, int state, int x, int y)
{
    if (state == GLUT_DOWN) //第一次鼠標按下時,記錄鼠標在窗口中的初始座標
    {
        //記住鼠標點擊後光標座標
        cx = x;
        cy = y;
    }
}

//處理鼠標拖動
void onMouseMove(int x, int y)
{
    float offset = 0.18;
    //計算拖動後的偏移量,而後進行xy疊加減
    yangle -= ((x - cx) * offset);

    if ( y > cy) {//往下拉
        xangle += ((y - cy) * offset);
    }
    else if ( y < cy) {//往上拉
        xangle += ((y - cy) * offset);
    }

    glutPostRedisplay();

    //保存好當前拖放後光標座標點
    cx = x;
    cy = y;
}

5.主函數

int main(int argc, char* argv[])
{
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(1640, 840);
    glutInitWindowPosition(100, 100);
    glutCreateWindow("OpenGL全景");
    init();
    glutReshapeFunc(reshape);
    glutDisplayFunc(display);
    glutKeyboardFunc(keyboard);
    glutMouseFunc(Mouse);
    glutMotionFunc(onMouseMove);

    glutTimerFunc(25, reDraw, 25);

    HANDLE hThrd = NULL;
    DWORD threadId;
    hThrd = CreateThread(NULL, 0, ThreadFunc, 0, 0, &threadId);

    glutMainLoop();

    WaitForSingleObject(hThrd, INFINITE);

    if (hThrd)
    {
        CloseHandle(hThrd);
    }

    return 0;
}

glutMainLoop()函數真是個噁心的函數,都沒找到正常退出他的方法,要退出貌似必須得把整個程序都退出去,在實際使用的時候大多數時候咱們都只是但願退出循環就夠了,不必定要退出整個程序。因此若是用win32來作,最好就不要用這個函數,用一個獨立的線程來作渲染,各類消息經過win32來實現,這樣是比較方便的。

運行截圖:

image

 

 

工程源碼:http://download.csdn.net/download/qq_33892166/9856939

VR視頻推薦:http://dl.pconline.com.cn/vr/list0_1_2007_2018.html

相關文章
相關標籤/搜索