Speed up OpenCV image processing with OpenMP

This article was first published on my blog at kezunlin.me/post/7a6ba8…, welcome to read!



Guide

config

  • Linux/Windows: CMake with CXX_FLAGS=-fopenmp
  • Windows (Visual Studio): VS also supports OpenMP; enable it under C/C++ | Language | /openmp

usage

#include <omp.h>

#pragma omp parallel for
    for (...) { /* loop body */ }

code

#include <cstdio>   // for printf
#include <iostream>
#include <omp.h>

int main()
{
    omp_set_num_threads(4);
#pragma omp parallel for
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
    printf("\n");    

    return 0;
}

/*
i = 0, I am Thread 0
i = 1, I am Thread 0
i = 4, I am Thread 2
i = 5, I am Thread 2
i = 6, I am Thread 3
i = 7, I am Thread 3
i = 2, I am Thread 1
i = 3, I am Thread 1
*/

CMakeLists.txt

Use CXX_FLAGS=-fopenmp in CMakeLists.txt:

cmake_minimum_required(VERSION 3.0.0)

project(hello)

find_package(OpenMP REQUIRED)
if(OPENMP_FOUND)
    message("OPENMP FOUND")

    message([main] " OpenMP_C_FLAGS=${OpenMP_C_FLAGS}") # -fopenmp
    message([main] " OpenMP_CXX_FLAGS}=${OpenMP_CXX_FLAGS}") # -fopenmp
    message([main] " OpenMP_EXE_LINKER_FLAGS=${OpenMP_EXE_LINKER_FLAGS}") # ***

    # OpenMP does not set xxx_INCLUDE_DIRS or xxx_LIBRARIES; the flags above are all that is needed
    message([main] " OpenMP_INCLUDE_DIRS=${OpenMP_INCLUDE_DIRS}") # ***
    message([main] " OpenMP_LIBRARIES=${OpenMP_LIBRARIES}") # ***

    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
endif()

add_executable(hello hello.cpp)
#target_link_libraries(hello xxx)


Or compile directly with g++ hello.cpp -fopenmp.

view demo

list dynamic dependencies (ldd)

ldd hello 
        linux-vdso.so.1 =>  (0x00007ffd71365000)
        libstdc++.so.6 => /usr/lib/x86_64-linux-gnu/libstdc++.so.6 (0x00007f8ea7f00000)
        libgomp.so.1 => /usr/lib/x86_64-linux-gnu/libgomp.so.1 (0x00007f8ea7cde000)
        libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f8ea7914000)
        libm.so.6 => /lib/x86_64-linux-gnu/libm.so.6 (0x00007f8ea760b000)
        /lib64/ld-linux-x86-64.so.2 (0x00007f8ea8282000)
        libgcc_s.so.1 => /lib/x86_64-linux-gnu/libgcc_s.so.1 (0x00007f8ea73f5000)
        libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f8ea71f1000)
        libpthread.so.0 => /lib/x86_64-linux-gnu/libpthread.so.0 (0x00007f8ea6fd4000)

Note the OpenMP runtime: libgomp.so.1 => /usr/lib/x86_64-linux-gnu/libgomp.so.1

list names (nm)

nm hello 
    0000000000602080 B __bss_start
    0000000000602190 b completed.7594
                     U __cxa_atexit@@GLIBC_2.2.5
    0000000000602070 D __data_start
    0000000000602070 W data_start
    0000000000400b00 t deregister_tm_clones
    0000000000400b80 t __do_global_dtors_aux
    0000000000601df8 t __do_global_dtors_aux_fini_array_entry
    0000000000602078 d __dso_handle
    0000000000601e08 d _DYNAMIC
    0000000000602080 D _edata
    0000000000602198 B _end
    0000000000400d44 T _fini
    0000000000400ba0 t frame_dummy
    0000000000601de8 t __frame_dummy_init_array_entry
    0000000000400f18 r __FRAME_END__
    0000000000602000 d _GLOBAL_OFFSET_TABLE_
    0000000000400c28 t _GLOBAL__sub_I_main
                     w __gmon_start__
    0000000000400d54 r __GNU_EH_FRAME_HDR
                     U GOMP_parallel@@GOMP_4.0
                     U __gxx_personality_v0@@CXXABI_1.3
    00000000004009e0 T _init
    0000000000601df8 t __init_array_end
    0000000000601de8 t __init_array_start
    0000000000400d50 R _IO_stdin_used
                     w _ITM_deregisterTMCloneTable
                     w _ITM_registerTMCloneTable
    0000000000601e00 d __JCR_END__
    0000000000601e00 d __JCR_LIST__
                     w _Jv_RegisterClasses
    0000000000400d40 T __libc_csu_fini
    0000000000400cd0 T __libc_csu_init
                     U __libc_start_main@@GLIBC_2.2.5
    0000000000400bc6 T main
    0000000000400c3d t main._omp_fn.0
                     U omp_get_num_threads@@OMP_1.0
                     U omp_get_thread_num@@OMP_1.0
    0000000000400b40 t register_tm_clones
    0000000000400ad0 T _start
    0000000000602080 d __TMC_END__
    0000000000400bea t _Z41__static_initialization_and_destruction_0ii
                     U _ZNSolsEPFRSoS_E@@GLIBCXX_3.4
                     U _ZNSt8ios_base4InitC1Ev@@GLIBCXX_3.4
                     U _ZNSt8ios_base4InitD1Ev@@GLIBCXX_3.4
    0000000000602080 B _ZSt4cout@@GLIBCXX_3.4
                     U _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4
    0000000000602191 b _ZStL8__ioinit
                     U _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c@@GLIBCXX_3.4

Note the undefined symbols omp_get_num_threads and omp_get_thread_num, resolved by libgomp at run time.

OpenMP Introduction

OpenMP directive format

#pragma omp directive [clause[clause]…]
    #pragma omp parallel private(i, j)

Here parallel is the directive and private is the clause.

directive

  • parallel: placed before a code block; the block will be executed by multiple threads in parallel.
  • for: placed before a for loop; the iterations are distributed across threads and run in parallel. Each iteration must be independent of the others.
  • parallel for: the combination of parallel and for; placed before a for loop whose iterations will be executed by multiple threads in parallel.
  • sections: placed before a code region that may be executed in parallel.
  • parallel sections: the combination of parallel and sections.
  • critical: placed before a critical section of code (combined with barrier, single and master in the sketch after this list).
  • single: placed before a code block that should be executed by a single thread only.
  • flush: makes a thread's view of the listed shared variables consistent with memory.
  • barrier: synchronizes the threads in a parallel region; every thread stops at the barrier and continues only once all threads have reached it.
  • atomic: specifies that a memory location is updated atomically.
  • master: specifies that a code block is executed by the master thread only.
  • ordered: specifies that loop iterations in a parallel region execute in sequential order.
  • threadprivate: specifies that a variable is private to each thread.
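
Several of these directives can be shown in one place. Below is a minimal sketch of mine (not from the original post) combining single, critical, barrier and master; the counter variable is illustrative:

#include <cstdio>
#include <omp.h>

int main()
{
    int counter = 0;
    omp_set_num_threads(4);
#pragma omp parallel
    {
#pragma omp single
        printf("single: executed by one thread only, Thread %d\n", omp_get_thread_num());

#pragma omp critical
        counter++; // only one thread at a time may enter the critical section

#pragma omp barrier // every thread waits here until all have incremented counter

#pragma omp master
        printf("master: counter = %d (always Thread 0)\n", counter); // prints 4
    }
    return 0;
}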

parallel for

OpenMP places five requirements on a loop before it can be multithreaded:

  • The loop variable (the i) must be a signed integer; nothing else works.
  • The comparison must be one of <, <=, > or >=.
  • The increment must add or subtract a loop-invariant value (the same amount every iteration).
  • If the comparison is < or <=, i must increase every iteration; otherwise it must decrease.
  • The loop body must be well-behaved: no jumping from an inner loop out of the loop; goto and break may only jump within the loop; exceptions must be caught inside the loop.

If your loop does not meet these conditions, it has to be rewritten, as in the sketch below.
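
For instance (an illustrative sketch of mine, not from the original post), a search loop that exits early with break violates the last rule; one conforming rewrite scans the whole range and combines the result with a min reduction (OpenMP 3.1 or newer):

#include <cstdio>
#include <omp.h>

int main()
{
    const int n = 100;
    int data[n];
    int target = 42;
    for (int i = 0; i < n; i++) data[i] = i; // toy data

    // Non-conforming: `break` leaves the loop early.
    // for (int i = 0; i < n; i++)
    //     if (data[i] == target) { found = i; break; }

    // Conforming rewrite: every iteration is independent.
    int found = n; // n means "not found"
#pragma omp parallel for reduction(min:found)
    for (int i = 0; i < n; i++)
    {
        if (data[i] == target && i < found)
            found = i;
    }
    printf("found = %d\n", found); // 42
    return 0;
}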

avoid race condition

Even when a loop satisfies all five conditions, data dependence may still prevent correct parallelization. This happens when data in two different iterations depend on each other.

// assume the array has been initialized to 1
#pragma omp parallel for
for (int i = 2; i < 10; i++) {
    factorial[i] = i * factorial[i-1];
}

ERROR: factorial[i] reads factorial[i-1], which another thread may not have computed yet (a loop-carried dependence), so this loop cannot be parallelized as written.

omp_set_num_threads(4);
#pragma omp parallel
    {
        #pragma omp for
        for (int i = 0; i < 8; i++)
        {
            printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
        }
    }

same as

omp_set_num_threads(4);
#pragma omp parallel for
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }

parallel sections

#pragma omp parallel sections // parallel
{
    #pragma omp section // thread-1
    {
        function1();
    }
    #pragma omp section // thread-2
    {
        function2();
    }
}

The contents of parallel sections are executed in parallel; in the division of work, each thread executes one of the sections.

clause

  • private: each thread gets its own private copy of the variable.
  • firstprivate: each thread gets its own private copy of the variable, initialized with the master thread's value (see the sketch after this list).
  • lastprivate: copies the private variable's value from the sequentially last iteration back to the master thread's variable when the parallel region ends.
  • reduction: declares one or more variables private, and applies the specified operation to combine the private copies when the parallel region ends.
  • nowait: removes the implicit wait at the end of a directive.
  • num_threads: sets the number of threads.
  • schedule: specifies how for-loop iterations are scheduled.
  • shared: declares one or more variables shared among the threads.
  • ordered: specifies that the for loop must execute its iterations in order.
  • copyprivate: used with the single directive to broadcast a variable's value to the other threads.
  • copyin: initializes a threadprivate variable with the master thread's value.
  • default: sets the default data-sharing attribute for variables in the parallel region; the default is shared.
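
A small sketch of firstprivate and lastprivate (my example; x and the loop bounds are arbitrary):

#include <cstdio>
#include <omp.h>

int main()
{
    int x = 10;
    omp_set_num_threads(4);
    // firstprivate: each thread's private x starts at the master's value, 10.
    // lastprivate: after the loop, the master's x holds the value written by
    // the sequentially last iteration (i == 7).
#pragma omp parallel for firstprivate(x) lastprivate(x)
    for (int i = 0; i < 8; i++)
    {
        x = x + i; // each thread updates its own copy of x
        printf("i = %d, x = %d, Thread %d\n", i, x, omp_get_thread_num());
    }
    printf("after loop: x = %d\n", x);
    return 0;
}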

private

#pragma omp parallel
{
    int x; // private to each thread ? YES
}

#pragma omp parallel for
for (int i = 0; i < 1000; ++i)
{
    int x; // private to each thread ? YES
}

local variables are automatically private to each thread.

The reason for the existence of the private clause is so that you don't have to change your code.

see here

The only way to parallelize the following code without the private clause

int i,j;
#pragma omp parallel for private(j)
for(i = 0; i < n; i++) {
    for(j = 0; j < n; j++) {
        //do something
    }
}

is to change the code, for example like this:

int i;
#pragma omp parallel for
for(i = 0; i < n; i++) {
    int j; // mark j as local variable to worker thread
    for(j = 0; j < n; j++) {
        //do something
    }
}

reduction

For example, accumulation:

int sum = 0;
for (int i = 0; i < 100; i++) {
    sum += array[i]; // sum must be private for parallelism, yet shared to produce the correct result
}

In the program above, sum is wrong whether it is shared or private. To solve this, OpenMP provides the reduction clause:

int sum = 0;
#pragma omp parallel for reduction(+:sum)
for (int i = 0; i < 100; i++) {
    sum += array[i];
}

Internally, OpenMP gives each thread a private sum variable (initialized to 0); when the threads finish, OpenMP adds the private sums together to produce the final result.

num_threads

num_threads(4) same as omp_set_num_threads(4)

// `num_threads(4)` same as `omp_set_num_threads(4)`
    #pragma omp parallel num_threads(4)
    {
        printf("Hello, I am Thread %d\n", omp_get_thread_num()); // 0,1,2,3,
    }

schedule

format

#pragma omp parallel for schedule(kind [, chunk_size])

kind: see openmp-loop-scheduling and whats-the-difference-between-static-and-dynamic-schedule-in-openmp

  • static: Divide the loop into equal-sized chunks or as equal as possible in the case where the number of loop iterations is not evenly divisible by the number of threads multiplied by the chunk size. By default, chunk size is loop_count/number_of_threads.
  • dynamic: Use the internal work queue to give a chunk-sized block of loop iterations to each thread. When a thread is finished, it retrieves the next block of loop iterations from the top of the work queue. By default, the chunk size is 1. Be careful when using this scheduling type because of the extra overhead involved.
  • guided: a special case of dynamic. Similar to dynamic scheduling, but the chunk size starts large and decreases to better handle load imbalance between iterations. The optional chunk parameter specifies the minimum chunk size to use. By default the chunk size is approximately loop_count/number_of_threads.
  • auto: When schedule (auto) is specified, the decision regarding scheduling is delegated to the compiler. The programmer gives the compiler the freedom to choose any possible mapping of iterations to threads in the team.
  • runtime: with the environment variable OMP_SCHEDULE, we can test the three scheduling types static, dynamic and guided without recompiling the code (see the sketch below).

The optional parameter (chunk), when specified, must be a positive integer.
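
With schedule(runtime) the kind is read from the environment, so the same binary can be tested with different schedules. A minimal sketch of mine (demo_openmp is the binary name used elsewhere in this post):

#include <cstdio>
#include <omp.h>

int main()
{
    // Run with, e.g.:
    //   OMP_SCHEDULE="static,2"  ./demo_openmp
    //   OMP_SCHEDULE="dynamic,1" ./demo_openmp
    //   OMP_SCHEDULE="guided,4"  ./demo_openmp
    omp_set_num_threads(4);
#pragma omp parallel for schedule(runtime)
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
    return 0;
}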

By default, OpenMP assumes that all loop iterations take the same amount of time, so it divides the iterations evenly among the cores and places them so as to minimize memory-access conflicts. It does this because loops generally access memory linearly, so splitting the loop into a first half and a second half minimizes conflicts. This may be the best arrangement for memory access, but it may be poor for load balancing, and conversely the best load balancing may hurt memory access, so a compromise is required.

Memory access vs. load balancing: the two must be traded off.

The default schedule in OpenMP is implementation-defined; gcc defaults to schedule(dynamic,1), i.e. dynamic scheduling with a chunk size of 1.
Do not use more threads than physical cores, otherwise you get oversubscription.

The isprime example in the schedule compare section below demonstrates the effect of dynamic scheduling.

functions

  • omp_get_num_procs: returns the number of processors on the machine running this thread.
  • omp_set_num_threads: sets the number of threads used for parallel regions.
  • omp_get_num_threads: returns the number of active threads in the current parallel region (1 if none is set).
  • omp_get_thread_num: returns the calling thread's number (0, 1, 2, ...).
  • omp_init_lock: initializes a simple lock.
  • omp_set_lock: acquires the lock.
  • omp_unset_lock: releases the lock; must be paired with omp_set_lock.
  • omp_destroy_lock: destroys a lock; must be paired with omp_init_lock (the lock functions are sketched below).
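
A minimal sketch of the lock functions (my example; the shared counter is illustrative):

#include <cstdio>
#include <omp.h>

int main()
{
    omp_lock_t lock;
    omp_init_lock(&lock); // pair with omp_destroy_lock

    int counter = 0;
    omp_set_num_threads(4);
#pragma omp parallel
    {
        omp_set_lock(&lock); // pair with omp_unset_lock
        counter++;           // protected: one thread at a time
        printf("Thread %d, counter = %d\n", omp_get_thread_num(), counter);
        omp_unset_lock(&lock);
    }

    omp_destroy_lock(&lock);
    printf("final counter = %d\n", counter); // 4
    return 0;
}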

check cpu

cat /proc/cpuinfo | grep name | cut -f2 -d: | uniq -c 
        8  Intel(R) Core(TM) i7-7700HQ CPU @ 2.80GHz

so omp_get_num_procs returns 8.

OpenMP Example

omp_get_num_threads

void test0()
{
    printf("I am Thread %d,  omp_get_num_threads = %d, omp_get_num_procs = %d\n", 
        omp_get_thread_num(), 
        omp_get_num_threads(),
        omp_get_num_procs()
    );
}
/*
I am Thread 0,  omp_get_num_threads = 1, omp_get_num_procs = 8
*/

parallel

case1

void test1()
{
    // `parallel`: the block that follows is executed by multiple threads in parallel.
    // If `omp_set_num_threads` is not called, the default thread count is `omp_get_num_procs`, e.g. 8.
    //omp_set_num_threads(4); // set the thread count; usually no more than the number of CPU cores
#pragma omp parallel
    {
        printf("Hello, I am Thread %d,  omp_get_num_threads = %d, omp_get_num_procs = %d\n", 
            omp_get_thread_num(), 
            omp_get_num_threads(),
            omp_get_num_procs()
        );
    }
}
/*
Hello, I am Thread 3,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 7,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 1,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 6,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 5,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 4,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 2,  omp_get_num_threads = 8, omp_get_num_procs = 8
Hello, I am Thread 0,  omp_get_num_threads = 8, omp_get_num_procs = 8
*/

case2

void test1_2()
{
    // `parallel`: the block that follows is executed by multiple threads in parallel.
    omp_set_num_threads(4); // set the thread count; usually no more than the number of CPU cores
#pragma omp parallel
    {
        printf("Hello, I am Thread %d,  omp_get_num_threads = %d, omp_get_num_procs = %d\n", 
            omp_get_thread_num(), 
            omp_get_num_threads(),
            omp_get_num_procs()
        );
        //std::cout << "Hello" << ", I am Thread " << omp_get_thread_num() << std::endl; // 0,1,2,3
    }
}
/*
# use `cout`
HelloHello, I am Thread Hello, I am Thread , I am Thread Hello, I am Thread 2
1
3
0
*/

/* use `printf`
Hello, I am Thread 0,  omp_get_num_threads = 4, omp_get_num_procs = 8
Hello, I am Thread 3,  omp_get_num_threads = 4, omp_get_num_procs = 8
Hello, I am Thread 1,  omp_get_num_threads = 4, omp_get_num_procs = 8
Hello, I am Thread 2,  omp_get_num_threads = 4, omp_get_num_procs = 8
*/

Notice the difference between std::cout and printf: each << in a cout chain is a separate call, so output from different threads interleaves mid-line, while a single printf call writes its whole line at once. A sketch of one fix follows.
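
If you do want cout inside a parallel region, one option is to serialize the statement with critical. A sketch of mine under the same 4-thread setup:

#include <iostream>
#include <omp.h>

int main()
{
    omp_set_num_threads(4);
#pragma omp parallel
    {
        // One thread at a time runs the whole << chain, so lines stay intact.
#pragma omp critical
        std::cout << "Hello, I am Thread " << omp_get_thread_num() << std::endl;
    }
    return 0;
}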

case3

void test1_3()
{
    // `parallel`: the block that follows is executed by multiple threads in parallel.
    omp_set_num_threads(4);
#pragma omp parallel
    for (int i = 0; i < 3; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }    
}
/*
i = 0, I am Thread 1
i = 1, I am Thread 1
i = 2, I am Thread 1
i = 0, I am Thread 3
i = 1, I am Thread 3
i = 2, I am Thread 3
i = 0, I am Thread 2
i = 1, I am Thread 2
i = 2, I am Thread 2
i = 0, I am Thread 0
i = 1, I am Thread 0
i = 2, I am Thread 0
*/

omp parallel/for

omp parallel + omp for

void test2()
{
    // `omp parallel` + `omp for` === `omp parallel for`
    // `omp for`: placed before a for loop; each iteration is distributed to a worker thread.
    // Here the 8 iterations are divided evenly among 4 threads, 2 iterations per thread:
    /*
    iter   #thread id
    0,1     0
    2,3     1
    4,5     2
    6,7     3
    */
    omp_set_num_threads(4);
#pragma omp parallel
    {
        #pragma omp for
        for (int i = 0; i < 8; i++)
        {
            printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
        }
    }
}
/*
i = 0, I am Thread 0
i = 1, I am Thread 0
i = 2, I am Thread 1
i = 3, I am Thread 1
i = 6, I am Thread 3
i = 7, I am Thread 3
i = 4, I am Thread 2
i = 5, I am Thread 2
*/

omp parallel for

void test2_2()
{
    // `parallel for`: placed before a for loop; each iteration is distributed to a worker thread.
    // Here the 8 iterations are divided evenly among 4 threads, 2 iterations per thread:
    /*
    iter   #thread id
    0,1     0
    2,3     1
    4,5     2
    6,7     3
    */
    omp_set_num_threads(4);
#pragma omp parallel for
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
}
/*
i = 0, I am Thread 0
i = 1, I am Thread 0
i = 4, I am Thread 2
i = 5, I am Thread 2
i = 6, I am Thread 3
i = 7, I am Thread 3
i = 2, I am Thread 1
i = 3, I am Thread 1
*/

sqrt case

void base_sqrt()
{
    boost::posix_time::ptime pt1 = boost::posix_time::microsec_clock::local_time();

    float a = 0;
    for (int i=0;i<1000000000;i++)
        a = sqrt(i);
    
    boost::posix_time::ptime pt2 = boost::posix_time::microsec_clock::local_time();
    int64_t cost = (pt2 - pt1).total_milliseconds();
    printf("Worker Thread = %d, cost = %d ms\n",omp_get_thread_num(), cost);
}

void test2_3()
{
    boost::posix_time::ptime pt1 = boost::posix_time::microsec_clock::local_time();

    omp_set_num_threads(8);
#pragma omp parallel for
    for (int i=0;i<8;i++)
        base_sqrt();
    
    boost::posix_time::ptime pt2 = boost::posix_time::microsec_clock::local_time();
    int64_t cost = (pt2 - pt1).total_milliseconds();
    printf("Main Thread = %d, cost = %d ms\n",omp_get_thread_num(), cost);
}

sequential

time ./demo_openmp
    Worker Thread = 0, cost = 1746 ms
    Worker Thread = 0, cost = 1711 ms
    Worker Thread = 0, cost = 1736 ms
    Worker Thread = 0, cost = 1734 ms
    Worker Thread = 0, cost = 1750 ms
    Worker Thread = 0, cost = 1718 ms
    Worker Thread = 0, cost = 1769 ms
    Worker Thread = 0, cost = 1732 ms
    Main Thread = 0, cost = 13899 ms
    ./demo_openmp  13.90s user 0.00s system 99% cpu 13.903 total

parallel

time ./demo_openmp
    Worker Thread = 1, cost = 1875 ms
    Worker Thread = 6, cost = 1876 ms
    Worker Thread = 0, cost = 1876 ms
    Worker Thread = 7, cost = 1876 ms
    Worker Thread = 5, cost = 1877 ms
    Worker Thread = 3, cost = 1963 ms
    Worker Thread = 4, cost = 2000 ms
    Worker Thread = 2, cost = 2027 ms
    Main Thread = 0, cost = 2031 ms
    ./demo_openmp  15.10s user 0.01s system 740% cpu 2.041 total

2031 ms (main thread) + 10 ms (system) ≈ 2041 ms (total)

2.041 s × 740% CPU ≈ 15.10 s (user time)

parallel sections

void test3()
{
    boost::posix_time::ptime pt1 = boost::posix_time::microsec_clock::local_time();

    omp_set_num_threads(4);
    // the contents of `parallel sections` run in parallel; each thread executes one `section`
    #pragma omp parallel sections // parallel 
    {
        #pragma omp section // thread-0
        {
            base_sqrt();
        }

        #pragma omp section // thread-1
        {
            base_sqrt();
        }

        #pragma omp section // thread-2
        {
            base_sqrt();
        }

        #pragma omp section // thread-3
        {
            base_sqrt();
        }
    }

    boost::posix_time::ptime pt2 = boost::posix_time::microsec_clock::local_time();
    int64_t cost = (pt2 - pt1).total_milliseconds();
    printf("Main Thread = %d, cost = %d ms\n",omp_get_thread_num(), cost);
}
/*
time ./demo_openmp
Worker Thread = 0, cost = 1843 ms
Worker Thread = 1, cost = 1843 ms
Worker Thread = 3, cost = 1844 ms
Worker Thread = 2, cost = 1845 ms
Main Thread = 0, cost = 1845 ms
./demo_openmp  7.39s user 0.00s system 398% cpu 1.855 total
*/

private

error case

void test4_error()
{
    int i,j;
    omp_set_num_threads(4);
    // we get error result, because `j` is shared between all worker threads.
    #pragma omp parallel for
    for(i = 0; i < 4; i++) {
        for(j = 0; j < 8; j++) {
            printf("Worker Thread = %d, j = %d ms\n",omp_get_thread_num(), j);
        }
    }
}
/*
Worker Thread = 3, j = 0 ms
Worker Thread = 3, j = 1 ms
Worker Thread = 0, j = 0 ms
Worker Thread = 0, j = 3 ms
Worker Thread = 0, j = 4 ms
Worker Thread = 0, j = 5 ms
Worker Thread = 3, j = 2 ms
Worker Thread = 3, j = 7 ms
Worker Thread = 0, j = 6 ms
Worker Thread = 1, j = 0 ms
Worker Thread = 2, j = 0 ms
*/

The results are wrong: j is shared, so threads overwrite each other's inner-loop counter (note the skipped j values above).

fix1 by changing code

void test4_fix1()
{
    int i;
    omp_set_num_threads(4);
    // we get error result, because `j` is shared between all worker threads.
    // fix1: we have to change original code to make j as local variable
    #pragma omp parallel for
    for(i = 0; i < 4; i++) {
        int j;  // fix1: `int j`
        for(j = 0; j < 8; j++) { 
            printf("Worker Thread = %d, j = %d ms\n",omp_get_thread_num(), j);
        }
    }
}

/*
Worker Thread = 0, j = 0 ms
Worker Thread = 0, j = 1 ms
Worker Thread = 2, j = 0 ms
Worker Thread = 2, j = 1 ms
Worker Thread = 1, j = 0 ms
Worker Thread = 1, j = 1 ms
Worker Thread = 1, j = 2 ms
Worker Thread = 1, j = 3 ms
Worker Thread = 1, j = 4 ms
Worker Thread = 1, j = 5 ms
Worker Thread = 1, j = 6 ms
Worker Thread = 1, j = 7 ms
Worker Thread = 2, j = 2 ms
Worker Thread = 2, j = 3 ms
Worker Thread = 2, j = 4 ms
Worker Thread = 2, j = 5 ms
Worker Thread = 2, j = 6 ms
Worker Thread = 2, j = 7 ms
Worker Thread = 0, j = 2 ms
Worker Thread = 0, j = 3 ms
Worker Thread = 0, j = 4 ms
Worker Thread = 0, j = 5 ms
Worker Thread = 0, j = 6 ms
Worker Thread = 0, j = 7 ms
Worker Thread = 3, j = 0 ms
Worker Thread = 3, j = 1 ms
Worker Thread = 3, j = 2 ms
Worker Thread = 3, j = 3 ms
Worker Thread = 3, j = 4 ms
Worker Thread = 3, j = 5 ms
Worker Thread = 3, j = 6 ms
Worker Thread = 3, j = 7 ms
*/

fix2 by private(j)

void test4_fix2()
{
    int i,j;
    omp_set_num_threads(4);
    // we get error result, because `j` is shared between all worker threads.
    // fix1: we have to change original code to make j as local variable
    // fix2: use `private(j)`, no need to change original code
    #pragma omp parallel for private(j) // fix2
    for(i = 0; i < 4; i++) {
        for(j = 0; j < 8; j++) {
            printf("Worker Thread = %d, j = %d ms\n",omp_get_thread_num(), j);
        }
    }
}

/*
Worker Thread = 0, j = 0 ms
Worker Thread = 0, j = 1 ms
Worker Thread = 0, j = 2 ms
Worker Thread = 0, j = 3 ms
Worker Thread = 0, j = 4 ms
Worker Thread = 0, j = 5 ms
Worker Thread = 0, j = 6 ms
Worker Thread = 0, j = 7 ms
Worker Thread = 2, j = 0 ms
Worker Thread = 2, j = 1 ms
Worker Thread = 2, j = 2 ms
Worker Thread = 2, j = 3 ms
Worker Thread = 2, j = 4 ms
Worker Thread = 2, j = 5 ms
Worker Thread = 2, j = 6 ms
Worker Thread = 2, j = 7 ms
Worker Thread = 3, j = 0 ms
Worker Thread = 3, j = 1 ms
Worker Thread = 3, j = 2 ms
Worker Thread = 3, j = 3 ms
Worker Thread = 3, j = 4 ms
Worker Thread = 3, j = 5 ms
Worker Thread = 1, j = 0 ms
Worker Thread = 1, j = 1 ms
Worker Thread = 1, j = 2 ms
Worker Thread = 1, j = 3 ms
Worker Thread = 1, j = 4 ms
Worker Thread = 1, j = 5 ms
Worker Thread = 1, j = 6 ms
Worker Thread = 1, j = 7 ms
Worker Thread = 3, j = 6 ms
Worker Thread = 3, j = 7 ms
*/

reduction

error case

void test5_error()
{
    int array[8] = {0,1,2,3,4,5,6,7};

    int sum = 0;
    omp_set_num_threads(4);
//#pragma omp parallel for reduction(+:sum)
#pragma omp parallel for  // ERROR
    for (int i = 0; i < 8; i++) {
        sum += array[i];
        printf("Worker Thread = %d, sum = %d ms\n",omp_get_thread_num(), sum);
    }
    printf("Main Thread = %d, sum = %d ms\n",omp_get_thread_num(), sum);
}
/*
// ERROR RESULT
Worker Thread = 0, sum = 0 ms
Worker Thread = 0, sum = 9 ms
Worker Thread = 3, sum = 8 ms
Worker Thread = 3, sum = 16 ms
Worker Thread = 1, sum = 2 ms
Worker Thread = 1, sum = 19 ms
Worker Thread = 2, sum = 4 ms
Worker Thread = 2, sum = 24 ms
Main Thread = 0, sum = 24 ms
*/

reduction(+:sum)

void test5_fix()
{
    int array[8] = {0,1,2,3,4,5,6,7};

    int sum = 0;
    /*
    sum must be private for the loop to parallelize, yet it must be shared to produce the correct result;
    neither shared nor private alone is right. To solve this, OpenMP provides the reduction clause.
    Internally, OpenMP gives each thread a private sum variable (initialized to 0);
    when the threads finish, OpenMP adds the private sums together to get the final result.
    */
    omp_set_num_threads(4);
#pragma omp parallel for reduction(+:sum)
//#pragma omp parallel for  // ERROR
    for (int i = 0; i < 8; i++) {
        sum += array[i];
        printf("Worker Thread = %d, sum = %d ms\n",omp_get_thread_num(), sum);
    }
    printf("Main Thread = %d, sum = %d ms\n",omp_get_thread_num(), sum);
}

/*
Worker Thread = 0, sum = 0 ms
Worker Thread = 0, sum = 1 ms
Worker Thread = 1, sum = 2 ms
Worker Thread = 1, sum = 5 ms
Worker Thread = 3, sum = 6 ms
Worker Thread = 3, sum = 13 ms
Worker Thread = 2, sum = 4 ms
Worker Thread = 2, sum = 9 ms
Main Thread = 0, sum = 28 ms
*/

num_threads

void test6()
{
    // `num_threads(4)` same as `omp_set_num_threads(4)`
    #pragma omp parallel num_threads(4)
    {
        printf("Hello, I am Thread %d\n", omp_get_thread_num()); // 0,1,2,3,
    }
}
/*
Hello, I am Thread 0
Hello, I am Thread 2
Hello, I am Thread 3
Hello, I am Thread 1
*/

schedule

(static,2)

void test7_1()
{
    omp_set_num_threads(4);
    // schedule(static,2): blocks of 2 iterations are dealt round-robin to the 4 threads
#pragma omp parallel for schedule(static,2) 
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
}
/*
i = 2, I am Thread 1
i = 3, I am Thread 1
i = 6, I am Thread 3
i = 7, I am Thread 3
i = 4, I am Thread 2
i = 5, I am Thread 2
i = 0, I am Thread 0
i = 1, I am Thread 0
*/

(static,4)

void test7_2()
{
    omp_set_num_threads(4);
    // schedule(static,4): blocks of 4 iterations, so only threads 0 and 1 get work
#pragma omp parallel for schedule(static,4) 
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
}
/*
i = 0, I am Thread 0
i = 1, I am Thread 0
i = 2, I am Thread 0
i = 3, I am Thread 0
i = 4, I am Thread 1
i = 5, I am Thread 1
i = 6, I am Thread 1
i = 7, I am Thread 1
*/

(dynamic,1)

void test7_3()
{
    omp_set_num_threads(4);
    // dynamic
#pragma omp parallel for schedule(dynamic,1) 
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
}
/*
i = 0, I am Thread 2
i = 4, I am Thread 2
i = 5, I am Thread 2
i = 6, I am Thread 2
i = 7, I am Thread 2
i = 3, I am Thread 3
i = 1, I am Thread 0
i = 2, I am Thread 1
*/

(dynamic,3)

void test7_4()
{
    omp_set_num_threads(4);
    // dynamic
#pragma omp parallel for schedule(dynamic,3) 
    for (int i = 0; i < 8; i++)
    {
        printf("i = %d, I am Thread %d\n", i, omp_get_thread_num());
    }
}
/*
i = 0, I am Thread 0
i = 1, I am Thread 0
i = 2, I am Thread 0
i = 6, I am Thread 2
i = 7, I am Thread 2
i = 3, I am Thread 1
i = 4, I am Thread 1
i = 5, I am Thread 1
*/

schedule compare

#define NUM 100000000

int isprime( int x )
{
    for( int y = 2; y * y <= x; y++ )
    {
        if( x % y == 0 )
            return 0;
    }
    return 1;
}

void test8()
{
    int sum = 0;

    #pragma omp parallel for reduction (+:sum) schedule(dynamic,1) 
    for( int i = 2; i <= NUM ; i++ )
    {
        sum += isprime(i);
    }

    printf( "Number of primes numbers: %d", sum );
}

no schedule

Number of prime numbers: 5761455
./demo_openmp  151.64s user 0.04s system 582% cpu 26.048 total

schedule(static,1)

Number of prime numbers: 5761455
./demo_openmp  111.13s user 0.00s system 399% cpu 27.799 total

schedule(dynamic,1)


Number of prime numbers: 5761455
./demo_openmp  167.22s user 0.02s system 791% cpu 21.135 total

schedule(dynamic,200)

Number of prime numbers: 5761455
./demo_openmp  165.96s user 0.02s system 791% cpu 20.981 total

Dynamic scheduling wins here because the cost of isprime(i) grows with i: static chunks leave the low-index threads idle while the high-index threads keep working, whereas dynamic chunks keep all cores busy (791% vs roughly 400-580% CPU).

OpenCV with OpenMP

see how-opencv-use-openmp-thread-to-get-performance

3 types of OpenCV implementations

  • sequential implementation: default (slowest)
  • parallel implementation: OpenMP / TBB
  • GPU implementation: CUDA(fastest) / OpenCL

With cmake-gui, building OpenCV with the WITH_OPENMP flag means that the internal functions will use OpenMP to parallelize some of the algorithms, such as cvCanny, cvSmooth and cvThreshold.

In OpenCV, an algorithm can have a sequential (slowest) implementation; a parallel implementation using OpenMP or TBB; and a GPU implementation using OpenCL or CUDA(fastest). You can decide with the WITH_XXX flags which version to use.

Of course, not every algorithm can be parallelized.

Now, if you want to parallelize your methods with OpenMP, you have to implement it yourself.

concepts

avoiding extra copying

from improving-image-processing-speed

There is one important way to increase speed in OpenCV that is related to neither the processor nor the algorithm: avoid extra copying when dealing with matrices. An example taken from the documentation:

"...by constructing a header for a part of another matrix. It can be a single row, single column, several rows, several columns, rectangular region in the matrix (called a minor in algebra) or a diagonal. Such operations are also O(1), because the new header will reference the same data. You can actually modify a part of the matrix using this feature, e.g."

parallel for

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <iostream>
#include <vector>
#include <omp.h>

void opencv_vector()
{
    int imNum = 2;
    std::vector<cv::Mat> imVec(imNum);
    std::vector<std::vector<cv::KeyPoint>> keypointVec(imNum);
    std::vector<cv::Mat> descriptorsVec(imNum);
    
    cv::Ptr<cv::ORB> detector = cv::ORB::create();
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

    std::vector< cv::DMatch > matches;
    char filename[100];
    double t1 = omp_get_wtime();
    
//#pragma omp parallel for // uncomment to run the loop below in parallel
    for (int i=0;i<imNum;i++){
        sprintf(filename,"rgb%d.jpg",i);
        imVec[i] = cv::imread( filename, CV_LOAD_IMAGE_GRAYSCALE );
        detector->detect( imVec[i], keypointVec[i] );
        detector->compute( imVec[i],keypointVec[i],descriptorsVec[i]);
        std::cout<<"find "<<keypointVec[i].size()<<" keypoints in im"<<i<<std::endl;
    }
    
    double t2 = omp_get_wtime();
    std::cout<<"time: "<<t2-t1<<std::endl;
    
    matcher->match(descriptorsVec[0], descriptorsVec[1], matches); // uchar descriptor Mat

    cv::Mat img_matches;
    cv::drawMatches( imVec[0], keypointVec[0], imVec[1], keypointVec[1], matches, img_matches ); 
    cv::namedWindow("Matches",CV_WINDOW_AUTOSIZE);
    cv::imshow( "Matches", img_matches );
    cv::waitKey(0);
}

parallel sections

#pragma omp parallel sections
    {
#pragma omp section
        {
            std::cout<<"processing im0"<<std::endl;
            im0 = cv::imread("rgb0.jpg", CV_LOAD_IMAGE_GRAYSCALE );
            detector.detect( im0, keypoints0);
            extractor.compute( im0,keypoints0,descriptors0);
            std::cout<<"find "<<keypoints0.size()<<"keypoints in im0"<<std::endl;
        }
        
#pragma omp section
        {
            std::cout<<"processing im1"<<std::endl;
            im1 = cv::imread("rgb1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
            detector.detect( im1, keypoints1);
            extractor.compute( im1,keypoints1,descriptors1);
            std::cout<<"find "<<keypoints1.size()<<"keypoints in im1"<<std::endl;
        }
    }


History

  • 20190403: created.
