CUDA Programming Interface (I)

   CUDA C supports the C/C++ language. It adds only a small set of extensions to C and provides a C runtime library.

  To understand how CUDA runs, we first need to understand how a CUDA program is compiled.

  Compilation with NVCC

   Offline Compilation

    The NVCC workflow consists mainly of the following steps (a complete host-side sketch follows the kernel example below):

      1. Separate the host code and the device code in the program.

      2. Compile the device code into an assembly form (PTX code), and then into a binary stream that is handed to the GPU for execution.

      3. Replace the non-standard C constructs in the host code (such as the <<<...>>> kernel-launch syntax) with standard calls, then compile and link the host code through the normal compilation process so that it runs on the CPU.

   

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    
    c[i] = a[i] + b[i];
}
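
    To make the snippet above self-contained, here is a minimal host-side sketch (the array size, the sample data, and the printed output are illustrative assumptions, and error checking is omitted). It allocates device memory, launches addKernel, and copies the result back; nvcc separates this host code from the __global__ kernel and replaces the <<<...>>> launch syntax with runtime calls.

    // Minimal host-side sketch for addKernel (illustrative; no error checking).
    #include <cstdio>

    int main()
    {
        const int size = 5;                        // assumed array size
        int a[size] = {1, 2, 3, 4, 5};             // sample host inputs
        int b[size] = {10, 20, 30, 40, 50};
        int c[size] = {0};

        // Allocate device (linear) memory for the three arrays.
        int *dev_a, *dev_b, *dev_c;
        cudaMalloc((void**)&dev_a, size * sizeof(int));
        cudaMalloc((void**)&dev_b, size * sizeof(int));
        cudaMalloc((void**)&dev_c, size * sizeof(int));

        // Copy the inputs from host memory to device memory.
        cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);

        // Launch one block of 'size' threads; each thread adds one element.
        addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

        // Copy the result back to host memory and print part of it.
        cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
        printf("c[0] = %d, c[4] = %d\n", c[0], c[4]);

        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
        return 0;
    }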

     Initialization

     通常來講,沒有一個明確的開始標誌,當第一個runtime function被調用的時候,GPU section就被初始化了。線程

     在初始化過程當中,會建立一個cuda context。這個上下文是primary context,被全部的Host Thread共享。code

      cudaDeviceReset() 能夠destory當前的上下文,直至下一個runtime function被call時,將從新建立primary context.orm
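
      As a small sketch of this behavior (the buffer size is an arbitrary assumption), the first runtime call below implicitly creates the primary context, cudaDeviceReset() destroys it, and the next runtime call re-creates it:

      #include <cstdio>

      int main()
      {
          // No explicit initialization call: this first runtime API call
          // triggers creation of the primary context on the current device.
          int *d_buf = NULL;
          cudaMalloc((void**)&d_buf, 256 * sizeof(int));
          cudaFree(d_buf);

          // Destroy the primary context and release its resources.
          cudaDeviceReset();

          // The next runtime call re-creates the primary context on demand.
          cudaError_t err = cudaMalloc((void**)&d_buf, 256 * sizeof(int));
          printf("allocation after reset: %s\n", cudaGetErrorString(err));
          cudaFree(d_buf);
          return 0;
      }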

 

    Device Memory

      Device memory can be allocated either as linear memory or as CUDA arrays.

      CUDA arrays are tied to texture and surface memory; we will discuss them later.

        Linear memory is usually allocated with cudaMalloc() and freed with cudaFree(), and cudaMemcpy() transfers data between host memory and device memory.

        For 2D or 3D arrays, memory can be allocated with cudaMallocPitch() and cudaMalloc3D(), and copied with the corresponding cudaMemcpy2D() and cudaMemcpy3D().
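
        As a hedged sketch of both allocation styles above (the 64x64 dimensions and float element type are assumptions, and error checking is omitted): cudaMalloc()/cudaMemcpy() handle a 1D buffer, while cudaMallocPitch()/cudaMemcpy2D() handle a row-padded 2D buffer whose pitch is chosen by the runtime.

        #include <cstdlib>

        int main()
        {
            const int width = 64, height = 64;     // assumed dimensions

            // 1D linear memory: allocate, copy host -> device, free.
            float *h_vec = (float*)calloc(width, sizeof(float));
            float *d_vec = NULL;
            cudaMalloc((void**)&d_vec, width * sizeof(float));
            cudaMemcpy(d_vec, h_vec, width * sizeof(float), cudaMemcpyHostToDevice);

            // 2D linear memory: cudaMallocPitch pads each row to a pitch
            // (in bytes) chosen by the runtime for aligned row accesses.
            float *d_mat = NULL;
            size_t pitch = 0;
            cudaMallocPitch((void**)&d_mat, &pitch, width * sizeof(float), height);

            // cudaMemcpy2D takes the destination and source pitches in bytes;
            // the host array is densely packed, so its pitch is width * sizeof(float).
            float *h_mat = (float*)calloc(width * height, sizeof(float));
            cudaMemcpy2D(d_mat, pitch, h_mat, width * sizeof(float),
                         width * sizeof(float), height, cudaMemcpyHostToDevice);

            cudaFree(d_vec);
            cudaFree(d_mat);
            free(h_vec);
            free(h_mat);
            return 0;
        }

        Note that kernels indexing a pitched allocation should compute row offsets from the returned pitch (in bytes) rather than from the logical row width.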

  

      Shared Memory

        Shared memory is declared with the __shared__ qualifier. It is faster than global memory.

        As seen in the previous section, when we use blocked (tiled) matrices to optimize the GPU computation, each thread performs only BLOCK_SIZE multiplications per tile. Each element Cij therefore has to be accumulated over several tiles, and this accumulation has to go through shared memory together with the thread-synchronization mechanism (__syncthreads()).

        

The following code sample, taken from the CUDA C Programming Guide (PG-02829-001_v7.5), is an implementation of matrix multiplication that does take advantage of shared memory. In this implementation, each thread block is responsible for computing one square sub-matrix Csub of C and each thread within the block is responsible for computing one element of Csub. As illustrated in Figure 10 of that guide, Csub is equal to the product of two rectangular matrices: the sub-matrix of A of dimension (A.width, block_size) that has the same row indices as Csub, and the sub-matrix of B of dimension (block_size, A.width) that has the same column indices as Csub. In order to fit into the device's resources, these two rectangular matrices are divided into as many square matrices of dimension block_size as necessary and Csub is computed as the sum of the products of these square matrices. Each of these products is performed by first loading the two corresponding square matrices from global memory to shared memory with one thread loading one element of each matrix, and then by having each thread compute one element of the product. Each thread accumulates the result of each of these products into a register and once done writes the result to global memory.

By blocking the computation this way, we take advantage of fast shared memory and save a lot of global memory bandwidth since A is only read (B.width / block_size) times from global memory and B is read (A.height / block_size) times.

The Matrix type from the previous code sample is augmented with a stride field, so that sub-matrices can be efficiently represented with the same type. __device__ functions are used to get and set elements and build any sub-matrix from a matrix.
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;
    int height;
    int stride;
    float* elements;
} Matrix;

// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    A.elements[row * A.stride + col] = value;
}

// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
                                + BLOCK_SIZE * col];
    return Asub;
}

// Thread block size
#define BLOCK_SIZE 16

// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A and B to device memory
    Matrix d_A;
    d_A.width = d_A.stride = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size,
               cudaMemcpyHostToDevice);
    Matrix d_B;
    d_B.width = d_B.stride = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size,
               cudaMemcpyHostToDevice);

    // Allocate C in device memory
    Matrix d_C;
    d_C.width = d_C.stride = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);

    // Invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);

    // Read C from device memory
    cudaMemcpy(C.elements, d_C.elements, size,
               cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;

    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);

    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0;

    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;

    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub
    // Multiply each pair of sub-matrices together
    // and accumulate the results
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);

        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load Asub and Bsub from device memory to shared memory
        // Each thread loads one element of each sub-matrix
        As[row][col] = GetElement(Asub, row, col);
        Bs[row][col] = GetElement(Bsub, row, col);

        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();
        // Multiply Asub and Bsub together
        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];

        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }

    // Write Csub to device memory
    // Each thread writes one element
    SetElement(Csub, row, col, Cvalue);
}