CUDA Programming: The First CUDA Program

 

 

# Environment Setup

Install CUDA and VS2013 on Windows.

Create an empty console project and add a new file named `test.cu`.

## Configure include and library directories

Add the two directories below to the project's include and library search paths:

```

C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0\include

C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0\lib\x64

```

Then add the following libraries to the linker's additional dependencies:

```

cublas.lib
cublas_device.lib
cuda.lib
cudadevrt.lib
cudart.lib
cudart_static.lib
cufft.lib
cufftw.lib
curand.lib
cusolver.lib
cusparse.lib
nppc.lib
nppial.lib
nppicc.lib
nppicom.lib
nppidei.lib
nppif.lib
nppig.lib
nppim.lib
nppist.lib
nppisu.lib
nppitc.lib
npps.lib
nvblas.lib
nvcuvid.lib
nvgraph.lib
nvml.lib
nvrtc.lib
OpenCL.lib
kernel32.lib
user32.lib
gdi32.lib
winspool.lib
comdlg32.lib
advapi32.lib
shell32.lib
ole32.lib
oleaut32.lib
uuid.lib
odbc32.lib
odbccp32.lib

```

## Configure project properties and file properties
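The exact dialogs differ between Visual Studio and CUDA versions, but broadly this step means enabling the CUDA 9.0 build customization for the project (right-click the project → Build Dependencies → Build Customizations) and setting the Item Type of `test.cu` to "CUDA C/C++" in the file's properties, so the file is compiled by nvcc rather than the host C++ compiler.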

 

# The First CUDA Program

```

#include "cuda_runtime.h"
#include "device_launch_parameters.h"


#include "stdio.h"

 

__global__ void addKernel(int *c, int *a, int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}

cudaError_t addWithCuda(int *c, const int *a, const int *b, const int size);


int main()
{
const int arraysize = 5;
const int a[arraysize] = { 1, 2, 3, 4, 5 };
const int b[arraysize] = { 10, 20, 30, 40, 50};
int c[arraysize] = { 0 };


cudaError_t cudaStatus = addWithCuda(c, a, b, arraysize);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}

printf("{ 1, 2, 3, 4, 5 } + { 10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d} \n",
c[0], c[1], c[2], c[3], c[4]);

cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}

return 0;
}

cudaError_t addWithCuda(int *c, const int *a, const int *b, const int size)
{
int * dev_a = NULL;
int * dev_b = NULL;
int * dev_c = NULL;

cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr,"cudaSetDevice failed!");
return cudaStatus;
}

cudaStatus = cudaMalloc((void**)&dev_c, size*sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed");
goto Error;
}

cudaStatus = cudaMalloc((void**)&dev_b, size*sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed");
goto Error;
}

cudaStatus = cudaMalloc((void**)&dev_a, size*sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed");
goto Error;
}

cudaStatus = cudaMemcpy(dev_a,a, size*sizeof(int),cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed");
goto Error;
}

cudaStatus = cudaMemcpy(dev_b,b, size*sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed");
goto Error;
}

addKernel << <1, size >> >(dev_c, dev_a, dev_b);

cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadSynchronize failed");
goto Error;
}

cudaStatus = cudaMemcpy(c,dev_c,size*sizeof(int),cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed");
goto Error;
}

Error:
cudaFree(dev_c);
cudaFree(dev_b);
cudaFree(dev_a);
return cudaStatus;
}

```
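The launch `addKernel<<<1, size>>>` uses a single block of `size` threads, so it only works while `size` stays within the per-block thread limit (1024 threads on current GPUs). As a sketch beyond the original sample (the kernel name `addKernelLarge` and the block size of 256 are just illustrative choices), the same addition can cover arbitrarily large arrays by deriving a global index from `blockIdx`, `blockDim`, and `threadIdx`:

```
__global__ void addKernelLarge(int *c, const int *a, const int *b, int n)
{
    // Global index of this thread across all blocks.
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // The grid may contain a few more threads than elements, so guard the access.
    if (i < n)
    {
        c[i] = a[i] + b[i];
    }
}

// Launch with enough 256-thread blocks to cover all n elements, e.g.:
//     int threadsPerBlock = 256;
//     int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
//     addKernelLarge<<<blocks, threadsPerBlock>>>(dev_c, dev_a, dev_b, n);
```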

Run:
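If everything is configured correctly, the console should print:

```
{ 1, 2, 3, 4, 5 } + { 10, 20, 30, 40, 50 } = {11, 22, 33, 44, 55}
```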
