2 * Copyright 1993-2008 NVIDIA Corporation. All rights reserved.
6 * This source code is subject to NVIDIA ownership rights under U.S. and
7 * international Copyright laws. Users and possessors of this source code
8 * are hereby granted a nonexclusive, royalty-free license to use this code
9 * in individual and commercial software.
11 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
12 * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
13 * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
14 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
16 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
17 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
18 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
19 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
20 * OR PERFORMANCE OF THIS SOURCE CODE.
22 * U.S. Government End Users. This source code is a "commercial item" as
23 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
24 * "commercial computer software" and "commercial computer software
25 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
26 * and is provided to the U.S. Government only as a commercial end item.
27 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
28 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
29 * source code with only those rights set forth herein.
31 * Any use of this source code in individual and commercial software must
32 * include, in the user documentation and internal comments to the code,
33 * the above Disclaimer and U.S. Government End Users Notice.
// Simple utility function to check for CUDA runtime errors
// (defined below main; prints the pending error, if any, and exits).
void checkCUDAError(const char *msg);
// Part 3 of 5: implement the kernel
//
// Each thread writes one element: element (block b, thread t) of d_a
// receives 1000*b + t, so the host can verify placement afterwards.
// NOTE(review): there is no bounds guard — this assumes the launch grid
// exactly covers the array (gridDim.x * blockDim.x elements). Do not
// launch with more threads than elements.
__global__ void myFirstKernel(int *d_a)
{
    // Flat global index of this thread across the 1-D grid.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    d_a[idx] = 1000 * blockIdx.x + threadIdx.x;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
//
// Allocates a small int array on host and device, launches myFirstKernel
// to fill the device array with 1000*blockIdx + threadIdx, copies the
// result back, and asserts every element is correct. Prints "Correct!"
// and returns 0 on success; exits non-zero on any CUDA or allocation error.
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    // pointer for host memory
    int *h_a = NULL;

    // pointer for device memory
    int *d_a = NULL;

    // define grid and block size
    // NOTE(review): the numBlocks declaration was lost in this copy of the
    // file; 8 matches the original SDK exercise — confirm against upstream.
    int numBlocks = 8;
    int numThreadsPerBlock = 8;

    // Part 1 of 5: allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *)malloc(memSize);
    if (h_a == NULL) {
        fprintf(stderr, "host malloc of %zu bytes failed\n", memSize);
        return EXIT_FAILURE;
    }
    cudaMalloc((void **)&d_a, memSize);
    checkCUDAError("cudaMalloc");

    // Part 2 of 5: launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);
    myFirstKernel<<<dimGrid, dimBlock>>>(d_a);

    // block until the device has completed
    // (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // supported replacement and also surfaces asynchronous kernel errors)
    cudaDeviceSynchronize();

    // check if kernel execution generated an error
    checkCUDAError("kernel execution");

    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");

    // Part 5 of 5: verify the data returned to the host is correct
    for (int i = 0; i < numBlocks; i++) {
        for (int j = 0; j < numThreadsPerBlock; j++) {
            assert(h_a[i * numThreadsPerBlock + j] == 1000 * i + j);
        }
    }

    // release device and host memory (the original leaked both)
    cudaFree(d_a);
    free(h_a);

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");

    return 0;
}
// Simple utility function to check for CUDA runtime errors.
//
// Reads (and clears) the CUDA runtime's sticky error state via
// cudaGetLastError(). If an error is pending, prints
// "Cuda error: <msg>: <description>." to stderr and terminates the
// process — CUDA errors are sticky, so continuing would make every
// subsequent API call fail mysteriously on the same error.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg,
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}