
CUDA Examples

scalar add

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>

__global__ void add(int *a, int *b, int *c)
{
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

int main(void)
{
    int a, b, c;                  // host scalars
    int *da, *db, *dc;            // device pointers
    int size = 1 * sizeof(int);   // scalar

    cudaMalloc((void**)&da, size);
    cudaMalloc((void**)&db, size);
    cudaMalloc((void**)&dc, size);

    a = 2;
    b = 7;
    cudaMemcpy(da, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, &b, size, cudaMemcpyHostToDevice);

    add<<<1,1>>>(da, db, dc);

    cudaMemcpy(&c, dc, size, cudaMemcpyDeviceToHost);
    std::cout << c << std::endl;

    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);

    std::cout << "hello";

    // H has storage for 4 integers
    thrust::host_vector<int> H(4);

    // initialize individual elements
    H[0] = 14;
    H[1] = 20;
    H[2] = 38;
    H[3] = 46;

    // H.size() returns the size of vector H
    std::cout << "H has size " << H.size() << std::endl;

    // print contents of H
    for (int i = 0; i < H.size(); i++)
        std::cout << "H[" << i << "] = " << H[i] << std::endl;

    // resize H
    H.resize(2);
    std::cout << "H now has size " << H.size() << std::endl;

    // copy host_vector H to device_vector D
    thrust::device_vector<int> D = H;

    // elements of D can be modified
    D[0] = 99;
    D[1] = 88;

    // print contents of D
    for (int i = 0; i < D.size(); i++)
        std::cout << "D[" << i << "] = " << D[i] << std::endl;

    // H and D are automatically freed when the function returns
    return 0;
}
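None of the CUDA runtime calls above check their return values. A minimal error-checking sketch, assuming a hypothetical CHECK_CUDA macro name that is not part of the original example; every runtime call returns a cudaError_t that cudaGetErrorString can turn into a readable message:

#include <cstdio>
#include <cstdlib>

// Hypothetical helper, not part of the original example.
#define CHECK_CUDA(call)                                               \
    do {                                                               \
        cudaError_t err = (call);                                      \
        if (err != cudaSuccess) {                                      \
            std::fprintf(stderr, "CUDA error %s at %s:%d\n",           \
                         cudaGetErrorString(err), __FILE__, __LINE__); \
            std::exit(EXIT_FAILURE);                                   \
        }                                                              \
    } while (0)

// Usage, e.g.:
//   CHECK_CUDA(cudaMalloc((void**)&da, size));
//   CHECK_CUDA(cudaMemcpy(da, &a, size, cudaMemcpyHostToDevice));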
block or thread

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <cstdlib>
#include <cstring>

const int N = 512;

__global__ void add(int *a, int *b, int *c)
{
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];        // one block per element
    //c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];   // one thread per element (use with <<<1,N>>>)
}

int main(void)
{
    int *a, *b, *c;       // host arrays
    int *da, *db, *dc;    // device arrays
    int size = N * sizeof(int);

    cudaMalloc((void**)&da, size);
    cudaMalloc((void**)&db, size);
    cudaMalloc((void**)&dc, size);

    a = (int *) malloc(size);
    memset(a, 0, N * sizeof(int));   // rand_ints(a, N);
    a[0] = 10;
    a[3] = 3;

    b = (int *) malloc(size);
    memset(b, 0, N * sizeof(int));   // rand_ints(b, N);
    b[0] = 2;
    b[4] = 32;

    c = (int *) malloc(size);
    memset(c, 0, N * sizeof(int));   // rand_ints(c, N);

    cudaMemcpy(da, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, size, cudaMemcpyHostToDevice);

    add<<<N,1>>>(da, db, dc);        // N blocks, 1 thread each
    //add<<<1,N>>>(da, db, dc);      // 1 block, N threads (switch kernel to threadIdx.x)

    cudaMemcpy(c, dc, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < 20; i++)
        std::cout << c[i] << std::endl;

    //__syncthreads();           // device-only; has no effect called from host code
    cudaDeviceSynchronize();

    free(a); free(b); free(c);
    cudaFree(da); cudaFree(db); cudaFree(dc);

    return 0;
}
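The commented-out lines above hint at the thread-based alternative. Spelled out as a separate kernel, a minimal sketch (add_threads is a name introduced here, not in the original post; the same data setup is assumed):

// Thread-indexed variant of the same kernel: one block of N threads,
// each thread handles one element via threadIdx.x instead of blockIdx.x.
__global__ void add_threads(int *a, int *b, int *c)
{
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

// Launch with a single block of N threads; N must stay within the
// per-block thread limit (typically 1024 on current GPUs):
//   add_threads<<<1, N>>>(da, db, dc);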

 

block+thread
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <cstdlib>
#include <cstring>

/*

#define N (2048*2048)
#define M 512 // THREADS_PER_BLOCK

add<<<N/M, M>>>(d_a, d_b, d_c);

N/M blocks used
M threads per block
*/
const int N = 2048 * 2048;
const int M = 512;           // threads per block

__global__ void add(int *a, int *b, int *c, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)                       // guard against out-of-range threads
        c[index] = a[index] + b[index];
}

int main(void)
{
    int *a, *b, *c;       // host arrays
    int *da, *db, *dc;    // device arrays
    int size = N * sizeof(int);

    cudaMalloc((void**)&da, size);
    cudaMalloc((void**)&db, size);
    cudaMalloc((void**)&dc, size);

    a = (int *) malloc(size);
    memset(a, 0, N * sizeof(int));   // rand_ints(a, N);
    a[0] = 10;
    a[3] = 3;

    b = (int *) malloc(size);
    memset(b, 0, N * sizeof(int));   // rand_ints(b, N);
    b[0] = 2;
    b[4] = 32;

    c = (int *) malloc(size);
    memset(c, 0, N * sizeof(int));   // rand_ints(c, N);

    cudaMemcpy(da, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, size, cudaMemcpyHostToDevice);

    add<<<(N + M - 1) / M, M>>>(da, db, dc, N);   // round up so every element is covered

    cudaMemcpy(c, dc, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < 20; i++)
        std::cout << c[i] << std::endl;

    //__syncthreads();           // device-only; has no effect called from host code
    cudaDeviceSynchronize();

    free(a); free(b); free(c);
    cudaFree(da); cudaFree(db); cudaFree(dc);

    return 0;
}
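The guarded index above assigns exactly one thread per element, so the grid size must grow with N. A common variant not covered in the original post is a grid-stride loop, which lets a fixed launch configuration handle any n; a minimal sketch (add_strided is an illustrative name, not from the original code):

// Grid-stride variant: each thread starts at its global index and strides
// by the total number of threads in the grid, so the launch size no longer
// has to match n.
__global__ void add_strided(int *a, int *b, int *c, int n)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride)
        c[i] = a[i] + b[i];
}

// Example launch: a modest, fixed grid still covers all N elements.
//   add_strided<<<128, M>>>(da, db, dc, N);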

 


Original: http://www.cnblogs.com/huashiyiqike/p/3869093.html
