#include "convolution.h" #include #include #include #include #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } #define MAX_NUM_GPU 4 int num_devices = 0; float *input, *output, *filter; static int N, C, H, W; static int K, R, S; static int OH, OW; static int pad; static int dilation; static int stride; static int mpi_rank, mpi_world_size; static float *in_d[MAX_NUM_GPU]; static float *fil_d[MAX_NUM_GPU]; static float *out_d[MAX_NUM_GPU]; static int Nbegin[MAX_NUM_GPU], Nend[MAX_NUM_GPU]; int N_dev[MAX_NUM_GPU]; __global__ void sgemm(float *_input, float *_output, float *_filter, int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { float* input = _input; float* output = _output; float* filter = _filter; int N = _N; int C = _C; int H = _H; int W = _W; int K = _K; int R = _R; int S = _S; int pad = _pad; int dilation = _dilation; int stride = _stride; int OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1; int OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1; int n = blockIdx.x; int k = blockIdx.y; int oh = blockIdx.z; int ow = threadIdx.x; float o = 0.0f; for (int c=0; c=H || w<0 || w>=W) continue; float i = input[n*C*H*W + c*H*W + h*W +w]; float f = filter[k*C*R*S + c*R*S + r*S + s]; o += i * f; } } } output[n*K*OH*OW + k*OH*OW + oh*OW + ow] = o; } void convolution( float *_input, float *_output, float *_filter, int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { if ( mpi_rank != 0 ) return; input = _input; output = _output; filter = _filter; OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1; OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1; CUDA_CALL( cudaGetDeviceCount(&num_devices) ); printf("Using %d devices\n", num_devices); for (int i = 0; i < num_devices; i++) { cudaDeviceProp prop; CUDA_CALL( cudaGetDeviceProperties(&prop, i) ); // Try printing more detailed information here printf("[GPU %d] %s\n", i, prop.name); } if (num_devices <= 0) { printf("No CUDA device found. 
Aborting\n"); exit(1); } // Setup problem size for each GPU for (int i = 0; i < num_devices; i++) { Nbegin[i] = (N / num_devices) * i; Nend[i] = (N / num_devices) * (i + 1); } Nend[num_devices - 1] = N; // Allocate device memory for each GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaMalloc(&in_d[i], (Nend[i]-Nbegin[i]) * C*H*W * sizeof(float)) ); CUDA_CALL( cudaMalloc(&fil_d[i], K*C*R*S * sizeof(float)) ); CUDA_CALL( cudaMalloc(&out_d[i], (Nend[i]-Nbegin[i]) * K*OH*OW * sizeof(float)) ); } for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } // Upload A and B matrix to every GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(in_d[i], input + Nbegin[i] * C*H*W, (Nend[i]-Nbegin[i]) * C*H*W * sizeof(float), cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(fil_d[i], filter, K*C*R*S * sizeof(float), cudaMemcpyHostToDevice) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } // Launch kernel on every GPU for (int i = 0; i < num_devices; i++) { dim3 blockDim(OW, 1, 1); dim3 gridDim((Nend[i]-Nbegin[i]), K, OH); N_dev[i]=Nend[i]-Nbegin[i]; CUDA_CALL( cudaSetDevice(i) ); sgemm<<>>(in_d[i], out_d[i], fil_d[i], N_dev[i], C, H, W, K, R, S, pad, dilation, stride); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } // Download C matrix from GPUs for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(output + Nbegin[i] * K*OH*OW, out_d[i], (Nend[i]-Nbegin[i]) * K*OH*OW * sizeof(float), cudaMemcpyDeviceToHost) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } } void convolution_init( int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { N = _N; C = _C; H = _H; W = _W; K = _K; R = _R; S = _S; pad = _pad; dilation = _dilation; stride = _stride; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size); } void convolution_final( int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { }