#include "util.h" #include "convolution.h" #include #include #include #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } #define MAX_NODES 2 #define MAX_NUM_GPU 4 #define TS 8 static float *input, *output, *filter; static int N, C, H, W; static int K, R, S; static int OH, OW; static int pad; static int dilation; static int stride; static int mpi_rank, mpi_world_size; static int num_devices; static int Nstart[MAX_NUM_GPU], Nsize[MAX_NUM_GPU]; static int NN[MAX_NODES]; MPI_Status status; MPI_Request request; static float *input_d[MAX_NUM_GPU]; static float *output_d[MAX_NUM_GPU]; static float *filter_d[MAX_NUM_GPU]; __global__ void conv( float *input, float *output, float *filter, int N, int C, int H, int W, int K, int R, int S, int pad, int dilation, int stride) { int OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1; int OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1; int n = blockIdx.x; int k = blockIdx.y; int oh = blockIdx.z; int ow = threadIdx.x; float o = 0.f; for (int c = 0; c < C; ++c) { for (int r = 0; r < R; ++r) { for (int s = 0; s < S; ++s) { int h = oh * stride - pad + r * dilation; int w = ow * stride - pad + s * dilation; if (h < 0 || h >= H || w < 0 || w >= W) continue; float i = input[n * C * H * W + c * H * W + h * W + w]; float f = filter[k * C * R * S + c * R * S + r * S + s]; o += i * f; } } } output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o; } void convolution( float *_input, float *_output, float *_filter, int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { input = _input; output = _output; filter = _filter; // Scatter Input if (mpi_rank == 0 && NN[1] != 0) { MPI_Isend(&input[NN[0] * C * H * W], (NN[1] * C * H * W), MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request); MPI_Isend(filter, (K * C * R * S), MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request); } else if (mpi_rank == 1 && NN[mpi_rank] != 0) { alloc_tensor(&input, NN[1], C, H, W); alloc_tensor(&output, NN[1], K, OH, OW); alloc_tensor(&filter, K, C, R, S); MPI_Recv(input, (NN[1] * C * H * W), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status); MPI_Recv(filter, (K * C * R * S), MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status); } if(NN[mpi_rank] < MAX_NUM_GPU) num_devices = NN[mpi_rank]; // Upload input and filter to every GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(input_d[i], input + Nstart[i] * C * H * W, Nsize[i] * C * H * W * sizeof(float), cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(filter_d[i], filter, K * C * R * S * sizeof(float), cudaMemcpyHostToDevice) ); } for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize() ); } for (int i = 0; i < num_devices; i++) { dim3 gridDim(Nsize[i], K, OH); dim3 blockDim(OW, 1); CUDA_CALL( cudaSetDevice(i) ); conv<<>>(input_d[i], output_d[i], filter_d[i], Nsize[i], C, H, W, K, R, S, pad, dilation, stride); } for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize() ); } // Download output from GPUs for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(output + Nstart[i] * K * OH * OW, output_d[i], Nsize[i] * K * OH * OW * sizeof(float), cudaMemcpyDeviceToHost) ); } for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize() ); } // Gather Output if (mpi_rank == 0) { MPI_Recv(&output[NN[0] * K * OH * OW], (NN[1] * K 
* OH * OW), MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &status); } else { MPI_Isend(output, NN[1] * K * OH * OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request); } } void convolution_init( int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { N = _N; C = _C; H = _H; W = _W; K = _K; R = _R; S = _S; pad = _pad; dilation = _dilation; stride = _stride; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size); //printf("\nNode[%d] mpi_ramk = %d, mpi_world_size = %d\n", mpi_rank, mpi_rank, mpi_world_size); OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1; OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1; CUDA_CALL( cudaGetDeviceCount(&num_devices) ); printf("Using %d devices\n", num_devices); for (int i = 0; i < num_devices; i++) { cudaDeviceProp prop; CUDA_CALL( cudaGetDeviceProperties(&prop, i) ); // Try printing more detailed information here printf("Node[%d] [GPU %d] %s\n", mpi_rank, i, prop.name); } if (num_devices <= 0) { printf("No CUDA device found. Aborting\n"); exit(1); } if(mpi_world_size == 2) { NN[0] = N - (N / 2); NN[1] = N / 2; } else { NN[0] = N; NN[1] = 0; } // Setup problem size for each GPU if(NN[mpi_rank] < MAX_NUM_GPU) { num_devices = NN[mpi_rank]; for(int i = 0; i < NN[mpi_rank]; i++) { Nstart[i] = i; Nsize[i] = 1; } } else { for (int i = 0; i < num_devices; i++) { Nstart[i] = (NN[mpi_rank] / num_devices) * i; Nsize[i] = ((NN[mpi_rank] / num_devices) * (i + 1)) - Nstart[i]; } Nsize[num_devices - 1] = NN[mpi_rank] - Nstart[num_devices - 1]; } // Allocate device memory for each GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaMalloc(&input_d[i], Nsize[i] * C * H * W * sizeof(float)) ); CUDA_CALL( cudaMalloc(&filter_d[i], K * C * R * S * sizeof(float)) ); CUDA_CALL( cudaMalloc(&output_d[i], Nsize[i] * K * OH * OW * sizeof(float)) ); } } void convolution_final( int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { }
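
/*
 * Usage sketch (not part of the original file): a minimal MPI driver showing
 * the expected call order for the API above. The problem sizes, the main()
 * harness, and the idea that only rank 0 holds the full tensors are
 * assumptions inferred from the scatter/gather logic; alloc_tensor() comes
 * from util.h. Guarded with #if 0 so this file still compiles unchanged.
 */
#if 0
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  // Hypothetical problem size.
  int N = 8, C = 3, H = 32, W = 32;
  int K = 16, R = 3, S = 3;
  int pad = 1, dilation = 1, stride = 1;
  int OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  int OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Rank 0 owns the full tensors; rank 1 allocates its share internally.
  float *in = NULL, *out = NULL, *filt = NULL;
  if (rank == 0) {
    alloc_tensor(&in, N, C, H, W);
    alloc_tensor(&filt, K, C, R, S);
    alloc_tensor(&out, N, K, OH, OW);
    // ... fill `in` and `filt` with data here ...
  }

  convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
  convolution(in, out, filt, N, C, H, W, K, R, S, pad, dilation, stride);
  convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);

  // On return, rank 0's `out` holds the gathered result.
  MPI_Finalize();
  return 0;
}
#endif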