#include "convolution.h" #include #include #include #include #define MAX_NUM_GPU 4 #define TS 8 static float *input, *output, *filter; static float *in_d[MAX_NUM_GPU], *out_d[MAX_NUM_GPU], *fil_d[MAX_NUM_GPU]; static int N, C, H, W; static int K, R, S; static int pad; static int dilation; static int stride; static int mpi_rank, mpi_world_size; static int num_devices = 1; static int size[2]; static int NN[MAX_NUM_GPU]; static int OH, OW; #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } extern void alloc_tensor(float **t, int D0, int D1, int D2, int D3); __global__ void conv( float *_input, float *_output, float *_filter, int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride){ const int globalRow = blockDim.x * blockIdx.x + threadIdx.x; const int globalCol = blockDim.y * blockIdx.y + threadIdx.y; int OH, OW; OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1; OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1; int n, k, w; w = globalCol; n = w / (_K * OW); w = w- n *(_K * OW); k = w / OW; w = w - k * OW; int col = w; int row = globalRow; if (globalRow >= OH || globalCol >= _N*_K*OW) return; int start_row = row * _stride - _pad; int start_col = col * _stride - _pad; float o = 0.0f; for (int c = 0; c < _C; c++){ for (int i = 0; i < _R; i++){ for (int j = 0; j < _S; j++){ int h = start_row + i * _dilation; int w = start_col + j * _dilation; if (h < 0 || w < 0 || h >= _H || w >= _W) continue; float in = _input[n*_C*_W*_H + c*_W*_H + h*_W + w]; float fil = _filter[k*_C*_R*_S + c*_R*_S + i*_S+j]; o += in * fil; } } } _output[n*_K*OH*OW + k*OH*OW + row * OW + col ] = o; } void convolution( float *_input, float *_output, float *_filter, int _N, int _C, int _H, int _W, int _K, int _R, int _S, int _pad, int _dilation, int _stride) { int offset = 0; MPI_Request request; MPI_Status status; input = _input; output = _output; filter = _filter; if(mpi_rank == 0 && mpi_world_size == 2 && size[1] != 0){ MPI_Isend(&input[size[0]*C*H*W], size[1]*C*H*W, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request); MPI_Isend(filter, _K*_C*_R*_S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request); if(size[mpi_rank] < MAX_NUM_GPU){ num_devices = size[mpi_rank]; } } else if (mpi_rank == 1 && size[mpi_rank] != 0){ alloc_tensor(&input, size[1], C, H, W); alloc_tensor(&output, size[1], K, OH, OW); alloc_tensor(&filter, _K, _C, _R, _S); MPI_Recv(input, size[1]*C*H*W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status); MPI_Recv(filter, _K*_C*_R*_S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status); if(size[mpi_rank] < MAX_NUM_GPU){ num_devices = size[mpi_rank]; } } offset = 0; for (int i = 0; i < num_devices; i++){ CUDA_CALL(cudaMemcpy(in_d[i], input + offset, NN[i]*C*H*W*sizeof(float), cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(fil_d[i], filter, K*C*R*S*sizeof(float), cudaMemcpyHostToDevice)); offset += NN[i] * C * H * W; } for(int i = 0; i>>(in_d[i], out_d[i], fil_d[i], NN[i], _C, _H, _W, _K, _R, _S, _pad, _dilation, _stride); } for(int i = 0; i 4) size[1] = _N / 2; else size[1] = 0; size[0] = N - size[1]; if(size[mpi_rank] < MAX_NUM_GPU){ num_devices = size[mpi_rank]; for(int i = 0; i < size[mpi_rank]; i++){ NN[i] = 1; } } else { num_devices = MAX_NUM_GPU; int quotient = size[mpi_rank] / MAX_NUM_GPU; int remain = size[mpi_rank] % MAX_NUM_GPU; for (int i = 0; i < MAX_NUM_GPU; i++){ NN[i] = quotient; if (i < remain) NN[i]++; } } for (int i = 0; i < 
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMalloc(&in_d[i], NN[i] * C * H * W * sizeof(float)));
    CUDA_CALL(cudaMalloc(&out_d[i], NN[i] * K * OH * OW * sizeof(float)));
    CUDA_CALL(cudaMalloc(&fil_d[i], K * C * R * S * sizeof(float)));
  }
}

void convolution_final(
    int _N, int _C, int _H, int _W, int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
}