#include "convolution.h" #include "util.h" #include #include #include #include #define min(a,b) (a>b?b:a) #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } static float *input, *output, *filter; static int N, C, H, W; static int K, R, S; static int OH, OW; static int pad; static int dilation; static int stride; static int mpi_rank, mpi_world_size; void print_tensor(float *m, int A, int B, int C, int D) { for (int i = 0; i < A; ++i) { for (int j = 0; j < B; ++j) { printf("[%d][%d]\n", i, j); for (int k = 0; k < C; ++k) { for (int l = 0; l < D; ++l) { printf("%+.3f ", m[i*B*C*D + j*C*D + k*D + l]); } printf("\n"); } } } } __device__ void print_1d_arr(float *m, int size) { for (int i = 0; i < size; ++i) { printf("%+.3f ", m[i]); } printf("\n"); } #define OTILE_SIZE 32 // Output tile == Block size #define FTILE_SIZE 16 // Filter tile #define ITILE_SIZE 64 // Input tile //#define OTILE_SIZE 4 // Output tile == Block size //#define FTILE_SIZE 2 // Filter tile //#define ITILE_SIZE 8 // Input tile #define TEST_TILE_SIZE OTILE_SIZE #define OF_RATIO (OTILE_SIZE/FTILE_SIZE) #define IO_RATIO (ITILE_SIZE/OTILE_SIZE) #define MAX_NUM_GPU 4 int num_devices = 0; // Array of device (GPU) pointers static float *input_d[MAX_NUM_GPU]; static float *filter_d[MAX_NUM_GPU]; static float *output_d[MAX_NUM_GPU]; static float *test_d[MAX_NUM_GPU]; static int Nbegin[MAX_NUM_GPU], Nend[MAX_NUM_GPU]; void gpu_init(){ CUDA_CALL( cudaGetDeviceCount(&num_devices) ); if(num_devices > MAX_NUM_GPU) num_devices = MAX_NUM_GPU; //printf("Using %d devices\n", num_devices); for (int i = 0; i < num_devices; i++) { cudaDeviceProp prop; CUDA_CALL( cudaGetDeviceProperties(&prop, i) ); // Try printing more detailed information here //printf("[GPU %d] %s\n", i, prop.name); } if (num_devices <= 0) { printf("No CUDA device found. 
Aborting\n"); exit(1); } // Setup problem size for each GPU for (int i = 0; i < num_devices; i++) { Nbegin[i] = (N / num_devices) * i; Nend[i] = (N / num_devices) * (i + 1); } Nend[num_devices - 1] = N; // Allocate device memory for each GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaMalloc(&input_d[i], (Nend[i] - Nbegin[i]) * (C*H*W) * sizeof(float)) ); CUDA_CALL( cudaMalloc(&filter_d[i], (K*C*R*S) * sizeof(float)) ); CUDA_CALL( cudaMalloc(&output_d[i], (Nend[i] - Nbegin[i]) * (K*OH*OW) * sizeof(float)) ); //CUDA_CALL( cudaMalloc(&test_d[i], (TEST_TILE_SIZE*TEST_TILE_SIZE*sizeof(float)) )); } // Upload A and B matrix to every GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(input_d[i], input + Nbegin[i] * (C*H*W), (Nend[i] - Nbegin[i]) * (C*H*W) * sizeof(float), cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(filter_d[i], filter, (K*C*R*S) * sizeof(float), cudaMemcpyHostToDevice) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { //CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize()); } } void convolution_gpu_final(){ // Download C matrix from GPUs for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(output + Nbegin[i] * (K*OH*OW), output_d[i], (Nend[i] - Nbegin[i]) * (K*OH*OW) * sizeof(float), cudaMemcpyDeviceToHost)); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { //CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize()); } /*float* test_tile_ret; alloc_tensor(&test_tile_ret, 1, 1, TEST_TILE_SIZE, TEST_TILE_SIZE); CUDA_CALL( cudaMemcpy(test_tile_ret, test_d[0], TEST_TILE_SIZE*TEST_TILE_SIZE*sizeof(float), cudaMemcpyDeviceToHost));*/ //print_tensor(input, N, C, H, W); //print_tensor(filter, K, C, R, S); //print_tensor(test_tile_ret, 1, 1, TEST_TILE_SIZE, TEST_TILE_SIZE); //print_tensor(output, N, C, OH, OW); } __global__ void convolution_kernel( float *input, float *filter, float *output, /*float *test,*/ int N, int C, int H, int W, int K, int R, int S, int OH, int OW, int pad, int dilation, int stride) { int row = threadIdx.y; int col = threadIdx.x; int oh = blockDim.y * blockIdx.y + threadIdx.y; int ow = blockDim.x * blockIdx.x + threadIdx.x; //dim2 input_block_min; int input_block_min_y = (blockDim.y*blockIdx.y) * stride - pad; int input_block_min_x = (blockDim.x*blockIdx.x) * stride - pad; //dim2 input_block_max; int input_block_max_y = min(blockDim.y*blockIdx.y + blockDim.y - 1, H-1) * stride - pad + (R-1) * dilation; int input_block_max_x = min(blockDim.x*blockIdx.x + blockDim.x - 1, W-1) * stride - pad + (S-1) * dilation; /*if(oh==0 && ow==0){ printf("stride=%d\n", stride); printf("block min(%d,%d)\n", input_block_min_y, input_block_min_x); printf("block max(%d,%d)\n", input_block_max_y, input_block_max_x); }*/ __shared__ float Isub[ITILE_SIZE][ITILE_SIZE]; __shared__ float Fsub[FTILE_SIZE][FTILE_SIZE]; for (int n = 0; n < N; ++n) { for (int k = 0; k < K; ++k) { float o = 0.f; for (int c = 0; c < C; ++c) { //int h_base_step = ITILE_SIZE - R + 1; //int w_base_step = ITILE_SIZE - S + 1; int input_slice_min_x, input_slice_min_y; int input_slice_max_x, input_slice_max_y; for(input_slice_min_y = input_block_min_y; input_slice_min_y <= input_block_max_y; input_slice_min_y += ITILE_SIZE) { input_slice_max_y = min(input_slice_min_y + ITILE_SIZE - 1, input_block_max_y); for(input_slice_min_x = input_block_min_x; input_slice_min_x <= input_block_max_x; input_slice_min_x += ITILE_SIZE) { input_slice_max_x = 
__global__ void convolution_kernel_2(
    float *input, float *filter, float *output, /*float *test,*/
    int N, int C, int H, int W, int K, int R, int S,
    int OH, int OW, int pad, int dilation, int stride) {
  /*if(oh>=OH || ow>=OW) return;*/
  int n = blockIdx.x;
  int k = blockIdx.y;
  int oh = blockIdx.z;
  int ow = threadIdx.x;
  //for (int n = 0; n < N; ++n) {
  //  for (int k = 0; k < K; ++k) {
  float o = 0.f;
  int input_c_ptr = n*(C*H*W);
  int filter_c_ptr = k*(C*R*S);
  for (int c = 0; c < C; ++c) {
    for (int r = 0; r < R; ++r) {
      for (int s = 0; s < S; ++s) {
        int h = oh * stride - pad + r * dilation;
        int w = ow * stride - pad + s * dilation;
        if (h < 0 || h >= H || w < 0 || w >= W) continue;
        //float i = input[n * C * H * W + c * H * W + h * W + w];
        float i = input[input_c_ptr + h*W + w];
        //float f = filter[k*(C*R*S) + c*(R*S) + r*(S) + s];
        float f = filter[filter_c_ptr + r*(S) + s];
        o += i * f;
      }
    }
    input_c_ptr += H*W;
    filter_c_ptr += R*S;
  }
  int output_idx = n*(K*OH*OW) + k*(OH*OW) + oh*(OW) + ow;
  //if(output_idx < (N*K*OH*OW))
  output[output_idx] = o;
  //output[n*(K*OH*OW) + k*(OH*OW) + oh*(OW) + ow] = oh*100.0f + ow*1.0f;
  //  }
  //}
}

void convolution_gpu() {
  gpu_init();

  for (int i = 0; i < num_devices; i++) {
    // kernel
    //dim3 blockDim(OTILE_SIZE, OTILE_SIZE, 1);
    //dim3 gridDim((OW+OTILE_SIZE-1)/OTILE_SIZE, (OH+OTILE_SIZE-1)/OTILE_SIZE, 1);
    // kernel_2
    dim3 blockDim(OW, 1);
    dim3 gridDim(Nend[i]-Nbegin[i], K, OH);
    CUDA_CALL( cudaSetDevice(i) );
    //printf("kernel run:%d\n", i);
    convolution_kernel_2<<<gridDim, blockDim>>>(
        input_d[i], filter_d[i], output_d[i], /*test_d[i],*/
        Nend[i]-Nbegin[i], C, H, W, K, R, S, OH, OW, pad, dilation, stride);
  }

  convolution_gpu_final();

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
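/*
 * Output size computed at the top of convolution() below follows the usual
 * convolution formula
 *   OH = (H + 2*pad - dilation*(R-1) - 1) / stride + 1   (and likewise OW).
 * Illustrative check (values assumed, not from the skeleton):
 *   H = 224, pad = 1, dilation = 1, R = 3, stride = 2
 *   -> OH = (224 + 2 - 2 - 1) / 2 + 1 = 112.
 */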
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  input = _input;
  output = _output;
  filter = _filter;

  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  int default_div_size = N / mpi_world_size;
  MPI_Status status;
  MPI_Request request;

  if (mpi_rank == 0) {
    // 1. Distribute batch to the other nodes
    //timer_start(1);
    MPI_Request arrA_req[4];
    MPI_Status arrA_status[4];
    for (int target_rank = 1; target_rank < mpi_world_size; target_rank++) {
      int div_start, div_size;
      div_start = target_rank * default_div_size;
      div_size = default_div_size;
      if (target_rank == (mpi_world_size - 1))
        div_size += N - (default_div_size * mpi_world_size);
      //printf("send. target_rank=%d, div_start=%d, div_size=%d, tot_size=%d\n",
      //       target_rank, div_start, div_size, div_size * C*H*W);
      MPI_Isend(input + (div_start * C*H*W), div_size * C*H*W, MPI_FLOAT,
                target_rank, 0, MPI_COMM_WORLD, &arrA_req[target_rank-1]);
      MPI_Isend(filter, K*C*R*S, MPI_FLOAT, target_rank, 0, MPI_COMM_WORLD, &request);
    }
    // 2. Broadcast all filters
    //MPI_Bcast(filter, K*C*R*S, MPI_FLOAT, 0, MPI_COMM_WORLD);
    //double elapsed_time = timer_stop(1);
    //printf("[rank %d] scatter time: %f sec\n", mpi_rank, elapsed_time);

    int original_N = N;
    N = default_div_size;

    // 3. Do convolution on rank 0's own slice
    //timer_start(1);
    //convolution_omp();
    convolution_gpu();
    //elapsed_time = timer_stop(1);
    //printf("[rank %d] time: %f sec\n", mpi_rank, elapsed_time);

    N = original_N;
    //timer_start(1);

    // 4. Receive results from the other nodes
    MPI_Request arrC_req[4];
    MPI_Status arrC_status[4];
    for (int target_rank = 1; target_rank < mpi_world_size; target_rank++) {
      int div_start, div_size;
      div_start = target_rank * default_div_size;
      div_size = default_div_size;
      if (target_rank == (mpi_world_size - 1))
        div_size += N - (default_div_size * mpi_world_size);
      //printf("wait div_size=%d\n", div_size);
      MPI_Irecv(output + (div_start * K*OH*OW), div_size * K*OH*OW, MPI_FLOAT,
                target_rank, 0, MPI_COMM_WORLD, &arrC_req[target_rank-1]);
      //MPI_Recv(output + (div_start * K*OH*OW), div_size * K*OH*OW, MPI_FLOAT,
      //         target_rank, 0, MPI_COMM_WORLD, &arrC_status[target_rank-1]);
    }
    //MPI_Waitall(mpi_world_size-1, arrA_req, arrA_status);
    MPI_Waitall(mpi_world_size-1, arrC_req, arrC_status);
    //elapsed_time = timer_stop(1);
    //printf("[rank %d] collect time: %f sec\n", mpi_rank, elapsed_time);
  } else {
    // 0. Alloc local memory
    int div_size;
    div_size = default_div_size;
    if (mpi_rank == (mpi_world_size - 1))
      div_size += N - (default_div_size * mpi_world_size);
    int original_N = N;
    N = div_size; // Adjust N size
    //printf("default div size=%d\n", default_div_size);
    alloc_tensor(&input, N, C, H, W);
    alloc_tensor(&filter, K, C, R, S);
    alloc_tensor(&output, N, K, OH, OW);

    // 1. Recv this rank's part of the input
    //printf("sub. rank=%d, div_size=%d, Recv start, tot_size=%d\n", mpi_rank, div_size, N*C*H*W);
    MPI_Recv(input, N*C*H*W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    //printf("sub. rank=%d, div_size=%d, Recv end\n", mpi_rank, div_size);

    // 2. Recv the full filter
    //MPI_Bcast(filter, K*C*R*S, MPI_FLOAT, 0, MPI_COMM_WORLD);
    MPI_Recv(filter, K*C*R*S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);

    // 3. Do convolution
    //timer_start(1);
    //convolution_omp();
    convolution_gpu();
    //double elapsed_time = timer_stop(1);
    //printf("[rank %d] time: %f sec\n", mpi_rank, elapsed_time);

    // 4. Send the output back to rank 0
    //printf("sub. end. my rank=%d, div_size=%d\n", mpi_rank, div_size);
    MPI_Send(output, N*K*OH*OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
    N = original_N;

    // free
    free(input);
    free(filter);
    free(output);
  }
}

void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;

  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
}

void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
}
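/*
 * Assumed calling sequence (a sketch of the driver, not part of this file):
 * every rank initializes MPI and calls convolution_init() with the problem
 * description; rank 0 owns the full input/output/filter tensors when
 * convolution() is called; convolution_final() runs before MPI_Finalize().
 * Hypothetical outline:
 *
 *   MPI_Init(&argc, &argv);
 *   convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
 *   convolution(input, output, filter, N, C, H, W, K, R, S, pad, dilation, stride);
 *   convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);
 *   MPI_Finalize();
 */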