#include "mat_mul.h" #include #include #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } #define MAX_NUM_GPU 4 int num_devices = 0; __global__ void sgemm(float *A, float *B, float *C, int M, int N, int K) { #if 1 #define TS 32 #define WPT 16 #define RTS (TS/WPT) int r = threadIdx.x; //get_local_id(0); int c = threadIdx.y; //get_local_id(1); int g_r = (blockDim.x * WPT) * blockIdx.x + r; //TS * get_group_id(0) + r; int g_c = blockDim.y * blockIdx.y + c; //TS * get_group_id(1) + c; __shared__ float PA[TS][TS]; __shared__ float PB[TS][TS]; float tmp[WPT]; for (int w = 0; w < WPT; w++) { tmp[w] = 0.0f; } int n_tiles = (K + TS - 1) / TS; for (int t = 0; t < n_tiles; t++) { for (int w = 0; w < WPT; w++) { int t_r = TS * t + r; int t_c = TS * t + c; PA[r + w * RTS][c] = ((g_r + w * RTS) >= M || t_c >= K)? 0 : A[(g_r + w * RTS) * K + t_c]; PB[r + w * RTS][c] = ((t_r + w * RTS) >= K || g_c >= N)? 0 : B[(t_r + w * RTS) * N + g_c]; } __syncthreads(); for (int k = 0; k < TS; k++) { for (int w = 0; w < WPT; w++) { tmp[w] += PA[r + w * RTS][k] * PB[k][c]; } } __syncthreads(); } for (int w = 0; w < WPT; w++) { if ((g_r + w * RTS) >= M || g_c >= N) continue; C[(g_r + w * RTS) * N + g_c] = tmp[w]; } #else int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= M || j >= N) return; C[i * N + j] = 0; for (int k = 0; k < K; ++k) { C[i * N + j] += A[i * K + k] * B[k * N + j]; } #endif } // Array of device (GPU) pointers static float *a_d[MAX_NUM_GPU]; static float *b_d[MAX_NUM_GPU]; static float *c_d[MAX_NUM_GPU]; static int M, N, K; static int Mbegin[MAX_NUM_GPU], Mend[MAX_NUM_GPU]; #define TS 32 #define WPT 16 void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K) { // Launch kernel on every GPU for (int i = 0; i < num_devices; i++) { dim3 blockDim(TS/WPT, TS, 1); dim3 gridDim((Mend[i] - Mbegin[i] + TS - 1)/TS, (N + TS - 1)/TS , 1); CUDA_CALL( cudaSetDevice(i) ); sgemm<<>>(a_d[i], b_d[i], c_d[i], Mend[i] - Mbegin[i], N, K); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } } void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K) { M = _M, N = _N, K = _K; CUDA_CALL( cudaGetDeviceCount(&num_devices) ); printf("Using %d devices\n", num_devices); for (int i = 0; i < num_devices; i++) { cudaDeviceProp prop; CUDA_CALL( cudaGetDeviceProperties(&prop, i) ); // Try printing more detailed information here printf("[GPU %d] %s\n", i, prop.name); } if (num_devices <= 0) { printf("No CUDA device found. 
Aborting\n"); exit(1); } // Setup problem size for each GPU for (int i = 0; i < num_devices; i++) { Mbegin[i] = (M / num_devices) * i; Mend[i] = (M / num_devices) * (i + 1); } Mend[num_devices - 1] = M; // Allocate device memory for each GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaMalloc(&a_d[i], (Mend[i] - Mbegin[i]) * K * sizeof(float)) ); CUDA_CALL( cudaMalloc(&b_d[i], K * N * sizeof(float)) ); CUDA_CALL( cudaMalloc(&c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float)) ); } // Upload A and B matrix to every GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(a_d[i], A + Mbegin[i] * K, (Mend[i] - Mbegin[i]) * K * sizeof(float), cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(b_d[i], B, K * N * sizeof(float), cudaMemcpyHostToDevice) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } } void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) { // Do any post-matmul cleanup work here. // Download C matrix from GPUs for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(C + Mbegin[i] * N, c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float), cudaMemcpyDeviceToHost) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } }