#include "mat_mul.h" #include "util.h" #include #include #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } #define MAX_NUM_GPU 4 int num_devices = 0; int slice; #define BLOCK_SIZE (16) #define VECT_SIZE (4) __global__ void sgemm(float4 *A, float4 *B, float4 *C, int M, int N, int K) { int locRow = threadIdx.x; int locCol = threadIdx.y; int gloRow = blockDim.x * blockIdx.x + threadIdx.x; int gloCol = blockDim.y * blockIdx.y + threadIdx.y; float4 value = {0.0f,0.0f,0.0f,0.0f}; __shared__ float4 locA[BLOCK_SIZE][BLOCK_SIZE/VECT_SIZE]; __shared__ float4 locB[BLOCK_SIZE][BLOCK_SIZE/VECT_SIZE]; int nTiles = (K + BLOCK_SIZE - 1) / BLOCK_SIZE; for (int kk = 0; kk < nTiles; kk++) { int kRow = BLOCK_SIZE * kk + locRow; int kCol = (BLOCK_SIZE/VECT_SIZE) * kk + locCol; if (gloRow < M && kCol < K/VECT_SIZE) // boundary check locA[locRow][locCol] = A[gloRow * (K/VECT_SIZE) + kCol]; else locA[locRow][locCol] = {0.0f,0.0f,0.0f,0.0f}; if (kRow < K && gloCol < N/VECT_SIZE) // boundary check locB[locRow][locCol] = B[kRow * (N/VECT_SIZE) + gloCol]; else locB[locRow][locCol] = {0.0f,0.0f,0.0f,0.0f}; __syncthreads(); float4 vecA, vecB; float valA; for (int k = 0; k < BLOCK_SIZE/VECT_SIZE; k++) { vecA = locA[locRow][k]; for (int m = 0; m < VECT_SIZE; m++) { vecB = locB[VECT_SIZE*k+m][locCol]; switch(m) { case 0: valA = vecA.x; break; case 1: valA = vecA.y; break; case 2: valA = vecA.z; break; case 3: valA = vecA.w; break; } value.x += vecB.x * valA; value.y += vecB.y * valA; value.z += vecB.z * valA; value.w += vecB.w * valA; } } __syncthreads(); } if (gloRow >= M || gloCol >= N/VECT_SIZE) return; // boundary check C[gloRow*(N/VECT_SIZE)+gloCol]=value; } __global__ void addPadding(int P, int Q, int nP, int nQ, float* input, float* output) { // Thread identifiers int gloRow = blockDim.x * blockIdx.x + threadIdx.x; int gloCol = blockDim.y * blockIdx.y + threadIdx.y; float value=0.0f; if (gloRow < nP && gloCol < nQ) { if (gloRow < P && gloCol < Q) value = input[gloRow*Q+gloCol]; output[gloRow*nQ+gloCol] = value; } } __global__ void delPadding(int P, int Q, int cP, int cQ, float* input, float* output) { // Thread identifiers int gloRow = blockDim.x * blockIdx.x + threadIdx.x; int gloCol = blockDim.y * blockIdx.y + threadIdx.y; if (gloRow < cP && gloCol < cQ) { output[gloRow*cQ+gloCol] = input[gloRow*Q+gloCol]; } } // Array of device (GPU) pointers static float *a_d[MAX_NUM_GPU]; static float *b_d[MAX_NUM_GPU]; static float *c_d[MAX_NUM_GPU]; static float *a_v[MAX_NUM_GPU]; static float *b_v[MAX_NUM_GPU]; static float *c_v[MAX_NUM_GPU]; static int M, N, K; static int Mbegin[MAX_NUM_GPU], Mend[MAX_NUM_GPU], Msize[MAX_NUM_GPU]; static int vM, vN, vK; void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K) { // A padding for (int i = 0; i < num_devices; i++) { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 gridDim((Msize[i] + BLOCK_SIZE - 1) / BLOCK_SIZE, (vK + BLOCK_SIZE -1 ) / BLOCK_SIZE, 1); CUDA_CALL( cudaSetDevice(i) ); addPadding<<>>(Msize[i], K, slice, vK, a_d[i], a_v[i]); } // B padding for (int i = 0; i < num_devices; i++) { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 gridDim((vK + BLOCK_SIZE - 1) / BLOCK_SIZE, (vN + BLOCK_SIZE -1 ) / BLOCK_SIZE, 1); CUDA_CALL( cudaSetDevice(i) ); addPadding<<>>(K, N, vK, vN, b_d[i], b_v[i]); } // Launch kernel on every GPU for (int i = 0; i < num_devices; i++) { dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE/VECT_SIZE, 1); 
    dim3 gridDim((Msize[i] + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (vN + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    CUDA_CALL( cudaSetDevice(i) );
    sgemm<<<gridDim, blockDim>>>((float4 *)a_v[i], (float4 *)b_v[i],
                                 (float4 *)c_v[i], Msize[i], vN, vK);
  }

  // Remove C padding
  for (int i = 0; i < num_devices; i++) {
    dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 gridDim((Msize[i] + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (vN + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    CUDA_CALL( cudaSetDevice(i) );
    delPadding<<<gridDim, blockDim>>>(Msize[i], vN, Msize[i], N,
                                      c_v[i], c_d[i]);
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}

void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K) {
  M = _M, N = _N, K = _K;

  CUDA_CALL( cudaGetDeviceCount(&num_devices) );
  // Check the device count before it is used as a divisor below.
  if (num_devices <= 0) {
    printf("No CUDA device found. Aborting\n");
    exit(1);
  }
  // The static pointer arrays hold at most MAX_NUM_GPU entries.
  if (num_devices > MAX_NUM_GPU) num_devices = MAX_NUM_GPU;

  vM = (M + num_devices - 1) / num_devices * num_devices; // multiple of num_devices
  vN = (N + VECT_SIZE - 1) / VECT_SIZE * VECT_SIZE;       // multiple of VECT_SIZE
  vK = (K + VECT_SIZE - 1) / VECT_SIZE * VECT_SIZE;       // multiple of VECT_SIZE
  slice = vM / num_devices; // rows of A/C assigned to each GPU

  printf("Using %d devices\n", num_devices);
  for (int i = 0; i < num_devices; i++) {
    cudaDeviceProp prop;
    CUDA_CALL( cudaGetDeviceProperties(&prop, i) );
    // Try printing more detailed information here
    printf("[GPU %d] %s\n", i, prop.name);
  }

  // Setup problem size for each GPU
  for (int i = 0; i < num_devices; i++) {
    Mbegin[i] = slice * i;
    if (Mbegin[i] + slice < M)
      Mend[i] = Mbegin[i] + slice;
    else if (Mbegin[i] < M)
      Mend[i] = M; // last GPU with valid rows takes the remainder
    else
      Mbegin[i] = 0, Mend[i] = 0; // no rows left for this GPU
    Msize[i] = Mend[i] - Mbegin[i];
  }

  // Allocate device memory for each GPU
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMalloc(&a_d[i], slice * K * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&b_d[i], K * N * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&c_d[i], slice * N * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&a_v[i], slice * vK * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&b_v[i], vK * vN * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&c_v[i], slice * vN * sizeof(float)) );
  }

  // Upload A and B matrix to every GPU. Only the Msize[i] rows this GPU
  // owns are copied; copying a full slice would read past the end of A on
  // the last GPU when M is not a multiple of num_devices.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(a_d[i], A + Mbegin[i] * K,
                          Msize[i] * K * sizeof(float),
                          cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMemcpy(b_d[i], B, K * N * sizeof(float),
                          cudaMemcpyHostToDevice) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}

void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) {
  // Do any post-matmul cleanup work here.

  // Download C matrix from GPUs
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(C + Mbegin[i] * N, c_d[i],
                          Msize[i] * N * sizeof(float),
                          cudaMemcpyDeviceToHost) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
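
// ---------------------------------------------------------------------------
// Usage sketch. The three entry points above are meant to be called in the
// order init -> mat_mul -> final by an external driver (presumably the timing
// harness behind util.h). The minimal standalone driver below illustrates
// that sequence; the MAT_MUL_EXAMPLE_MAIN guard is hypothetical and keeps it
// out of the normal build, which supplies its own main().
#ifdef MAT_MUL_EXAMPLE_MAIN
int main() {
  // Sizes chosen so the padding paths are exercised: none is a multiple of
  // BLOCK_SIZE, and M need not be divisible by the number of GPUs.
  const int M = 300, N = 500, K = 700;

  float *A = (float *)malloc(sizeof(float) * M * K);
  float *B = (float *)malloc(sizeof(float) * K * N);
  float *C = (float *)malloc(sizeof(float) * M * N);
  for (int i = 0; i < M * K; i++) A[i] = 1.0f;
  for (int i = 0; i < K * N; i++) B[i] = 1.0f;

  mat_mul_init(A, B, C, M, N, K);  // split rows across GPUs, upload A and B
  mat_mul(A, B, C, M, N, K);       // pad, run sgemm, strip the padding
  mat_mul_final(A, B, C, M, N, K); // download the result into C

  // With all-ones inputs every element of C equals K.
  printf("C[0] = %f (expected %d)\n", C[0], K);

  free(A);
  free(B);
  free(C);
  return 0;
}
#endif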