#include "mat_mul.h" #include #include #define CUDA_CALL(f) \ { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \ err, cudaGetErrorString(err)); \ exit(1); \ } \ } #define MAX_NUM_GPU 4 int num_devices = 0; #define TS 32 #define WPT 8 #define RTS (TS/WPT) __global__ void sgemm(float *A, float *B, float *C, int M, int N, int K) { /*int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= M || j >= N) return; C[i * N + j] = 0; for (int k = 0; k < K; ++k) { C[i * N + j] += A[i * K + k] * B[k * N + j]; }*/ //const int row = get_local_id(0); // Local row ID (max: TS) //const int col = get_local_id(1); // Local col ID (max: TS/WPT == RTS) //const int globalRow = TS*get_group_id(0) + row; // Row ID of C (0..M) //const int globalCol = TS*get_group_id(1) + col; // Col ID of C (0..N) //if(globalCol >= N) return; int row = threadIdx.y; int col = threadIdx.x; int globalRow = (blockDim.y*WPT) * blockIdx.y + threadIdx.y; int globalCol = blockDim.x * blockIdx.x + threadIdx.x; // Local memory to fit a tile of TS*TS elements of A and B __shared__ float Asub[TS][TS]; __shared__ float Bsub[TS][TS]; // Initialise the accumulation registers float acc[WPT]; for (int w=0; w>>(a_d[i], b_d[i], c_d[i], M, N, K); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaDeviceSynchronize() ); } } void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K) { M = _M, N = _N, K = _K; CUDA_CALL( cudaGetDeviceCount(&num_devices) ); if(num_devices > MAX_NUM_GPU) num_devices = MAX_NUM_GPU; printf("Using %d devices\n", num_devices); for (int i = 0; i < num_devices; i++) { cudaDeviceProp prop; CUDA_CALL( cudaGetDeviceProperties(&prop, i) ); // Try printing more detailed information here printf("[GPU %d] %s\n", i, prop.name); } if (num_devices <= 0) { printf("No CUDA device found. Aborting\n"); exit(1); } // Setup problem size for each GPU for (int i = 0; i < num_devices; i++) { Mbegin[i] = (M / num_devices) * i; Mend[i] = (M / num_devices) * (i + 1); } Mend[num_devices - 1] = M; // Allocate device memory for each GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaMalloc(&a_d[i], (Mend[i] - Mbegin[i]) * K * sizeof(float)) ); CUDA_CALL( cudaMalloc(&b_d[i], K * N * sizeof(float)) ); CUDA_CALL( cudaMalloc(&c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float)) ); } // Upload A and B matrix to every GPU for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(a_d[i], A + Mbegin[i] * K, (Mend[i] - Mbegin[i]) * K * sizeof(float), cudaMemcpyHostToDevice) ); CUDA_CALL( cudaMemcpy(b_d[i], B, K * N * sizeof(float), cudaMemcpyHostToDevice) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { //CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize() ); } } void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) { // Do any post-matmul cleanup work here. // Download C matrix from GPUs for (int i = 0; i < num_devices; i++) { CUDA_CALL( cudaMemcpy(C + Mbegin[i] * N, c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float), cudaMemcpyDeviceToHost) ); } // DO NOT REMOVE; NEEDED FOR TIME MEASURE for (int i = 0; i < num_devices; i++) { //CUDA_CALL( cudaSetDevice(i) ); CUDA_CALL( cudaDeviceSynchronize() ); } }