// chundoong-lab-ta/SamsungDS22/submissions/HW6/jb114.seo/mat_mul.cu


#include "mat_mul.h"
#include <cstdio>
#include <cuda_runtime.h>
#define CUDA_CALL(d, f)                                                   \
  {                                                                       \
    cudaError_t err = (f);                                                \
    if (err != cudaSuccess) {                                             \
      fprintf(stderr, "[GPU %d] CUDA error at [%s:%d] %d %s\n",           \
              (d), __FILE__, __LINE__, err, cudaGetErrorString(err));     \
      exit(1);                                                            \
    }                                                                     \
  }
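// Usage sketch: CUDA_CALL(0, cudaSetDevice(0)); any failing CUDA call aborts
// with the device index, file, line, and error string.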
#define MAX_NUM_GPU 4
int num_devices = 0;
// A: M rows, K columns
// B: K rows, N columns
// C: M rows, N columns
//
//                   N
//                o-----o
//                |     |
//              K | [B] |
//                |     |
//                o-----o
//       K           N
//   o-------o    o-----o
// M |  [A]  |  M | [C] |
//   |       |    |     |
//   o-------o    o-----o
//
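// All three matrices are stored row-major: A[r * K + c], B[r * N + c],
// C[r * N + c].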
#define TS_M 64 // The tile-size in dimension M
#define TS_N 64 // The tile-size in dimension N
#define TS_K 64 // The tile-size in dimension K
#define WPT_M 16 // The amount of work-per-thread in dimension M
#define WPT_N 8 // The amount of work-per-thread in dimension N
#define CEIL_DIV(x,y) ( ((x) + (y) - 1) / (y) )
#define CEIL(x,y) ( CEIL_DIV((x),(y)) * (y) )
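// With these values, each thread block runs (TS_M/WPT_M) x (TS_N/WPT_N)
// = 4 x 8 = 32 threads, and each thread computes a WPT_M x WPT_N = 16 x 8
// sub-block of one 64 x 64 tile of C (see blockDim in mat_mul below).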
__global__ void sgemm(float *A, float *B, float *C, int M, int N, int K)
{
  // Thread identifiers
  const int row = threadIdx.x; // Local row ID (max: TS_M/WPT_M)
  const int col = threadIdx.y; // Local col ID (max: TS_N/WPT_N)
  const int globalRow = TS_M * blockIdx.x + row * WPT_M; // Row ID of C (0..M)
  const int globalCol = TS_N * blockIdx.y + col * WPT_N; // Col ID of C (0..N)
  // printf("[R%03d, C%03d] GR=%04d GC=%04d\n", row, col, globalRow, globalCol);

  // Shared memory holding one TS_M x TS_K tile of A and one TS_K x TS_N tile of B
  __shared__ float Asub[TS_M][TS_K];
  __shared__ float Bsub[TS_K][TS_N];
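  // The two tiles occupy (TS_M*TS_K + TS_K*TS_N) * sizeof(float)
  // = (64*64 + 64*64) * 4 B = 32 KiB of shared memory per block, which fits
  // the 48 KiB static shared-memory limit common to current NVIDIA GPUs.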
  // Initialize the accumulation registers
  float acc[WPT_M][WPT_N];
  for (int wm = 0; wm < WPT_M; wm++)
  {
    for (int wn = 0; wn < WPT_N; wn++)
    {
      acc[wm][wn] = 0.0f;
    }
  }
  // Loop over all tiles along the K dimension
  const int numTiles = CEIL_DIV(K, TS_K);
  for (int t = 0; t < numTiles; t++)
  {
    // Base offsets of the current tile along K. The load pattern below
    // assumes TS_M == TS_N == TS_K, so each thread's WPT_M x WPT_N loads
    // exactly cover both shared-memory tiles.
    const int rowInB = TS_K * t + row * WPT_M;
    const int colInA = TS_K * t + col * WPT_N;

    // Load one tile of A and B into shared memory, zero-padding elements
    // that fall outside the matrices so edge tiles are handled correctly
#pragma unroll
    for (int wm = 0; wm < WPT_M; wm++)
    {
#pragma unroll
      for (int wn = 0; wn < WPT_N; wn++)
      {
        int r, c;
        r = globalRow + wm;
        c = colInA + wn;
        Asub[row * WPT_M + wm][col * WPT_N + wn] = (r >= M || c >= K) ? 0.0f : A[r * K + c];
        r = rowInB + wm;
        c = globalCol + wn;
        Bsub[row * WPT_M + wm][col * WPT_N + wn] = (r >= K || c >= N) ? 0.0f : B[r * N + c];
      }
    }
    // Synchronize to make sure the tile is loaded
    __syncthreads();

    // Loop over the values of a single tile
    for (int k = 0; k < TS_K; k++)
    {
      // Cache the values of Bsub in registers
      float bs[WPT_N];
#pragma unroll
      for (int wn = 0; wn < WPT_N; wn++) {
        bs[wn] = Bsub[k][col * WPT_N + wn];
      }

      // Perform the computation
#pragma unroll
      for (int wm = 0; wm < WPT_M; wm++)
      {
        float a = Asub[row * WPT_M + wm][k];
#pragma unroll
        for (int wn = 0; wn < WPT_N; wn++)
        {
          acc[wm][wn] += a * bs[wn];
        }
      }
    }

    // Synchronize before loading the next tile
    __syncthreads();
  }
  // Store the final results in C
#pragma unroll
  for (int wm = 0; wm < WPT_M; wm++)
  {
#pragma unroll
    for (int wn = 0; wn < WPT_N; wn++)
    {
      int r = globalRow + wm;
      int c = globalCol + wn;
      if (r < M && c < N)
      {
        C[r * N + c] = acc[wm][wn];
      }
    }
  }
}
// Array of device (GPU) pointers
static float *a_d[MAX_NUM_GPU];
static float *b_d[MAX_NUM_GPU];
static float *c_d[MAX_NUM_GPU];
static int M, N, K;
static int Mbegin[MAX_NUM_GPU], Mend[MAX_NUM_GPU];
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K) {
  // Launch the kernel on every GPU; each GPU computes its own slice of C's rows
  for (int i = 0; i < num_devices; i++) {
    // dim3 blockDim(1, 1, 1);
    // dim3 gridDim(Mend[i] - Mbegin[i], N, 1);
    dim3 blockDim(CEIL_DIV(TS_M, WPT_M), CEIL_DIV(TS_N, WPT_N), 1);
    dim3 gridDim(CEIL_DIV(Mend[i] - Mbegin[i], TS_M), CEIL_DIV(N, TS_N), 1);
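    // Worked example (hypothetical sizes): with M = N = K = 4096 on 4 GPUs,
    // each device owns 1024 rows, so blockDim = (4, 8) and
    // gridDim = (CEIL_DIV(1024, 64), CEIL_DIV(4096, 64)) = (16, 64).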
    CUDA_CALL( i, cudaSetDevice(i) );
    sgemm<<<gridDim, blockDim>>>(a_d[i], b_d[i], c_d[i], Mend[i] - Mbegin[i], N, K);
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaDeviceSynchronize() );
  }
}
void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K) {
  M = _M, N = _N, K = _K;

  CUDA_CALL( 0, cudaGetDeviceCount(&num_devices) );
  if (num_devices <= 0) {
    printf("No CUDA device found. Aborting\n");
    exit(1);
  }
  if (num_devices > MAX_NUM_GPU) {
    num_devices = MAX_NUM_GPU;
  }

  printf("Using %d devices\n", num_devices);
  for (int i = 0; i < num_devices; i++) {
    cudaDeviceProp prop;
    CUDA_CALL( i, cudaGetDeviceProperties(&prop, i) );
    // Try printing more detailed information here
    printf("[GPU %d] %s\n", i, prop.name);
  }
  // Set up the problem size for each GPU: split the M rows evenly across
  // devices, with the last device taking any remainder rows
  for (int i = 0; i < num_devices; i++) {
    Mbegin[i] = (M / num_devices) * i;
    Mend[i] = (M / num_devices) * (i + 1);
  }
  Mend[num_devices - 1] = M;
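  // Worked example (hypothetical sizes): M = 1001 on 4 devices gives
  // Mbegin = {0, 250, 500, 750} and Mend = {250, 500, 750, 1001},
  // so the last device handles the 251-row remainder slice.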
  // Allocate device memory on each GPU: only this device's slice of A and C,
  // but all of B
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaMalloc(&a_d[i], (Mend[i] - Mbegin[i]) * K * sizeof(float)) );
    CUDA_CALL( i, cudaMalloc(&b_d[i], K * N * sizeof(float)) );
    CUDA_CALL( i, cudaMalloc(&c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float)) );
  }

  // Upload each GPU's slice of A, and the full B matrix, to every GPU
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaMemcpy(a_d[i], A + Mbegin[i] * K,
                             (Mend[i] - Mbegin[i]) * K * sizeof(float),
                             cudaMemcpyHostToDevice) );
    CUDA_CALL( i, cudaMemcpy(b_d[i], B, K * N * sizeof(float), cudaMemcpyHostToDevice) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaDeviceSynchronize() );
  }
}
void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) {
  // Do any post-matmul cleanup work here.
  // Download each GPU's slice of the C matrix into the host buffer
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaMemcpy(C + Mbegin[i] * N, c_d[i],
                             (Mend[i] - Mbegin[i]) * N * sizeof(float),
                             cudaMemcpyDeviceToHost) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaDeviceSynchronize() );
  }
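  // A minimal cleanup sketch (an assumed addition, not in the original):
  // release the per-device buffers allocated in mat_mul_init. Safe at this
  // point since every device has synchronized above.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( i, cudaSetDevice(i) );
    CUDA_CALL( i, cudaFree(a_d[i]) );
    CUDA_CALL( i, cudaFree(b_d[i]) );
    CUDA_CALL( i, cudaFree(c_d[i]) );
  }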
}