// chundoong-lab-ta/SamsungDS22/submissions/HW6/yw0.kim/mat_mul.cu

#include "mat_mul.h"
#include <cstdio>
#include <cuda_runtime.h>
#define CUDA_CALL(f)                                                       \
  {                                                                        \
    cudaError_t err = (f);                                                 \
    if (err != cudaSuccess)                                                \
    {                                                                      \
      fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \
              err, cudaGetErrorString(err));                               \
      exit(1);                                                             \
    }                                                                      \
  }
#define MAX_NUM_GPU 4
int num_devices = 0;

#define TILE_SIZE 32        // C is computed in TILE_SIZE x TILE_SIZE tiles
#define WORK_PER_THREAD 8   // Output rows computed by each thread
#define REG_TILE_SIZE (TILE_SIZE / WORK_PER_THREAD)  // Row stride between a thread's outputs
#define min(A, B) (((A) > (B)) ? (B) : (A))
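
/*
 * sgemm: tiled matrix multiplication (C = A * B) with register blocking.
 *
 * Each thread block computes one TILE_SIZE x TILE_SIZE tile of C. Blocks
 * are launched with blockDim = (REG_TILE_SIZE, TILE_SIZE), so each thread
 * owns WORK_PER_THREAD output elements of one column, spaced REG_TILE_SIZE
 * rows apart, and accumulates them in the register array temp[]. Tiles of
 * A and B are staged through shared memory with zero padding, so M, N,
 * and K need not be multiples of TILE_SIZE.
 */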
__global__ void sgemm(float *A, float *B, float *C, int M, int N, int K)
{
  int lr = threadIdx.x;                  // local row of this thread's first element
  int lc = threadIdx.y;                  // local column
  int gr = blockIdx.x * TILE_SIZE + lr;  // global row of this thread's first element
  int gc = blockIdx.y * TILE_SIZE + lc;  // global column

  __shared__ float A_mini[TILE_SIZE][TILE_SIZE];
  __shared__ float B_mini[TILE_SIZE][TILE_SIZE];

  // Per-thread accumulators, one per output row owned by this thread
  float temp[WORK_PER_THREAD];
  for (int i = 0; i < WORK_PER_THREAD; i++)
  {
    temp[i] = 0.0f;
  }

  int tiles = (K + TILE_SIZE - 1) / TILE_SIZE;  // number of tiles along K
  int tr = lr;  // B-row cursor for the current tile
  int tc = lc;  // A-column cursor for the current tile

  for (int tile = 0; tile < tiles; tile++)
  {
    // Cooperatively stage one tile of A and one tile of B in shared
    // memory, zero-padding elements that fall outside the matrices
    int tileRow = lr;
    int Arow = gr;
    int Acol = tc;
    int Brow = tr;
    int Bcol = gc;
    for (int work = 0; work < WORK_PER_THREAD; work++)
    {
      A_mini[tileRow][lc] = (Arow < M && Acol < K) ? A[Arow * K + Acol] : 0.0f;
      B_mini[tileRow][lc] = (Brow < K && Bcol < N) ? B[Brow * N + Bcol] : 0.0f;
      tileRow += REG_TILE_SIZE;
      Arow += REG_TILE_SIZE;
      Brow += REG_TILE_SIZE;
    }
    __syncthreads();

    // Multiply the staged tiles: each thread accumulates WORK_PER_THREAD
    // partial dot products, one per output row it owns
    for (int k = 0; k < TILE_SIZE; k++)
    {
      int AtileRow = lr;
      for (int work = 0; work < WORK_PER_THREAD; work++)
      {
        temp[work] += A_mini[AtileRow][k] * B_mini[k][lc];
        AtileRow += REG_TILE_SIZE;
      }
    }
    __syncthreads();

    // Advance to the next tile along K
    tr += TILE_SIZE;
    tc += TILE_SIZE;
  }

  // Write the accumulated results to C, skipping out-of-range rows
  int Crow = gr;
  for (int work = 0; work < WORK_PER_THREAD; work++)
  {
    if (Crow < M && gc < N)
    {
      C[Crow * N + gc] = temp[work];
    }
    Crow += REG_TILE_SIZE;
  }
}
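
// With TILE_SIZE = 32 and WORK_PER_THREAD = 8, each block uses
// 2 * 32 * 32 * sizeof(float) = 8 KiB of shared memory and runs
// REG_TILE_SIZE * TILE_SIZE = 128 threads.
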
// Array of device (GPU) pointers
static float *a_d[MAX_NUM_GPU];
static float *b_d[MAX_NUM_GPU];
static float *c_d[MAX_NUM_GPU];
static int M, N, K;
static int Mbegin[MAX_NUM_GPU], Mend[MAX_NUM_GPU], Mlen[MAX_NUM_GPU];
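
// Work decomposition: the M rows of A and C are split into contiguous
// row blocks [Mbegin[i], Mend[i]) of length Mlen[i], one per device,
// while B is replicated in full on every device.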
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K)
{
  // Launch kernel on every GPU
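  // Grid: one block per TILE_SIZE x TILE_SIZE tile of this GPU's slice
  // of C. Block: REG_TILE_SIZE x TILE_SIZE threads, since each thread
  // produces WORK_PER_THREAD output rows.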
  for (int i = 0; i < num_devices; i++)
  {
    dim3 blockDim(TILE_SIZE / WORK_PER_THREAD, TILE_SIZE);
    dim3 gridDim((Mlen[i] + TILE_SIZE - 1) / TILE_SIZE,
                 (N + TILE_SIZE - 1) / TILE_SIZE);
    CUDA_CALL(cudaSetDevice(i));
    sgemm<<<gridDim, blockDim>>>(a_d[i], b_d[i], c_d[i], Mlen[i], N, K);
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaDeviceSynchronize());
  }
}

void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K)
{
  M = _M, N = _N, K = _K;

  CUDA_CALL(cudaGetDeviceCount(&num_devices));
  if (num_devices <= 0)
  {
    printf("No CUDA device found. Aborting\n");
    exit(1);
  }
  // Clamp to the number of device slots statically allocated above
  num_devices = min(num_devices, MAX_NUM_GPU);
  printf("Using %d devices\n", num_devices);

  for (int i = 0; i < num_devices; i++)
  {
    cudaDeviceProp prop;
    CUDA_CALL(cudaGetDeviceProperties(&prop, i));
    printf("[GPU %d] %s\n", i, prop.name);
  }

  // Split the M rows as evenly as possible across the GPUs; the first
  // M % num_devices devices each take one extra row.
  for (int i = 0; i < num_devices; i++)
  {
    Mbegin[i] = (M / num_devices) * i + min(i, M % num_devices);
    Mend[i] = (M / num_devices) * (i + 1) + min(i + 1, M % num_devices);
    Mlen[i] = Mend[i] - Mbegin[i];
  }
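  // For example, M = 10 with num_devices = 4 gives row blocks
  // [0,3), [3,6), [6,8), [8,10), i.e. Mlen = {3, 3, 2, 2}.
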
  // Allocate device memory on each GPU: one row block of A, all of B,
  // and the matching row block of C
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMalloc(&a_d[i], Mlen[i] * K * sizeof(float)));
    CUDA_CALL(cudaMalloc(&b_d[i], K * N * sizeof(float)));
    CUDA_CALL(cudaMalloc(&c_d[i], Mlen[i] * N * sizeof(float)));
  }

  // Upload each GPU's row block of A and a full copy of B
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMemcpy(a_d[i], A + Mbegin[i] * K,
                         Mlen[i] * K * sizeof(float),
                         cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(b_d[i], B, K * N * sizeof(float),
                         cudaMemcpyHostToDevice));
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaDeviceSynchronize());
  }
}

void mat_mul_final(float *A, float *B, float *C, int M, int N, int K)
{
  // Download each GPU's row block of C back into the host matrix
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMemcpy(C + Mbegin[i] * N, c_d[i],
                         Mlen[i] * N * sizeof(float),
                         cudaMemcpyDeviceToHost));
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaDeviceSynchronize());
  }

  // Post-matmul cleanup: release the per-GPU buffers
  for (int i = 0; i < num_devices; i++)
  {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaFree(a_d[i]));
    CUDA_CALL(cudaFree(b_d[i]));
    CUDA_CALL(cudaFree(c_d[i]));
  }
}
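
/*
 * Usage sketch (hypothetical driver; the real harness that calls these
 * entry points lives in the course skeleton, not in this file):
 *
 *   mat_mul_init(A, B, C, M, N, K);   // query GPUs, partition M, upload A and B
 *   mat_mul(A, B, C, M, N, K);        // run sgemm on every GPU and synchronize
 *   mat_mul_final(A, B, C, M, N, K);  // download C and free device buffers
 */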