chundoong-lab-ta/SamsungDS22/submissions/HW6/youngsik.eom/mat_mul.cu

#include "mat_mul.h"
#include <cstdio>
#include <cuda_runtime.h>
#define CUDA_CALL(f) \
{ \
cudaError_t err = (f); \
if (err != cudaSuccess) { \
fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \
err, cudaGetErrorString(err)); \
exit(1); \
} \
}
#define MAX_NUM_GPU 4
int num_devices = 0;
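
// Register-tiling parameters: each thread block computes a TS x TS tile of C
// using TS x RTS threads, so every thread accumulates WPT = TS / RTS output
// elements (spaced RTS rows apart) in registers.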
#define TS 32
#define WPT 8
#define RTS (TS/WPT)
__global__ void sgemm(float *A, float *B, float *C, int M, int N, int K) {
  // Each block computes a TS x TS tile of C with TS x RTS threads;
  // every thread produces WPT output elements spaced RTS rows apart.
  const int row = threadIdx.y;                                  // local row (0..RTS-1)
  const int col = threadIdx.x;                                  // local col (0..TS-1)
  const int globalRow = (blockDim.y * WPT) * blockIdx.y + row;  // row of C (0..M-1)
  const int globalCol = blockDim.x * blockIdx.x + col;          // col of C (0..N-1)
  // Shared-memory tiles of TS x TS elements of A and B.
  __shared__ float Asub[TS][TS];
  __shared__ float Bsub[TS][TS];

  // Initialise the per-thread accumulation registers.
  float acc[WPT];
  for (int w = 0; w < WPT; w++) {
    acc[w] = 0.0f;
  }

  // Loop over all tiles along the K dimension.
  const int numTiles = (K + TS - 1) / TS;
  for (int t = 0; t < numTiles; t++) {
    // Load one tile of A and B into shared memory, zero-padding elements
    // that fall outside the matrix bounds.
    for (int w = 0; w < WPT; w++) {
      const int tiledRow = TS * t + row;
      const int tiledCol = TS * t + col;
      if ((globalRow + w * RTS) < M && tiledCol < K)
        Asub[row + w * RTS][col] = A[(globalRow + w * RTS) * K + tiledCol];
      else
        Asub[row + w * RTS][col] = 0.0f;
      if ((tiledRow + w * RTS) < K && globalCol < N)
        Bsub[row + w * RTS][col] = B[(tiledRow + w * RTS) * N + globalCol];
      else
        Bsub[row + w * RTS][col] = 0.0f;
    }
    __syncthreads();
    // Accumulate the partial products for this tile. Because the tiles are
    // zero-padded above, the inner loop can always run the full TS iterations.
    for (int k = 0; k < TS; k++) {
      for (int w = 0; w < WPT; w++) {
        acc[w] += Asub[row + w * RTS][k] * Bsub[k][col];
      }
    }
    __syncthreads();
  }
  // Store the final results in C.
  for (int w = 0; w < WPT; w++) {
    if ((globalRow + w * RTS) < M && globalCol < N)
      C[(globalRow + w * RTS) * N + globalCol] = acc[w];
  }
}
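
// Expected launch configuration for sgemm (matching mat_mul below):
//   blockDim = (TS, TS / WPT, 1)                      -> TS x RTS threads per block
//   gridDim  = ((N + TS - 1) / TS, (M + TS - 1) / TS) -> one block per TS x TS tile of C
// where M here is the number of rows handled by the launching GPU.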
// Array of device (GPU) pointers
static float *a_d[MAX_NUM_GPU];
static float *b_d[MAX_NUM_GPU];
static float *c_d[MAX_NUM_GPU];
static int M, N, K;
static int Mbegin[MAX_NUM_GPU], Mend[MAX_NUM_GPU];
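
// Multi-GPU decomposition: device i owns rows [Mbegin[i], Mend[i]) of A and C,
// while the full K x N matrix B is replicated on every device, so each GPU can
// compute its slice of C independently.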
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K) {
  // Launch the kernel on every GPU: one block per TS x TS tile of that GPU's
  // slice of C, with TS x (TS / WPT) threads per block.
  for (int i = 0; i < num_devices; i++) {
    dim3 blockDim(TS, TS / WPT, 1);
    dim3 gridDim((N + TS - 1) / TS, (Mend[i] - Mbegin[i] + TS - 1) / TS, 1);

    CUDA_CALL( cudaSetDevice(i) );
    // Pass the per-device row count (Mend[i] - Mbegin[i]) rather than the full M,
    // since a_d[i] and c_d[i] only hold this device's slice of rows.
    sgemm<<<gridDim, blockDim>>>(a_d[i], b_d[i], c_d[i], Mend[i] - Mbegin[i], N, K);
  }
  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K) {
  M = _M, N = _N, K = _K;

  CUDA_CALL( cudaGetDeviceCount(&num_devices) );
  if (num_devices > MAX_NUM_GPU)
    num_devices = MAX_NUM_GPU;
  printf("Using %d devices\n", num_devices);

  for (int i = 0; i < num_devices; i++) {
    cudaDeviceProp prop;
    CUDA_CALL( cudaGetDeviceProperties(&prop, i) );
    // Try printing more detailed information here
    printf("[GPU %d] %s\n", i, prop.name);
  }

  if (num_devices <= 0) {
    printf("No CUDA device found. Aborting\n");
    exit(1);
  }
  // Split the rows of A and C evenly across the GPUs; the last GPU also takes
  // any remainder rows.
  for (int i = 0; i < num_devices; i++) {
    Mbegin[i] = (M / num_devices) * i;
    Mend[i] = (M / num_devices) * (i + 1);
  }
  Mend[num_devices - 1] = M;
  // Allocate device memory on each GPU.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMalloc(&a_d[i], (Mend[i] - Mbegin[i]) * K * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&b_d[i], K * N * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float)) );
  }
  // Upload each device's slice of A, and the full B matrix, to every GPU.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaMemcpy(a_d[i], A + Mbegin[i] * K,
                          (Mend[i] - Mbegin[i]) * K * sizeof(float),
                          cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMemcpy(b_d[i], B, K * N * sizeof(float), cudaMemcpyHostToDevice) );
  }
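
  // Note: the synchronous cudaMemcpy calls above block the host until each copy
  // completes, so A and B are fully resident on the GPUs before mat_mul runs.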
  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) {
  // Do any post-matmul cleanup work here.

  // Download each GPU's slice of the C matrix back to the host.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaMemcpy(C + Mbegin[i] * N, c_d[i],
                          (Mend[i] - Mbegin[i]) * N * sizeof(float),
                          cudaMemcpyDeviceToHost) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
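
// Hedged usage sketch (not part of this file): the harness declared in mat_mul.h
// is assumed to drive these entry points roughly as follows, with A, B, C being
// host buffers of M*K, K*N, and M*N floats respectively.
//
//   mat_mul_init(A, B, C, M, N, K);   // detect GPUs, allocate buffers, upload A and B
//   mat_mul(A, B, C, M, N, K);        // timed region: launch sgemm on every GPU
//   mat_mul_final(A, B, C, M, N, K);  // download the per-GPU slices of C to the host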