// chundoong-lab-ta/SamsungDS22/submissions/HW6/ty.jeon/mat_mul.cu

#include "mat_mul.h"
#include <cstdio>
#include <cuda_runtime.h>
#define CUDA_CALL(f)                                                       \
  {                                                                        \
    cudaError_t err = (f);                                                 \
    if (err != cudaSuccess) {                                              \
      fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \
              err, cudaGetErrorString(err));                               \
      exit(1);                                                             \
    }                                                                      \
  }
#define MAX_NUM_GPU 4
int num_devices = 0;
// Tile size. The figures below are measured results for each candidate
// value (higher is better); TS = 20 performed best, so it is enabled.
//   TS = 16 -> 4800
//   TS = 18 -> 6900
//   TS = 20 -> 7400
//   TS = 25 -> 6400
#define TS 20
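
// Tiled SGEMM without work-per-thread (WPT): each TS x TS thread block
// stages one tile of A and one tile of B in shared memory per iteration of
// the outer loop, and each thread accumulates a single element of C. Only
// the last K-tile needs bounds checks on K; row/column bounds are checked
// throughout.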
__global__ void sgemm_no_wpt(float *A_d, float *B_d, float *C_d, int M_d,
                             int N_d, int K_d) {
  int global_row = blockDim.x * blockIdx.x + threadIdx.x;
  int global_col = blockDim.y * blockIdx.y + threadIdx.y;
  int global_row_K_d = global_row * K_d;
  int numTiles = (K_d + TS - 1) / TS;

  __shared__ float Asub[TS][TS];
  __shared__ float Bsub[TS][TS];

  float acc = 0.0f;

  // All tiles except the last lie fully inside A's columns and B's rows
  // (t_col < K_d and t_row < K_d are guaranteed), but the grid is padded up
  // to a multiple of TS, so rows past M_d and columns past N_d must still be
  // guarded to avoid out-of-bounds reads.
  for (int t = 0; t < TS * (numTiles - 1); t += TS) {
    int t_row = t + threadIdx.x;
    int t_col = t + threadIdx.y;
    Asub[threadIdx.x][threadIdx.y] =
        (global_row < M_d) ? A_d[global_row_K_d + t_col] : 0.0f;
    Bsub[threadIdx.x][threadIdx.y] =
        (global_col < N_d) ? B_d[t_row * N_d + global_col] : 0.0f;
    __syncthreads();
    for (int k = 0; k < TS; k++) {
      acc += Asub[threadIdx.x][k] * Bsub[k][threadIdx.y];
    }
    __syncthreads();
  }
  // Last (possibly partial) tile of K: additionally guard against reading
  // past K_d, zero-filling the out-of-range entries of the shared tiles.
  int t_row = TS * (numTiles - 1) + threadIdx.x;
  int t_col = TS * (numTiles - 1) + threadIdx.y;
  Asub[threadIdx.x][threadIdx.y] =
      (global_row < M_d && t_col < K_d) ? A_d[global_row * K_d + t_col] : 0.0f;
  Bsub[threadIdx.x][threadIdx.y] =
      (t_row < K_d && global_col < N_d) ? B_d[t_row * N_d + global_col] : 0.0f;
  __syncthreads();
  for (int k = 0; k < TS; k++) {
    acc += Asub[threadIdx.x][k] * Bsub[k][threadIdx.y];
  }
  __syncthreads();

  // Threads padded past the edge of C write nothing.
  if (global_row >= M_d || global_col >= N_d) return;
  C_d[global_row * N_d + global_col] = acc;
}
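
// The kernel above assumes blockDim == (TS, TS, 1) and a grid of
// ceil(rows / TS) x ceil(N / TS) blocks, which is how mat_mul() launches it.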
// Array of device (GPU) pointers
static float * a_d[MAX_NUM_GPU];
static float * b_d[MAX_NUM_GPU];
static float * c_d[MAX_NUM_GPU];
static int M, N, K;
static int Mbegin[MAX_NUM_GPU], Mend[MAX_NUM_GPU]; // GPU i owns rows [Mbegin[i], Mend[i])
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K) {
  // Launch the kernel on every GPU; each one computes its own row slice of C
  dim3 blockDim(TS, TS, 1);
  for (int i = 0; i < num_devices; i++) {
    dim3 gridDim((Mend[i] - Mbegin[i] + TS - 1) / TS, (N + TS - 1) / TS, 1);
    CUDA_CALL( cudaSetDevice(i) );
    sgemm_no_wpt<<<gridDim, blockDim>>>(a_d[i], b_d[i], c_d[i],
                                        Mend[i] - Mbegin[i], N, K);
  }
  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    // cudaDeviceSynchronize() waits only on the current device, so select
    // each device before synchronizing it
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
void mat_mul_init(float *A, float *B, float *C, int _M, int _N, int _K) {
  M = _M, N = _N, K = _K;

  CUDA_CALL( cudaGetDeviceCount(&num_devices) );
  printf("Using %d devices\n", num_devices);

  for (int i = 0; i < num_devices; i++) {
    cudaDeviceProp prop;
    CUDA_CALL( cudaGetDeviceProperties(&prop, i) );
    // Try printing more detailed information here
    printf("[GPU %d] %s\n", i, prop.name);
  }

  if (num_devices <= 0) {
    printf("No CUDA device found. Aborting\n");
    exit(1);
  }
  // The static per-device arrays above hold at most MAX_NUM_GPU entries
  if (num_devices > MAX_NUM_GPU) num_devices = MAX_NUM_GPU;
  // Partition the rows of A (and C) evenly across the GPUs; the last GPU
  // also takes the remainder when M is not divisible by num_devices
  for (int i = 0; i < num_devices; i++) {
    Mbegin[i] = (M / num_devices) * i;
    Mend[i] = (M / num_devices) * (i + 1);
  }
  Mend[num_devices - 1] = M;
  // Allocate device memory on each GPU
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMalloc(&a_d[i], (Mend[i] - Mbegin[i]) * K * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&b_d[i], K * N * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&c_d[i], (Mend[i] - Mbegin[i]) * N * sizeof(float)) );
  }
  // Upload each GPU's slice of A, and all of B, to the corresponding device
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(a_d[i], A + Mbegin[i] * K,
                          (Mend[i] - Mbegin[i]) * K * sizeof(float),
                          cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMemcpy(b_d[i], B, K * N * sizeof(float),
                          cudaMemcpyHostToDevice) );
  }
  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) {
  // Do any post-matmul cleanup work here.
  // Download each GPU's slice of C back into the host matrix
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(C + Mbegin[i] * N, c_d[i],
                          (Mend[i] - Mbegin[i]) * N * sizeof(float),
                          cudaMemcpyDeviceToHost) );
  }
  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
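
  // A minimal cleanup sketch: free the per-GPU buffers allocated in
  // mat_mul_init now that C has been downloaded
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaFree(a_d[i]) );
    CUDA_CALL( cudaFree(b_d[i]) );
    CUDA_CALL( cudaFree(c_d[i]) );
  }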
}
// Experimental work-per-thread (WPT) variant, kept commented out for
// reference: each thread accumulates WPT elements of C in the same column,
// with its rows strided by RTS. With WPT = 1 it matches sgemm_no_wpt above.
//#define WPT 1
//#define RTS (TS/WPT)
//__global__ void sgemm(float *A_d, float *B_d, float *C_d, int M_d, int N_d, int K_d) {
//
// int global_row = blockDim.x * blockIdx.x + threadIdx.x;
// int global_col = blockDim.y * blockIdx.y + threadIdx.y;
//
// int numTiles = (K_d+TS-1)/TS;
//
// __shared__ float Asub[TS][TS];
// __shared__ float Bsub[TS][TS];
//
// int t;
// int w;
// int k;
//
// float acc[WPT];
// for(w = 0; w < WPT; w++){
// acc[w] = 0;
// }
//
// for(t = 0; t < numTiles; t++){
// int t_row = TS * t + threadIdx.x;
// int t_col = TS * t + threadIdx.y;
// for(w = 0; w < WPT; w++){
// if(((global_row + w*RTS) >= M_d) || (t_col >= K_d)){
// Asub[threadIdx.x + w*RTS][threadIdx.y] = 0;
// }
// else{
// Asub[threadIdx.x + w*RTS][threadIdx.y] = A_d[(global_row + w*RTS)*K_d + t_col];
// }
//
// if(((t_row + w*RTS) >= K_d) || (global_col >= N_d)){
// Bsub[threadIdx.x + w*RTS][threadIdx.y] = 0;
// }
// else{
// Bsub[threadIdx.x + w*RTS][threadIdx.y] = B_d[(t_row + w*RTS)*N_d + global_col];
// }
// }
// __syncthreads();
// for(k = 0; k < TS; k++){
// for(w = 0; w < WPT; w++){
// acc[w] += Asub[threadIdx.x + w*RTS][k] * Bsub[k][threadIdx.y];
// }
// }
// __syncthreads();
// }
//
// for(w = 0; w < WPT; w++){
// if(((global_row + w*RTS) >= M_d) || (global_col >= N_d)) break;
// C_d[(global_row + w*RTS)*N_d + global_col] = acc[w];
// }
//}