chundoong-lab-ta/SamsungDS22/submissions/HW4/mjstyle.kim/mat_mul.cpp

#include "mat_mul.h"
#include "util.h"
#include <omp.h>
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
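// HW4 submission: distributed tiled SGEMM. Rows of A and C are split across MPI ranks;
// each rank's row block is further split across OpenMP threads and computed with loop
// tiling. The kernels accumulate into C with +=, so C is expected to start at zero on
// every rank.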
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
static int min(int x, int y) {
  return x < y ? x : y;
}
#define ITILESIZE (32)
#define JTILESIZE (1024)
#define KTILESIZE (1024)
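// Tile sizes for the blocked loops below: ITILESIZE rows of A/C, JTILESIZE columns of
// B/C, and KTILESIZE of the reduction dimension are processed per tile so the working
// set stays cache-resident.
//
// Each mat_mul_thread* worker derives its own half-open row range [is, ie) from the
// rank's row block of M_sub rows: the block is divided as evenly as possible across
// num_threads, with the first (M_sub % num_threads) threads taking one extra row.
// Example: M_sub = 10, num_threads = 4 yields blocks of 3, 3, 2, 2 rows.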
static void mat_mul_thread(int i_start, int i_end, int M_sub, int idx) {
  int is, ie;
  is = i_start + M_sub / num_threads * idx + min(idx, M_sub % num_threads);
  ie = i_start + M_sub / num_threads * (idx + 1) + min(idx + 1, M_sub % num_threads);
  for (int kk = 0; kk < K; kk += KTILESIZE) {
    for (int ii = is; ii < ie; ii += ITILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        int k_cond = min(kk + KTILESIZE, K);
        for (int k = kk; k < k_cond; k++) {
          int i_cond = min(ii + ITILESIZE, ie);
          for (int i = ii; i < i_cond; i++) {
            float ar = A[i * K + k];
            int j_cond = min(jj + JTILESIZE, N);
            for (int j = jj; j < j_cond; j++) {
              C[i * N + j] += ar * B[k * N + j];
            }
          }
        }
      }
    }
  }
}
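// Variant of mat_mul_thread with the innermost j loop unrolled by 4. Selected by
// mat_mul_omp when N is a multiple of 4 (but not of 16); since JTILESIZE (1024) is also
// a multiple of 4, j_cond - jj is always a multiple of 4 and the unrolled body never
// steps past the tile boundary.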
static void mat_mul_thread4(int i_start, int i_end, int M_sub, int idx) {
  int is, ie;
  is = i_start + M_sub / num_threads * idx + min(idx, M_sub % num_threads);
  ie = i_start + M_sub / num_threads * (idx + 1) + min(idx + 1, M_sub % num_threads);
  for (int kk = 0; kk < K; kk += KTILESIZE) {
    for (int ii = is; ii < ie; ii += ITILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        int k_cond = min(kk + KTILESIZE, K);
        for (int k = kk; k < k_cond; k++) {
          int i_cond = min(ii + ITILESIZE, ie);
          for (int i = ii; i < i_cond; i++) {
            float ar = A[i * K + k];
            int j_cond = min(jj + JTILESIZE, N);
            for (int j = jj; j < j_cond; j += 4) {
              C[i * N + j] += ar * B[k * N + j];
              C[i * N + j + 1] += ar * B[k * N + j + 1];
              C[i * N + j + 2] += ar * B[k * N + j + 2];
              C[i * N + j + 3] += ar * B[k * N + j + 3];
            }
          }
        }
      }
    }
  }
}
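// Variant with the innermost j loop unrolled by 16, selected when N is a multiple of
// 16. The same argument as above guarantees the unrolled body stays inside [jj, j_cond).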
static void mat_mul_thread16(int i_start, int i_end, int M_sub, int idx) {
  int is, ie;
  is = i_start + M_sub / num_threads * idx + min(idx, M_sub % num_threads);
  ie = i_start + M_sub / num_threads * (idx + 1) + min(idx + 1, M_sub % num_threads);
  for (int kk = 0; kk < K; kk += KTILESIZE) {
    for (int ii = is; ii < ie; ii += ITILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        int k_cond = min(kk + KTILESIZE, K);
        for (int k = kk; k < k_cond; k++) {
          int i_cond = min(ii + ITILESIZE, ie);
          for (int i = ii; i < i_cond; i++) {
            float ar = A[i * K + k];
            int j_cond = min(jj + JTILESIZE, N);
            for (int j = jj; j < j_cond; j += 16) {
              C[i * N + j] += ar * B[k * N + j];
              C[i * N + j + 1] += ar * B[k * N + j + 1];
              C[i * N + j + 2] += ar * B[k * N + j + 2];
              C[i * N + j + 3] += ar * B[k * N + j + 3];
              C[i * N + j + 4] += ar * B[k * N + j + 4];
              C[i * N + j + 5] += ar * B[k * N + j + 5];
              C[i * N + j + 6] += ar * B[k * N + j + 6];
              C[i * N + j + 7] += ar * B[k * N + j + 7];
              C[i * N + j + 8] += ar * B[k * N + j + 8];
              C[i * N + j + 9] += ar * B[k * N + j + 9];
              C[i * N + j + 10] += ar * B[k * N + j + 10];
              C[i * N + j + 11] += ar * B[k * N + j + 11];
              C[i * N + j + 12] += ar * B[k * N + j + 12];
              C[i * N + j + 13] += ar * B[k * N + j + 13];
              C[i * N + j + 14] += ar * B[k * N + j + 14];
              C[i * N + j + 15] += ar * B[k * N + j + 15];
            }
          }
        }
      }
    }
  }
}
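// Per-rank driver: the root scatters row blocks of A to the other ranks, every rank
// computes its block of C with OpenMP threads, and the root gathers the row blocks of
// C back. B has already been broadcast to all ranks by mat_mul().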
static void mat_mul_omp() {
  // Row range [i_start, i_end) owned by this rank; the M % mpi_world_size leftover rows
  // go to the lowest-numbered ranks, one extra row each.
  int i_start, i_end, M_sub;
  MPI_Status status;
  i_start = (M / mpi_world_size) * mpi_rank + min(mpi_rank, M % mpi_world_size);
  i_end = (M / mpi_world_size) * (mpi_rank + 1) + min(mpi_rank + 1, M % mpi_world_size);
  M_sub = i_end - i_start;
  if (mpi_rank == 0) {
    for (int rank_idx = 1; rank_idx < mpi_world_size; rank_idx++) {
      int rank_start = (M / mpi_world_size) * rank_idx + min(rank_idx, M % mpi_world_size);
      int rank_end = (M / mpi_world_size) * (rank_idx + 1) + min(rank_idx + 1, M % mpi_world_size);
      int rank_sub = rank_end - rank_start;
      MPI_Send(A + rank_start * K, rank_sub * K, MPI_FLOAT, rank_idx, 1001, MPI_COMM_WORLD);
    }
  } else {
    MPI_Recv(A + i_start * K, M_sub * K, MPI_FLOAT, 0, 1001, MPI_COMM_WORLD, &status);
  }
  // Pick the most aggressive unrolling the column count allows. The parallel loop runs
  // exactly num_threads iterations (the same divisor used inside the workers), so every
  // row of the block is covered even if the runtime provides fewer threads than requested.
  if (N % 16 == 0) {
    #pragma omp parallel for
    for (int i = 0; i < num_threads; ++i) {
      mat_mul_thread16(i_start, i_end, M_sub, i);
    }
  } else if (N % 4 == 0) {
    #pragma omp parallel for
    for (int i = 0; i < num_threads; ++i) {
      mat_mul_thread4(i_start, i_end, M_sub, i);
    }
  } else {
    #pragma omp parallel for
    for (int i = 0; i < num_threads; ++i) {
      mat_mul_thread(i_start, i_end, M_sub, i);
    }
  }
  if (mpi_rank == 0) {
    for (int rank_idx = 1; rank_idx < mpi_world_size; rank_idx++) {
      int rank_start = (M / mpi_world_size) * rank_idx + min(rank_idx, M % mpi_world_size);
      int rank_end = (M / mpi_world_size) * (rank_idx + 1) + min(rank_idx + 1, M % mpi_world_size);
      int rank_sub = rank_end - rank_start;
      MPI_Recv(C + rank_start * N, rank_sub * N, MPI_FLOAT, rank_idx, 1001, MPI_COMM_WORLD, &status);
    }
  } else {
    MPI_Send(C + i_start * N, M_sub * N, MPI_FLOAT, 0, 1001, MPI_COMM_WORLD);
  }
}
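// Entry point, called on every rank. Per the skeleton, non-root processes must allocate
// their own A, B, C buffers; B is then broadcast from the root and the row blocks of A
// are distributed inside mat_mul_omp().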
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  // Non-root processes allocate their own A, B, C (required by the skeleton). Note that
  // the kernels accumulate into C with +=, so C must start at zero on every rank; if
  // alloc_mat does not zero-initialize, the buffer needs to be cleared here.
  if (mpi_rank != 0) {
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
  }
  // B is needed in full by every rank, so broadcast it once.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);
  omp_set_num_threads(num_threads);
  // Every rank computes its own row block; results are gathered inside mat_mul_omp().
  mat_mul_omp();
}
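/*
 * Usage sketch (hypothetical, not part of the submission): how a driver might call
 * mat_mul. Only standard MPI calls and the alloc_mat signature used above are assumed;
 * matrix contents are filled with plain loops purely for illustration.
 *
 *   int main(int argc, char **argv) {
 *     MPI_Init(&argc, &argv);
 *     int rank, size;
 *     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *     MPI_Comm_size(MPI_COMM_WORLD, &size);
 *     int M = 1024, N = 1024, K = 1024, threads = 16;
 *     float *A = NULL, *B = NULL, *C = NULL;
 *     if (rank == 0) {                        // root owns the full matrices
 *       alloc_mat(&A, M, K);
 *       alloc_mat(&B, K, N);
 *       alloc_mat(&C, M, N);
 *       for (int i = 0; i < M * K; i++) A[i] = (float)(i % 7);
 *       for (int i = 0; i < K * N; i++) B[i] = (float)(i % 5);
 *       for (int i = 0; i < M * N; i++) C[i] = 0.0f;  // kernels accumulate into C
 *     }
 *     mat_mul(A, B, C, M, N, K, threads, rank, size);
 *     MPI_Finalize();
 *     return 0;
 *   }
 */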