chundoong-lab-ta/SamsungDS22/submissions/HW4/ig1004.lee/mat_mul.cpp

#include "mat_mul.h"
#include "util.h"
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <omp.h>
static float *A, *B, *C;  // per-rank matrix buffers (the master uses the caller's buffers)
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
static int rows;          // number of rows of A/C handled by this rank

static int min(int x, int y) {
  return x < y ? x : y;
}

// Cache-blocking tile sizes for the local tiled GEMM.
#define ITILESIZE (25)
#define JTILESIZE (1024)
#define KTILESIZE (1024)
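
// mat_mul_omp: multiply this rank's `rows` x K block of A by the full K x N
// matrix B, accumulating into the rows x N block of C. The row range is split
// across OpenMP threads, and the loop nest is tiled with the sizes above so
// the touched pieces of B and C stay cache-resident.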
static void mat_mul_omp() {
#pragma omp parallel num_threads(num_threads)
  {
    // Split the local rows evenly across threads; the first rows % num_threads
    // threads each take one extra row.
    int tid = omp_get_thread_num();
    int is = rows / num_threads * tid + min(tid, rows % num_threads);
    int ie = rows / num_threads * (tid + 1) + min(tid + 1, rows % num_threads);

    for (int ii = is; ii < ie; ii += ITILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        for (int kk = 0; kk < K; kk += KTILESIZE) {
          for (int k = kk; k < min(K, kk + KTILESIZE); k++) {
            for (int i = ii; i < min(ie, ii + ITILESIZE); i++) {
              float ar = A[i * K + k];  // reuse one A element across the whole j loop
              for (int j = jj; j < min(N, jj + JTILESIZE); j++) {
                C[i * N + j] += ar * B[k * N + j];
              }
            }
          }
        }
      }
    }
  }
}
#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2
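
// mat_mul: 1-D row decomposition over MPI ranks. The master (rank 0) keeps the
// first block of rows for itself, ships one contiguous row block of A plus a
// full copy of B to every worker, and gathers the workers' row blocks of C.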
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  // A, B, C are allocated & initialized below for non-root processes.
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;

  int numworkers = mpi_world_size - 1;
  int NCA = K, NCB = N;  // column counts of A and B
  int mtype;
  MPI_Status status;

  int averrow = M / mpi_world_size;  // base row count per rank
  int extra = M % mpi_world_size;    // leftover rows, assigned to the last rank
  int offset = averrow;              // first row owned by worker 1 (master keeps rows [0, averrow))
  rows = (mpi_rank == numworkers) ? averrow + extra : averrow;

  // Row offsets and counts per worker, kept by the master so it knows where to
  // place each worker's result. Sized by the world size rather than a fixed 4.
  std::vector<int> off(mpi_world_size), rrr(mpi_world_size);
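
  // Master: post all distribution sends and result receives non-blocking,
  // compute the local block while transfers are in flight, then wait on all.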
  if (mpi_rank == MASTER) {
    A = _A, B = _B, C = _C;

    mtype = FROM_MASTER;
    std::vector<MPI_Request> send_reqs(4 * numworkers);
    for (int dest = 1; dest <= numworkers; dest++) {
      rows = (dest == numworkers) ? averrow + extra : averrow;
      rrr[dest] = rows;
      off[dest] = offset;
      // Send from off[]/rrr[] so the buffers stay untouched until MPI_Waitall.
      MPI_Request *sr = &send_reqs[4 * (dest - 1)];
      MPI_Isend(&off[dest], 1, MPI_INT, dest, mtype, MPI_COMM_WORLD, &sr[0]);
      MPI_Isend(&rrr[dest], 1, MPI_INT, dest, mtype, MPI_COMM_WORLD, &sr[1]);
      MPI_Isend(&A[offset * NCA], rows * NCA, MPI_FLOAT, dest, mtype, MPI_COMM_WORLD, &sr[2]);
      MPI_Isend(&B[0], NCA * NCB, MPI_FLOAT, dest, mtype, MPI_COMM_WORLD, &sr[3]);
      offset += rows;
    }

    mtype = FROM_WORKER;
    std::vector<MPI_Request> recv_reqs(numworkers);
    for (int source = 1; source <= numworkers; source++) {
      MPI_Irecv(&C[off[source] * NCB], rrr[source] * NCB, MPI_FLOAT, source, mtype,
                MPI_COMM_WORLD, &recv_reqs[source - 1]);
    }

    // Compute the master's own block, rows [0, averrow), while transfers run.
    rows = averrow;
    mat_mul_omp();

    // One request per send/receive (instead of one reused handle), so every
    // transfer is actually complete before mat_mul returns.
    if (numworkers > 0) {
      MPI_Waitall(4 * numworkers, send_reqs.data(), MPI_STATUSES_IGNORE);
      MPI_Waitall(numworkers, recv_reqs.data(), MPI_STATUSES_IGNORE);
    }
  }
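
  // Workers: receive a row block of A and all of B, run the same tiled kernel
  // on the block, and ship the resulting rows of C back to the master.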
  if (mpi_rank > MASTER) {
    // Allocate this rank's slice of A and C and a full B
    // (alloc_mat / zero_mat come from util.h).
    alloc_mat(&A, rows, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, rows, N);
    zero_mat(C, rows, N);

    // Receive row offset, row count, the rows of A, then all of B, in the same
    // order the master sent them.
    mtype = FROM_MASTER;
    MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&A[0], rows * NCA, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&B[0], NCA * NCB, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);

    // Reuse the tiled OpenMP kernel instead of repeating the loop nest inline.
    mat_mul_omp();

    // Return the computed rows of C.
    mtype = FROM_WORKER;
    MPI_Send(&C[0], rows * NCB, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD);
  }
}