chundoong-lab-ta/SamsungDS22/submissions/HW4/hero83.kim/mat_mul.cpp

#include "mat_mul.h"
#include "util.h"
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <omp.h>
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
static int num_cnt; // rows of A (and C) assigned to each MPI rank
static int min(int x, int y) {
  return x < y ? x : y;
}
//#define DEBUG (true)
#define DEBUG (false)
#define ITILESIZE (32) // 32
#define JTILESIZE (1024) // 1024
#define KTILESIZE (1024) // 1024
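
// Tiled (cache-blocked) matrix multiplication kernel: computes
// C[is:ie, :] += A[is:ie, :] * B using ITILESIZE x KTILESIZE blocks of A and
// KTILESIZE x JTILESIZE blocks of B, so each block is reused while it is hot
// in cache. The k-i-j inner-loop order keeps the accesses to B and C
// row-contiguous, which lets the innermost j loop vectorize. Row tiles (the
// ii loop) are distributed over OpenMP threads within a node.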
static void mat_mul_omp(int is, int ie) {
  // Use num_threads OpenMP threads per node.
  omp_set_num_threads(num_threads);
  //for (int ii = 0; ii < M; ii += ITILESIZE) {
  #pragma omp parallel for schedule(runtime)
  for (int ii = is; ii < ie; ii += ITILESIZE) {
    for (int jj = 0; jj < N; jj += JTILESIZE) {
      for (int kk = 0; kk < K; kk += KTILESIZE) {
        for (int k = kk; k < min(kk + KTILESIZE, K); k++) {
          for (int i = ii; i < min(ii + ITILESIZE, ie); i++) {
            float ar = A[i * K + k];
            for (int j = jj; j < min(jj + JTILESIZE, N); j++) {
              C[i * N + j] += ar * B[k * N + j];
            } // for j
          } // for i
        } // for k
      } // for kk
    } // for jj
  } // for ii
  return;
}
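
// Multi-node driver: rank 0 owns the full A, B, C. It sends each other rank
// its contiguous block of num_cnt rows of A plus the whole B (non-blocking),
// computes its own row block locally, then gathers every rank's rows of C.
// Non-root ranks allocate only their row block of A and C (plus the full B),
// compute, and send their result back to rank 0.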
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
      mpi_world_size = _mpi_world_size;
  // Rows per rank: ceil(M / mpi_world_size). The last rank may get a shorter
  // (possibly empty) block when M is not a multiple of the world size.
  num_cnt = (M / mpi_world_size) + ((M % mpi_world_size) > 0);
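  // Example (illustrative): M = 10 rows on 4 ranks gives num_cnt = 3, so the
  // row ranges are [0,3), [3,6), [6,9) and [9,10) for ranks 0..3.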
  int is, ie, in;
  MPI_Status status;
  if (mpi_rank == 0) {
    if (DEBUG) printf("[mat_mul %d] Start...\n", mpi_rank);
    // Send each rank its row block of A and the full B. Non-blocking sends
    // let the transfers overlap with rank 0's own computation below.
    MPI_Request *send_reqs =
        (MPI_Request *)malloc(sizeof(MPI_Request) * 2 * (mpi_world_size - 1));
    for (int r = 1; r < mpi_world_size; r++) {
      is = min(r * num_cnt, M); // clamp so 'in' never goes negative
      ie = min((r + 1) * num_cnt, M);
      in = ie - is;
      //MPI_Send(&A[is*K], in*K, MPI_FLOAT, r, 100, MPI_COMM_WORLD);
      MPI_Isend(&A[is * K], in * K, MPI_FLOAT, r, 100, MPI_COMM_WORLD,
                &send_reqs[2 * (r - 1)]);
      if (DEBUG) printf("[mat_mul %d] Send A to %0d, %0d ~ %0d\n", mpi_rank, r, is, ie);
      //MPI_Send(B, K*N, MPI_FLOAT, r, 200, MPI_COMM_WORLD);
      MPI_Isend(B, K * N, MPI_FLOAT, r, 200, MPI_COMM_WORLD,
                &send_reqs[2 * (r - 1) + 1]);
      if (DEBUG) printf("[mat_mul %d] Send B to %0d\n", mpi_rank, r);
    }
    // Rank 0 computes its own row block while the sends are in flight.
    mat_mul_omp(0, num_cnt); // for rank 0
    if (DEBUG) printf("[mat_mul %d] Process done (%0.2f)\n", mpi_rank, C[0]);
    // Complete the non-blocking sends before releasing the request storage.
    MPI_Waitall(2 * (mpi_world_size - 1), send_reqs, MPI_STATUSES_IGNORE);
    free(send_reqs);
    // Gather each rank's rows of C back into the full result.
    for (int r = 1; r < mpi_world_size; r++) {
      is = min(r * num_cnt, M);
      ie = min((r + 1) * num_cnt, M);
      in = ie - is;
      MPI_Recv(&C[is * N], in * N, MPI_FLOAT, r, 300, MPI_COMM_WORLD, &status);
      if (DEBUG) printf("[mat_mul %d] Receive C : %0d, %0d ~ %0d\n", mpi_rank, r, is, ie);
    }
  }
  else {
    if (DEBUG) printf("[mat_mul %d] Start...\n", mpi_rank);
    is = min(mpi_rank * num_cnt, M); // clamp so 'in' never goes negative
    ie = min((mpi_rank + 1) * num_cnt, M);
    in = ie - is;
    // Allocate only this rank's row block of A and C, plus the full B.
    alloc_mat(&A, in, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, in, N);
    zero_mat(C, in, N);
    // Receive A from rank 0
    MPI_Recv(A, in * K, MPI_FLOAT, 0, 100, MPI_COMM_WORLD, &status);
    if (DEBUG) printf("[mat_mul %d] Receive A\n", mpi_rank);
    // Receive B from rank 0
    MPI_Recv(B, K * N, MPI_FLOAT, 0, 200, MPI_COMM_WORLD, &status);
    if (DEBUG) printf("[mat_mul %d] Receive B\n", mpi_rank);
    // Compute this rank's row block of C.
    mat_mul_omp(0, in);
    if (DEBUG) printf("[mat_mul %d] Process done (%0.2f)\n", mpi_rank, C[0]);
    // Send the result back to rank 0.
    MPI_Send(C, in * N, MPI_FLOAT, 0, 300, MPI_COMM_WORLD);
    if (DEBUG) printf("[mat_mul %d] Send C\n", mpi_rank);
  }
}
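
// Illustrative call pattern (an assumption about the course's driver, which is
// not part of this file): the driver initializes MPI, allocates and fills
// A (M x K) and B (K x N) and zeroes C (M x N) on rank 0 only, then calls
// mat_mul() on every rank.
//
//   MPI_Init(&argc, &argv);
//   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
//   MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
//   if (mpi_rank == 0) { /* allocate and initialize A, B; zero C */ }
//   mat_mul(A, B, C, M, N, K, num_threads, mpi_rank, mpi_world_size);
//   MPI_Finalize();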