chundoong-lab-ta/SamsungDS22/submissions/HW4/yoojin73.kim/mat_mul.cpp

#include "mat_mul.h"
#include "util.h"
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <omp.h>
#define MIN(a, b) (((a)<(b)) ? (a):(b))
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
static int rows;
// Tile sizes for cache blocking: BS rows of A/C, kBS columns of A (= rows of B),
// and nBS columns of B/C per block.
static int kBS = 32;
static int nBS = 2048;
static int BS = 32;
static void mat_mul_omp() {
  // Blocked (tiled) matrix multiplication over this node's `rows` rows of A.
  // The row-block loop is split across num_threads OpenMP threads; the i-k-j
  // loop order keeps the innermost accesses to B and C stride-1.
  #pragma omp parallel num_threads(num_threads)
  #pragma omp for
  for (int ii = 0; ii < rows; ii += BS) {
    for (int bk = 0; bk < K; bk += kBS) {
      for (int bn = 0; bn < N; bn += nBS) {
        for (int i = ii; i < MIN(rows, ii + BS); i++) {
          for (int k = bk; k < MIN(bk + kBS, K); ++k) {
            float a = A[i * K + k];
            for (int j = bn; j < MIN(bn + nBS, N); j++) {
              C[i * N + j] += a * B[k * N + j];
            }
          }
        }
      }
    }
  }
}
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  // Multi-node strategy: the root scatters contiguous row blocks of A and a
  // full copy of B to the workers, every rank runs mat_mul_omp() on its own
  // block, and the root gathers the partial C results back into place.
  // Non-root ranks allocate and initialize A, B, C themselves (below).
  // Dimension naming: NRA = M, NCA = K, NCB = N.
  int offset = 0;
  if (mpi_rank == 0) {
    // Scatter: partition the M rows of A into one contiguous block per rank.
    int averow = M / mpi_world_size;
    int si, ei;
    MPI_Request request;
    for (int i = 1; i < mpi_world_size; i++) {
      si = offset = i * averow;
      ei = (i == mpi_world_size - 1) ? M : (i + 1) * averow;
      rows = ei - si;
      // The scalar metadata uses blocking sends because `offset` and `rows`
      // are overwritten on the next iteration; the large A/B payloads stay
      // non-blocking and their requests are freed (the buffers are read-only,
      // so the sends can safely complete in the background).
      MPI_Send(&offset, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
      MPI_Isend(&A[offset * K], rows * K, MPI_FLOAT, i, 1, MPI_COMM_WORLD, &request);
      MPI_Request_free(&request);
      MPI_Isend(B, K * N, MPI_FLOAT, i, 1, MPI_COMM_WORLD, &request);
      MPI_Request_free(&request);
    }

    // The root computes the first `averow` rows itself.
    rows = averow;
    mat_mul_omp();

    // Gather: place each worker's block of C at its row offset.
    MPI_Status status;
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Recv(&offset, 1, MPI_INT, i, 2, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, i, 2, MPI_COMM_WORLD, &status);
      MPI_Recv(&C[offset * N], rows * N, MPI_FLOAT, i, 2, MPI_COMM_WORLD, &status);
    }
  } else {
    // Non-root ranks allocate their own (full-size) buffers and receive only
    // their row block of A plus the whole of B.
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);

    MPI_Status status;
    MPI_Recv(&offset, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(&rows, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(A, rows * K, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(B, K * N, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);

    mat_mul_omp();

    // Return the row offset, the row count, and the computed block of C.
    // Waiting on the requests before returning keeps the buffers valid for
    // the full duration of the sends.
    MPI_Request reqs[3];
    MPI_Isend(&offset, 1, MPI_INT, 0, 2, MPI_COMM_WORLD, &reqs[0]);
    MPI_Isend(&rows, 1, MPI_INT, 0, 2, MPI_COMM_WORLD, &reqs[1]);
    MPI_Isend(C, rows * N, MPI_FLOAT, 0, 2, MPI_COMM_WORLD, &reqs[2]);
    MPI_Waitall(3, reqs, MPI_STATUSES_IGNORE);
  }
}
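
// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the submitted code): a minimal,
// macro-guarded driver showing how mat_mul() might be invoked. It assumes
// util.h provides alloc_mat()/zero_mat() (used above) and a rand_mat()
// initializer; the guard keeps it from colliding with the skeleton's main.cpp.
#ifdef MAT_MUL_STANDALONE_EXAMPLE
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  int m = 4096, n = 4096, k = 4096, threads = 16;
  float *a = NULL, *b = NULL, *c = NULL;
  if (rank == 0) {
    // Only the root holds the full matrices; workers allocate inside mat_mul().
    alloc_mat(&a, m, k);
    alloc_mat(&b, k, n);
    alloc_mat(&c, m, n);
    rand_mat(a, m, k);  // assumed helper from util.h
    rand_mat(b, k, n);  // assumed helper from util.h
    zero_mat(c, m, n);
  }

  mat_mul(a, b, c, m, n, k, threads, rank, world_size);

  MPI_Finalize();
  return 0;
}
#endif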