chundoong-lab-ta/SamsungDS22/submissions/HW4/sung_min.kim/mat_mul.cpp

149 lines
4.5 KiB
C++

#include "mat_mul.h"
#include "util.h"
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <omp.h>
// Shared state for mat_mul / mat_mul_omp (assigned once in mat_mul).
static float *A, *B, *C;              // row-major buffers: A is M x K, B is K x N, C is M x N
static int M, N, K;                   // matrix dimensions (C = A * B)
static int num_threads;               // OpenMP threads to use per node
static int mpi_rank, mpi_world_size;  // this process's MPI rank / total process count
inline int min(int a, int b) { if(a>b) return b; else return a; }
static void mat_mul_omp(int MY_M) {
// TODO: parallelize & optimize matrix multiplication
// Use num_threads per node
int num;
int q, r;
int i, j, k;
int kk, off_k;
int jj, off_j;
float A_tmp;
r = MY_M % num_threads;
q = MY_M / num_threads;
// printf("q = %d, r = %d\n", q, r);
if(r){
q++;
}
off_k = 32;
off_j = 1024;
/* DEBUG_p */// printf("%d: MATRIX A:\n", mpi_rank);
/* DEBUG_p */// print_mat(A, M, K);
/* DEBUG_p */// printf("%d: MATRIX B:\n", mpi_rank);
/* DEBUG_p */// print_mat(B, K, N);
#pragma omp parallel private(num, i, j, k, jj, kk, A_tmp) num_threads(num_threads)
{
num = omp_get_thread_num();
// printf("\n");
// printf("num : %d/%d\n", num, omp_get_num_threads());
// printf("start: num = %d\n", num);
for(jj = 0; jj < N; jj += off_j){
for(kk = 0; kk < K; kk += off_k){
for(i = q * num; i < min(q * (num+1), MY_M); ++i){
for(k = kk; k < min(kk+off_k, K); ++k){
A_tmp = A[i * K + k];
for(j = jj; j < min(jj+off_j, N); ++j){
C[i * N + j] += A_tmp * B[k * N + j];
}
}
}
}
}
}
/* DEBUG_p */// printf("%d: MATRIX C:\n", mpi_rank);
/* DEBUG_p */// print_mat(C, M, N);
}
/* Distributed matrix multiply C = A * B across MPI ranks.
 *
 * Rank 0 owns the real A, B, C; every other rank allocates its own
 * working copies.  Rows of A (and the matching rows of C) are split
 * into chunks of q = ceil(M / mpi_world_size) rows; rank 0 sends each
 * rank its A rows and the whole B, each rank multiplies its share with
 * mat_mul_omp, and rank 0 gathers the C rows back in order.
 *
 * Parameters mirror the globals they initialize; the interface is
 * unchanged from the original.
 *
 * FIX: the original declared tag_A[4], tag_C[4], A_count[4], C_count[4]
 * as fixed 4-element arrays but indexed them with every rank up to
 * mpi_world_size - 1 — an out-of-bounds stack read/write whenever more
 * than 4 MPI processes run.  The count arrays are now heap-allocated to
 * the actual world size, and the tags (100 + i for A, 2 + i for C,
 * 1 for B — same values as before) are computed arithmetically.
 */
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;

  MPI_Status status;

  const int TAG_A_BASE = 100; /* A slice for rank i uses tag 100 + i */
  const int TAG_B = 1;        /* whole B matrix */
  const int TAG_C_BASE = 2;   /* C slice from rank i uses tag 2 + i */

  /* Element counts of each rank's A slice (rows * K) and C slice
   * (rows * N), sized to the real world size (see FIX above). */
  int *A_count = (int *)malloc(sizeof(int) * mpi_world_size);
  int *C_count = (int *)malloc(sizeof(int) * mpi_world_size);
  if (!A_count || !C_count) {
    fprintf(stderr, "mat_mul: out of memory\n");
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  /* Rows per rank, rounded up; trailing rank(s) may get fewer or zero. */
  int q = M / mpi_world_size;
  if (M % mpi_world_size) q++;

  /* Non-root ranks allocate and zero their own working buffers
   * (alloc_mat / zero_mat come from util.h). */
  if (mpi_rank != 0) {
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);
  }

  for (int i = 0; i < mpi_world_size; i++) {
    int rows = M - q * i; /* rows remaining for rank i onward */
    if (rows > q) rows = q;
    if (rows < 0) rows = 0;
    A_count[i] = rows * K;
    C_count[i] = rows * N;
  }

  /* Scatter: root sends each rank its A rows plus the full B. */
  if (mpi_rank == 0) {
    int offset = A_count[0];
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Send(A + offset, A_count[i], MPI_FLOAT, i, TAG_A_BASE + i,
               MPI_COMM_WORLD);
      MPI_Send(B, N * K, MPI_FLOAT, i, TAG_B, MPI_COMM_WORLD);
      offset += A_count[i];
    }
  } else {
    MPI_Recv(A, A_count[mpi_rank], MPI_FLOAT, 0, TAG_A_BASE + mpi_rank,
             MPI_COMM_WORLD, &status);
    MPI_Recv(B, N * K, MPI_FLOAT, 0, TAG_B, MPI_COMM_WORLD, &status);
  }

  /* Local blocked/OpenMP multiply over this rank's row count.
   * Guard against K == 0 (degenerate input) before dividing. */
  mat_mul_omp(K ? A_count[mpi_rank] / K : 0);

  /* Gather: root collects each rank's C rows back in row order. */
  if (mpi_rank == 0) {
    int offset = C_count[0];
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Recv(C + offset, C_count[i], MPI_FLOAT, i, TAG_C_BASE + i,
               MPI_COMM_WORLD, &status);
      offset += C_count[i];
    }
  } else {
    MPI_Send(C, C_count[mpi_rank], MPI_FLOAT, 0, TAG_C_BASE + mpi_rank,
             MPI_COMM_WORLD);
  }

  free(A_count);
  free(C_count);
}