chundoong-lab-ta/SamsungDS22/submissions/HW4/gyehyung.kim/mat_mul.cpp

#include "mat_mul.h"
#include "util.h"
#include <cstdio>
#include <cstdlib>
//#include <algorithm>
#include <omp.h>
#include <mpi.h>
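// Shared state for this translation unit: the operand matrices, their
// dimensions, and the OpenMP/MPI configuration passed in through mat_mul().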
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
static int min(int x, int y) {
  return (x < y) ? x : y;
}
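// Multiply rows [start, start + size) of A by B, accumulating into C.
// The rows are split statically across num_threads OpenMP threads, and the
// loops are blocked (tiled) so the working set stays cache-resident.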
static void mat_mul_omp(int start, int size) {
  // Never use more threads than there are rows to work on.
  num_threads = (size < num_threads) ? size : num_threads;
  int i_M = size / num_threads;  // rows per thread
  int ii_M = 32;                 // row tile size
  int i_J = 2048;                // column tile size
  int i_K = 32;                  // depth tile size
  omp_set_num_threads(num_threads);
#pragma omp parallel
  {
    // Loop indices are declared inside the parallel region so that each
    // thread gets its own private copies (sharing them would be a data race).
    int tid = omp_get_thread_num();
    // The last thread also takes the leftover rows when size % num_threads != 0.
    int iii = (tid == num_threads - 1) ? start + size : start + tid * i_M + i_M;
    for (int ii = start + tid * i_M; ii < iii; ii += ii_M)
      for (int jj = 0; jj < N; jj += i_J)
        for (int kk = 0; kk < K; kk += i_K)
          for (int k = kk; k < min(kk + i_K, K); ++k)
            for (int i = ii; i < min(iii, ii + ii_M); ++i) {
              float ar = A[i * K + k];
              for (int j = jj; j < min(jj + i_J, N); ++j)
                C[i * N + j] += ar * B[k * N + j];
            }
  }
}
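// Distributed matrix multiplication: rank 0 broadcasts B, scatters row blocks
// of A to the other ranks, every rank multiplies its block with mat_mul_omp(),
// and rank 0 gathers the corresponding row blocks of C.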
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  int dest, source, start;
  MPI_Status status;
  MPI_Request request;
  int i_remain = M % mpi_world_size;  // leftover rows, handled by rank 0
  int i_M = M / mpi_world_size;       // rows assigned to every other rank
  if (M < mpi_world_size) {
    // Fewer rows than processes: rank 0 simply computes the whole product.
    if (mpi_rank == 0)
      mat_mul_omp(0, M);
    return;
  }
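  // Row partition: rank 0 keeps rows [0, i_M + i_remain); every other rank r
  // works on the i_M rows starting at r * i_M + i_remain.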
  // Non-root processes allocate their own copies of A, B and C; C must be
  // zeroed because mat_mul_omp() accumulates into it.
  if (mpi_rank != 0) {
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);
  }
  // Every rank needs all of B.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);
  if (mpi_rank == 0) {  // master
    // Send each worker its row block of A without blocking, so the master can
    // start on its own block right away; the requests are completed below.
    MPI_Request *send_reqs = new MPI_Request[mpi_world_size - 1];
    for (dest = 1; dest <= (mpi_world_size - 1); dest++) {
      MPI_Isend(&A[(dest * i_M + i_remain) * K], i_M * K, MPI_FLOAT, dest, 3,
                MPI_COMM_WORLD, &send_reqs[dest - 1]);
    }
    // The master computes the first i_M rows plus the remainder rows.
    mat_mul_omp(0, i_M + i_remain);
    // Collect the finished row blocks of C from the workers.
    for (source = 1; source <= (mpi_world_size - 1); source++) {
      MPI_Recv(&C[(source * i_M + i_remain) * N], i_M * N, MPI_FLOAT, source, 5,
               MPI_COMM_WORLD, &status);
    }
    MPI_Waitall(mpi_world_size - 1, send_reqs, MPI_STATUSES_IGNORE);
    delete[] send_reqs;
  }
  else {  // workers
    // Receive this rank's row block of A, multiply it, and send the matching
    // row block of C back to the master.
    MPI_Recv(&A[(mpi_rank * i_M + i_remain) * K], i_M * K, MPI_FLOAT, 0, 3,
             MPI_COMM_WORLD, &status);
    start = mpi_rank * i_M + i_remain;
    mat_mul_omp(start, i_M);
    MPI_Isend(&C[start * N], i_M * N, MPI_FLOAT, 0, 5, MPI_COMM_WORLD, &request);
    // Make sure the send of C has completed before returning to the caller.
    MPI_Wait(&request, MPI_STATUS_IGNORE);
  }
}