chundoong-lab-ta/SamsungDS22/submissions/HW4/kiyong.son/mat_mul.cpp

#include "mat_mul.h"
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include "util.h"
#include <immintrin.h>
#include <vector>
#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2
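// Message tags: master-to-worker distribution vs. worker-to-master results.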
#define ITILESIZE (32)
#define JTILESIZE (1024)
#define KTILESIZE (1024)
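// Loop-blocking tile sizes for the ii/jj/kk loops in mat_mul_omp().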
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
MPI_Status status;
int numworkers, source, dest, mtype, rows, averow, offset;

static int min(int x, int y) {
  return (x < y) ? x : y;
}
static void mat_mul_omp() {
  // TODO: parallelize & optimize matrix multiplication
  // Use num_threads per node
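  // Tiled multiply over the local row block [0, rows): the ii loop is split
  // across OpenMP threads, and A[i*K+k] is hoisted so the innermost j loop
  // streams rows of B and C (a pattern the compiler can auto-vectorize).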
  int is = 0;
  int ie = rows;  // number of rows of A/C owned by this rank
  // Loop indices are local so each OpenMP thread gets private copies.
  #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
  for (int ii = is; ii < ie; ii += ITILESIZE) {
    for (int jj = 0; jj < N; jj += JTILESIZE) {
      for (int kk = 0; kk < K; kk += KTILESIZE) {
        for (int k = kk; k < min(K, kk + KTILESIZE); k++) {
          for (int i = ii; i < min(ie, ii + ITILESIZE); i++) {
            float ar = A[i * K + k];
            for (int j = jj; j < min(N, jj + JTILESIZE); j++) {
              C[i * N + j] += ar * B[k * N + j];
            }
          }
        }
      }
    }
  }
}
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  // TODO: parallelize & optimize matrix multiplication on multi-node
  // You must allocate & initialize A, B, C for non-root processes
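  // Row-block decomposition: the master keeps rows [0, M/numworkers) and
  // sends each worker its block of A plus all of B; the last worker also
  // takes the remainder rows when M is not evenly divisible.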
  numworkers = mpi_world_size;
  if (numworkers == 1) {
    // Single rank: compute all M rows locally, no distribution needed.
    rows = M;
    mat_mul_omp();
  } else {
    if (mpi_rank == MASTER) {
      averow = M / numworkers;
      int s, e;
      offset = 0;
      mtype = FROM_MASTER;
      // Scalars go out with blocking sends because their storage is reused
      // every iteration; the large payloads use MPI_Isend so the master can
      // start computing while they drain.
      std::vector<MPI_Request> reqs(2 * (numworkers - 1));
      for (dest = 1; dest < numworkers; dest++) {
        s = offset = dest * averow;
        e = (dest == numworkers - 1) ? M : (dest + 1) * averow;
        rows = e - s;
        printf("sending %d rows to task %d offset=%d\n", rows, dest, offset);
        MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
        MPI_Isend(&A[offset * K], rows * K, MPI_FLOAT, dest, mtype,
                  MPI_COMM_WORLD, &reqs[2 * (dest - 1)]);
        MPI_Isend(B, K * N, MPI_FLOAT, dest, mtype,
                  MPI_COMM_WORLD, &reqs[2 * (dest - 1) + 1]);
      }
      // Master computes its own block, rows [0, averow).
      rows = averow;
      mat_mul_omp();
      MPI_Waitall(2 * (numworkers - 1), reqs.data(), MPI_STATUSES_IGNORE);
      // Gather each worker's C block back into place.
      mtype = FROM_WORKER;
      for (int i = 1; i < numworkers; i++) {
        source = i;
        MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&C[offset * N], rows * N, MPI_FLOAT, source, mtype,
                 MPI_COMM_WORLD, &status);
        printf("Received result from task %d\n", source);
      }
    } // mpi_rank == MASTER
    if (mpi_rank > MASTER) {
      // Non-root ranks allocate and zero their own local buffers.
      alloc_mat(&A, M, K);
      alloc_mat(&B, K, N);
      alloc_mat(&C, M, N);
      zero_mat(C, M, N);
      mtype = FROM_MASTER;
      MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(A, rows * K, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(B, K * N, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);
      mat_mul_omp();
      // Blocking sends: nothing is left to overlap with communication here.
      mtype = FROM_WORKER;
      MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
      MPI_Send(C, rows * N, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD);
    }
  }
}