#include "mat_mul.h"
|
|
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <mpi.h>
|
|
#include <omp.h>
|
|
#include "util.h"
|
|
|
|
#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2

static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;

// Per-rank row counts and starting-row offsets for the 1-D row
// decomposition of A and C. NOTE: sized for at most 4 MPI processes.
static int rows[4] = {0};
static int offset[4] = {0};

// Cache-blocking tile sizes for the row (M), depth (K), and column (N) loops.
#define SLICEM 32
#define SLICEK 32
#define SLICEN 2048

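// Rough working-set arithmetic behind these sizes (an assumption about the
// target cache hierarchy, not something fixed by the assignment): one
// SLICEM x SLICEK tile of A is 32*32*4 B = 4 KiB, while the SLICEK x SLICEN
// tile of B and the SLICEM x SLICEN tile of C are each 32*2048*4 B = 256 KiB,
// small enough to stay resident in a typical per-core L2 while the A tile
// streams from L1.
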
// Blocked (tiled) matrix multiplication over this rank's row slice,
// parallelized across row tiles with OpenMP. The tiling replaces the naive
// i-k-j triple loop so each tile's working set stays cache-resident.
static void mat_mul_omp() {
  int start = 0;
  int ed = rows[mpi_rank];  // number of rows assigned to this rank

  #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
  for (int i2 = start; i2 < ed; i2 += SLICEM) {
    for (int k2 = 0; k2 < K; k2 += SLICEK) {
      for (int j2 = 0; j2 < N; j2 += SLICEN) {
        // Clamp tile bounds at the edges. The i-bound is clamped to ed
        // (this rank's row count), not M: on rank 0, M is still the full
        // row count, and rows beyond ed belong to other ranks.
        int edm = i2 + SLICEM < ed ? (i2 + SLICEM) : ed;
        int edk = k2 + SLICEK < K ? (k2 + SLICEK) : K;
        int edn = j2 + SLICEN < N ? (j2 + SLICEN) : N;
        for (int i = i2; i < edm; ++i) {
          for (int k = k2; k < edk; ++k) {
            // i-k-j order keeps the innermost accesses to B and C
            // sequential, so they stream and vectorize well.
            for (int j = j2; j < edn; ++j) {
              C[i * N + j] += A[i * K + k] * B[k * N + j];
            }
          }
        }
      }
    }
  }
}

void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;

  MPI_Status status;
  MPI_Request reqs[4];  // one request per worker; matches the 4-process cap

  // Split the M rows evenly across ranks; the last rank absorbs the
  // remainder. Example: M = 10 on 4 ranks gives rows = {2, 2, 2, 4}.
  int nrows = M / mpi_world_size;
  for (int i = 0; i < mpi_world_size; i++) {
    rows[i] = (i == mpi_world_size - 1) ? (M - nrows * (mpi_world_size - 1)) : nrows;
  }

  // Prefix sums of the row counts give each rank's starting row.
  for (int i = 0; i < mpi_world_size - 1; i++) {
    offset[i + 1] = offset[i] + rows[i];
  }

  if (mpi_rank != 0) {
    // Non-root ranks hold only their row slice of A and C, plus all of B.
    // Shrinking M makes mat_mul_omp's bounds match the local slice.
    M = rows[mpi_rank];
    alloc_mat(&A, rows[mpi_rank], K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, rows[mpi_rank], N);
    // NOTE: mat_mul_omp accumulates into C, so this relies on alloc_mat
    // zero-filling the buffer; if it does not, zero C here.
  }

  // Every rank needs the full B matrix.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // Scatter row slices of A: non-blocking sends on the root let all worker
  // transfers proceed concurrently; Waitall reclaims the requests.
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++)
      MPI_Isend(&A[offset[i] * K], rows[i] * K, MPI_FLOAT, i, 0, MPI_COMM_WORLD, &reqs[i - 1]);
    MPI_Waitall(mpi_world_size - 1, reqs, MPI_STATUSES_IGNORE);
  } else {
    MPI_Recv(A, rows[mpi_rank] * K, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
  }

  // Each rank multiplies its own row slice: C[0:rows,:] += A[0:rows,:] * B.
  mat_mul_omp();

  // Gather results: each worker sends its C slice to the root, which
  // receives it at the matching row offset of the full C.
  if (mpi_rank != 0) {
    // A blocking send is fine here: there is no work left to overlap.
    MPI_Send(C, rows[mpi_rank] * N, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
  } else {
    for (int i = 1; i < mpi_world_size; i++)
      MPI_Recv(&C[offset[i] * N], rows[i] * N, MPI_FLOAT, i, 0, MPI_COMM_WORLD, &status);
  }
}
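
// ---------------------------------------------------------------------------
// Minimal usage sketch (an illustration, not part of the skeleton): a driver
// showing how mat_mul is meant to be called under MPI. It assumes alloc_mat
// from util.h allocates a row-major float buffer, as the code above already
// relies on; the MAT_MUL_EXAMPLE_MAIN guard is hypothetical and keeps this
// out of the normal build.
#ifdef MAT_MUL_EXAMPLE_MAIN
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
  // NOTE: the rows[4]/offset[4] arrays above cap this at 4 ranks.

  const int m = 256, n = 256, k = 256;
  float *a = NULL, *b = NULL, *c = NULL;
  if (rank == 0) {
    // Only the root holds the full matrices; mat_mul allocates the
    // per-rank slices on the other ranks.
    alloc_mat(&a, m, k);
    alloc_mat(&b, k, n);
    alloc_mat(&c, m, n);
    for (int i = 0; i < m * k; i++) a[i] = 1.0f;
    for (int i = 0; i < k * n; i++) b[i] = 1.0f;
    for (int i = 0; i < m * n; i++) c[i] = 0.0f;  // mat_mul accumulates into C
  }

  mat_mul(a, b, c, m, n, k, /*num_threads=*/4, rank, world_size);

  if (rank == 0)
    printf("C[0][0] = %.1f (expected %d for all-ones inputs)\n", c[0], k);

  MPI_Finalize();
  return 0;
}
#endif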