#include "mat_mul.h"

#include <cstdio>
#include <cstdlib>
#include <cstring> // memset in zero_mat_local

#include <mpi.h>
#include <omp.h>
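
// Multi-node matrix multiplication: C = A * B, decomposed row-wise across
// MPI ranks. Rank 0 distributes row blocks of A plus a full copy of B,
// every rank runs the tiled OpenMP kernel on its rows, and rank 0 gathers
// the finished rows of C.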

static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;

// MPI message tags for master-to-worker and worker-to-master traffic.
#define FROM_MASTER 1
#define FROM_WORKER 2

#ifndef max
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif

#ifndef min
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif

// Allocate a 32-byte-aligned rows x cols float matrix. aligned_alloc
// requires the allocation size to be a multiple of the alignment, so the
// byte count is rounded up to the next multiple of 32.
void alloc_mat_local(float **m, int rows, int cols) {
  size_t bytes = sizeof(float) * rows * cols;
  bytes = (bytes + 31) / 32 * 32;
  *m = (float *)aligned_alloc(32, bytes);
  if (*m == NULL) {
    fprintf(stderr, "Failed to allocate memory for matrix.\n");
    exit(EXIT_FAILURE);
  }
}

void zero_mat_local(float *m, int rows, int cols) {
  memset(m, 0, sizeof(float) * rows * cols);
}

// Tiled OpenMP kernel: C[offset .. offset+rows) += A * B.
// The row range is split statically across the threads of the parallel
// region (the last thread absorbs the remainder). Blocking over k and j
// keeps the touched panels of B and C cache-resident, and hoisting
// A[i*K + k] out of the inner loop lets it stream through B and C.
static void mat_mul_omp(int offset, int rows) {
#pragma omp parallel
  {
    int idx = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    int slice = rows / nthreads;
    int start = offset + idx * slice;
    int end = idx == nthreads - 1 ? offset + rows : offset + (idx + 1) * slice;

    float Aik;
    const int iBS = 1024; // block size along i (rows of A and C)
    const int jBS = 1024; // block size along j (columns of B and C)
    const int kBS = 32;   // block size along k (the inner dimension)

    for (int kk = 0; kk < K; kk += kBS) {
      for (int jj = 0; jj < N; jj += jBS) {
        for (int ii = start; ii < end; ii += iBS) {
          for (int i = ii; i < min(ii + iBS, end); ++i) {
            for (int k = kk; k < min(kk + kBS, K); ++k) {
              Aik = A[i * K + k];
              for (int j = jj; j < min(jj + jBS, N); ++j) {
                C[i * N + j] += Aik * B[k * N + j];
              }
            }
          }
        }
      }
    }
  }
}
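
// A naive triple-loop reference, useful for spot-checking the tiled kernel
// on small sizes. This helper and its MAT_MUL_CHECK guard are additions for
// illustration, not part of the assignment interface.
#ifdef MAT_MUL_CHECK
static void mat_mul_naive(const float *a, const float *b, float *c, int m,
                          int n, int k) {
  for (int i = 0; i < m; ++i)
    for (int j = 0; j < n; ++j) {
      float acc = 0.0f;
      for (int l = 0; l < k; ++l) acc += a[i * k + l] * b[l * n + j];
      c[i * n + j] = acc;
    }
}
#endif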

void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  int i, rows, mpi_start, mpi_end, mpi_rows;

  // Row-wise decomposition: every rank gets M / mpi_world_size rows, and
  // the last worker also takes the remainder. Non-root ranks allocate and
  // initialize their own A, B, C buffers in the else branch below.
  MPI_Status stat;
  rows = M / mpi_world_size;
  omp_set_num_threads(num_threads);

  if (mpi_rank == 0) {
    // Hand each worker its row offset and count, its slice of A, and a full
    // copy of B. The two ints use blocking sends because their stack
    // buffers are rewritten on every iteration; the large payloads go out
    // as non-blocking sends so the master can start computing immediately,
    // and are completed with MPI_Waitall before this function returns.
    MPI_Request *reqs =
        (MPI_Request *)malloc(sizeof(MPI_Request) * 2 * (mpi_world_size - 1));
    int nreqs = 0;
    for (i = 1; i < mpi_world_size; i++) {
      mpi_start = i * rows;
      mpi_end = i == mpi_world_size - 1 ? M : (i + 1) * rows;
      mpi_rows = mpi_end - mpi_start;
      MPI_Send(&mpi_start, 1, MPI_INT, i, FROM_MASTER, MPI_COMM_WORLD);
      MPI_Send(&mpi_rows, 1, MPI_INT, i, FROM_MASTER, MPI_COMM_WORLD);
      MPI_Isend(A + mpi_start * K, mpi_rows * K, MPI_FLOAT, i, FROM_MASTER,
                MPI_COMM_WORLD, &reqs[nreqs++]);
      MPI_Isend(B, K * N, MPI_FLOAT, i, FROM_MASTER, MPI_COMM_WORLD,
                &reqs[nreqs++]);
    }

    // Compute the master's own rows while the sends drain.
    mat_mul_omp(0, rows);
    MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE);
    free(reqs);

    // Gather each worker's row range and the matching block of C.
    for (i = 1; i < mpi_world_size; i++) {
      MPI_Recv(&mpi_start, 1, MPI_INT, i, FROM_WORKER, MPI_COMM_WORLD, &stat);
      MPI_Recv(&mpi_rows, 1, MPI_INT, i, FROM_WORKER, MPI_COMM_WORLD, &stat);
      MPI_Recv(C + mpi_start * N, mpi_rows * N, MPI_FLOAT, i, FROM_WORKER,
               MPI_COMM_WORLD, &stat);
    }
  }
  else {
    // Workers allocate full-size buffers so the same global row indexing as
    // the master works unchanged; only the received slice is ever touched.
    // C must be zeroed because the kernel accumulates into it.
    alloc_mat_local(&A, M, K);
    alloc_mat_local(&B, K, N);
    alloc_mat_local(&C, M, N);
    zero_mat_local(C, M, N);

    MPI_Recv(&mpi_start, 1, MPI_INT, 0, FROM_MASTER, MPI_COMM_WORLD, &stat);
    MPI_Recv(&mpi_rows, 1, MPI_INT, 0, FROM_MASTER, MPI_COMM_WORLD, &stat);
    MPI_Recv(A + mpi_start * K, mpi_rows * K, MPI_FLOAT, 0, FROM_MASTER,
             MPI_COMM_WORLD, &stat);
    MPI_Recv(B, K * N, MPI_FLOAT, 0, FROM_MASTER, MPI_COMM_WORLD, &stat);

    mat_mul_omp(mpi_start, mpi_rows);

    // Blocking sends are fine here: the worker has nothing left to overlap
    // them with, and they guarantee the buffers may be freed afterwards.
    MPI_Send(&mpi_start, 1, MPI_INT, 0, FROM_WORKER, MPI_COMM_WORLD);
    MPI_Send(&mpi_rows, 1, MPI_INT, 0, FROM_WORKER, MPI_COMM_WORLD);
    MPI_Send(C + mpi_start * N, mpi_rows * N, MPI_FLOAT, 0, FROM_WORKER,
             MPI_COMM_WORLD);

    // Worker-side buffers were allocated in this call, so release them.
    free(A);
    free(B);
    free(C);
  }
}
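
// A minimal standalone driver sketch, assuming the grading harness normally
// supplies main() elsewhere. The MAT_MUL_DEMO guard, matrix sizes, and the
// thread count below are illustrative assumptions, not part of mat_mul.h.
// A build/run would look roughly like:
//   mpicxx -fopenmp -DMAT_MUL_DEMO mat_mul.cpp && mpirun -np 2 ./a.out
#ifdef MAT_MUL_DEMO
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int demo_M = 64, demo_N = 64, demo_K = 64;
  float *dA = NULL, *dB = NULL, *dC = NULL;
  if (rank == 0) {
    // Only the root initializes inputs; workers allocate inside mat_mul.
    alloc_mat_local(&dA, demo_M, demo_K);
    alloc_mat_local(&dB, demo_K, demo_N);
    alloc_mat_local(&dC, demo_M, demo_N);
    for (int i = 0; i < demo_M * demo_K; ++i) dA[i] = 1.0f;
    for (int i = 0; i < demo_K * demo_N; ++i) dB[i] = 1.0f;
    zero_mat_local(dC, demo_M, demo_N);
  }

  mat_mul(dA, dB, dC, demo_M, demo_N, demo_K, /*num_threads=*/4, rank, size);

  if (rank == 0) {
    // With all-ones inputs every entry of C should equal demo_K.
    printf("C[0] = %.1f (expected %d)\n", dC[0], demo_K);
  }
  MPI_Finalize();
  return 0;
}
#endif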