#include "mat_mul.h"

#include <cstdio>
#include <cstdlib>
#include <mpi.h>

#include "util.h"

static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;

static int min(int x, int y) {
  return x < y ? x : y;
}

static MPI_Status status;

// Tile sizes for the blocked loops in mat_mul_omp().
#define ITILESIZE (32)
#define JTILESIZE (1024)
#define KTILESIZE (1024)
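
// Rough per-tile working set implied by these sizes (plain arithmetic, not a
// tuning claim): an A tile is ITILESIZE x KTILESIZE floats
// (32 * 1024 * 4 B = 128 KiB), a B tile is KTILESIZE x JTILESIZE floats
// (1024 * 1024 * 4 B = 4 MiB), and a C tile is ITILESIZE x JTILESIZE floats
// (32 * 1024 * 4 B = 128 KiB).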

// Blocked (tiled) matrix multiplication over the local row block, run with
// num_threads OpenMP threads per node. Inside a tile, the k-i-j loop order
// keeps A[i * K + k] in a register and streams through the rows of B and C
// with unit stride.
static void mat_mul_omp() {
  #pragma omp parallel for num_threads(num_threads)
  for (int ii = 0; ii < M; ii += ITILESIZE) {
    for (int jj = 0; jj < N; jj += JTILESIZE) {
      for (int kk = 0; kk < K; kk += KTILESIZE) {

        for (int k = kk; k < min(K, kk + KTILESIZE); k++) {
          for (int i = ii; i < min(M, ii + ITILESIZE); i++) {
            float ar = A[i * K + k];
            for (int j = jj; j < min(N, jj + JTILESIZE); j++) {
              C[i * N + j] += ar * B[k * N + j];
            }
          }
        }

      }
    }
  }
}

void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;

  // Multi-node strategy: the rows of A (and the matching rows of C) are
  // split across the MPI ranks, every rank gets a full copy of B, and each
  // rank runs the OpenMP kernel on its own block. Non-root ranks allocate
  // and initialize their own A, B, C buffers below.

  int ave_row = M / mpi_world_size;    // rows handled by each worker
  int extra_row = M % mpi_world_size;  // leftover rows, kept by rank 0
  int offset;                          // first row of the block being exchanged
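
  /*
   * Row-split example (plain arithmetic): with M = 1000 and 4 ranks,
   * ave_row = 250 and extra_row = 0, so every rank multiplies 250 rows.
   * With M = 1002 and 4 ranks, ave_row = 250 and extra_row = 2: rank 0
   * keeps rows [0, 252) and ranks 1..3 each receive 250 rows, starting at
   * row offsets 252, 502 and 752.
   */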

  /* Send matrix data to the worker tasks. */
  if (mpi_rank == 0) {

    offset = ave_row + extra_row;  // rank 0 keeps the first (largest) block
    M = offset;                    // the local kernel now sees only these rows

    int nreq = 0;
    MPI_Request *reqs = NULL;

    if (ave_row) {
      reqs = (MPI_Request *)malloc(sizeof(MPI_Request) * 2 * (mpi_world_size - 1));
      for (int node = 1; node < mpi_world_size; node++) {
        // The row offset is sent with a blocking call because its buffer is
        // overwritten on the next iteration; the large A slice and B go out
        // asynchronously so rank 0 can start computing while they drain.
        MPI_Send(&offset, 1, MPI_INT, node, 1, MPI_COMM_WORLD);
        MPI_Isend(&A[offset * K], ave_row * K, MPI_FLOAT, node, 1,
                  MPI_COMM_WORLD, &reqs[nreq++]);
        MPI_Isend(B, K * N, MPI_FLOAT, node, 1, MPI_COMM_WORLD, &reqs[nreq++]);

        offset = offset + ave_row;
      }
    }

    mat_mul_omp();

    // Complete the outstanding slice sends; A and B are only read here, so
    // waiting after the local computation is safe.
    if (reqs) {
      MPI_Waitall(nreq, reqs, MPI_STATUSES_IGNORE);
      free(reqs);
    }

  } else {

    /* Worker: receive a row block and B, multiply, and return the result to
       the master node. */
    if (ave_row) {

      alloc_mat(&A, ave_row, K);
      alloc_mat(&B, K, N);
      alloc_mat(&C, ave_row, N);
      zero_mat(C, ave_row, N);

      MPI_Recv(&offset, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
      MPI_Recv(A, ave_row * K, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
      MPI_Recv(B, K * N, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);

      M = ave_row;
      mat_mul_omp();

      // Blocking sends: offset lives on this stack frame and the worker has
      // nothing left to overlap with, so non-blocking sends buy nothing here.
      MPI_Send(&offset, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
      MPI_Send(C, ave_row * N, MPI_FLOAT, 0, 2, MPI_COMM_WORLD);
    }
  }

  /* wait for results from all worker tasks */
  if (mpi_rank == 0) {
    if (ave_row) {
      for (int i = 1; i < mpi_world_size; i++) {
        MPI_Recv(&offset, 1, MPI_INT, i, 2, MPI_COMM_WORLD, &status);
        MPI_Recv(&C[offset * N], ave_row * N, MPI_FLOAT, i, 2,
                 MPI_COMM_WORLD, &status);
      }
    }
  }
}
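
/*
 * Illustrative alternative (hedged sketch, excluded from the build): the same
 * row distribution can be expressed with MPI collectives instead of the
 * point-to-point sends above. The sketch reuses this file's globals and
 * util.h's alloc_mat / zero_mat; it is not the routine used by mat_mul above
 * and it has not been benchmarked.
 */
#if 0
static void mat_mul_collectives() {
  int rows = M / mpi_world_size + (mpi_rank == 0 ? M % mpi_world_size : 0);

  // Element counts and displacements of each rank's row block in A and C.
  int *cntA = (int *)malloc(sizeof(int) * mpi_world_size);
  int *dspA = (int *)malloc(sizeof(int) * mpi_world_size);
  int *cntC = (int *)malloc(sizeof(int) * mpi_world_size);
  int *dspC = (int *)malloc(sizeof(int) * mpi_world_size);
  for (int r = 0, row0 = 0; r < mpi_world_size; r++) {
    int rr = M / mpi_world_size + (r == 0 ? M % mpi_world_size : 0);
    cntA[r] = rr * K; dspA[r] = row0 * K;
    cntC[r] = rr * N; dspC[r] = row0 * N;
    row0 += rr;
  }

  // Non-root ranks hold only their own row blocks of A and C, plus all of B.
  if (mpi_rank != 0) {
    alloc_mat(&A, rows, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, rows, N);
    zero_mat(C, rows, N);
  }

  // Every rank needs the whole of B; each rank needs only its rows of A.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);
  MPI_Scatterv(A, cntA, dspA, MPI_FLOAT,
               mpi_rank == 0 ? MPI_IN_PLACE : (void *)A, rows * K, MPI_FLOAT,
               0, MPI_COMM_WORLD);

  M = rows;  // the tiled kernel only sees the local rows
  mat_mul_omp();

  // Gather the C row blocks onto rank 0; rank 0's block is already in place.
  MPI_Gatherv(mpi_rank == 0 ? MPI_IN_PLACE : (void *)C, rows * N, MPI_FLOAT,
              C, cntC, dspC, MPI_FLOAT, 0, MPI_COMM_WORLD);

  free(cntA); free(dspA); free(cntC); free(dspC);
}
#endif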