#include "mat_mul.h"
#include "util.h"

#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <omp.h>
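
// Message tags for the scatter (TAG0) and gather (TAG1) phases, and tile
// sizes for the blocked i/j/k loops in mat_mul_omp().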
#define TAG0 0
#define TAG1 1
#define TS_I 32
#define TS_J 1024
#define TS_K 32
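
// Per-process state shared by mat_mul() and mat_mul_omp(); non-root ranks
// point A, B, C at buffers they allocate themselves inside mat_mul().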
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;

static int MIN(int a, int b) {
  return (a > b) ? b : a;
}
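
// Multiply the first `row` rows of A by B, accumulating into C. The i/j/k
// loops are tiled with TS_I x TS_J x TS_K blocks; the outer tile loop is
// parallelized with OpenMP.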
static void mat_mul_omp(int row) {
  int i, j, k;
  int ii, jj, kk;

  omp_set_num_threads(num_threads);
  // The inner indices must be private to each thread; only the outer tile
  // loop variable ii is made private automatically by the parallel for.
#pragma omp parallel for private(i, j, k, jj, kk)
  for (ii = 0; ii < row; ii += TS_I) {
    for (jj = 0; jj < N; jj += TS_J) {
      for (kk = 0; kk < K; kk += TS_K) {
        for (k = kk; k < MIN(K, kk + TS_K); k++) {
          for (i = ii; i < MIN(row, ii + TS_I); i++) {
            float ar = A[i * K + k];
            for (j = jj; j < MIN(N, jj + TS_J); j++) {
              C[i * N + j] += ar * B[k * N + j];
            }
          }
        }
      }
    }
  }
}
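
// Called collectively on every MPI rank. Only rank 0's _A/_B/_C are used;
// the other ranks allocate local buffers and receive their inputs over MPI.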
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;

  MPI_Status status;
  MPI_Request request;

  int node;
  int chunk_rows;
  int offset;
  int row_size;
  int start_ptr;
  int end_ptr;

  // Rank 0 scatters row blocks of A and a full copy of B, every rank
  // multiplies its own block, and rank 0 gathers the C blocks back.
  // Non-root processes allocate & initialize their own A, B, C below.
  if (mpi_rank == 0) {
    row_size = M / mpi_world_size;

    // Scatter: send each worker its row offset, its row count, the matching
    // slice of A, and a full copy of B. The last worker also takes the
    // remainder rows when M is not divisible by mpi_world_size.
    for (node = 1; node < mpi_world_size; node++) {
      start_ptr = offset = node * row_size;
      end_ptr = (node == (mpi_world_size - 1)) ? M : (node + 1) * row_size;
      chunk_rows = end_ptr - start_ptr;

      MPI_Isend(&offset, 1, MPI_INT, node, TAG0, MPI_COMM_WORLD, &request);
      MPI_Isend(&chunk_rows, 1, MPI_INT, node, TAG0, MPI_COMM_WORLD, &request);

      MPI_Isend(&A[offset * K], chunk_rows * K, MPI_FLOAT, node, TAG0,
                MPI_COMM_WORLD, &request);
      MPI_Isend(B, K * N, MPI_FLOAT, node, TAG0, MPI_COMM_WORLD, &request);
    }

    // Rank 0 computes the first row_size rows itself.
    chunk_rows = row_size;
    mat_mul_omp(chunk_rows);

    // Gather: receive each worker's offset, row count, and block of C.
    for (node = 1; node < mpi_world_size; node++) {
      MPI_Recv(&offset, 1, MPI_INT, node, TAG1, MPI_COMM_WORLD, &status);
      MPI_Recv(&chunk_rows, 1, MPI_INT, node, TAG1, MPI_COMM_WORLD, &status);
      MPI_Recv(&C[offset * N], chunk_rows * N, MPI_FLOAT, node, TAG1,
               MPI_COMM_WORLD, &status);
    }
  } else {
    // Worker ranks: allocate local buffers, receive the assigned rows of A
    // and the full B, compute the block, and send the C block back to rank 0.
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);

    zero_mat(C, M, N);

    MPI_Recv(&offset, 1, MPI_INT, 0, TAG0, MPI_COMM_WORLD, &status);
    MPI_Recv(&chunk_rows, 1, MPI_INT, 0, TAG0, MPI_COMM_WORLD, &status);

    MPI_Recv(A, chunk_rows * K, MPI_FLOAT, 0, TAG0, MPI_COMM_WORLD, &status);
    MPI_Recv(B, K * N, MPI_FLOAT, 0, TAG0, MPI_COMM_WORLD, &status);

    mat_mul_omp(chunk_rows);

    MPI_Isend(&offset, 1, MPI_INT, 0, TAG1, MPI_COMM_WORLD, &request);
    MPI_Isend(&chunk_rows, 1, MPI_INT, 0, TAG1, MPI_COMM_WORLD, &request);
    MPI_Isend(C, chunk_rows * N, MPI_FLOAT, 0, TAG1, MPI_COMM_WORLD, &request);
  }
}
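
/*
 * A minimal driver sketch (hypothetical -- the real main.cpp is not part of
 * this file), assuming util.h provides alloc_mat() and zero_mat() as used
 * above and that every rank calls mat_mul():
 *
 *   #include <mpi.h>
 *
 *   int main(int argc, char **argv) {
 *     MPI_Init(&argc, &argv);
 *     int rank, size;
 *     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *     MPI_Comm_size(MPI_COMM_WORLD, &size);
 *
 *     int M = 4096, N = 4096, K = 4096, num_threads = 16;  // example sizes
 *     float *A = NULL, *B = NULL, *C = NULL;
 *     if (rank == 0) {            // only rank 0 needs initialized inputs
 *       alloc_mat(&A, M, K);
 *       alloc_mat(&B, K, N);
 *       alloc_mat(&C, M, N);
 *       zero_mat(C, M, N);
 *       // ... fill A and B ...
 *     }
 *
 *     mat_mul(A, B, C, M, N, K, num_threads, rank, size);
 *
 *     MPI_Finalize();
 *     return 0;
 *   }
 */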