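// Distributed matrix multiplication C = A * B (row-major, M x K by K x N)
// using MPI across nodes and OpenMP threads within each node. Rank 0 holds
// the full matrices; rows of A (and the matching rows of C) are split across
// the MPI ranks.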
#include "mat_mul.h"

#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include "util.h"
#include <omp.h>

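// Helper macro and tile sizes used for cache blocking in mat_mul_omp()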
#define MIN(x, y) (((x) < (y)) ? (x) : (y))

#define I_BSIZE 32
#define J_BSIZE 1024
#define K_BSIZE 1024

static float *A, *B, *C;
static int M, N, K;  // M is later overwritten with this rank's local row count
static int num_threads;
static int mpi_rank, mpi_world_size;
static MPI_Status status;

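// Multiply the local M x K block of A by B (K x N) into the local block of C
// using OpenMP threads. The loops are tiled with I_BSIZE/J_BSIZE/K_BSIZE so a
// tile's working set stays cache resident; inside a tile the k-i-j order keeps
// the innermost j loop streaming through contiguous rows of B and C, with
// A[i * K + k] hoisted out of the inner loop.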
static void mat_mul_omp() {
  // Use the thread count handed to mat_mul() rather than a hard-coded value
  #pragma omp parallel for num_threads(num_threads) shared(A, B, C)
  for (int ii = 0; ii < M; ii += I_BSIZE)
    for (int jj = 0; jj < N; jj += J_BSIZE)
      for (int kk = 0; kk < K; kk += K_BSIZE)
        for (int k = kk; k < MIN(K, kk + K_BSIZE); k++)
          for (int i = ii; i < MIN(M, ii + I_BSIZE); i++) {
            float tmp = A[i * K + k];
            for (int j = jj; j < MIN(N, jj + J_BSIZE); j++) {
              C[i * N + j] += tmp * B[k * N + j];
            }
          }
}

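// Entry point, called on every MPI rank. Rank 0 scatters row blocks of A and a
// full copy of B to the other ranks, every rank multiplies its block with
// mat_mul_omp(), and rank 0 gathers the partial blocks of C. The non-root
// ranks allocate their own local buffers rather than using _A, _B, and _C.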
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads;
  mpi_rank = _mpi_rank;
  mpi_world_size = _mpi_world_size;

  // Requests for the asynchronous sends to the worker ranks
  // (two per worker, so the fixed-size array supports up to 8 ranks in total)
  MPI_Request mpi_req[16];

  // Master node: partition the rows of A and distribute the work
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      int rows = _M / mpi_world_size;
      int start = i * rows;
      int end = start + rows;

      // The last node also takes the leftover rows when _M is not evenly divisible
      if (i == mpi_world_size - 1) {
        end = _M;
      }
      M = end - start;

      // Send the row count with a blocking send so M can be safely reused in
      // the next iteration; the blocks of A and B are sent asynchronously.
      // C itself is not sent: each worker zeroes its own block locally.
      MPI_Send(&M, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
      MPI_Isend(&A[start * K], M * K, MPI_FLOAT, i, 1, MPI_COMM_WORLD, &mpi_req[i * 2]);
      MPI_Isend(B, K * N, MPI_FLOAT, i, 1, MPI_COMM_WORLD, &mpi_req[i * 2 + 1]);
    }

    // Block until all asynchronous sends have completed (non-blocking -> blocking)
    if (mpi_world_size > 1)
      MPI_Waitall((mpi_world_size - 1) * 2, &mpi_req[2], MPI_STATUSES_IGNORE);

    // The master computes the first _M / mpi_world_size rows itself
    M = _M / mpi_world_size;
  }
  // Worker nodes: receive the local row count first, then allocate buffers of
  // exactly that size and receive the data
  else {
    MPI_Recv(&M, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);

    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);

    MPI_Recv(A, M * K, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(B, K * N, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
  }

  // Every rank, including the master, multiplies its own block of rows
  mat_mul_omp();

  // Gather the partial results from the workers back into C on the master
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      int rows = _M / mpi_world_size;
      int start = i * rows;

      // Each worker first reports how many rows it computed, then sends that
      // block of C; the element type must be MPI_FLOAT to match the send below
      MPI_Recv(&M, 1, MPI_INT, i, 2, MPI_COMM_WORLD, &status);
      MPI_Recv(&C[start * N], M * N, MPI_FLOAT, i, 2, MPI_COMM_WORLD, &status);
    }
  } else {
    MPI_Send(&M, 1, MPI_INT, 0, 2, MPI_COMM_WORLD);
    MPI_Send(C, M * N, MPI_FLOAT, 0, 2, MPI_COMM_WORLD);
  }
}