#include "mat_mul.h"
|
|
#include "util.h"
|
|
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <mpi.h>
|
|
|
|
#define BLOCKSIZE_I 20
|
|
#define BLOCKSIZE_J 1024
|
|
#define BLOCKSIZE_K 512
|
|
#define FROM_MASTER 1
|
|
#define FROM_SLAVE 2
|
|
|
|
static float *A, *B, *C;
|
|
static int M, N, K;
|
|
static int num_threads;
|
|
static int mpi_rank, mpi_world_size;
|
|
|
|
|
|
// Multiply the first `size_of_M` rows of A by B and accumulate into C.
// Cache-blocked i/j/k tiling; the OpenMP parallel-for over row tiles uses
// num_threads threads per node.
static void mat_mul_omp(int size_of_M) {
#pragma omp parallel for num_threads(num_threads) shared(A, B, C, K, N, size_of_M)
  for (int ii = 0; ii < size_of_M; ii += BLOCKSIZE_I) {
    int i_end = (ii + BLOCKSIZE_I <= size_of_M) ? ii + BLOCKSIZE_I : size_of_M;
    for (int jj = 0; jj < N; jj += BLOCKSIZE_J) {
      int j_end = (jj + BLOCKSIZE_J <= N) ? jj + BLOCKSIZE_J : N;
      for (int kk = 0; kk < K; kk += BLOCKSIZE_K) {
        int k_end = (kk + BLOCKSIZE_K <= K) ? kk + BLOCKSIZE_K : K;
        // k-i-j order within a tile: A[i*K + k] is invariant in the j loop,
        // so it is hoisted into a register and each row of C is updated with
        // a scaled row of B (unit-stride accesses on both B and C).
        for (int k = kk; k < k_end; k++) {
          for (int i = ii; i < i_end; i++) {
            float ar = A[i * K + k];
            for (int j = jj; j < j_end; j++) {
              C[i * N + j] += ar * B[k * N + j];
            }
          }
        }
      }
    }
  }
}

void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
      mpi_world_size = _mpi_world_size;

  MPI_Status status;
  MPI_Request send_req[8];  // two non-blocking sends per worker (at most 4 ranks assumed)

  // Row-block decomposition of A (and C): rank 0 keeps the first divided_M
  // rows, and each of the first modular_M worker ranks takes one extra row.
  int divided_M = M / mpi_world_size;
  int modular_M = M % mpi_world_size;
  int stride_M[4];  // per-rank row counts; fixed size assumes at most 4 ranks
  int offset[4];    // starting row of each worker's block

  // Non-root ranks receive no matrices from the caller, so they allocate
  // local buffers sized for the largest possible row block (divided_M + 1).
  if (mpi_rank != 0) {
    alloc_mat(&A, divided_M + 1, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, divided_M + 1, N);
    zero_mat(C, divided_M + 1, N);
  }

  // Every rank needs all of B, so replicate it with a broadcast.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);

  if (mpi_rank == 0) {
    // Compute each worker's row count (stride_M) and starting row (offset):
    // rank 0 keeps rows [0, divided_M), the workers follow consecutively,
    // and the first modular_M workers take one extra row each.
    for (int i = 1; i < mpi_world_size; i++) {
      stride_M[i] = (i <= modular_M) ? divided_M + 1 : divided_M;
      offset[i] = (i == 1) ? divided_M : offset[i - 1] + stride_M[i - 1];
    }
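
    // Illustrative example (not taken from the code itself): with M = 10 and
    // 4 ranks, divided_M = 2 and modular_M = 2, so rank 0 computes rows 0-1,
    // rank 1 rows 2-4, rank 2 rows 5-7, and rank 3 rows 8-9
    // (stride_M = {3, 3, 2} and offset = {2, 5, 8} for ranks 1-3).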

    // Send each worker its row count and its block of A without blocking, so
    // the root can start multiplying its own block right away.
    int num_reqs = 0;
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Isend(&stride_M[i], 1, MPI_INT, i, FROM_MASTER, MPI_COMM_WORLD,
                &send_req[num_reqs++]);
      MPI_Isend(&A[offset[i] * K], stride_M[i] * K, MPI_FLOAT, i, FROM_MASTER,
                MPI_COMM_WORLD, &send_req[num_reqs++]);
    }

    // Root computes its own rows while the sends are in flight.
    mat_mul_omp(divided_M);

    // Complete the outstanding sends before gathering results.
    MPI_Waitall(num_reqs, send_req, MPI_STATUSES_IGNORE);

    // Gather each worker's block of C back into the full result matrix.
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Recv(&stride_M[i], 1, MPI_INT, i, FROM_SLAVE, MPI_COMM_WORLD, &status);
      MPI_Recv(&C[offset[i] * N], stride_M[i] * N, MPI_FLOAT, i, FROM_SLAVE,
               MPI_COMM_WORLD, &status);
    }
  }

  // Worker ranks: receive the assigned rows of A, multiply them by the
  // broadcast B, and send the resulting rows of C back to the root.
  if (mpi_rank != 0) {
    int stride;  // number of rows assigned to this rank
    MPI_Recv(&stride, 1, MPI_INT, 0, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(A, stride * K, MPI_FLOAT, 0, FROM_MASTER, MPI_COMM_WORLD, &status);
    mat_mul_omp(stride);
    MPI_Send(&stride, 1, MPI_INT, 0, FROM_SLAVE, MPI_COMM_WORLD);
    MPI_Send(C, stride * N, MPI_FLOAT, 0, FROM_SLAVE, MPI_COMM_WORLD);
  }
}
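
// Usage sketch (illustrative only, not part of this file): a driver would
// typically initialize MPI, build the matrices on rank 0 with the util.h
// helpers, and call mat_mul() on every rank. `rand_mat` is an assumed
// initializer alongside the alloc_mat/zero_mat helpers used above;
// substitute whatever the harness actually provides.
//
//   int main(int argc, char **argv) {
//     MPI_Init(&argc, &argv);
//     int rank, size;
//     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
//     MPI_Comm_size(MPI_COMM_WORLD, &size);
//     int M = 4096, N = 4096, K = 4096, num_threads = 16;
//     float *A = nullptr, *B = nullptr, *C = nullptr;
//     if (rank == 0) {              // only the root holds the full matrices
//       alloc_mat(&A, M, K); rand_mat(A, M, K);
//       alloc_mat(&B, K, N); rand_mat(B, K, N);
//       alloc_mat(&C, M, N); zero_mat(C, M, N);
//     }
//     mat_mul(A, B, C, M, N, K, num_threads, rank, size);
//     MPI_Finalize();
//     return 0;
//   }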