#include "mat_mul.h"

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <vector>

#include <mpi.h>
#include <omp.h>
#include <pthread.h>

#include "util.h"

// Cache-blocking factor for the k dimension of the inner matmul loop.
#define BLOCKSIZE 30

// MPI rank and message-tag conventions.
#define MASTER 0
#define FROM_MASTER 1
#define FROM_SLAVE 2
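// Hybrid MPI + OpenMP SGEMM: the master rank partitions the rows of A
// across ranks, every rank multiplies its row block by the full B with an
// OpenMP-parallel, cache-blocked kernel, and the master gathers the
// resulting row blocks of C.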
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
static int rows, offset; // row count and starting row of this rank's block
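// Multiplies the local `rows` x K block of A by the K x N matrix B into C.
// The k loop is tiled by BLOCKSIZE so the BLOCKSIZE rows of B touched by one
// tile stay in cache while the whole row block of A is swept; the innermost
// j loop is a unit-stride update of a row of C, which compilers can vectorize.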
static void mat_mul_omp()
{
#pragma omp parallel num_threads(num_threads)
  {
    // Each thread owns a contiguous range of rows; the last thread absorbs
    // the remainder when rows % num_threads != 0.
    int pid = omp_get_thread_num();
    int slice = rows / num_threads;
    int start = pid * slice;
    int end = pid == num_threads - 1 ? rows : (pid + 1) * slice;

    float Aik;
    int bs = BLOCKSIZE;

    for (int kk = 0; kk < K; kk += bs) {
      for (int i = start; i < end; ++i) {
        for (int k = kk; k < std::min(kk + bs, K); ++k) {
          Aik = A[i * K + k];
          for (int j = 0; j < N; ++j) {
            C[i * N + j] += Aik * B[k * N + j];
          }
        }
      }
    }
  }
}
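// Because the per-thread row ranges are disjoint, no two threads ever write
// the same element of C, so the kernel needs no atomics or critical
// sections. It accumulates into C, so C must be zeroed before the first call.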
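// Entry point, called on every rank. Only the master rank is assumed to hold
// valid A, B, and an already-zeroed C on entry; worker ranks allocate their
// own buffers below (alloc_mat and zero_mat come from util.h).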
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size)
{
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  MPI_Status status;
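  // Row partition: rank d (d >= 1) gets rows [d * row_size, (d + 1) * row_size),
  // the last rank also absorbs the remainder, and the master keeps rows
  // [0, row_size). E.g. with M = 10 and 4 ranks, row_size = 2 and the blocks
  // are rows 0-1 (master), 2-3, 4-5, and 6-9.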
  // MASTER node
  if (mpi_rank == MASTER)
  {
    int row_size = M / mpi_world_size;
    int st, ed;
    offset = 0;

    // Scatter the row blocks. The two control ints go out with blocking
    // MPI_Send, because `offset` and `rows` are overwritten on the next
    // iteration and must not still be in flight. The large, read-only A and
    // B buffers use MPI_Isend so the transfers can overlap with the master's
    // own compute; the requests are completed afterwards.
    std::vector<MPI_Request> requests;
    for (int dest = 1; dest < mpi_world_size; dest++)
    {
      st = offset = dest * row_size;
      ed = (dest == mpi_world_size - 1) ? M : (dest + 1) * row_size;
      rows = ed - st;
      MPI_Send(&offset, 1, MPI_INT, dest, FROM_MASTER, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, dest, FROM_MASTER, MPI_COMM_WORLD);
      MPI_Request req;
      MPI_Isend(&A[offset * K], rows * K, MPI_FLOAT, dest, FROM_MASTER, MPI_COMM_WORLD, &req);
      requests.push_back(req);
      MPI_Isend(B, K * N, MPI_FLOAT, dest, FROM_MASTER, MPI_COMM_WORLD, &req);
      requests.push_back(req);
    }

    // Compute the master's own block, rows [0, row_size), while the
    // non-blocking sends drain.
    rows = row_size;
    mat_mul_omp();

    if (!requests.empty())
      MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);

    // Gather the finished C blocks back into place.
    for (int dest = 1; dest < mpi_world_size; dest++)
    {
      MPI_Recv(&offset, 1, MPI_INT, dest, FROM_SLAVE, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, dest, FROM_SLAVE, MPI_COMM_WORLD, &status);
      MPI_Recv(&C[offset * N], rows * N, MPI_FLOAT, dest, FROM_SLAVE, MPI_COMM_WORLD, &status);
    }
  }
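  // Slave nodes mirror the master's send sequence: receive the row range,
  // the block of A, and all of B; run the same OpenMP kernel; then send the
  // finished rows of C back.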
  else
  {
    // Buffers are allocated at the full M x K / M x N size for simplicity;
    // only the first rows * K (resp. rows * N) elements are actually used,
    // with the received slice of A landing at the front of the buffer.
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);

    MPI_Recv(&offset, 1, MPI_INT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(&rows, 1, MPI_INT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(A, rows * K, MPI_FLOAT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(B, K * N, MPI_FLOAT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);

    mat_mul_omp();

    // Blocking sends are safe and simple here: there is no local work left
    // to overlap with, and they guarantee the buffers have been handed off
    // before the function returns.
    MPI_Send(&offset, 1, MPI_INT, MASTER, FROM_SLAVE, MPI_COMM_WORLD);
    MPI_Send(&rows, 1, MPI_INT, MASTER, FROM_SLAVE, MPI_COMM_WORLD);
    MPI_Send(C, rows * N, MPI_FLOAT, MASTER, FROM_SLAVE, MPI_COMM_WORLD);
  }
}