#include "mat_mul.h"
|
|
#include "util.h"
|
|
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <immintrin.h>
|
|
#include <mpi.h>
|
|
|
|
static float *A, *B, *C;
|
|
static int M, N, K;
|
|
static int num_threads;
|
|
static int mpi_rank, mpi_world_size;
|
|
|
|
int min(int a, int b) { return a < b ? a : b; }
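
// Cache-blocking tile sizes for the i-k-j loop nest in mat_mul_omp.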
#define ITILESIZE (64)
#define JTILESIZE (512)
#define KTILESIZE (16)
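
// Compute C[is:ie, :] += A[is:ie, :] * B with a tiled triple loop.
// The outer loop over row tiles is split across OpenMP threads; the i-k-j
// inner order keeps the accesses to the rows of B and C contiguous.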
void mat_mul_omp(int is, int ie) {
#pragma omp parallel for num_threads(num_threads) schedule(static)
  for (int ii = is; ii < ie; ii += ITILESIZE) {
    for (int kk = 0; kk < K; kk += KTILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        for (int i = ii; i < min(ie, ii + ITILESIZE); i++) {
          for (int k = kk; k < min(K, kk + KTILESIZE); k++) {
            for (int j = jj; j < min(N, jj + JTILESIZE); j++) {
              C[i * N + j] += A[i * K + k] * B[k * N + j];
            }
          }
        }
      }
    }
  }
}
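
// Entry point: distributes block rows of A/C across MPI ranks, runs the
// OpenMP kernel on each rank's block, and collects the result on rank 0.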
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
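
  // Non-root ranks allocate their own working buffers; the caller only passes
  // valid A/B/C pointers on rank 0. The kernel accumulates into C, so C is
  // assumed to be zero-initialized (by alloc_mat or by the caller).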
  if (mpi_rank != 0) {
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
  }

  // Compute the row partition [is[i], ie[i]) redundantly on every process;
  // the last rank takes the remainder rows when M is not evenly divisible.
  int is[mpi_world_size], ie[mpi_world_size];
  for (int i = 0; i < mpi_world_size; i++) {
    is[i] = M / mpi_world_size * i;
    ie[i] = M / mpi_world_size * (i + 1);
  }
  ie[mpi_world_size - 1] = M;

  // Scatter A: rank 0 sends every other rank its block of rows; each receiver
  // stores the block at the same row offset in its local A buffer.
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Send(A + is[i] * K, (ie[i] - is[i]) * K, MPI_FLOAT, i, 0,
               MPI_COMM_WORLD);
    }
  } else {
    MPI_Recv(A + is[mpi_rank] * K, (ie[mpi_rank] - is[mpi_rank]) * K, MPI_FLOAT,
             0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }

  // Broadcast B: every rank needs all of B to compute its rows of C.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);
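
  // Each rank computes its own block of rows of C with the OpenMP kernel.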
  mat_mul_omp(is[mpi_rank], ie[mpi_rank]);

  // Gather C: rank 0 receives each rank's block of rows back into C.
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Recv(C + is[i] * N, (ie[i] - is[i]) * N, MPI_FLOAT, i, 0,
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
  } else {
    MPI_Send(C + is[mpi_rank] * N, (ie[mpi_rank] - is[mpi_rank]) * N, MPI_FLOAT,
             0, 0, MPI_COMM_WORLD);
  }
}
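
// Usage sketch (an assumption about the driver, not part of this file):
// the caller initializes MPI, allocates A, B, and C on rank 0 (e.g. with
// alloc_mat from util.h), zeroes C, and then invokes
//
//   mat_mul(A, B, C, M, N, K, num_threads, mpi_rank, mpi_world_size);
//
// on every rank, after which rank 0 holds C = A * B.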