#include "mat_mul.h"
|
|
#include "util.h"
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <mpi.h>
|
|
#include <immintrin.h>
|
|
#define MASTER 0
|
|
#define FROM_MASTER 1
|
|
#define FROM_WORKER 2
|
|
#define ITILESIZE (32)
|
|
#define JTILESIZE (1024)
|
|
#define KTILESIZE (1024)
|
|
|
|
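
// ITILESIZE/JTILESIZE/KTILESIZE control the loop tiling in mat_mul_omp():
// each thread works on an ITILESIZE x KTILESIZE block of A against a
// KTILESIZE x JTILESIZE block of B so that the data touched by the inner
// loops stays cache-resident. The values above are tuning choices; the best
// sizes depend on the cache hierarchy of the target machine.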

static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
MPI_Status status;
MPI_Request request;

int numworkers,            // number of worker tasks
    source,                // task id of message source
    dest,                  // task id of message destination
    mtype,                 // message type
    rows,                  // rows of matrix A sent to each worker
    averow, extra, offset, // used to determine rows sent to each worker
    i, j, k;

static int min(int x, int y) {
  return x < y ? x : y;
}

static void mat_mul_omp() {
  // Compute C[0:rows, :] += A[0:rows, :] * B with a cache-blocked loop nest,
  // parallelized over row tiles with num_threads OpenMP threads per node.
  int is = 0;
  int ie = rows;

  // Loop counters are declared inside the loops so that every OpenMP thread
  // works on private copies.
  #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
  for (int ii = is; ii < ie; ii += ITILESIZE) {
    for (int jj = 0; jj < N; jj += JTILESIZE) {
      for (int kk = 0; kk < K; kk += KTILESIZE) {
        for (int k = kk; k < min(K, kk + KTILESIZE); k++) {
          for (int i = ii; i < min(ie, ii + ITILESIZE); i++) {
            float ar = A[i * K + k];
            for (int j = jj; j < min(N, jj + JTILESIZE); j++) {
              C[i * N + j] += ar * B[k * N + j];
            }
          }
        }
      }
    }
  }
}
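
// Optional micro-kernel sketch (not called above): the innermost j-loop is a
// SAXPY-style update, C[i][j..] += ar * B[k][j..], which maps directly onto
// the AVX intrinsics pulled in by <immintrin.h>. This is only an illustrative
// sketch; it assumes the build targets AVX2+FMA (e.g. -mavx2 -mfma) and it is
// not wired into mat_mul_omp().
static inline void saxpy_row_avx2(float *c_row, const float *b_row, float ar,
                                  int len) {
  __m256 va = _mm256_set1_ps(ar);
  int j = 0;
  for (; j + 8 <= len; j += 8) {
    __m256 vb = _mm256_loadu_ps(b_row + j);
    __m256 vc = _mm256_loadu_ps(c_row + j);
    _mm256_storeu_ps(c_row + j, _mm256_fmadd_ps(va, vb, vc)); // c += ar * b
  }
  for (; j < len; j++) c_row[j] += ar * b_row[j]; // scalar tail
}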

void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;

  // Multi-node strategy: the rows of A (and of C) are split across MPI ranks,
  // and every rank runs the OpenMP kernel on its own block. Non-root ranks
  // allocate their own A, B, C buffers and receive their data below.
  numworkers = mpi_world_size;
  if (numworkers == 1) {
    // Single process: nothing to distribute, compute everything locally.
    rows = M;
    mat_mul_omp();
  } else {

    // MASTER: send each worker its row offset, its row count, the matching
    // rows of A, and the whole of B.
    if (mpi_rank == MASTER) {
      averow = M / numworkers;
      int os, de;
      extra = M % numworkers;
      offset = 0;
      mtype = FROM_MASTER;
      for (dest = 1; dest < numworkers; dest++) {
        os = offset = dest * averow;
        de = dest == numworkers - 1 ? M : (dest + 1) * averow;
        rows = de - os;
        printf("sending %d rows to task %d offset = %d\n", rows, dest, offset);
        // Blocking sends: offset and rows are overwritten on the next loop
        // iteration, so their buffers must not be left with an in-flight
        // non-blocking send.
        MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
        MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
        MPI_Send(&A[offset * K], rows * K, MPI_FLOAT, dest, mtype, MPI_COMM_WORLD);
        MPI_Send(B, K * N, MPI_FLOAT, dest, mtype, MPI_COMM_WORLD);
      }
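
      // Partitioning example: with M = 100 and 4 MPI ranks, averow = 25: the
      // master keeps rows 0-24 and workers 1, 2, 3 get rows 25-49, 50-74, and
      // 75-99; the last worker also absorbs any remainder when M is not
      // divisible by the number of ranks.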
      rows = averow;

      mat_mul_omp();

      // Receive the computed C blocks back from the worker tasks.
      mtype = FROM_WORKER;
      for (i = 1; i < numworkers; i++) {
        source = i;
        MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
        MPI_Recv(&C[offset * N], rows * N, MPI_FLOAT, source, mtype, MPI_COMM_WORLD, &status);
        printf("Received result from task %d\n", source);
      }
    }

    // WORKER: allocate local buffers, receive the assigned rows of A and all
    // of B, compute the partial product, and send the C block back.
    if (mpi_rank > MASTER) {
      alloc_mat(&A, M, K);
      alloc_mat(&B, K, N);
      alloc_mat(&C, M, N);
      zero_mat(C, M, N);

      mtype = FROM_MASTER;
      MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(A, rows * K, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(B, K * N, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);

      mat_mul_omp();

      mtype = FROM_WORKER;
      MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
      MPI_Send(C, rows * N, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD);
    }
  }
}
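
/* Typical call pattern (illustrative only; the actual driver lives elsewhere
 * in this project, and the exact setup code below is an assumption):
 *
 *   MPI_Init(&argc, &argv);
 *   int rank, size;
 *   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 *   MPI_Comm_size(MPI_COMM_WORLD, &size);
 *   float *A = NULL, *B = NULL, *C = NULL;
 *   if (rank == 0) {              // only the root needs real input buffers;
 *     alloc_mat(&A, M, K);        // non-root ranks allocate their own copies
 *     alloc_mat(&B, K, N);        // inside mat_mul()
 *     alloc_mat(&C, M, N);
 *     zero_mat(C, M, N);
 *     // ... fill A and B with the input matrices ...
 *   }
 *   mat_mul(A, B, C, M, N, K, num_threads, rank, size);
 *   MPI_Finalize();
 */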