#include "mat_mul.h"
|
|
#include "util.h"
|
|
#include <algorithm>
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <mpi.h>
|
|
|
|
static float *A, *B, *C;
|
|
static int M, N, K;
|
|
// M: number of rows in matrix A
|
|
// K: number of columns in matrix A
|
|
// N: number of columns in matrix B
|
|
|
|
static int num_threads;
|
|
static int mpi_rank; // specific number of a process to be used for MPI
|
|
static int mpi_world_size; // number of process to be used for MPI
|
|
static int rows[4] = {0,}; // number of rows to be allocated for each process
|
|
static int offset[4] = {0,};
|
|
|
|
#define MASTER 0 // mpi_rank of first task
|
|
#define FROM_MASTER 1 // setting a message type
|
|
#define FROM_WORKER 2 // setting a message type
|
|
|
|
using namespace std;
|
|
|
|
#define ITILESIZE (32)
|
|
#define JTILESIZE (512)
|
|
#define KTILESIZE (32)
|
|
|
|
//#define BLOCKSIZE 64
|
|
|
|
static void mat_mul_omp() {

  // TODO: parallelize & optimize matrix multiplication
  // Each process multiplies only its own block of rows of A; `end` is the
  // number of rows assigned to this rank.
  int end = rows[mpi_rank];

#if 0
  // Alternative 1 (disabled): blocking along k with manual 8-way unrolling over j.
  int block_s = KTILESIZE;  // blocking factor along the k dimension
  float A_buf;              // per-thread copy of A[i][k]

  for (int kk = 0; kk < K; kk += block_s) {
#pragma omp parallel for num_threads(num_threads) schedule(dynamic) \
    default(none) private(A_buf) shared(A, B, C, M, end, kk, N, K, block_s)
    for (int i = 0; i < end; ++i) {
      for (int k = kk; k < min(kk + block_s, K); ++k) {
        A_buf = A[i * K + k];

        // loop unrolling: update 8 columns of C per iteration
        int N_8 = (N >> 3) << 3;
        int j = 0;

        for (; j < N_8; j += 8) {
          C[i * N + j]     += A_buf * B[k * N + j];
          C[i * N + j + 1] += A_buf * B[k * N + j + 1];
          C[i * N + j + 2] += A_buf * B[k * N + j + 2];
          C[i * N + j + 3] += A_buf * B[k * N + j + 3];
          C[i * N + j + 4] += A_buf * B[k * N + j + 4];
          C[i * N + j + 5] += A_buf * B[k * N + j + 5];
          C[i * N + j + 6] += A_buf * B[k * N + j + 6];
          C[i * N + j + 7] += A_buf * B[k * N + j + 7];
        }

        // remainder of the unrolled loop
        for (; j < N; ++j) {
          C[i * N + j] += A_buf * B[k * N + j];
        }
      }
    }
  }
#endif
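
  // Active variant below: three-level loop tiling with the i-k-j loop order.
  // With row-major storage, B and C are traversed contiguously along j, and
  // A[i*K + k] is reused across the whole j tile. The ITILESIZE/JTILESIZE/
  // KTILESIZE values defined at the top of the file were presumably tuned
  // for the cache hierarchy of the target machine.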

#if 1
#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
  for (int ii = 0; ii < end; ii += ITILESIZE) {
    for (int kk = 0; kk < K; kk += KTILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        // clamp each tile to the matrix boundaries
        int end_k = kk + KTILESIZE < K ? (kk + KTILESIZE) : K;
        int end_m = ii + ITILESIZE < end ? (ii + ITILESIZE) : end;
        int end_n = jj + JTILESIZE < N ? (jj + JTILESIZE) : N;

        for (int i = ii; i < end_m; ++i) {
          for (int k = kk; k < end_k; ++k) {
            for (int j = jj; j < end_n; ++j) {
              C[i * N + j] += A[i * K + k] * B[k * N + j];
            }
          }
        }
      }
    }
  }
#endif

#if 0
  // Alternative 2 (disabled): same tiling as above, with the tile bounds
  // declared inside the loop nest so that every thread has private copies.
#pragma omp parallel for num_threads(num_threads) \
    default(none) shared(A, B, C, end, K, N)
  for (int ii = 0; ii < end; ii += ITILESIZE) {
    for (int kk = 0; kk < K; kk += KTILESIZE) {
      for (int jj = 0; jj < N; jj += JTILESIZE) {
        int end_k = kk + KTILESIZE < K ? (kk + KTILESIZE) : K;
        int end_m = ii + ITILESIZE < end ? (ii + ITILESIZE) : end;
        int end_n = jj + JTILESIZE < N ? (jj + JTILESIZE) : N;

        for (int i = ii; i < end_m; ++i) {
          for (int k = kk; k < end_k; ++k) {
            for (int j = jj; j < end_n; ++j) {
              C[i * N + j] += A[i * K + k] * B[k * N + j];
            }
          }
        }
      }
    }
  }
#endif
}
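
/*
 * mat_mul() distributes the work across MPI processes:
 *   1. every process computes the same row partition (rows[] / offset[]),
 *   2. the master sends each worker its rows of A and a full copy of B,
 *   3. every process runs mat_mul_omp() on its own rows,
 *   4. the master gathers the partial C blocks back from the workers.
 */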
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {

  // TODO: parallelize & optimize matrix multiplication on multi-node
  // You must allocate & initialize A, B, C for non-root processes

  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads;
  mpi_rank = _mpi_rank;
  mpi_world_size = _mpi_world_size;

  MPI_Request request;
  MPI_Status status;

  // Row partitioning, computed identically on every process.
  int row_size = M / mpi_world_size;
  offset[0] = 0;
  rows[0] = row_size;

  for (int i = 1; i < mpi_world_size; i++) {
    // first row and number of rows assigned to process i;
    // the last process also takes any remainder rows
    offset[i] = i * row_size;
    rows[i] = (i == (mpi_world_size - 1)) ? (M - offset[i]) : row_size;
  }
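
  // Example of the partition (hypothetical sizes): with M = 10 and
  // mpi_world_size = 4, row_size = 10 / 4 = 2, so
  //   offset[] = {0, 2, 4, 6} and rows[] = {2, 2, 2, 4};
  // the last rank absorbs the remainder rows.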

  /**************************** Transmitting phase (master side) ************************************/
  // Master and workers reach this point together: the master posts the sends
  // below while each worker posts the matching receives in the else branch.

  //MPI_Bcast(B, K*N, MPI_FLOAT, 0, MPI_COMM_WORLD);
  if (mpi_rank == MASTER) {
    /* Send matrix data to the workers */
    // dest = rank of the destination worker process
    for (int dest = 1; dest < mpi_world_size; dest++) {
      // non-blocking sends: the master does not wait for one worker's
      // transfer to finish before starting the next one
      MPI_Isend(&A[offset[dest]*K], rows[dest]*K, MPI_FLOAT, dest, FROM_MASTER, MPI_COMM_WORLD, &request);
      MPI_Request_free(&request);  // fire-and-forget: A is not modified afterwards
      MPI_Isend(B, K*N, MPI_FLOAT, dest, FROM_MASTER, MPI_COMM_WORLD, &request);
      MPI_Request_free(&request);  // likewise for B
    }
  }
  else {
    // Worker nodes: the _A/_B/_C pointers are only meaningful on the root,
    // so allocate local buffers for this rank's rows of A, all of B, and
    // this rank's rows of C.
    alloc_mat(&A, rows[mpi_rank], K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, rows[mpi_rank], N);
    // C is accumulated into (+=), so it must start from zero.
    zero_mat(C, rows[mpi_rank], N);

    MPI_Recv(A, rows[mpi_rank]*K, MPI_FLOAT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(B, K*N, MPI_FLOAT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
  }

  mat_mul_omp();

  /**************************** Receiving phase (master side) ************************************/
  // Again this phase runs concurrently on both sides: each worker sends its
  // block of C while the master collects the blocks in rank order.

  /* Receive the multiply-accumulate (MAC) results from the workers */
  // src = rank of the source worker process
  if (mpi_rank == MASTER) {
    for (int src = 1; src < mpi_world_size; src++) {
      MPI_Recv(&C[offset[src]*N], rows[src]*N, MPI_FLOAT, src, FROM_WORKER, MPI_COMM_WORLD, &status);
    }
  }
  else {
    MPI_Isend(C, rows[mpi_rank]*N, MPI_FLOAT, MASTER, FROM_WORKER, MPI_COMM_WORLD, &request);
    // Complete the send before returning so the C buffer stays valid while in flight.
    MPI_Wait(&request, &status);
  }
}
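
/*
 * Minimal usage sketch (not compiled; for illustration only): how a driver
 * might call mat_mul() on every rank. It assumes the alloc_mat()/zero_mat()
 * helpers from util.h used above; the matrix sizes are placeholders, and the
 * rows[]/offset[] arrays above limit the job to at most 4 MPI processes.
 */
#if 0
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int M = 1024, N = 1024, K = 1024, num_threads = 16;
  float *A = NULL, *B = NULL, *C = NULL;
  if (rank == 0) {
    // Only the root owns the full matrices; workers allocate inside mat_mul().
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);
    // ... fill A and B with input data here ...
  }

  mat_mul(A, B, C, M, N, K, num_threads, rank, size);

  MPI_Finalize();
  return 0;
}
#endif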