#include "mat_mul.h"
|
|
|
|
#include <cstdio>
|
|
#include <cstdlib>
|
|
#include <mpi.h>
|
|
#include <omp.h>
|
|
#include <immintrin.h>
|
|
#include "util.h"
|
|
|
|
#define BLOCK_SIZE (45)
|
|
#define UNROLL_SIZE (8)
|
|
#define MAX_NODE (8)
|
|
#define MIN(a,b) (((a) < (b)) ? (a) : (b))

#define MATRIX_SEND_DATA_MSG_ID 1000
#define MATRIX_SEND_RESULT_MSG_ID 1001

#define ITILESIZE (32)
#define JTILESIZE (1024)
#define KTILESIZE (1024)

#define likely(x) __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)

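// Per-rank problem state shared by the kernels below. On non-root ranks,
// mat_mul() allocates local buffers: A and C hold only this rank's rows,
// while B holds the full K x N matrix.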
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;

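// mat_mul_omp: blocked scalar fallback. The K dimension is processed in
// BLOCK_SIZE chunks so the active rows of B are reused from cache, and the
// i-loop over rows [startM, endM) is split statically across OpenMP threads.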
static void mat_mul_omp(int startM, int endM) {
  int bs = BLOCK_SIZE;

  for (int kk = 0; kk < K; kk += bs) {
    // k, j, and Aik are declared inside the parallel loop so each thread
    // gets private copies (they were shared, and thus racy, when declared
    // outside the parallel region).
    #pragma omp parallel for schedule(static) num_threads(num_threads)
    for (int i = startM; i < endM; ++i) {
      for (int k = kk; k < MIN(kk + bs, K); ++k) {
        float Aik = A[i * K + k];
        for (int j = 0; j < N; j++) {
          C[i * N + j] += Aik * B[k * N + j];
        }
      }
    }
  }
}

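// mat_mul_omp32: AVX2/FMA kernel with ITILESIZE x JTILESIZE x KTILESIZE
// (32 x 1024 x 1024) tiling. For each row, eight broadcast elements of A are
// combined with two 8-wide strips of B to update 16 consecutive floats of C
// via _mm256_fmadd_ps. There is no remainder handling, so the caller only
// dispatches here when K and N are multiples of 1024 and the local row count
// is a multiple of 32; the aligned _mm256_load_ps/_mm256_store_ps calls
// additionally assume alloc_mat() returns 32-byte-aligned buffers (switch to
// the unaligned variants if that assumption does not hold).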
static void mat_mul_omp32(int startM, int endM) {
  #pragma omp parallel for schedule(static) num_threads(num_threads)
  for (int i = startM; i < endM; i += ITILESIZE) {
    for (int j = 0; j < N; j += JTILESIZE) {
      for (int k = 0; k < K; k += KTILESIZE) {

        for (int kk = k; kk < k + KTILESIZE; kk += 8) {
          for (int ii = i; ii < i + ITILESIZE; ii++) {
            __m256 a0 = _mm256_set1_ps(A[(ii+0)*K+(kk+0)]);
            __m256 a1 = _mm256_set1_ps(A[(ii+0)*K+(kk+1)]);
            __m256 a2 = _mm256_set1_ps(A[(ii+0)*K+(kk+2)]);
            __m256 a3 = _mm256_set1_ps(A[(ii+0)*K+(kk+3)]);
            __m256 a4 = _mm256_set1_ps(A[(ii+0)*K+(kk+4)]);
            __m256 a5 = _mm256_set1_ps(A[(ii+0)*K+(kk+5)]);
            __m256 a6 = _mm256_set1_ps(A[(ii+0)*K+(kk+6)]);
            __m256 a7 = _mm256_set1_ps(A[(ii+0)*K+(kk+7)]);

            for (int jj = j; jj < j + JTILESIZE; jj += 16) {
              __m256 c0 = _mm256_load_ps(&C[(ii+0) * N + jj]);

              __m256 b0 = _mm256_load_ps(&B[(kk+0) * N + jj]);
              __m256 b1 = _mm256_load_ps(&B[(kk+1) * N + jj]);
              __m256 b2 = _mm256_load_ps(&B[(kk+2) * N + jj]);
              __m256 b3 = _mm256_load_ps(&B[(kk+3) * N + jj]);
              __m256 b4 = _mm256_load_ps(&B[(kk+4) * N + jj]);
              __m256 b5 = _mm256_load_ps(&B[(kk+5) * N + jj]);
              __m256 b6 = _mm256_load_ps(&B[(kk+6) * N + jj]);
              __m256 b7 = _mm256_load_ps(&B[(kk+7) * N + jj]);

              c0 = _mm256_fmadd_ps(a0, b0, c0);
              c0 = _mm256_fmadd_ps(a1, b1, c0);
              c0 = _mm256_fmadd_ps(a2, b2, c0);
              c0 = _mm256_fmadd_ps(a3, b3, c0);
              c0 = _mm256_fmadd_ps(a4, b4, c0);
              c0 = _mm256_fmadd_ps(a5, b5, c0);
              c0 = _mm256_fmadd_ps(a6, b6, c0);
              c0 = _mm256_fmadd_ps(a7, b7, c0);

              __m256 d0 = _mm256_load_ps(&C[(ii+0) * N + jj+8]);

              __m256 e0 = _mm256_load_ps(&B[(kk+0) * N + jj+8]);
              __m256 e1 = _mm256_load_ps(&B[(kk+1) * N + jj+8]);
              __m256 e2 = _mm256_load_ps(&B[(kk+2) * N + jj+8]);
              __m256 e3 = _mm256_load_ps(&B[(kk+3) * N + jj+8]);
              __m256 e4 = _mm256_load_ps(&B[(kk+4) * N + jj+8]);
              __m256 e5 = _mm256_load_ps(&B[(kk+5) * N + jj+8]);
              __m256 e6 = _mm256_load_ps(&B[(kk+6) * N + jj+8]);
              __m256 e7 = _mm256_load_ps(&B[(kk+7) * N + jj+8]);

              d0 = _mm256_fmadd_ps(a0, e0, d0);
              d0 = _mm256_fmadd_ps(a1, e1, d0);
              d0 = _mm256_fmadd_ps(a2, e2, d0);
              d0 = _mm256_fmadd_ps(a3, e3, d0);
              d0 = _mm256_fmadd_ps(a4, e4, d0);
              d0 = _mm256_fmadd_ps(a5, e5, d0);
              d0 = _mm256_fmadd_ps(a6, e6, d0);
              d0 = _mm256_fmadd_ps(a7, e7, d0);

              _mm256_store_ps(&C[(ii+0)*N+jj], c0);
              _mm256_store_ps(&C[(ii+0)*N+jj+8], d0);
            }
          }
        }
      }
    }
  }
}

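// mat_mul: entry point. Rank 0 splits the rows of A across ranks, ships each
// rank its row slice of A plus the full B with non-blocking sends, computes
// its own slice locally, and finally gathers the C rows back. Non-root ranks
// allocate local buffers, receive their inputs, compute, and return C.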
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  int i, slice = 0, m_size, startM, endM, is_omp32 = 0;
  MPI_Status status;
  MPI_Request req1[MAX_NODE], req2[MAX_NODE], req3[MAX_NODE], req4[MAX_NODE], req5[MAX_NODE];
  int slice_idx[MAX_NODE] = {768, 768, 512, 512, 256, 256, 256, 128}, slice_start, idx;

  // Work is split row-wise across MPI ranks. Non-root ranks allocate and
  // initialize their own A, B, C below; every rank computes its assigned rows.
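  // The vectorized kernel tiles J and K by 1024 and rows by 32, so it is only
  // used when N and K are exact multiples of 1024; the row-count check happens
  // at the dispatch site below.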
  if ((K % 1024) == 0 && (N % 1024) == 0) {
    is_omp32 = 1;
  }

  if (mpi_rank == 0) {
    if (mpi_world_size > M) {
      mpi_world_size = M;
    }
    slice = M / mpi_world_size;
    if (num_threads > slice) {
      num_threads = slice;
    }

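    // Load-balancing tweak (appears empirically tuned): for an odd world size
    // with the vectorized kernel and large M, recompute the base slice from
    // the slice_idx table; the last rank always absorbs the remaining rows.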
    if (mpi_world_size > 1 && (mpi_world_size & 0x1) == 1 && is_omp32 && M >= 2048) {
      idx = mpi_world_size - 1;
      slice_start = (idx < MAX_NODE) ? slice_idx[idx] : slice_idx[7];
      slice = M / idx;
      slice -= slice_start * idx;
      if (slice < 0) slice = 32;
    }

    // Send each worker its row count, the thread count, its slice of A, and
    // the full B matrix (all non-blocking; completed after the local compute).
    for (i = 1; i < mpi_world_size; i++) {
      startM = i * slice;
      endM = (i == mpi_world_size - 1) ? M : (i + 1) * slice;
      m_size = (endM - startM);
      MPI_Isend(&m_size, 1, MPI_INT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req1[i]);
      MPI_Isend(&num_threads, 1, MPI_INT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req2[i]);
      MPI_Isend(&A[startM*K], m_size*K, MPI_FLOAT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req3[i]);
      MPI_Isend(&B[0], K*N, MPI_FLOAT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req4[i]);
    }
    startM = 0;
    endM = slice;
  } else {
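    // Workers receive with MPI_ANY_TAG; matching relies on MPI's
    // non-overtaking rule, since all four messages come from rank 0 and the
    // receives are posted in the same order the root sends them
    // (row count, thread count, A slice, B).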
    startM = 0;
    MPI_Recv(&endM, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    m_size = endM * K;
    MPI_Irecv(&num_threads, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req2[0]);
    alloc_mat(&A, endM, K);
    MPI_Irecv(&A[0], m_size, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req3[0]);
    alloc_mat(&B, K, N);
    MPI_Irecv(&B[0], K*N, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req4[0]);
    alloc_mat(&C, endM, N);
    zero_mat(C, endM, N);
    // wait for receiving
    MPI_Wait(&req2[0], &status);
    MPI_Wait(&req3[0], &status);
    MPI_Wait(&req4[0], &status);
  }

  // Compute this rank's rows with the vectorized kernel when the tile-size
  // preconditions hold, otherwise fall back to the blocked scalar kernel.
  if (is_omp32 && (endM % 32) == 0)
    mat_mul_omp32(startM, endM);
  else
    mat_mul_omp(startM, endM);

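  // Result exchange: rank 0 first completes its outstanding input sends, then
  // collects each worker's C rows; workers send back their local C block.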
  if (mpi_rank == 0) {
    // wait for sending
    for (i = 1; i < mpi_world_size; i++) {
      MPI_Wait(&req1[i], &status);
      MPI_Wait(&req2[i], &status);
      MPI_Wait(&req3[i], &status);
      MPI_Wait(&req4[i], &status);
    }

    // receiving the result
    for (i = 1; i < mpi_world_size; i++) {
      startM = i * slice;
      endM = (i == mpi_world_size - 1) ? M : (i + 1) * slice;
      m_size = (endM - startM) * N;
      MPI_Irecv(&C[startM*N], m_size, MPI_FLOAT, i, MPI_ANY_TAG, MPI_COMM_WORLD, &req5[i]);
    }

    // wait for receiving
    for (i = 1; i < mpi_world_size; i++) {
      MPI_Wait(&req5[i], &status);
    }
  } else {
    // sending the result
    m_size = endM * N;
    MPI_Isend(&C[0], m_size, MPI_FLOAT, 0, MATRIX_SEND_RESULT_MSG_ID, MPI_COMM_WORLD, &req5[0]);
    MPI_Wait(&req5[0], &status);
    //free(A);
    //free(B);
    //free(C);
  }
}