chundoong-lab-ta/SamsungDS22/submit/HW4/mat_mul.cpp

#include "mat_mul.h"
#include <cstdio>
#include <cstdlib>
#include <mpi.h>
#include <immintrin.h>
static float *A, *B, *C;
static int M, N, K;
static int num_threads;
static int mpi_rank, mpi_world_size;
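// Blocking factors for the tiled kernel in mat_mul_omp; they are shrunk at
// run time when the corresponding matrix dimension is smaller than the tile.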
int ITILESIZE = (32);
int JTILESIZE = (1024);
int KTILESIZE = (1024);
static void mat_mul_omp() {
  // TODO: parallelize & optimize matrix multiplication
  // Shrink each tile toward 32 when the corresponding dimension is smaller
  // than the tile, so small inputs still fit at least one tile.
  while (ITILESIZE > M && ITILESIZE > 32) {
    ITILESIZE /= 2;
  }
  while (JTILESIZE > N && JTILESIZE > 32) {
    JTILESIZE /= 2;
  }
  while (KTILESIZE > K && KTILESIZE > 32) {
    KTILESIZE /= 2;
  }
  // Largest multiples of the tile sizes; the tiled loops below cover only
  // this strip-mined region (see the cleanup sketch after the loop nest).
  int M_STRIP = M / ITILESIZE * ITILESIZE;
  int N_STRIP = N / JTILESIZE * JTILESIZE;
  int K_STRIP = K / KTILESIZE * KTILESIZE;
#pragma omp parallel for num_threads(num_threads)
  for (int i = 0; i < M_STRIP; i += ITILESIZE) {
    for (int j = 0; j < N_STRIP; j += JTILESIZE) {
      for (int k = 0; k < K_STRIP; k += KTILESIZE) {
        for (int kk = k; kk < k + KTILESIZE; kk += 8) {
          for (int ii = i; ii < i + ITILESIZE; ii++) {
            // Broadcast 8 consecutive elements of row ii of A; each scalar is
            // multiplied against a full 8-wide vector of B below.
            __m256 a0 = _mm256_set1_ps(A[(ii+0)*K+(kk+0)]);
            __m256 a1 = _mm256_set1_ps(A[(ii+0)*K+(kk+1)]);
            __m256 a2 = _mm256_set1_ps(A[(ii+0)*K+(kk+2)]);
            __m256 a3 = _mm256_set1_ps(A[(ii+0)*K+(kk+3)]);
            __m256 a4 = _mm256_set1_ps(A[(ii+0)*K+(kk+4)]);
            __m256 a5 = _mm256_set1_ps(A[(ii+0)*K+(kk+5)]);
            __m256 a6 = _mm256_set1_ps(A[(ii+0)*K+(kk+6)]);
            __m256 a7 = _mm256_set1_ps(A[(ii+0)*K+(kk+7)]);
            // Each jj iteration accumulates into 16 floats of row ii of C
            // (two 8-wide AVX registers). The aligned load/store intrinsics
            // assume B and C are 32-byte aligned and N is a multiple of 8;
            // _mm256_loadu_ps / _mm256_storeu_ps would be needed otherwise.
            for (int jj = j; jj < j + JTILESIZE; jj += 16) {
              __m256 c0 = _mm256_load_ps(&C[(ii+0) * N + jj]);
              __m256 b0 = _mm256_load_ps(&B[(kk+0) * N + jj]);
              __m256 b1 = _mm256_load_ps(&B[(kk+1) * N + jj]);
              __m256 b2 = _mm256_load_ps(&B[(kk+2) * N + jj]);
              __m256 b3 = _mm256_load_ps(&B[(kk+3) * N + jj]);
              __m256 b4 = _mm256_load_ps(&B[(kk+4) * N + jj]);
              __m256 b5 = _mm256_load_ps(&B[(kk+5) * N + jj]);
              __m256 b6 = _mm256_load_ps(&B[(kk+6) * N + jj]);
              __m256 b7 = _mm256_load_ps(&B[(kk+7) * N + jj]);
              c0 = _mm256_fmadd_ps(a0, b0, c0);
              c0 = _mm256_fmadd_ps(a1, b1, c0);
              c0 = _mm256_fmadd_ps(a2, b2, c0);
              c0 = _mm256_fmadd_ps(a3, b3, c0);
              c0 = _mm256_fmadd_ps(a4, b4, c0);
              c0 = _mm256_fmadd_ps(a5, b5, c0);
              c0 = _mm256_fmadd_ps(a6, b6, c0);
              c0 = _mm256_fmadd_ps(a7, b7, c0);
              __m256 d0 = _mm256_load_ps(&C[(ii+0) * N + jj+8]);
              __m256 e0 = _mm256_load_ps(&B[(kk+0) * N + jj+8]);
              __m256 e1 = _mm256_load_ps(&B[(kk+1) * N + jj+8]);
              __m256 e2 = _mm256_load_ps(&B[(kk+2) * N + jj+8]);
              __m256 e3 = _mm256_load_ps(&B[(kk+3) * N + jj+8]);
              __m256 e4 = _mm256_load_ps(&B[(kk+4) * N + jj+8]);
              __m256 e5 = _mm256_load_ps(&B[(kk+5) * N + jj+8]);
              __m256 e6 = _mm256_load_ps(&B[(kk+6) * N + jj+8]);
              __m256 e7 = _mm256_load_ps(&B[(kk+7) * N + jj+8]);
              d0 = _mm256_fmadd_ps(a0, e0, d0);
              d0 = _mm256_fmadd_ps(a1, e1, d0);
              d0 = _mm256_fmadd_ps(a2, e2, d0);
              d0 = _mm256_fmadd_ps(a3, e3, d0);
              d0 = _mm256_fmadd_ps(a4, e4, d0);
              d0 = _mm256_fmadd_ps(a5, e5, d0);
              d0 = _mm256_fmadd_ps(a6, e6, d0);
              d0 = _mm256_fmadd_ps(a7, e7, d0);
              _mm256_store_ps(&C[(ii+0)*N+jj], c0);
              _mm256_store_ps(&C[(ii+0)*N+jj+8], d0);
            }
          }
        }
      }
    }
  }
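
  // The tiled loops above cover only the strip-mined region
  // [0, M_STRIP) x [0, N_STRIP) x [0, K_STRIP). A minimal scalar cleanup for
  // the leftover rows, columns, and depth could look like the sketch below;
  // like the vectorized path, it assumes C was zero-filled by the caller so
  // partial products can simply be accumulated.
#pragma omp parallel for num_threads(num_threads)
  for (int i = M_STRIP; i < M; i++)    // leftover rows: full N and K
    for (int k = 0; k < K; k++)
      for (int j = 0; j < N; j++)
        C[i * N + j] += A[i * K + k] * B[k * N + j];
#pragma omp parallel for num_threads(num_threads)
  for (int i = 0; i < M_STRIP; i++) {
    for (int k = 0; k < K; k++)        // leftover columns: full K
      for (int j = N_STRIP; j < N; j++)
        C[i * N + j] += A[i * K + k] * B[k * N + j];
    for (int k = K_STRIP; k < K; k++)  // leftover depth for the tiled block
      for (int j = 0; j < N_STRIP; j++)
        C[i * N + j] += A[i * K + k] * B[k * N + j];
  }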
}
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K,
             int _num_threads, int _mpi_rank, int _mpi_world_size) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads, mpi_rank = _mpi_rank,
  mpi_world_size = _mpi_world_size;
  // TODO: parallelize & optimize matrix multiplication on multi-node
  // You must allocate & initialize A, B, C for non-root processes
  // (see the MPI distribution sketch after this function).
  // FIXME: for now, only the root process runs the matrix multiplication.
  if (mpi_rank == 0)
    mat_mul_omp();
}
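
// The TODO in mat_mul above asks for a multi-node version and notes that
// non-root processes must allocate and initialize A, B, C themselves. Below
// is a minimal sketch (not called from mat_mul) of one common scheme:
// scatter row blocks of A, broadcast B, run the single-node kernel on each
// rank's block, and gather the C rows back to the root. It assumes M, N, K
// and the mpi_* globals are valid on every rank, that N is a multiple of 8
// so the AVX kernel's aligned loads stay valid, and that the harness has
// already called MPI_Init. All names introduced here are illustrative only.
static void mat_mul_mpi_sketch(float *full_A, float *full_B, float *full_C) {
  // Per-rank element counts and displacements for an even split of A's rows.
  int *counts = (int *)malloc(mpi_world_size * sizeof(int));
  int *displs = (int *)malloc(mpi_world_size * sizeof(int));
  for (int r = 0, off = 0; r < mpi_world_size; r++) {
    int rows = M / mpi_world_size + (r < M % mpi_world_size ? 1 : 0);
    counts[r] = rows * K;
    displs[r] = off;
    off += rows * K;
  }
  int my_rows = counts[mpi_rank] / K;

  // 32-byte aligned local buffers so the AVX kernel's aligned loads are safe.
  float *local_A = (float *)_mm_malloc((size_t)my_rows * K * sizeof(float), 32);
  float *local_C = (float *)_mm_malloc((size_t)my_rows * N * sizeof(float), 32);
  float *local_B = (mpi_rank == 0)
                       ? full_B
                       : (float *)_mm_malloc((size_t)K * N * sizeof(float), 32);
  for (int idx = 0; idx < my_rows * N; idx++) local_C[idx] = 0.0f;

  // Distribute the inputs: a row block of A to each rank, all of B everywhere.
  MPI_Scatterv(full_A, counts, displs, MPI_FLOAT, local_A, counts[mpi_rank],
               MPI_FLOAT, 0, MPI_COMM_WORLD);
  MPI_Bcast(local_B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // Temporarily repoint the globals used by mat_mul_omp at the local block.
  float *save_A = A, *save_B = B, *save_C = C;
  int save_M = M;
  A = local_A, B = local_B, C = local_C, M = my_rows;
  mat_mul_omp();
  A = save_A, B = save_B, C = save_C, M = save_M;

  // Recompute counts/displacements in units of C elements and gather rows.
  for (int r = 0, off = 0; r < mpi_world_size; r++) {
    int rows = counts[r] / K;
    counts[r] = rows * N;
    displs[r] = off;
    off += rows * N;
  }
  MPI_Gatherv(local_C, counts[mpi_rank], MPI_FLOAT, full_C, counts, displs,
              MPI_FLOAT, 0, MPI_COMM_WORLD);

  free(counts);
  free(displs);
  _mm_free(local_A);
  _mm_free(local_C);
  if (mpi_rank != 0) _mm_free(local_B);
}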