#include "mat_mul.h" #include "util.h" #include #include #include #include // by hongpooh static float *A, *B, *C; static int M, N, K; static int num_threads; static int mpi_rank, mpi_world_size; MPI_Status status; MPI_Request request; static int offset, rows; #define min(x, y) (x < y ? x : y) #define ITILESIZE (32) #define JTILESIZE (1024) #define KTILESIZE (1024) //#define JTILESIZE (2048) //#define KTILESIZE (2048) #define MASTER 0 #define FROM_MASTER 1 #define FROM_WORKER 2 static void mat_mul_omp() { // TODO: parallelize & optimize matrix multiplication // Use num_threads per node int is = 0; int ie = rows; /* 1) original code #pragma omp parallel for for (int i = 0; i < M; ++i) { for (int j = 0; j < N; ++j) { for (int k = 0; k < K; ++k) { C[i * N + j] += A[i * K + k] * B[k * N + j]; } } } */ /* time over #pragma omp parallel num_threads(num_threads) //#pragma omp parallel for #pragma omp for for (int i = 0; i < rows; ++i) { for (int j = 0; j < N; ++j) { for (int k = 0; k < K; ++k) { C[i * N + j] += A[i * K + k] * B[k * N + j]; } } } */ /* working code #1 - run_valid.sh OK #pragma omp parallel num_threads(num_threads) #pragma omp for //#pragma omp parallel for for (int i = 0; i < rows; ++i) { for (int k = 0; k < K; ++k) { float ar = A[i * K + k]; for (int j = 0; j < N; ++j) { C[i * N + j] += ar * B[k * N + j]; } } } */ /* OpenMP - ref. coce from HW3 //int tid = (long)data; //int is = M / num_threads * tid + min(tid, M % num_threads); //int ie = M / num_threads * (tid + 1) + min(tid + 1, M % num_threads); //for (int ii = is; ii < ie; ii += ITILESIZE) { #pragma omp parallel for // (original 8192, 8192, 8192) omp: 490 GFLOPS //#pragma omp parallel for shared(A, B, C) for (int ii = 0; ii < M; ii += ITILESIZE) { for (int jj = 0; jj < N; jj += JTILESIZE) { for (int kk = 0; kk < K; kk += KTILESIZE) { for (int k = kk; k < min(K, kk + KTILESIZE); k++) { //for (int i = ii; i < min(ie, ii + ITILESIZE); i++) { for (int i = ii; i < min(M, ii + ITILESIZE); i++) { float ar = A[i * K + k]; for (int j = jj; j < min(N, jj + JTILESIZE); j++) { C[i * N + j] += ar * B[k * N + j]; //C[i * N + j] += A[i * K + k] * B[k * N + j]; // w/o 'float ar': 450~500 GFLOPS } } } } } } */ //#pragma omp parallel for // (original 8192, 8192, 8192) omp: 490 GFLOPS //#pragma omp parallel for shared(A, B, C) #pragma omp parallel num_threads(num_threads) #pragma omp for for (int ii = is; ii < ie; ii += ITILESIZE) { //for (int ii = 0; ii < M; ii += ITILESIZE) { for (int jj = 0; jj < N; jj += JTILESIZE) { for (int kk = 0; kk < K; kk += KTILESIZE) { for (int k = kk; k < min(K, kk + KTILESIZE); k++) { //for (int i = ii; i < min(M, ii + ITILESIZE); i++) { for (int i = ii; i < min(ie, ii + ITILESIZE); i++) { float ar = A[i * K + k]; for (int j = jj; j < min(N, jj + JTILESIZE); j++) { C[i * N + j] += ar * B[k * N + j]; //C[i * N + j] += A[i * K + k] * B[k * N + j]; } } } } } } } void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K, int _num_threads, int _mpi_rank, int _mpi_world_size) { A = _A, B = _B, C = _C; M = _M, N = _N, K = _K; num_threads = _num_threads, mpi_rank = _mpi_rank, mpi_world_size = _mpi_world_size; // TODO: parallelize & optimize matrix multiplication on multi-node // You must allocate & initialize A, B, C for non-root processes //printf("\n[hong] mpi_rank: %d & mpi_world_size: %d\n", mpi_rank, mpi_world_size); // FIXME: for now, only root process runs the matrix multiplication. 
  if (mpi_rank == 0) {
    //printf("\nmpi_rank=0\n");
    int row_size = M / mpi_world_size;
    int st, ed;
    /* Requests for the non-blocking A/B sends, two per worker. The offset and
       rows variables are overwritten on every loop iteration, so they must go
       out with blocking MPI_Send; A and B never change during the loop, so
       those large transfers can stay non-blocking. */
    MPI_Request *reqs =
        (MPI_Request *)malloc(2 * (mpi_world_size - 1) * sizeof(MPI_Request));
    for (int node = 1; node < mpi_world_size; node++) {
      offset = node * row_size;
      st = offset;
      ed = (node == mpi_world_size - 1) ? M : (node + 1) * row_size;
      rows = ed - st;
      MPI_Send(&offset, 1, MPI_INT, node, FROM_MASTER, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, node, FROM_MASTER, MPI_COMM_WORLD);
      /* data send: NON-blocking */
      MPI_Isend(&A[offset * K], rows * K, MPI_FLOAT, node, FROM_MASTER,
                MPI_COMM_WORLD, &reqs[2 * (node - 1)]);
      MPI_Isend(B, K * N, MPI_FLOAT, node, FROM_MASTER, MPI_COMM_WORLD,
                &reqs[2 * (node - 1) + 1]);
    }

    /* overlap: the master computes its own row block while the sends drain */
    rows = row_size;
    mat_mul_omp();

    if (mpi_world_size > 1)
      MPI_Waitall(2 * (mpi_world_size - 1), reqs, MPI_STATUSES_IGNORE);
    free(reqs);

    /* receive data from each node */
    for (int node = 1; node < mpi_world_size; node++) {
      MPI_Recv(&offset, 1, MPI_INT, node, FROM_WORKER, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, node, FROM_WORKER, MPI_COMM_WORLD, &status);
      MPI_Recv(&C[offset * N], rows * N, MPI_FLOAT, node, FROM_WORKER,
               MPI_COMM_WORLD, &status);
    }
  } else {
    //printf("\nmpi_rank != 0\n");
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);
    zero_mat(C, M, N);

    /* receive data from master */
    MPI_Recv(&offset, 1, MPI_INT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(&rows, 1, MPI_INT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(A, rows * K, MPI_FLOAT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);
    MPI_Recv(B, K * N, MPI_FLOAT, MASTER, FROM_MASTER, MPI_COMM_WORLD, &status);

    mat_mul_omp();

    /* result send to master: NON-blocking, completed before returning */
    MPI_Request wreqs[3];
    MPI_Isend(&offset, 1, MPI_INT, MASTER, FROM_WORKER, MPI_COMM_WORLD, &wreqs[0]);
    MPI_Isend(&rows, 1, MPI_INT, MASTER, FROM_WORKER, MPI_COMM_WORLD, &wreqs[1]);
    MPI_Isend(C, rows * N, MPI_FLOAT, MASTER, FROM_WORKER, MPI_COMM_WORLD, &wreqs[2]);
    MPI_Waitall(3, wreqs, MPI_STATUSES_IGNORE);
  }

  /* test only
  if (mpi_rank == 0) {
    printf("\nmpi_rank=0\n");
    mat_mul_omp();
  } else {
    printf("\nmpi_rank!=0! - 1 or 2\n");
    mat_mul_omp(); // test only
  }
  */
}
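
/* A minimal standalone driver sketch, guarded so it cannot collide with the
   course-provided harness. Everything here is an assumption rather than part
   of the original source: the MAT_MUL_STANDALONE guard, the problem sizes,
   and the build line. Only alloc_mat/zero_mat from util.h, already used
   above, are relied on. Build with something like
   `mpicc -O3 -fopenmp -DMAT_MUL_STANDALONE mat_mul.c util.c` and launch with
   `mpirun -np 4 ./a.out`. */
#ifdef MAT_MUL_STANDALONE
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int m = 1024, n = 1024, k = 1024, threads = 16;
  float *a = NULL, *b = NULL, *c = NULL;
  if (rank == 0) {
    // Only the root allocates; mat_mul() allocates on the other ranks.
    alloc_mat(&a, m, k);
    alloc_mat(&b, k, n);
    alloc_mat(&c, m, n);
    zero_mat(c, m, n);
    // Fill a and b with test data here before timing or validating.
  }
  mat_mul(a, b, c, m, n, k, threads, rank, size);
  MPI_Finalize();
  return 0;
}
#endif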