#include "mat_mul.h" #include #include #include #include #include "util.h" #define MASTER_0 0 #define FROM_MASTER_1 1 #define FROM_WORKER_2 2 #define ITILESIZE (32) #define JTILESIZE (1024) #define KTILESIZE (1024) static float *A, *B, *C; static int M, N, K; static int num_threads; static int mpi_rank, mpi_world_size; static int min(int x, int y) { return x < y ? x : y; } static void mat_mul_omp(float *A, float *C, int rows) { // TODO: parallelize & optimize matrix multiplication // Use num_threads per node int tid, start_T_M, end_T_M, p_size ; //float A_temp ; //#pragma omp parallel for //#pragma omp parallel for num_threads(num_threads) private(tid, start_T_M, end_T_M, p_size, A_temp) shared(A, B, C, M, N, K) #pragma omp parallel num_threads(num_threads) private(tid, start_T_M, end_T_M, p_size) shared(A, B, C, M, N, K) { tid = omp_get_thread_num() ; //if( (M%num_threads) != 0 ) { //p_size = (M/num_threads) ; if( (rows%num_threads) != 0 ) { p_size = (rows/num_threads) ; if(tid!=(num_threads-1)) { start_T_M = tid * p_size; end_T_M = start_T_M + p_size ; } else { start_T_M = tid * p_size; end_T_M = rows ; } } else { p_size = (rows/num_threads) ; start_T_M = tid * p_size; end_T_M = start_T_M + p_size ; } for (int ii = start_T_M; ii < end_T_M; ii += ITILESIZE) { for (int jj = 0; jj < N; jj += JTILESIZE) { for (int kk = 0; kk < K; kk += KTILESIZE) { for (int k = kk; k < min(K, kk + KTILESIZE); k++) { for (int i = ii; i < min(end_T_M, ii + ITILESIZE); i++) { float ar = A[i * K + k]; for (int j = jj; j < min(N, jj + JTILESIZE); j+=1) { C[i * N + j] += ar * B[k * N + j]; } } } } } } }//pragma } void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K, int _num_threads, int _mpi_rank, int _mpi_world_size) { A = _A, B = _B, C = _C; M = _M, N = _N, K = _K; num_threads = _num_threads, mpi_rank = _mpi_rank, mpi_world_size = _mpi_world_size; MPI_Status status; // TODO: parallelize & optimize matrix multiplication on multi-node // You must allocate & initialize A, B, C for non-root processes MPI_Barrier(MPI_COMM_WORLD); int row_size, rows ; int start_M, end_M ; if(mpi_rank==0) { for(int node = 1; node < mpi_world_size; node++) { row_size = (M/mpi_world_size) ; rows = row_size ; start_M = (node-1) * row_size; MPI_Send(&A[start_M*K], rows*K, MPI_INT, node, FROM_MASTER_1, MPI_COMM_WORLD) ; MPI_Send(B, K*N, MPI_INT, node, FROM_MASTER_1, MPI_COMM_WORLD) ; } row_size = (M/mpi_world_size) ; start_M = (mpi_world_size-1) * row_size; end_M = M ; rows = end_M - start_M ; mat_mul_omp(&A[start_M*K], &C[start_M*N], rows); for(int node = 1; node < mpi_world_size; node++) { row_size = (M/mpi_world_size) ; rows = row_size ; start_M = (node-1) * row_size; MPI_Recv(&C[start_M*N], rows*N, MPI_INT, node, FROM_WORKER_2, MPI_COMM_WORLD, &status) ; } } else { alloc_mat(&A, M, K) ; alloc_mat(&B, K, N) ; alloc_mat(&C, M, N) ; //zero_mat(C, M, N) ; row_size = (M/mpi_world_size) ; rows = row_size ; MPI_Recv(A, rows*K, MPI_INT, MASTER_0, FROM_MASTER_1, MPI_COMM_WORLD, &status) ; MPI_Recv(B, K*N, MPI_INT, MASTER_0, FROM_MASTER_1, MPI_COMM_WORLD, &status) ; mat_mul_omp (A, C, rows) ; MPI_Send(C, rows*N, MPI_INT, MASTER_0, FROM_WORKER_2, MPI_COMM_WORLD) ; } MPI_Barrier(MPI_COMM_WORLD); // // FIXME: for now, only root process runs the matrix multiplication. // if (mpi_rank == 0) // mat_mul_omp(); }