#include "mat_mul.h" #include "util.h" #include #include #include #include static float *A, *B, *C; static int M, N, K; static int num_threads; static int mpi_rank, mpi_world_size; #define II_BLOCK 16 #define KK_BLOCK 32 #define JJ_BLOCK 2048 static void mat_mul_omp() { // TODO: parallelize & optimize matrix multiplication int node_slice = (M/mpi_world_size); int tid,i,ii,j,jj,k,kk,start,end; int using_threads = num_threads; int slice = (M/mpi_world_size) / using_threads; omp_set_num_threads(using_threads); #pragma omp parallel private(tid,i,ii,kk,k,j,jj,start,end) { tid = omp_get_thread_num(); start = mpi_rank * node_slice + slice * tid; end = (tid == using_threads - 1)?(mpi_rank == mpi_world_size - 1 ? M : (mpi_rank+1)*node_slice) : mpi_rank * node_slice+(tid+1)*slice; //printf("mpi_rank : %d, tid : %d, start : %d, end : %d\n", mpi_rank, tid, start, end); //printf("mpi_rank : %d, tid : %d, start : %d, end : %d\n", mpi_rank, tid, start, end); for (jj = 0; jj < N; jj += JJ_BLOCK) { for (kk = 0; kk < K; kk += KK_BLOCK) { for (ii = start; ii < end; ii += II_BLOCK) { for (i = ii; i < (ii+II_BLOCK < end ? ii+II_BLOCK : end); ++i) { for(k = kk; k < (kk+KK_BLOCK < K ? kk+KK_BLOCK : K); ++k) { for (j = jj; j < (jj+JJ_BLOCK < N ? jj+JJ_BLOCK : N); ++j) { C[i * N + j] += A[i * K + k] * B[k * N + j]; } } } } } } } } void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K, int _num_threads, int _mpi_rank, int _mpi_world_size) { A = _A, B = _B, C = _C; M = _M, N = _N, K = _K; num_threads = _num_threads, mpi_rank = _mpi_rank, mpi_world_size = _mpi_world_size; // TODO: parallelize & optimize matrix multiplication on multi-node // You must allocate & initialize A, B, C for non-root processes // FIXME: for now, only root process runs the matrix multiplication. 
  // Row-block distribution across MPI ranks:
  //  - rank 0 keeps the first M/mpi_world_size rows and sends every other rank
  //    its block of A and C plus a full copy of B,
  //  - every rank runs the blocked OpenMP kernel on its own rows,
  //  - rank 0 gathers the computed C blocks back.
  // Non-root ranks allocate A, B, C themselves before receiving.
  MPI_Status status;

  if (mpi_rank == 0) {
    // Distribute inputs; the last rank also takes the remainder rows.
    for (int i = 1; i <= mpi_world_size - 1; i++) {
      MPI_Send(&A[i * K * (M / mpi_world_size)],
               (i == mpi_world_size - 1) ? K * M - i * K * (M / mpi_world_size)
                                         : K * (M / mpi_world_size),
               MPI_FLOAT, i, 1, MPI_COMM_WORLD);
      MPI_Send(&B[0], K * N, MPI_FLOAT, i, 1, MPI_COMM_WORLD);
      MPI_Send(&C[i * N * (M / mpi_world_size)],
               (i == mpi_world_size - 1) ? N * M - i * N * (M / mpi_world_size)
                                         : N * (M / mpi_world_size),
               MPI_FLOAT, i, 1, MPI_COMM_WORLD);
    }

    // Compute rank 0's own row block while the other ranks work on theirs.
    mat_mul_omp();

    // Collect the computed C blocks from the other ranks.
    for (int i = 1; i <= mpi_world_size - 1; i++) {
      MPI_Recv(&C[i * N * (M / mpi_world_size)],
               (i == mpi_world_size - 1) ? N * M - i * N * (M / mpi_world_size)
                                         : N * (M / mpi_world_size),
               MPI_FLOAT, i, 1, MPI_COMM_WORLD, &status);
    }
  } else {
    // Non-root ranks allocate full-size buffers and receive only the parts
    // they need, at the same global offsets rank 0 uses.
    alloc_mat(&A, M, K);
    alloc_mat(&B, K, N);
    alloc_mat(&C, M, N);

    int A_arr_offset = mpi_rank * K * (M / mpi_world_size);
    int C_arr_offset = mpi_rank * N * (M / mpi_world_size);

    MPI_Recv(&A[A_arr_offset],
             (mpi_rank == mpi_world_size - 1) ? K * M - mpi_rank * K * (M / mpi_world_size)
                                              : K * (M / mpi_world_size),
             MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(&B[0], K * N, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);
    MPI_Recv(&C[C_arr_offset],
             (mpi_rank == mpi_world_size - 1) ? N * M - mpi_rank * N * (M / mpi_world_size)
                                              : N * (M / mpi_world_size),
             MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status);

    mat_mul_omp();

    // Send the computed C block back to rank 0.
    MPI_Send(&C[C_arr_offset],
             (mpi_rank == mpi_world_size - 1) ? N * M - mpi_rank * N * (M / mpi_world_size)
                                              : N * (M / mpi_world_size),
             MPI_FLOAT, 0, 1, MPI_COMM_WORLD);
  }
}
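
/*
 * Below is a minimal, untested sketch (not part of the assignment skeleton) of
 * how the same row-block distribution could be expressed with MPI collectives
 * (MPI_Scatterv / MPI_Bcast / MPI_Gatherv) instead of point-to-point
 * Send/Recv.  It assumes the same globals (A, B, C, M, N, K, mpi_rank,
 * mpi_world_size), that every rank has already allocated full-size A, B, C,
 * and that mat_mul_omp() is reused unchanged.  The function name is
 * hypothetical, and the block is disabled so it does not affect the build.
 */
#if 0
static void distribute_compute_gather_collectives(void) {
  int rows = M / mpi_world_size;
  int *counts = (int *)malloc(mpi_world_size * sizeof(int));
  int *displs = (int *)malloc(mpi_world_size * sizeof(int));

  // Element counts/offsets of each rank's row block of A; the last rank takes
  // the remainder rows, mirroring the Send/Recv logic above.
  for (int r = 0; r < mpi_world_size; ++r) {
    int r_rows = (r == mpi_world_size - 1) ? M - r * rows : rows;
    counts[r] = r_rows * K;
    displs[r] = r * rows * K;
  }
  if (mpi_rank == 0) {
    // Root's block already lives at the right offset in A, so keep it in place.
    MPI_Scatterv(A, counts, displs, MPI_FLOAT, MPI_IN_PLACE, 0, MPI_FLOAT,
                 0, MPI_COMM_WORLD);
  } else {
    MPI_Scatterv(NULL, NULL, NULL, MPI_FLOAT, &A[displs[mpi_rank]],
                 counts[mpi_rank], MPI_FLOAT, 0, MPI_COMM_WORLD);
  }

  // Every rank needs all of B.
  MPI_Bcast(B, K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);

  mat_mul_omp();

  // Gather the computed C row blocks back to rank 0 (counts now in units of N).
  for (int r = 0; r < mpi_world_size; ++r) {
    int r_rows = (r == mpi_world_size - 1) ? M - r * rows : rows;
    counts[r] = r_rows * N;
    displs[r] = r * rows * N;
  }
  if (mpi_rank == 0) {
    MPI_Gatherv(MPI_IN_PLACE, 0, MPI_FLOAT, C, counts, displs, MPI_FLOAT,
                0, MPI_COMM_WORLD);
  } else {
    MPI_Gatherv(&C[displs[mpi_rank]], counts[mpi_rank], MPI_FLOAT,
                NULL, NULL, NULL, MPI_FLOAT, 0, MPI_COMM_WORLD);
  }

  free(counts);
  free(displs);
}
#endif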