#include "mat_mul.h" #include #include #include #include #include "util.h" #define MASTER_TO_SLAVE_TAG 1 //tag for messages sent from master to slaves #define SLAVE_TO_MASTER_TAG 4 //tag for messages sent from slaves to master static float *A, *B, *C; static int M, N, K; static int num_threads; static int mpi_rank, mpi_world_size; MPI_Request request; MPI_Status status; static int min(int x, int y) { return x < y ? x : y; } /* Node Num 1 [rank 0] Avg. time: 3.782508 sec [rank 0] Avg. throughput: 290.683216 GFLOPS Node Num 2 [rank 0] Avg. time: 2.597820 sec [rank 0] Avg. throughput: 423.243954 GFLOPS Node Num 3 [rank 0] Avg. time: 1.980322 sec [rank 0] Avg. throughput: 555.218642 GFLOPS Node Num 4 [rank 0] Avg. time: 1.788115 sec [rank 0] Avg. throughput: 614.899922 GFLOPS */ static void mat_mul_omp(int start_m, int end_m) { // TODO: parallelize & optimize matrix multiplication // Use num_threads per node /* #pragma omp parallel for num_threads(num_threads) schedule(guided, 1) for (int i = start_m; i < end_m; ++i) { for (int k = 0; k < K; ++k) { float ar = A[i * K + k]; for (int j = 0; j < N; ++j){ //printf("thread %d, i (%d) , k (%d), j(%d) \n", omp_get_thread_num(), i, k, j ); C[i * N + j] += ar * B[k * N + j]; } } } */ int ITILESIZE = 16; int JTILESIZE = 128; int KTILESIZE = 128; if(K == 8192 && N ==8192 && M == 8192){ ITILESIZE = 32; JTILESIZE = 1024; KTILESIZE = 1024; } int size = (end_m - start_m); #pragma omp parallel num_threads(num_threads) firstprivate(ITILESIZE) firstprivate(JTILESIZE) firstprivate(KTILESIZE) { int tid = omp_get_thread_num(); // int is = M / num_threads * tid + min(tid, M % num_threads); // int ie = M / num_threads * (tid + 1) + min(tid + 1, M % num_threads); int is = start_m + ((size / num_threads) * tid); int ie; if(tid == (num_threads-1)){ ie = start_m + ((size / num_threads) * (tid + 1) + (size % num_threads)); } else{ ie = start_m + ((size / num_threads) * (tid + 1)); } //printf("tid (%d) size(%d) is(%d), ie(%d)\n", tid, size, is, ie); for (int ii = is; ii < ie; ii += ITILESIZE) { for (int jj = 0; jj < N; jj += JTILESIZE) { for (int kk = 0; kk < K; kk += KTILESIZE) { for (int k = kk; k < min(K, kk + KTILESIZE); k++) { for (int i = ii; i < min(ie, ii + ITILESIZE); i++) { float ar = A[i * K + k]; for (int j = jj; j < min(N, jj + JTILESIZE); j+=1) { C[i * N + j] += ar * B[k * N + j]; } } } } } } } } void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K, int _num_threads, int _mpi_rank, int _mpi_world_size) { A = _A, B = _B, C = _C; M = _M, N = _N, K = _K; num_threads = _num_threads, mpi_rank = _mpi_rank, mpi_world_size = _mpi_world_size; // TODO: parallelize & optimize matrix multiplication on multi-node // You must allocate & initialize A, B, C for non-root processes // FIXME: for now, only root process runs the matrix multiplication. 
  // if (mpi_rank == 0)
  //   mat_mul_omp();

  int size_m;
  int start_m;
  int end_m;
  int start_m_no0;
  int end_m_no0;

  if (mpi_rank == 0) {
    for (int node = 0; node < mpi_world_size; node++) {
      size_m = M / (mpi_world_size);
      start_m = (node) * size_m;
      if (node == 0) start_m_no0 = start_m;
      if (((node + 1) == mpi_world_size) && ((M % (mpi_world_size)) != 0)) {
        end_m = M;
        if (node == 0) end_m_no0 = end_m;
      } else {
        end_m = start_m + size_m;
        if (node == 0) end_m_no0 = end_m;
      }
      if (node >= 1) {
        // send the lower bound first (blocking) to the intended slave
        MPI_Send(&start_m, 1, MPI_INT, node, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD);
        //printf("SEND start_m (%d) to (%d)\n", start_m, node);
        // next send the upper bound (blocking) to the intended slave
        MPI_Send(&end_m, 1, MPI_INT, node, MASTER_TO_SLAVE_TAG + 1, MPI_COMM_WORLD);
        //printf("SEND end_m (%d) to (%d)\n", end_m, node);
        // finally send the allocated row portion of [A] (blocking) to the intended slave
        int sizeBuf = (end_m - start_m) * K;
        //printf("SEND A Buff Len : (%d) to (%d)\n", sizeBuf, node);
        MPI_Send(&A[start_m * K], sizeBuf, MPI_FLOAT, node, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD);
        //printf("SEND A size (%d) to (%d)\n", sizeBuf, node);
        //print_mat(&A[start_m], (end_m - start_m), K);
      }
    }
  } else {
    alloc_mat(&A, M, K);
    // receive the lower bound from the master
    MPI_Recv(&start_m, 1, MPI_INT, 0, MASTER_TO_SLAVE_TAG, MPI_COMM_WORLD, &status);
    //printf("RECV start_m (%d) at (%d)\n", start_m, mpi_rank);
    // next receive the upper bound from the master
    MPI_Recv(&end_m, 1, MPI_INT, 0, MASTER_TO_SLAVE_TAG + 1, MPI_COMM_WORLD, &status);
    //printf("RECV end_m (%d) at (%d)\n", end_m, mpi_rank);
    // finally receive the row portion of [A] to be processed from the master
    int sizeBuf = (end_m - start_m) * K;
    //printf("RECV A Buff Len : %d \n", sizeBuf);
    MPI_Recv(&A[start_m * K], sizeBuf, MPI_FLOAT, 0, MASTER_TO_SLAVE_TAG + 2, MPI_COMM_WORLD, &status);
    //printf("RECV A size (%d) at (%d)\n", sizeBuf, mpi_rank);
    //print_mat(&A[start_m], (end_m - start_m), K);
  }

  //MPI_Barrier(MPI_COMM_WORLD);

  if (mpi_rank > 0) {
    alloc_mat(&B, K, N);
  }
  MPI_Bcast(&B[0], K * N, MPI_FLOAT, 0, MPI_COMM_WORLD);
  //printf("Bcast B (%d) to (%d)\n", K * N, mpi_rank);
  //print_mat(&B[0], K, N);

  if (mpi_rank > 0) {
    alloc_mat(&C, M, N);
  }
  MPI_Bcast(&C[0], M * N, MPI_FLOAT, 0, MPI_COMM_WORLD);
  //printf("Bcast C (%d) to (%d)\n", M * N, mpi_rank);
  //print_mat(&C[0], M, N);

  MPI_Barrier(MPI_COMM_WORLD);

  if (mpi_rank == 0) {
    mat_mul_omp(start_m_no0, end_m_no0);
  } else {
    mat_mul_omp(start_m, end_m);
    if (mpi_rank >= 1) {
      // send the lower bound first (blocking) to the master
      MPI_Send(&start_m, 1, MPI_INT, 0, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD);
      //printf("SEND C start_m (%d) to (%d)\n", start_m, 0);
      // next send the upper bound (blocking) to the master
      MPI_Send(&end_m, 1, MPI_INT, 0, SLAVE_TO_MASTER_TAG + 1, MPI_COMM_WORLD);
      //printf("SEND C end_m (%d) to (%d)\n", end_m, 0);
      // finally send the computed row portion of [C] (blocking) to the master
      int sizeBuf = (end_m - start_m) * N;
      //printf("SEND C Buff Len : (%d) to (%d)\n", sizeBuf, 0);
      MPI_Send(&C[start_m * N], sizeBuf, MPI_FLOAT, 0, SLAVE_TO_MASTER_TAG + 2, MPI_COMM_WORLD);
      //printf("SEND C size (%d) to (%d)\n", sizeBuf, 0);
      //print_mat(&C[start_m], (end_m - start_m), N);
    }
  }

  if (mpi_rank == 0) {
    for (int node = 1; node < mpi_world_size; node++) { // until all slaves have handed back the processed data
      // receive the lower bound from a slave
      MPI_Recv(&start_m, 1, MPI_INT, node, SLAVE_TO_MASTER_TAG, MPI_COMM_WORLD, &status);
      // receive the upper bound from a slave
      MPI_Recv(&end_m, 1, MPI_INT, node, SLAVE_TO_MASTER_TAG + 1, MPI_COMM_WORLD, &status);
      // receive the processed rows of C from a slave
      MPI_Recv(&C[start_m * N], (end_m - start_m) * N, MPI_FLOAT, node, SLAVE_TO_MASTER_TAG + 2, MPI_COMM_WORLD, &status);
    }
  }
}
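
/*
 * A minimal driver sketch, for illustration only. It assumes the usual harness
 * (MPI_Init/MPI_Finalize in a separate main.c, alloc_mat from util.h with the
 * same signature used above); the guard macro MAT_MUL_EXAMPLE_MAIN, the fixed
 * problem size, and the thread count are illustrative, not part of the actual
 * benchmark harness.
 */
#ifdef MAT_MUL_EXAMPLE_MAIN
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank, world_size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  const int m = 1024, n = 1024, k = 1024; // illustrative problem size
  const int threads = 16;                 // OpenMP threads per node

  float *a = NULL, *b = NULL, *c = NULL;
  if (rank == 0) {
    // Only the root needs initialized inputs; mat_mul() distributes them and
    // non-root ranks allocate their own buffers internally.
    alloc_mat(&a, m, k);
    alloc_mat(&b, k, n);
    alloc_mat(&c, m, n);
    for (int i = 0; i < m * k; ++i) a[i] = 1.0f;
    for (int i = 0; i < k * n; ++i) b[i] = 1.0f;
    for (int i = 0; i < m * n; ++i) c[i] = 0.0f;
  }

  double start = MPI_Wtime();
  mat_mul(a, b, c, m, n, k, threads, rank, world_size);
  double elapsed = MPI_Wtime() - start;

  if (rank == 0) {
    double gflops = 2.0 * m * n * k / elapsed / 1e9;
    printf("[rank 0] time: %f sec, throughput: %f GFLOPS\n", elapsed, gflops);
  }

  MPI_Finalize();
  return 0;
}
#endif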