#include "mat_mul.h"
|
|
|
|
#include <cstdlib>
|
|
#include <cstdio>
|
|
#include <pthread.h>
|
|
|
|
//#define BLOCKSIZE 128  // 200.00 GFLOPS
//#define BLOCKSIZE 64   // 287.41 GFLOPS
#define BLOCKSIZE 32     // 293.77 GFLOPS (290 ~ 295)
//#define BLOCKSIZE 16   // 199.14 GFLOPS
//#define BLOCKSIZE 4    // 69.86 GFLOPS

#define MIN(blk, total) ((blk) < (total) ? (blk) : (total))
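
// BLOCKSIZE tiles the k dimension so that the BLOCKSIZE rows of B touched by
// the innermost loops stay hot in cache while a band of C rows is updated;
// MIN clamps the last tile when K is not a multiple of BLOCKSIZE.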

static float *A, *B, *C;
static int M, N, K;
static int num_threads;

static void* mat_mul_thread(void *data) {
  // TODO: parallelize & optimize matrix multiplication
  int i, j, k, kk;
  int pid;
  int div;
  int div_start, div_end;

  pid = *(int *)data;
  div = M / num_threads;
  div_start = pid * div;
  div_end = (pid == num_threads - 1) ? M : (pid + 1) * div;
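  // This thread owns the row range [div_start, div_end); the last thread also
  // takes the remainder rows when M is not evenly divisible by num_threads.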

  /* 1) original code
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      for (int k = 0; k < K; ++k) {
        C[i * N + j] += A[i * K + k] * B[k * N + j];
      }
    }
  }
  */

  /* 2) pthread only
   * ./run.sh -v -n 5 -t 1 1024 1024 1024 => 0.678401 GFLOPS
   * ./run.sh -v -n 5 -t 2 1024 1024 1024 => 1.356989 GFLOPS
   * ./run.sh -v -n 5 -t 3 1024 1024 1024 => 2.008897 GFLOPS
   * ./run.sh -v -n 5 -t 4 1024 1024 1024 => 2.702098 GFLOPS
   * ./run_performance.sh => Force Terminated (time over: 1 min)

  for (int i = div_start; i < div_end; ++i) {
    //for (int j = Ndiv_start; j < Ndiv_end; ++j) {
    for (int j = 0; j < N; ++j) {
      int k = 0;
      for ( ; k < K; ++k) {
        C[i * N + j] += A[i * K + k] * B[k * N + j];
      }
    }
  }
  */
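
  // Version 2 scales almost linearly with the thread count, but each thread
  // still walks B column-wise (stride of N floats) in its inner loop, so there
  // is no cache reuse and the per-thread rate stays low; version 3 adds blocking.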

  /* 3) pthread + block (final)
   * ./run_performance.sh
   * #define BLOCKSIZE 128 => 200.00 GFLOPS
   * #define BLOCKSIZE 64  => 287.41 GFLOPS
   * #define BLOCKSIZE 32  => 293.77 GFLOPS (290 ~ 295), 0.479 sec
   * #define BLOCKSIZE 16  => 199.14 GFLOPS
   * #define BLOCKSIZE 4   => 69.86 GFLOPS
   *
   * ./run.sh -v -n 10 -t 40 4096 4096 4096 => 0.411518 sec, 333.980281 GFLOPS
   */
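  // Loop order kk -> i -> k -> j: A[i * K + k] stays constant across the inner
  // j loop, and B[k * N + j] / C[i * N + j] are walked with unit stride, so the
  // BLOCKSIZE x N tile of B is reused for every row i handled by this thread.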
  for (kk = 0; kk < K; kk += BLOCKSIZE) {
    for (i = div_start; i < div_end; i++) {
      for (k = kk; k < MIN(kk + BLOCKSIZE, K); k++) {
        for (j = 0; j < N; j++) {
          C[i * N + j] += A[i * K + k] * B[k * N + j];
        }
      }
    }
  }

  /* test code - #1
   * ./run.sh -v -n 5 -t 40 4096 4096 4096 => 4.808995 sec, 28.579559 GFLOPS
   * ./run.sh -v -n 5 -t 20 4096 4096 4096 => time out

  for (kk = 0; kk < K; kk += BLOCKSIZE) {
    for (i = div_start; i < div_end; i++) {
      for (j = 0; j < N; j++) {
        for (k = kk; k < MIN(kk + BLOCKSIZE, K); k++) {
          C[i * N + j] += A[i * K + k] * B[k * N + j];
        }
      }
    }
  }
  */
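
  // test #1 keeps k innermost, so B[k * N + j] jumps by N floats on every
  // iteration; per the numbers recorded above, that strided column walk through
  // B makes it roughly an order of magnitude slower than the k -> j order used
  // in the final version.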

  /* test code - #2
   * ./run_performance.sh => 4.621823 sec, 29.736956 GFLOPS
   * (note: this variant is incorrect - tmpC accumulates across j, and
   *  C[i * N + j] is written once after the loop with j already equal to N)

  for (kk = 0; kk < K; kk += BLOCKSIZE) {
    for (i = div_start; i < div_end; i++) {
      for (k = kk; k < MIN(kk + BLOCKSIZE, K); k++) {
        float tmpC = 0;
        for (j = 0; j < N; j++) {
          //C[i * N + j] += A[i * K + k] * B[k * N + j];
          tmpC += A[i * K + k] * B[k * N + j];
        }
        C[i * N + j] = tmpC;
      }
    }
  }
  */
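
  /* For reference, a scalar-accumulation variant that is actually correct keeps
   * j outside the k loop so one C element is accumulated at a time (sketch only,
   * not measured with this code; it also gives up the blocked reuse of B):
   *
   * for (i = div_start; i < div_end; i++) {
   *   for (j = 0; j < N; j++) {
   *     float acc = C[i * N + j];
   *     for (k = 0; k < K; k++) {
   *       acc += A[i * K + k] * B[k * N + j];
   *     }
   *     C[i * N + j] = acc;
   *   }
   * }
   */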

  return NULL;
}

void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K, int _num_threads) {
  A = _A, B = _B, C = _C;
  M = _M, N = _N, K = _K;
  num_threads = _num_threads;

  // TODO: create '_num_threads' pthreads
  /* single thread ref. code
  pthread_t thread;
  pthread_create(&thread, NULL, mat_mul_thread, NULL);
  pthread_join(thread, NULL);
  */

  pthread_t *threads = (pthread_t *) malloc(sizeof(pthread_t) * num_threads);
  int *pids = (int *) malloc(sizeof(int) * num_threads);

  for (int i = 0; i < num_threads; i++) {
    pids[i] = i;
    pthread_create(&threads[i], NULL, mat_mul_thread, &pids[i]);
  }

  for (int i = 0; i < num_threads; i++) {
    pthread_join(threads[i], NULL);
  }

  // release the thread handles and ids once every worker has finished
  free(pids);
  free(threads);
}
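
/* Minimal usage sketch (hypothetical driver; the run.sh harness referenced in
 * the comments above presumably provides the real main, timing, and result
 * validation). C must start zeroed because mat_mul accumulates into it:
 *
 * #include <cstdlib>
 * #include "mat_mul.h"
 *
 * int main() {
 *   int m = 1024, n = 1024, k = 1024, threads = 4;
 *   float *a = (float *) calloc((size_t)m * k, sizeof(float));
 *   float *b = (float *) calloc((size_t)k * n, sizeof(float));
 *   float *c = (float *) calloc((size_t)m * n, sizeof(float));
 *   // ... fill a and b with input data ...
 *   mat_mul(a, b, c, m, n, k, threads);
 *   free(a); free(b); free(c);
 *   return 0;
 * }
 */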