#pragma once

#include <cstddef>

// Interface for several matrix-multiplication implementations that share a
// common calling convention. For each variant X there are three entry points:
//
//   matmul_X(A, B, C, M, N, K)   - compute the product into C
//   matmul_X_initialize(M, N, K) - one-time setup for the given problem size
//   matmul_X_finalize(M, N, K)   - matching teardown/validation for the setup
//
// NOTE(review): the dimension convention is presumably A: M x K, B: K x N,
// C: M x N (row-major), following the usual (M, N, K) GEMM naming -- confirm
// against the implementations, which are not visible from this header.
// Ownership of A/B/C stays with the caller; none of these functions allocate
// or free the caller's buffers through these pointers (TODO confirm).

// Reference implementation on the CPU.
void matmul_cpu(float *A, float *B, float *C, size_t M, size_t N, size_t K);

// Straightforward (unoptimized) implementation.
void matmul_naive(float *A, float *B, float *C, size_t M, size_t N, size_t K);

// Variant using buffering (presumably double-buffered transfers -- confirm).
void matmul_buffering(float *A, float *B, float *C, size_t M, size_t N,
                      size_t K);

// Variant that distributes the work across multiple GPUs.
void matmul_multigpu(float *A, float *B, float *C, size_t M, size_t N,
                     size_t K);

// Variant backed by the cuBLAS library.
void matmul_cublas(float *A, float *B, float *C, size_t M, size_t N, size_t K);

// Variant using a tiled/blocked algorithm.
void matmul_tiling(float *A, float *B, float *C, size_t M, size_t N, size_t K);

// Per-variant setup hooks. Call the matching initialize before the compute
// entry point for the same (M, N, K) problem size.
void matmul_cpu_initialize(size_t M, size_t N, size_t K);

void matmul_naive_initialize(size_t M, size_t N, size_t K);

void matmul_buffering_initialize(size_t M, size_t N, size_t K);

void matmul_multigpu_initialize(size_t M, size_t N, size_t K);

void matmul_cublas_initialize(size_t M, size_t N, size_t K);

void matmul_tiling_initialize(size_t M, size_t N, size_t K);

// Per-variant teardown hooks. Call the matching finalize after the compute
// entry point to release whatever the initialize call acquired.
void matmul_cpu_finalize(size_t M, size_t N, size_t K);

void matmul_naive_finalize(size_t M, size_t N, size_t K);

void matmul_buffering_finalize(size_t M, size_t N, size_t K);

void matmul_multigpu_finalize(size_t M, size_t N, size_t K);

void matmul_cublas_finalize(size_t M, size_t N, size_t K);

void matmul_tiling_finalize(size_t M, size_t N, size_t K);