// Multi-GPU OpenCL SGEMM host code: splits matrix A row-wise across
// NUM_OF_GPU devices and runs a tiled work-per-thread kernel on each.
#include "mat_mul.h"

#include <stdio.h>
#include <stdlib.h>

#include <CL/cl.h>
|
|
|
|
// Abort with file/line context when an OpenCL call fails.
// do { } while (0) makes the macro a single statement, so it is safe in
// un-braced if/else bodies; the local capture evaluates the argument
// exactly once (the old expansion re-ran it on the failure path, which
// re-invoked CHECK_ERROR(clGetPlatformInfo(...))-style calls).
#define CHECK_ERROR(err) \
  do { \
    cl_int err_ = (err); \
    if (err_ != CL_SUCCESS) { \
      printf("[%s:%d] OpenCL error %d\n", __FILE__, __LINE__, err_); \
      exit(EXIT_FAILURE); \
    } \
  } while (0)
|
|
|
|
#define NUM_OF_GPU 4
|
|
#define TILE_SIZE 32
|
|
#define WPT 8 // work-per-thread
|
|
#define RTS (TILE_SIZE / WPT) // The reduced tile-size in one dimension
|
|
|
|
static cl_int err;
|
|
static cl_platform_id platform;
|
|
static cl_device_id device[NUM_OF_GPU];
|
|
static cl_context context;
|
|
static cl_command_queue queue[NUM_OF_GPU];
|
|
static cl_program program;
|
|
static cl_kernel kernel[NUM_OF_GPU];
|
|
static cl_mem a_d[NUM_OF_GPU], b_d[NUM_OF_GPU], c_d[NUM_OF_GPU];
|
|
|
|
static float *A, *B, *C;
|
|
static int M, N, K;
|
|
static int M_divided[NUM_OF_GPU], offset[NUM_OF_GPU];
|
|
|
|
void mat_mul(float *_A, float *_B, float *_C, int _M, int _N, int _K) {
|
|
A = _A, B = _B, C = _C;
|
|
M = _M, N = _N, K = _K;
|
|
|
|
for (int i = 0; i < NUM_OF_GPU; i++) {
|
|
// Setup kernel arguments
|
|
err = clSetKernelArg(kernel[i], 0, sizeof(cl_mem), &a_d[i]);
|
|
CHECK_ERROR(err);
|
|
err = clSetKernelArg(kernel[i], 1, sizeof(cl_mem), &b_d[i]);
|
|
CHECK_ERROR(err);
|
|
err = clSetKernelArg(kernel[i], 2, sizeof(cl_mem), &c_d[i]);
|
|
CHECK_ERROR(err);
|
|
err = clSetKernelArg(kernel[i], 3, sizeof(int), &M_divided[i]);
|
|
CHECK_ERROR(err);
|
|
err = clSetKernelArg(kernel[i], 4, sizeof(int), &N);
|
|
CHECK_ERROR(err);
|
|
err = clSetKernelArg(kernel[i], 5, sizeof(int), &K);
|
|
CHECK_ERROR(err);
|
|
|
|
// Setup global work size and local work size
|
|
size_t gws[2] = {(size_t)((M_divided[i] + WPT - 1) / WPT), (size_t)N};
|
|
size_t lws[2] = {RTS, TILE_SIZE};
|
|
|
|
for (int j = 0; j < 2; ++j) {
|
|
// By OpenCL spec, global work size should be MULTIPLE of local work size
|
|
// Formula below achieve it
|
|
// e.g., gws = 25, lws = 16, then (25 + 16 - 1) / 16 * 16 = 40 / 16 * 16 = 2 * 16 = 32
|
|
gws[j] = (gws[j] + lws[j] - 1) / lws[j] * lws[j];
|
|
}
|
|
|
|
// Run kernel
|
|
err = clEnqueueNDRangeKernel(queue[i], kernel[i], 2, NULL, gws, lws, 0, NULL, NULL);
|
|
CHECK_ERROR(err);
|
|
}
|
|
for (int i = 0; i < NUM_OF_GPU; i++) {
|
|
// DO NOT REMOVE; NEEDED FOR TIME MEASURE
|
|
err = clFinish(queue[i]);
|
|
CHECK_ERROR(err);
|
|
}
|
|
}
|
|
|
|
// Query and print the human-readable name of an OpenCL platform.
// Exits the process (via CHECK_ERROR or the OOM branch) on any failure.
static void print_platform_info(cl_platform_id platform) {
  size_t sz;
  char *buf;
  // First call with a NULL buffer just retrieves the required size.
  CHECK_ERROR(clGetPlatformInfo(platform, CL_PLATFORM_NAME, 0, NULL, &sz));
  buf = (char*)malloc(sz);
  if (buf == NULL) { // previously dereferenced without checking
    printf("[%s:%d] Out of memory (%zu bytes)\n", __FILE__, __LINE__, sz);
    exit(EXIT_FAILURE);
  }
  CHECK_ERROR(clGetPlatformInfo(platform, CL_PLATFORM_NAME, sz, buf, NULL));
  printf("Detected OpenCL platform: %s\n", buf);
  free(buf);
}
|
|
|
|
// Query and print the human-readable name of an OpenCL device.
// Exits the process (via CHECK_ERROR or the OOM branch) on any failure.
static void print_device_info(cl_device_id device) {
  size_t sz;
  char *buf;
  // First call with a NULL buffer just retrieves the required size.
  CHECK_ERROR(clGetDeviceInfo(device, CL_DEVICE_NAME, 0, NULL, &sz));
  buf = (char*)malloc(sz);
  if (buf == NULL) { // previously dereferenced without checking
    printf("[%s:%d] Out of memory (%zu bytes)\n", __FILE__, __LINE__, sz);
    exit(EXIT_FAILURE);
  }
  CHECK_ERROR(clGetDeviceInfo(device, CL_DEVICE_NAME, sz, buf, NULL));
  printf("Detected OpenCL device: %s\n", buf);
  free(buf);
}
|
|
|
|
// Read an OpenCL C source file, create a program from it, and build it
// for all NUM_OF_GPU devices. On a compile failure, prints each device's
// build log before exiting. Returns the built program; exits on any error.
static cl_program create_and_build_program_with_source(cl_context context, cl_device_id* device, const char *file_name) {
  FILE *file = fopen(file_name, "rb");
  if (file == NULL) {
    printf("Failed to open %s\n", file_name);
    exit(EXIT_FAILURE);
  }
  // Determine the file size by seeking to the end.
  fseek(file, 0, SEEK_END);
  size_t source_size = ftell(file);
  rewind(file);
  char *source_code = (char*)malloc(source_size + 1);
  if (source_code == NULL) {
    printf("Failed to allocate %zu bytes for %s\n", source_size + 1, file_name);
    exit(EXIT_FAILURE);
  }
  // fread may return short counts, so loop until the whole file is read.
  size_t ntotal = 0;
  while (ntotal < source_size) {
    // BUG FIX: read into the unread tail (source_code + ntotal); the old
    // code always wrote to the buffer start, clobbering earlier data on
    // any short read.
    size_t nread = fread(source_code + ntotal, sizeof(char), source_size - ntotal, file);
    if (nread == 0) {
      // EOF or I/O error: bail out instead of spinning forever.
      printf("Failed to read %s\n", file_name);
      exit(EXIT_FAILURE);
    }
    ntotal += nread;
  }
  source_code[source_size] = '\0';
  fclose(file);

  cl_program program = clCreateProgramWithSource(context, 1, (const char **)&source_code, &source_size, &err);
  CHECK_ERROR(err);
  free(source_code);

  err = clBuildProgram(program, NUM_OF_GPU, device, "", NULL, NULL);
  if (err == CL_BUILD_PROGRAM_FAILURE) {
    // Dump the build log for every device to aid debugging.
    for (int i = 0; i < NUM_OF_GPU; i++) {
      size_t log_size;
      CHECK_ERROR(clGetProgramBuildInfo(program, device[i], CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size));
      char *log = (char*)malloc(log_size + 1);
      if (log == NULL) {
        printf("Failed to allocate %zu bytes for build log\n", log_size + 1);
        exit(EXIT_FAILURE);
      }
      CHECK_ERROR(clGetProgramBuildInfo(program, device[i], CL_PROGRAM_BUILD_LOG, log_size, log, NULL));
      log[log_size] = 0;
      printf("Compile error:\n%s\n", log);
      free(log);
    }
  }
  CHECK_ERROR(err); // exits here if the build failed
  return program;
}
|
|
|
|
// One-time OpenCL setup: split A row-wise across the GPUs, create the
// context/queues/kernels, allocate per-device buffers, and upload A and B.
// NOTE(review): the parameters shadow the file-scope A/B/C/M/N/K globals;
// the globals themselves are assigned later, in mat_mul().
void mat_mul_init(float *A, float *B, float *C, int M, int N, int K) {
  int divided_m, remainder, tmp = 0;
  // Number of GPU devices actually found.
  // NOTE(review): never compared against NUM_OF_GPU — fewer devices than
  // expected would leave trailing device[] entries invalid; verify on target.
  unsigned int ndev;

  // A matrix divided by the number of GPU
  divided_m = M / NUM_OF_GPU;
  remainder = M - divided_m * NUM_OF_GPU;

  // Assign more area to the back GPU number
  // (the last `remainder` devices each take one extra row, so the slice
  // sizes sum exactly to M).
  if(remainder != 0) {
    for (int i = 0; i < (NUM_OF_GPU - remainder); i++) {
      M_divided[i] = divided_m;
    }
    for (int i = (NUM_OF_GPU - remainder); i < NUM_OF_GPU; i++) {
      M_divided[i] = divided_m + 1;
    }
  }
  else {
    for (int i = 0; i < NUM_OF_GPU; i++) {
      M_divided[i] = divided_m;
    }
  }

  // Prefix sums of slice sizes; offset[0] stays 0 via static zero-init.
  for (int i = 1; i < NUM_OF_GPU; i++) {
    tmp += M_divided[i - 1];
    offset[i] = tmp; // Starting row number divided by node
  }

  // Get OpenCL platform (only the first platform is used)
  err = clGetPlatformIDs(1, &platform, NULL);
  CHECK_ERROR(err);
  print_platform_info(platform);

  // Get OpenCL device
  err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, NUM_OF_GPU, device, &ndev);
  CHECK_ERROR(err);
  for (int i = 0; i < NUM_OF_GPU; i++) {
    print_device_info(device[i]);
  }

  // Create OpenCL context shared by all devices
  context = clCreateContext(NULL, NUM_OF_GPU, device, NULL, NULL, &err);
  CHECK_ERROR(err);

  // Compile program from "kernel.cl"
  program = create_and_build_program_with_source(context, device, "kernel.cl");

  for (int i = 0; i < NUM_OF_GPU; i++) {
    // Create OpenCL command queue (one in-order queue per device)
    queue[i] = clCreateCommandQueue(context, device[i], 0, &err);
    CHECK_ERROR(err);

    // Extract kernel from compiled program
    kernel[i] = clCreateKernel(program, "sgemm", &err);
    CHECK_ERROR(err);

    // Create GPU buffers: a_d/c_d are sized per row-slice, b_d holds all of B
    a_d[i] = clCreateBuffer(context, CL_MEM_READ_WRITE, M_divided[i] * K * sizeof(float), NULL, &err);
    CHECK_ERROR(err);
    b_d[i] = clCreateBuffer(context, CL_MEM_READ_WRITE, K * N * sizeof(float), NULL, &err);
    CHECK_ERROR(err);
    c_d[i] = clCreateBuffer(context, CL_MEM_READ_WRITE, M_divided[i] * N * sizeof(float), NULL, &err);
    CHECK_ERROR(err);
  }

  for (int i = 0; i < NUM_OF_GPU; i++) {
    // Write to GPU; A (cpu) -> a_d (gpu), B (cpu) -> b_d (gpu)
    // Each device gets only its row-slice of A (starting at row offset[i]),
    // but a full replica of B. CL_TRUE makes both writes blocking.
    err = clEnqueueWriteBuffer(queue[i], a_d[i], CL_TRUE, 0, M_divided[i] * K * sizeof(float), &A[offset[i] * K], 0, NULL, NULL);
    CHECK_ERROR(err);
    err = clEnqueueWriteBuffer(queue[i], b_d[i], CL_TRUE, 0, K * N * sizeof(float), B, 0, NULL, NULL);
    CHECK_ERROR(err);
  }

  for (int i = 0; i < NUM_OF_GPU; i++) {
    // DO NOT REMOVE; NEEDED FOR TIME MEASURE
    err = clFinish(queue[i]);
    CHECK_ERROR(err);
  }

}
|
|
|
|
void mat_mul_final(float *A, float *B, float *C, int M, int N, int K) {
|
|
for (int i = 0; i < NUM_OF_GPU; i++) {
|
|
// Read from GPU; c_d (gpu) -> C (cpu)
|
|
err = clEnqueueReadBuffer(queue[i], c_d[i], CL_TRUE, 0, M_divided[i] * N * sizeof(float), &C[offset[i] * N], 0, NULL, NULL);
|
|
CHECK_ERROR(err);
|
|
}
|
|
for (int i = 0; i < NUM_OF_GPU; i++) {
|
|
// DO NOT REMOVE; NEEDED FOR TIME MEASURE
|
|
err = clFinish(queue[i]);
|
|
CHECK_ERROR(err);
|
|
}
|
|
}
|