#include "mat_mul.h"

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <mpi.h>   // needed for the MPI_* calls below

#include "util.h"

#define CUDA_CALL(f)                                                         \
  {                                                                          \
    cudaError_t err = (f);                                                   \
    if (err != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__,   \
              err, cudaGetErrorString(err));                                 \
      exit(1);                                                               \
    }                                                                        \
  }

#define MAX_NUM_GPU 4
#define TS 8
#define WPT 8
#define RTS (TS / WPT)
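// TS is the edge of the 2-D thread block used below (TS x TS threads per block).
// WPT and RTS appear to be left over from a work-per-thread tiling variant and
// are not referenced anywhere else in this file.
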
// Host-side pointers to the full input, output, and filter tensors
static float *input, *output, *filter;
// Per-GPU device buffers
static float *in_d[MAX_NUM_GPU], *out_d[MAX_NUM_GPU], *fil_d[MAX_NUM_GPU];

static int N, C, H, W;
static int K, R, S;
static int pad, dilation, stride;
static int mpi_rank, mpi_world_size;

static int num_devices = 1;
static int size[2];          // number of input images assigned to each MPI rank
static int NN[MAX_NUM_GPU];  // number of input images assigned to each GPU on this rank
static int OH, OW;

// One thread computes one output element. The x dimension of the grid walks the
// output rows (OH); the y dimension walks the flattened (batch, filter, output
// column) index.
__global__ void conv_kernel(float *_input, float *_output, float *_filter,
                            int _N, int _C, int _H, int _W,
                            int _K, int _R, int _S,
                            int _pad, int _dilation, int _stride) {

  const int globalRow = blockDim.x * blockIdx.x + threadIdx.x;
  const int globalCol = blockDim.y * blockIdx.y + threadIdx.y;
  int OH, OW;

  OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;

  // Decompose the flattened column index into batch n, filter k, and output column w
  int n, k, w;
  w = globalCol;
  n = w / (_K * OW);
  w = w - n * (_K * OW);
  k = w / OW;
  w = w - k * OW;

  int col = w;
  int row = globalRow;

  if (globalRow >= OH || globalCol >= _N * _K * OW) return;

  int start_row = row * _stride - _pad;
  int start_col = col * _stride - _pad;

  float o = 0.0f;
  for (int c = 0; c < _C; c++) {
    for (int i = 0; i < _R; i++) {
      for (int j = 0; j < _S; j++) {
        int h = start_row + i * _dilation;
        int w = start_col + j * _dilation;
        if (h < 0 || w < 0 || h >= _H || w >= _W) continue;
        float in = _input[n * _C * _W * _H + c * _W * _H + h * _W + w];
        float fil = _filter[k * _C * _R * _S + c * _R * _S + i * _S + j];
        o += in * fil;
      }
    }
  }
  _output[n * _K * OH * OW + k * OH * OW + row * OW + col] = o;
}

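// Host-side driver. With two MPI ranks, rank 0 forwards the second half of the
// batch and the filter to rank 1; each rank then copies its share of the batch
// to up to MAX_NUM_GPU devices, launches the kernel on every device, copies the
// results back, and rank 1 finally returns its outputs to rank 0.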
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {

  int offset = 0;
  MPI_Request request;
  MPI_Status status;

  input = _input;
  output = _output;
  filter = _filter;

  if (mpi_rank == 0 && mpi_world_size == 2 && size[1] != 0) {
    // Rank 0 sends the second half of the batch and the filter to rank 1.
    MPI_Isend(&input[size[0] * C * H * W], size[1] * C * H * W, MPI_FLOAT, 1, 0,
              MPI_COMM_WORLD, &request);
    MPI_Isend(filter, _K * _C * _R * _S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request);
    if (size[mpi_rank] < MAX_NUM_GPU) {
      num_devices = size[mpi_rank];
    }
  }
  else if (mpi_rank == 1 && size[mpi_rank] != 0) {
    // Rank 1 receives its share of the input and the filter into its own host tensors.
    alloc_tensor(&input, size[1], C, H, W);
    alloc_tensor(&output, size[1], K, OH, OW);
    alloc_tensor(&filter, _K, _C, _R, _S);

    MPI_Recv(input, size[1] * C * H * W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    MPI_Recv(filter, _K * _C * _R * _S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    if (size[mpi_rank] < MAX_NUM_GPU) {
      num_devices = size[mpi_rank];
    }
  }

  // Copy each GPU's slice of the input and the whole filter to the device.
  offset = 0;
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaMemcpy(in_d[i], input + offset, NN[i] * C * H * W * sizeof(float),
                          cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMemcpy(fil_d[i], filter, K * C * R * S * sizeof(float),
                          cudaMemcpyHostToDevice) );
    offset += NN[i] * C * H * W;
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaDeviceSynchronize() );  // needed if the copies above are made asynchronous
  }

  // Launch the kernel on every GPU
  for (int i = 0; i < num_devices; i++) {  // candidate for further optimization
    dim3 blockDim(TS, TS, 1);
    dim3 gridDim((OH + TS - 1) / TS, (NN[i] * K * OW + TS - 1) / TS, 1);

    CUDA_CALL( cudaSetDevice(i) );
    conv_kernel<<<gridDim, blockDim>>>(in_d[i], out_d[i], fil_d[i], NN[i], _C, _H, _W,
                                       _K, _R, _S, _pad, _dilation, _stride);
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }

  // Copy each GPU's outputs back to the host.
  offset = 0;
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(output + offset, out_d[i],
                          NN[i] * K * OH * OW * sizeof(float), cudaMemcpyDeviceToHost) );
    offset += NN[i] * K * OH * OW;
  }

  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }

  // Gather rank 1's outputs back on rank 0.
  if (mpi_rank == 0 && mpi_world_size == 2 && size[1] != 0) {
    MPI_Recv(&output[size[0] * K * OH * OW], size[1] * K * OH * OW, MPI_FLOAT, 1, 0,
             MPI_COMM_WORLD, &status);
  }
  else if (mpi_rank == 1 && size[1] != 0) {
    MPI_Isend(output, size[1] * K * OH * OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request);
  }
}

void convolution_init(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {

  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad; dilation = _dilation; stride = _stride;

  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);

  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  // Split the batch between the two MPI ranks: rank 1 gets half of the images
  // only when two ranks are available and the batch is large enough.
  if (mpi_world_size == 2 && _N > 4) size[1] = _N / 2;
  else size[1] = 0;
  size[0] = N - size[1];

  // Split this rank's share across the available GPUs.
  if (size[mpi_rank] < MAX_NUM_GPU) {
    num_devices = size[mpi_rank];
    for (int i = 0; i < size[mpi_rank]; i++)
      NN[i] = 1;
  }
  else {
    num_devices = MAX_NUM_GPU;
    int quotient = size[mpi_rank] / MAX_NUM_GPU;
    int remain = size[mpi_rank] % MAX_NUM_GPU;
    for (int i = 0; i < MAX_NUM_GPU; i++) {
      NN[i] = quotient;
      if (i < remain) NN[i]++;
    }
  }

  // Allocate the per-GPU device buffers.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMalloc(&in_d[i], NN[i] * C * H * W * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&out_d[i], NN[i] * K * OH * OW * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&fil_d[i], K * C * R * S * sizeof(float)) );
  }
}

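// Worked example of the split above (values assumed for illustration): with
// mpi_world_size == 2 and N == 10, convolution_init sets size[1] = 5 and
// size[0] = 5; a rank that owns 5 images with MAX_NUM_GPU == 4 then uses
// num_devices == 4 with NN == {2, 1, 1, 1}.
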
void mat_mul_final(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
}
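
// The sketch below is an assumption, not part of the original code: it shows
// how the device buffers allocated in convolution_init could be released once
// all work is done (for example from mat_mul_final). It is not called anywhere
// in this file.
static void release_device_buffers_sketch() {
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaFree(in_d[i]) );
    CUDA_CALL( cudaFree(out_d[i]) );
    CUDA_CALL( cudaFree(fil_d[i]) );
  }
}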