chundoong-lab-ta/SamsungDS22/submissions/final/jbeom37.lim/tmp-B/convolution.cu

#include "convolution.h"
#include <stdio.h>
#include <mpi.h>
#include <cuda_runtime.h>
#include "util.h"
#define CUDA_CALL(f)                                                        \
  {                                                                          \
    cudaError_t err = (f);                                                   \
    if (err != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__,   \
              err, cudaGetErrorString(err));                                 \
      exit(1);                                                               \
    }                                                                        \
  }
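// TS: side length of the square thread block (TS x TS threads).
// MAX_NUM_GPU: maximum number of GPUs used by each MPI rank.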
#define TS 8
#define MAX_NUM_GPU 4
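// Host-side problem tensors and the per-GPU device copies.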
static float *input, *output, *filter;
static float *in_d[MAX_NUM_GPU], *out_d[MAX_NUM_GPU], *filter_d[MAX_NUM_GPU];
static int N, C, H, W;
static int K, R, S;
static int OH, OW;
static int pad;
static int stride;
static int dilation;
static int mpi_rank, mpi_world_size;
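// Work split: n_devices = GPUs used on this rank, MP_size[r] = images assigned
// to MPI rank r, NUM_OFN[i] = images assigned to GPU i on this rank.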
static int n_devices = 1;
static int MP_size[2];
static int NUM_OFN[MAX_NUM_GPU];
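// One thread computes one output element: the grid's x-dimension walks output
// rows (OH) and its y-dimension walks the flattened (batch, output channel,
// output column) index of size N*K*OW.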
__global__ void cuda_conv(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  const int global_row = blockDim.x * blockIdx.x + threadIdx.x;
  const int global_col = blockDim.y * blockIdx.y + threadIdx.y;

  // Recompute the output spatial size on the device.
  const int OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  const int OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;

  if (global_row >= OH || global_col >= _N * _K * OW) return;

  // Decompose the flattened column index into (image NN, output channel KK,
  // output column col).
  int WW = global_col;
  const int NN = WW / (_K * OW);
  WW -= NN * (_K * OW);
  const int KK = WW / OW;
  const int col = WW - KK * OW;
  const int row = global_row;

  const int start_row = row * _stride - _pad;
  const int start_col = col * _stride - _pad;

  float OUT = 0.0f;
  for (int c = 0; c < _C; c++) {
    for (int i = 0; i < _R; i++) {
      for (int j = 0; j < _S; j++) {
        int h = start_row + i * _dilation;
        int w = start_col + j * _dilation;
        if (h < 0 || w < 0 || h >= _H || w >= _W) continue;
        float in = _input[NN*_C*_W*_H + c*_W*_H + h*_W + w];
        float filt = _filter[KK*_C*_R*_S + c*_R*_S + i*_S + j];
        OUT += in * filt;
      }
    }
  }
  _output[NN*_K*OH*OW + KK*OH*OW + row*OW + col] = OUT;
}
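// convolution(): rank 0 forwards rank 1's share of the batch and the filter,
// each rank copies its slices to its GPUs, runs cuda_conv, copies the results
// back, and rank 0 gathers rank 1's outputs.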
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  int offset = 0;
  input = _input;
  output = _output;
  filter = _filter;

  MPI_Request request[2];
  MPI_Status status;

  if (mpi_rank == 0 && mpi_world_size == 2 && MP_size[1] != 0) {
    // Rank 0 ships the second half of the batch and the filter to rank 1,
    // overlapping the transfer with its own GPU work.
    MPI_Isend(&input[MP_size[0]*C*H*W], MP_size[1]*C*H*W, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request[0]);
    MPI_Isend(filter, _K*_C*_R*_S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request[1]);
    if (MP_size[mpi_rank] < MAX_NUM_GPU) {
      n_devices = MP_size[mpi_rank];
    }
  } else if (mpi_rank == 1 && MP_size[mpi_rank] != 0) {
    // Rank 1 allocates local buffers for its share of the batch and receives
    // its inputs and the filter from rank 0.
    alloc_tensor(&input, MP_size[1], C, H, W);
    alloc_tensor(&output, MP_size[1], K, OH, OW);
    alloc_tensor(&filter, _K, _C, _R, _S);
    MPI_Recv(input, MP_size[1]*C*H*W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    MPI_Recv(filter, _K*_C*_R*_S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    if (MP_size[mpi_rank] < MAX_NUM_GPU) {
      n_devices = MP_size[mpi_rank];
    }
  }
  // Scatter each GPU's slice of the input batch and a full copy of the filter.
  offset = 0;
  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(in_d[i], input + offset, NUM_OFN[i]*C*H*W*sizeof(float), cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMemcpy(filter_d[i], filter, K*C*R*S*sizeof(float), cudaMemcpyHostToDevice) );
    offset += NUM_OFN[i] * C * H * W;
  }
  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }

  // Launch one kernel per GPU over its NUM_OFN[i] images.
  for (int i = 0; i < n_devices; i++) {
    dim3 gridDim((OH + TS - 1) / TS, (NUM_OFN[i]*K*OW + TS - 1) / TS, 1);
    dim3 blockDim(TS, TS, 1);
    CUDA_CALL( cudaSetDevice(i) );
    cuda_conv<<<gridDim, blockDim>>>(in_d[i], out_d[i], filter_d[i], NUM_OFN[i], _C, _H, _W, _K, _R, _S, _pad, _dilation, _stride);
    CUDA_CALL( cudaGetLastError() );
  }
  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }

  // Gather each GPU's output slice back to the host.
  offset = 0;
  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(output + offset, out_d[i], NUM_OFN[i]*K*OH*OW*sizeof(float), cudaMemcpyDeviceToHost) );
    offset += NUM_OFN[i]*K*OH*OW;
  }
  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
  if (mpi_rank == 0 && mpi_world_size == 2 && MP_size[1] != 0) {
    // Collect rank 1's half of the output and complete the earlier sends.
    MPI_Recv(&output[MP_size[0]*K*OH*OW], MP_size[1]*K*OH*OW, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &status);
    MPI_Waitall(2, request, MPI_STATUSES_IGNORE);
  } else if (mpi_rank == 1 && MP_size[1] != 0) {
    MPI_Isend(output, MP_size[1]*K*OH*OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request[0]);
    MPI_Wait(&request[0], &status);
  }
}
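// convolution_init(): caches the problem size, splits the batch across the
// MPI ranks (half each when two ranks are available) and across up to
// MAX_NUM_GPU GPUs per rank, and allocates the per-GPU device buffers.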
void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W; K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;

  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  if (mpi_world_size == 2) {
    MP_size[1] = _N / 2;
  }
  MP_size[0] = N - MP_size[1];

  if (MP_size[mpi_rank] < MAX_NUM_GPU) {
    n_devices = MP_size[mpi_rank];
    for (int i = 0; i < MP_size[mpi_rank]; i++) {
      NUM_OFN[i] = 1;
    }
  } else {
    n_devices = MAX_NUM_GPU;
    int remain = MP_size[mpi_rank] % MAX_NUM_GPU;
    int quot = MP_size[mpi_rank] / MAX_NUM_GPU;
    for (int i = 0; i < MAX_NUM_GPU; i++) {
      NUM_OFN[i] = quot;
      if (i < remain) {
        NUM_OFN[i]++;
      }
    }
  }

  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMalloc(&in_d[i], NUM_OFN[i]*C*H*W*sizeof(float)) );
    CUDA_CALL( cudaMalloc(&out_d[i], NUM_OFN[i]*K*OH*OW*sizeof(float)) );
    CUDA_CALL( cudaMalloc(&filter_d[i], K*C*R*S*sizeof(float)) );
  }
}
void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
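  // Release the per-GPU buffers allocated in convolution_init().
  for (int i = 0; i < n_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaFree(in_d[i]) );
    CUDA_CALL( cudaFree(out_d[i]) );
    CUDA_CALL( cudaFree(filter_d[i]) );
  }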
}