chundoong-lab-ta/SamsungDS22/submissions/final/hello.kwak/tmp-B/convolution.cu

#include <stdio.h>
#include <cuda_runtime.h>
#include <mpi.h>
#include "convolution.h"
#include "util.h"
// Tunable parameters: maximum GPUs per rank and square tile (thread-block) size
#define MAX_GPU_NUMBER 4
#define TS 8
#define CUDA_CALL(f)                                                         \
  {                                                                          \
    cudaError_t err = (f);                                                   \
    if (err != cudaSuccess) {                                                \
      fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__,   \
              err, cudaGetErrorString(err));                                 \
      exit(1);                                                               \
    }                                                                        \
  }
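// Host-side tensors, per-GPU device buffers, problem dimensions, and the
// batch partition across ranks (MP_size) and across GPUs (N_size).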
static float *input, *output, *filter;
static float *input_d[MAX_GPU_NUMBER], *output_d[MAX_GPU_NUMBER], *filter_d[MAX_GPU_NUMBER];
static int N, C, H, W;
static int K, R, S;
static int OH, OW;
static int pad;
static int dilation;
static int stride;
static int mpi_rank, mpi_world_size;
static int num_devices = 1;
static int MP_size[2];
static int N_size[MAX_GPU_NUMBER];
__global__ void convolution_cuda(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  const int globalRow = blockDim.x * blockIdx.x + threadIdx.x;
  const int globalCol = blockDim.y * blockIdx.y + threadIdx.y;

  int OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  int OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;

  if (globalRow >= OH || globalCol >= _N * _K * OW) return;

  // indexing: decompose the flattened column index into
  // (batch n, filter k, output column col)
  int idx = globalCol;
  int n = idx / (_K * OW);
  idx -= n * (_K * OW);
  int k = idx / OW;
  int col = idx - k * OW;
  int row = globalRow;

  int start_row = row * _stride - _pad;
  int start_col = col * _stride - _pad;
  float out_buf = 0.0f;
  for (int c = 0; c < _C; c++) {
    for (int i = 0; i < _R; i++) {
      for (int j = 0; j < _S; j++) {
        int h = start_row + i * _dilation;
        int w = start_col + j * _dilation;
        if (h < 0 || w < 0 || h >= _H || w >= _W) continue;
        float in_buf = _input[n * _C * _H * _W + c * _H * _W + h * _W + w];
        float filt_buf = _filter[k * _C * _R * _S + c * _R * _S + i * _S + j];
        out_buf += in_buf * filt_buf;
      }
    }
  }
  _output[n * _K * OH * OW + k * OH * OW + row * OW + col] = out_buf;
}
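// Output size follows the usual convolution arithmetic. Example (hypothetical
// sizes): H = 32, R = 3, pad = 1, dilation = 1, stride = 1 gives
// OH = (32 + 2*1 - 1*(3-1) - 1) / 1 + 1 = 32, i.e. a "same"-size output.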
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W; K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;
  input = _input;
  output = _output;
  filter = _filter;
  MPI_Request send_req[2];
  MPI_Status status;
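  // Rank 0 ships the second half of the batch and the filter to rank 1;
  // rank 1 allocates local tensors and receives them.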
  if (mpi_rank == 0 && MP_size[1] != 0 && mpi_world_size == 2) {
    MPI_Isend(&input[MP_size[0] * C * H * W], MP_size[1] * C * H * W, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &send_req[0]);
    MPI_Isend(filter, K * C * R * S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &send_req[1]);
    if (MP_size[mpi_rank] < MAX_GPU_NUMBER) {
      num_devices = MP_size[mpi_rank];
    }
  } else if (mpi_rank == 1 && MP_size[mpi_rank] != 0) {
    alloc_tensor(&input, MP_size[1], C, H, W);
    alloc_tensor(&output, MP_size[1], K, OH, OW);
    alloc_tensor(&filter, K, C, R, S);
    MPI_Recv(input, MP_size[1] * C * H * W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    MPI_Recv(filter, K * C * R * S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    if (MP_size[mpi_rank] < MAX_GPU_NUMBER) {
      num_devices = MP_size[mpi_rank];
    }
  }
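  // Scatter each GPU's slice of the batch and replicate the filter on every device.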
  int offset = 0;
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMemcpy(input_d[i], input + offset, N_size[i] * C * H * W * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CALL(cudaMemcpy(filter_d[i], filter, K * C * R * S * sizeof(float), cudaMemcpyHostToDevice));
    offset += N_size[i] * C * H * W;
  }
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaDeviceSynchronize());
  }
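  // One kernel launch per GPU: blockIdx.x tiles the OH rows, blockIdx.y tiles
  // the flattened N_size[i] * K * OW columns.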
  for (int i = 0; i < num_devices; i++) {
    dim3 gridDim((OH + TS - 1) / TS, (N_size[i] * K * OW + TS - 1) / TS, 1);
    dim3 blockDim(TS, TS, 1);
    CUDA_CALL(cudaSetDevice(i));
    convolution_cuda<<<gridDim, blockDim>>>(input_d[i], output_d[i], filter_d[i], N_size[i], C, H, W, K, R, S, pad, dilation, stride);
  }
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaDeviceSynchronize());
  }
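  // Gather each device's output slice back into the host buffer.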
  offset = 0;
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMemcpy(output + offset, output_d[i], N_size[i] * K * OH * OW * sizeof(float), cudaMemcpyDeviceToHost));
    offset += N_size[i] * K * OH * OW;
  }
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaDeviceSynchronize());
  }
  if (mpi_rank == 0 && MP_size[1] != 0 && mpi_world_size == 2) {
    MPI_Waitall(2, send_req, MPI_STATUSES_IGNORE);
    MPI_Recv(&output[MP_size[0] * K * OH * OW], MP_size[1] * K * OH * OW, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &status);
  } else if (mpi_rank == 1 && MP_size[1] != 0) {
    MPI_Send(output, MP_size[1] * K * OH * OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
  }
}
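/*
 * Expected call sequence (a sketch; the actual driver in this lab's harness
 * may differ):
 *   convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
 *   convolution(input, output, filter, N, C, H, W, K, R, S, pad, dilation, stride);
 *   convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);
 */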
void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W; K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;
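  // Split the batch between the two ranks, then spread each rank's share
  // across its GPUs, giving the first `remainder` devices one extra sample.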
  if (mpi_world_size == 2) MP_size[1] = _N / 2;
  MP_size[0] = N - MP_size[1];
  if (MP_size[mpi_rank] < MAX_GPU_NUMBER) {
    num_devices = MP_size[mpi_rank];
    for (int i = 0; i < MP_size[mpi_rank]; i++) {
      N_size[i] = 1;
    }
  } else {
    num_devices = MAX_GPU_NUMBER;
    int remainder = MP_size[mpi_rank] % MAX_GPU_NUMBER;
    int quotient = MP_size[mpi_rank] / MAX_GPU_NUMBER;
    for (int i = 0; i < MAX_GPU_NUMBER; i++) {
      N_size[i] = quotient;
      if (i < remainder) N_size[i]++;
    }
  }
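  // Per-GPU buffers sized to each device's slice of the batch.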
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaMalloc(&input_d[i], N_size[i] * C * H * W * sizeof(float)));
    CUDA_CALL(cudaMalloc(&output_d[i], N_size[i] * K * OH * OW * sizeof(float)));
    CUDA_CALL(cudaMalloc(&filter_d[i], K * C * R * S * sizeof(float)));
  }
}
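// Partition example (hypothetical sizes): N = 13 with two ranks gives
// MP_size = {7, 6}; rank 0's 7 samples over MAX_GPU_NUMBER = 4 GPUs
// give N_size = {2, 2, 2, 1}.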
void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  // Release the per-GPU buffers allocated in convolution_init.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL(cudaSetDevice(i));
    CUDA_CALL(cudaFree(input_d[i]));
    CUDA_CALL(cudaFree(output_d[i]));
    CUDA_CALL(cudaFree(filter_d[i]));
  }
}