#include "convolution.h"
|
|
#include <mpi.h>
|
|
#include <stdio.h>
|
|
#include "util.h"
|
|
|
|
#define MAX_NUM_GPU 4
|
|
|
|
#define TS 8
|
|
int num_devices = 0;
|
|
|
|
int num_threads = 40;
|
|
|
|
static float *input, *output, *filter;
|
|
static int N, C, H, W;
|
|
static int K, R, S;
|
|
static int outH, outW;
|
|
static int pad;
|
|
static int dilation;
|
|
static int stride;
|
|
static int mpi_rank, mpi_world_size;
|
|
|
|
//static float *input, *output, *filter;
|
|
|
|
static float *input_d[MAX_NUM_GPU];
|
|
static float *output_d[MAX_NUM_GPU];
|
|
static float *filter_d[MAX_NUM_GPU];
|
|
|
|
static int Nbegin[MAX_NUM_GPU], Nend[MAX_NUM_GPU];
|
|
|
|
#define CUDA_CALL(f) \
|
|
{ \
|
|
cudaError_t err = (f); \
|
|
if (err != cudaSuccess) { \
|
|
fprintf(stderr, "CUDA error at [%s:%d] %d %s\n", __FILE__, __LINE__, \
|
|
err, cudaGetErrorString(err)); \
|
|
exit(1); \
|
|
} \
|
|
}

__global__ void cuda_conv(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {

  // One thread per output element: x indexes the output row,
  // y indexes the flattened (batch, output channel, column) triple.
  const int globalRow = blockDim.x * blockIdx.x + threadIdx.x;
  const int globalCol = blockDim.y * blockIdx.y + threadIdx.y;

  const int OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  const int OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;

  // Decompose globalCol = n * (_K * OW) + k * OW + col.
  int idx = globalCol;
  const int n = idx / (_K * OW);
  idx -= n * (_K * OW);
  const int k = idx / OW;
  const int col = idx - k * OW;
  const int row = globalRow;

  if (globalRow >= OH || globalCol >= _N * _K * OW) return;

  const int start_row = row * _stride - _pad;
  const int start_col = col * _stride - _pad;

  float o = 0.0f;
  for (int c = 0; c < _C; c++) {
    for (int i = 0; i < _R; i++) {
      for (int j = 0; j < _S; j++) {
        int h = start_row + i * _dilation;
        int w = start_col + j * _dilation;
        if (h < 0 || w < 0 || h >= _H || w >= _W) continue;  // zero padding
        float in = _input[n * _C * _H * _W + c * _H * _W + h * _W + w];
        float fil = _filter[k * _C * _R * _S + c * _R * _S + i * _S + j];
        o += in * fil;
      }
    }
  }
  _output[n * _K * OH * OW + k * OH * OW + row * OW + col] = o;
}
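
/*
 * Worked example of the column decomposition above (illustrative values,
 * not taken from this file): with _K = 3 and OW = 5, a thread with
 * globalCol = 23 computes n = 23 / 15 = 1, then idx = 23 - 15 = 8,
 * k = 8 / 5 = 1, col = 8 - 5 = 3, i.e. batch 1, output channel 1, column 3.
 */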

void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {

  int size[2];
  MPI_Request request;
  MPI_Status status;

  input = _input;
  output = _output;
  filter = _filter;

  // Split the batch between the two ranks: rank 1 takes _N / 2 and rank 0
  // keeps the rest (the split assumes _N is even, so both shares match).
  if (mpi_world_size == 2) size[1] = _N / 2;
  else size[1] = 0;
  size[0] = _N - size[1];

  int OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  int OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;
  outH = OH;
  outW = OW;

  if (mpi_rank == 0 && mpi_world_size == 2) {
    // Ship the tail of the batch, plus a full copy of the filter, to rank 1.
    MPI_Isend(&input[size[0]*C*H*W], size[1]*C*H*W, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request);
    MPI_Wait(&request, &status);  // complete the send before reusing the request
    MPI_Isend(filter, K*C*R*S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request);
    MPI_Wait(&request, &status);
  } else if (mpi_world_size == 2) {
    alloc_tensor(&input, size[1], C, H, W);
    alloc_tensor(&output, size[1], K, outH, outW);
    alloc_tensor(&filter, K, C, R, S);
    MPI_Recv(input, size[1]*C*H*W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    MPI_Recv(filter, K*C*R*S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
  }

  // Upload each GPU's slice of the batch and a full copy of the filter.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(input_d[i], input + Nbegin[i] * _C*_H*_W,
                          (Nend[i] - Nbegin[i]) * _C*_H*_W * sizeof(float),
                          cudaMemcpyHostToDevice) );
    CUDA_CALL( cudaMemcpy(filter_d[i], filter, _K * _C*_R*_S * sizeof(float),
                          cudaMemcpyHostToDevice) );
  }

  // Launch one kernel per GPU over its slice of the batch.
  for (int i = 0; i < num_devices; i++) {
    dim3 gridDim((OH + TS - 1) / TS, ((Nend[i] - Nbegin[i]) * K * OW + TS - 1) / TS, 1);
    dim3 blockDim(TS, TS, 1);

    CUDA_CALL( cudaSetDevice(i) );
    cuda_conv<<<gridDim, blockDim>>>(input_d[i], output_d[i], filter_d[i],
                                     Nend[i] - Nbegin[i], _C, _H, _W,
                                     _K, _R, _S, _pad, _dilation, _stride);
  }
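
  /*
   * Grid sizing, worked through with assumed numbers (not from this file):
   * with TS = 8, OH = 32, a 2-batch slice, K = 16 and OW = 32, the kernel
   * covers 32 rows and 2 * 16 * 32 = 1024 flattened columns, so
   * gridDim = ((32+7)/8, (1024+7)/8) = (4, 128) blocks of 8x8 threads.
   */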

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}
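
/*
 * A minimal sketch of the intended call sequence, assuming an external
 * driver (not part of this file) brackets everything with
 * MPI_Init/MPI_Finalize; the surrounding code here is illustrative only:
 *
 *   MPI_Init(&argc, &argv);
 *   convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
 *   convolution(input, output, filter, N, C, H, W, K, R, S, pad, dilation, stride);
 *   convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);
 *   MPI_Finalize();
 */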

#if 0
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  int size[2];
  MPI_Request request;
  MPI_Status status;

  input = _input;
  output = _output;
  filter = _filter;

  if (mpi_world_size == 2) size[1] = _N / 2;
  else size[1] = 0;
  size[0] = _N - size[1];

  outH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  outW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  if (mpi_rank == 0 && mpi_world_size == 2) {
    MPI_Isend(&input[size[0]*C*H*W], size[1]*C*H*W, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request);
    MPI_Isend(filter, K*C*R*S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &request);
  } else if (mpi_world_size == 2) {
    alloc_tensor(&input, size[1], C, H, W);
    alloc_tensor(&output, size[1], K, outH, outW);
    alloc_tensor(&filter, K, C, R, S);
    MPI_Recv(input, size[1]*C*H*W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
    MPI_Recv(filter, K*C*R*S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);
  }

  // A single parallel region with collapse(3) replaces the earlier attempt
  // at nesting one parallel-for per loop level.
  #pragma omp parallel for num_threads(num_threads) collapse(3) schedule(dynamic)
  for (int n = 0; n < size[mpi_rank]; ++n) {
    for (int k = 0; k < K; ++k) {
      for (int oh = 0; oh < outH; ++oh) {
        for (int ow = 0; ow < outW; ++ow) {
          float o = 0.f;
          for (int c = 0; c < C; ++c) {      // input channel
            for (int r = 0; r < R; ++r) {    // filter height
              for (int s = 0; s < S; ++s) {  // filter width
                int h = oh * stride - pad + r * dilation;
                int w = ow * stride - pad + s * dilation;
                if (h < 0 || h >= H || w < 0 || w >= W) continue;
                float i = input[n * C * H * W + c * H * W + h * W + w];
                float f = filter[k * C * R * S + c * R * S + r * S + s];
                o += i * f;
              }
            }
          }
          output[n * K * outH * outW + k * outH * outW + oh * outW + ow] = o;
          if (n == 0 && k == 0 && oh == 0 && ow == 0)
            printf("output[0][0][0][0] = %f\n", o);
        } // ow
      } // oh
    } // k
  } // n

  if (mpi_rank == 0 && mpi_world_size == 2) {
    MPI_Recv(&output[size[0]*K*outH*outW], size[1]*K*outH*outW, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &status);
  } else if (mpi_world_size == 2) {
    MPI_Isend(output, size[1]*K*outH*outW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request);
  }
}
#endif

#if 0
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  input = _input;
  output = _output;
  filter = _filter;

  outH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  outW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  if (mpi_rank == 0) {
    for (int n = 0; n < N; ++n) {
      for (int k = 0; k < K; ++k) {
        for (int oh = 0; oh < outH; ++oh) {
          for (int ow = 0; ow < outW; ++ow) {
            float o = 0.f;
            for (int c = 0; c < C; ++c) {
              for (int r = 0; r < R; ++r) {
                for (int s = 0; s < S; ++s) {
                  int h = oh * stride - pad + r * dilation;
                  int w = ow * stride - pad + s * dilation;
                  if (h < 0 || h >= H || w < 0 || w >= W) continue;
                  float i = input[n * C * H * W + c * H * W + h * W + w];
                  float f = filter[k * C * R * S + c * R * S + r * S + s];
                  o += i * f;
                }
              }
            }
            output[n * K * outH * outW + k * outH * outW + oh * outW + ow] = o;
          }
        }
      }
    }
  }
}
#endif

void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;

  int size[2];
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);

  if (mpi_world_size == 2) size[1] = _N / 2;
  else size[1] = 0;
  size[0] = N - size[1];

  // From here on, N is this rank's share of the batch.
  N = size[0];

  int OH, OW;
  OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;
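
  /*
   * Example of the output-size formula (illustrative numbers, not from
   * this file): H = W = 8, pad = 1, dilation = 1, R = S = 3, stride = 1
   * gives OH = OW = (8 + 2 - 2 - 1) / 1 + 1 = 8, i.e. a "same" convolution.
   */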

  CUDA_CALL( cudaGetDeviceCount(&num_devices) );
  // The per-GPU arrays above hold at most MAX_NUM_GPU entries.
  if (num_devices > MAX_NUM_GPU) num_devices = MAX_NUM_GPU;

  printf("Using %d devices\n", num_devices);

  for (int i = 0; i < num_devices; i++) {
    cudaDeviceProp prop;
    CUDA_CALL( cudaGetDeviceProperties(&prop, i) );

    // Try printing more detailed information here
    printf("[GPU %d] %s\n", i, prop.name);
  }

  if (num_devices <= 0) {
    printf("No CUDA device found. Aborting\n");
    exit(1);
  }

  // Setup problem size for each GPU
  for (int i = 0; i < num_devices; i++) {
    Nbegin[i] = (N / num_devices) * i;
    Nend[i] = (N / num_devices) * (i + 1);
  }
  Nend[num_devices - 1] = N;
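
  /*
   * Illustrative split (values assumed for the example): with N = 10 and
   * num_devices = 4, each GPU first gets 10 / 4 = 2 batches, giving ranges
   * [0,2), [2,4), [4,6), [6,8); the last GPU's end is then bumped to N,
   * so GPU 3 covers [6,10) and absorbs the remainder.
   */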

  // Allocate device memory for each GPU
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMalloc(&input_d[i], (Nend[i] - Nbegin[i]) * C*H*W * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&filter_d[i], K * C*R*S * sizeof(float)) );
    CUDA_CALL( cudaMalloc(&output_d[i], (Nend[i] - Nbegin[i]) * K*OH*OW * sizeof(float)) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }
}

void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {

  int OH, OW;
  int size[2];
  MPI_Request request;
  MPI_Status status;

  if (mpi_world_size == 2) size[1] = _N / 2;
  else size[1] = 0;
  size[0] = _N - size[1];

  OH = (_H + 2 * _pad - _dilation * (_R - 1) - 1) / _stride + 1;
  OW = (_W + 2 * _pad - _dilation * (_S - 1) - 1) / _stride + 1;

  // Download each GPU's slice of the output tensor.
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaMemcpy(output + Nbegin[i] * _K*OH*OW, output_d[i],
                          (Nend[i] - Nbegin[i]) * _K*OH*OW * sizeof(float),
                          cudaMemcpyDeviceToHost) );
  }

  // DO NOT REMOVE; NEEDED FOR TIME MEASURE
  for (int i = 0; i < num_devices; i++) {
    CUDA_CALL( cudaSetDevice(i) );
    CUDA_CALL( cudaDeviceSynchronize() );
  }

  // Gather rank 1's share of the output behind rank 0's share.
  if (mpi_rank == 0 && mpi_world_size == 2) {
    MPI_Recv(&output[size[0]*K*outH*outW], size[1]*K*outH*outW, MPI_FLOAT, 1, 0, MPI_COMM_WORLD, &status);
  } else if (mpi_world_size == 2) {
    MPI_Isend(output, size[1]*K*outH*outW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &request);
    MPI_Wait(&request, &status);  // make sure the send completes before returning
  }
}