chundoong-lab-ta/SamsungDS22/submissions/final/yw0.kim/A/convolution.cpp

223 lines
6.2 KiB
C++

#include "convolution.h"
#include "util.h"
#include <mpi.h>
#include <stdio.h>
#include <omp.h>
// Problem tensors. On rank 0 these alias the caller's buffers; on rank 1 they
// point at locally allocated staging buffers (see convolution_init).
static float *input, *output, *filter;
// Problem dimensions: batch, channels, input height/width.
static int N, C, H, W;
// Filter dimensions: output channels, filter height/width.
static int K, R, S;
// Output spatial dimensions, derived in convolution_init.
static int OH, OW;
static int pad;
static int dilation;
static int stride;
// MPI topology; the code assumes at most 2 ranks (see convolution).
static int mpi_rank, mpi_world_size;
// Batch range [nstart, nend) owned by this rank; nlen = nend - nstart.
static int nstart, nend, nlen;
static MPI_Request request;
static MPI_Status status;
#define NUM_THREADS_PER_NODE 40
#define RTILE_SIZE 2
#define STILE_SIZE 64
#define KTILE_SIZE 16
#define OHTILE_SIZE 4
// Channel tile size — the only tile actually used by convolution_omp.
#define CTILE_SIZE 8
#define OWTILE_SIZE 32
#define UNROLL_SIZE 4
// NOTE: evaluates its arguments twice; only use with side-effect-free operands.
#define min(A, B) (((A) > (B)) ? (B) : (A))
// Flattened-index strides for the NCHW input, KCRS filter and NKOHOW output.
#define H_W (H * W)
#define C_H_W (C * H * W)
#define R_S (R * S)
#define C_R_S (C * R * S)
#define OH_OW (OH * OW)
#define K_OH_OW (K * OH * OW)
void convolution_omp(void)
{
// int ss = STILE_SIZE;
// int rs = RTILE_SIZE;
int cs = CTILE_SIZE;
// int ohs = OHTILE_SIZE;
// int ows = OWTILE_SIZE;
// int ks = KTILE_SIZE;
// printf("Node #%d, (nstart, nend, nlen): (%d, %d, %d)\n", mpi_rank, nstart, nend, nlen);
// for(int i = 0; i < 10; i++)
// {
// printf("input[%d]: %f\n", i, input[i]);
// }
for (int n = 0; n < nlen; ++n)
{
#pragma omp parallel for collapse(2) schedule(dynamic)
for (int k = 0; k < K; ++k)
{
for (int oh = 0; oh < OH; ++oh)
{
for (int ow = 0; ow < OW; ++ow)
{
float o = 0.f;
for (int ctile = 0; ctile < C; ctile += cs)
{
for (int r = 0; r < R; ++r)
{
for (int s = 0; s < S; ++s)
{
int climit = min(ctile + cs, C);
for (int c = ctile; c < climit; ++c)
{
int h = oh * stride - pad + r * dilation;
int w = ow * stride - pad + s * dilation;
if (h < 0 || h >= H || w < 0 || w >= W)
continue;
float i = input[n * C * H * W + c * H * W + h * W + w];
float f = filter[k * C * R * S + c * R * S + r * S + s];
o += i * f;
}
}
}
output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o;
}
}
}
}
}
}
void convolution(
float *_input, float *_output, float *_filter,
int _N, int _C, int _H, int _W,
int _K, int _R, int _S,
int _pad, int _dilation, int _stride)
{
if (mpi_rank == 0)
{
input = _input;
output = _output;
filter = _filter;
}
if (mpi_world_size == 1)
{
convolution_omp();
}
else
{
if (mpi_rank == 0)
{
int dest = 1;
nstart = N / mpi_world_size * dest + min(dest, N % mpi_world_size);
nend = N / mpi_world_size * (dest + 1) + min(dest + 1, N % mpi_world_size);
nlen = nend - nstart;
MPI_Isend(&input[nstart * C_H_W], nlen * C_H_W, MPI_FLOAT, dest, 1, MPI_COMM_WORLD, &request);
MPI_Isend(filter, K * C_R_S, MPI_FLOAT, dest, 1, MPI_COMM_WORLD, &request);
nstart = N / mpi_world_size * mpi_rank + min(mpi_rank, N % mpi_world_size);
nend = N / mpi_world_size * (mpi_rank + 1) + min(mpi_rank + 1, N % mpi_world_size);
nlen = nend - nstart;
// printf("node #%d (nstart, nend, nlen): (%d, %d, %d) Send: %d B\n", mpi_rank, nstart, nend, nlen, nlen * C_H_W);
// printf("node #%d (nstart, nend, nlen): (%d, %d, %d) Send: %d B\n", mpi_rank, nstart, nend, nlen, N * C_R_S);
}
else
{
int source = 0;
// printf("node #%d (nstart, nend, nlen): (%d, %d, %d) Recv: %d B, input: %p\n", mpi_rank, nstart, nend, nlen, nlen * C_H_W, input);
MPI_Recv(input, nlen * C_H_W, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
// printf("node #%d (nstart, nend, nlen): (%d, %d, %d) Recv: %d B, filter: %p\n", mpi_rank, nstart, nend, nlen, N * C_R_S, filter);
MPI_Recv(filter, K * C_R_S, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
zero_tensor(output, nlen, K, OH, OW);
}
convolution_omp();
if (mpi_rank == 0)
{
int source = 1;
nstart = N / mpi_world_size * source + min(source, N % mpi_world_size);
nend = N / mpi_world_size * (source + 1) + min(source + 1, N % mpi_world_size);
nlen = nend - nstart;
MPI_Recv(&output[nstart * K_OH_OW], nlen * K_OH_OW, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);
}
else
{
int dest = 0;
MPI_Isend(output, nlen * K_OH_OW, MPI_FLOAT, dest, 1, MPI_COMM_WORLD, &request);
}
}
}
void convolution_init(
int _N, int _C, int _H, int _W,
int _K, int _R, int _S,
int _pad, int _dilation, int _stride)
{
N = _N;
C = _C;
H = _H;
W = _W;
K = _K;
R = _R;
S = _S;
pad = _pad;
dilation = _dilation;
stride = _stride;
omp_set_num_threads(NUM_THREADS_PER_NODE);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;
if (mpi_world_size == 1)
{
// Only 1 node is usable
// Set start and end index for node #0
nstart = 0;
nend = N;
nlen = nend - nstart;
}
else
{
// 2 nodes are usable
if (mpi_rank == 0)
{
int dest = 1;
nstart = N / mpi_world_size * dest + min(dest, N % mpi_world_size);
nend = N / mpi_world_size * (dest + 1) + min(dest + 1, N % mpi_world_size);
// Send start and end index to node #1
MPI_Isend(&nstart, 1, MPI_INT, dest, 1, MPI_COMM_WORLD, &request);
MPI_Isend(&nend, 1, MPI_INT, dest, 1, MPI_COMM_WORLD, &request);
nstart = N / mpi_world_size * mpi_rank + min(mpi_rank, N % mpi_world_size);
nend = N / mpi_world_size * (mpi_rank + 1) + min(mpi_rank + 1, N % mpi_world_size);
nlen = nend - nstart;
}
else
{
int source = 0;
// Receive start and end index from node #0
MPI_Recv(&nstart, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);
MPI_Recv(&nend, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status);
nlen = nend - nstart;
alloc_tensor(&input, nlen, C, H, W);
alloc_tensor(&output, nlen, K, OH, OW);
alloc_tensor(&filter, K, C, R, S);
}
}
}
/*
 * Teardown hook required by the harness interface.  Nothing to release here:
 * rank 0 never owned its buffers, and rank 1's staging buffers live until
 * process exit.  The (void) casts silence -Wunused-parameter.
 */
void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride)
{
  (void)_N; (void)_C; (void)_H; (void)_W;
  (void)_K; (void)_R; (void)_S;
  (void)_pad; (void)_dilation; (void)_stride;
}