chundoong-lab-ta/SamsungDS22/submissions/final/ys798.choi/A/convolution.cpp

#include "convolution.h"
#include "util.h"
#include <mpi.h>
#include <stdio.h>
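// Two-node MPI + OpenMP direct convolution: the batch dimension N is split
// across two ranks, each rank computes its slice with a threaded loop nest,
// and rank 0 gathers the result in convolution_final().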
static int mpi_rank, mpi_world_size;
static float *input, *output, *filter;
static int N, C, H, W, K, R, S; // batch, channels, input H/W, filters, kernel H/W
static int OH, OW;              // output height/width
static int pad;
static int dilation;
static int stride;
static int size[2];             // per-rank batch counts: size[0] for rank 0, size[1] for rank 1
int num_threads = 40;
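// convolution() runs on every rank. With exactly two ranks, rank 0 first
// ships the upper size[1] batches and the full filter to rank 1; both ranks
// then run the same loop nest over their local slice of the batch.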
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  input = _input;
  output = _output;
  filter = _filter;
  if ((mpi_rank == 0) && (mpi_world_size == 2)) {
    MPI_Send(&input[size[0] * C * H * W], size[1] * C * H * W, MPI_FLOAT, 1, 0, MPI_COMM_WORLD);
    MPI_Send(filter, K * C * R * S, MPI_FLOAT, 1, 0, MPI_COMM_WORLD);
  } else if (mpi_world_size == 2) {
    // Rank 1 allocates local buffers and receives its share of the input.
    alloc_tensor(&input, size[1], C, H, W);
    alloc_tensor(&output, size[1], K, OH, OW);
    alloc_tensor(&filter, K, C, R, S);
    // The status argument must be MPI_STATUS_IGNORE when it is unused;
    // passing nullptr is not valid MPI.
    MPI_Recv(input, size[1] * C * H * W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Recv(filter, K * C * R * S, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }
  // One parallel region over the collapsed (n, k, oh) loops. A second
  // `parallel for` on the ow loop would nest a parallel region inside an
  // active one, which either runs single-threaded or oversubscribes threads.
  #pragma omp parallel for num_threads(num_threads) collapse(3) schedule(dynamic)
  for (int n = 0; n < size[mpi_rank]; ++n) {
    for (int k = 0; k < K; ++k) {
      for (int oh = 0; oh < OH; ++oh) {
        for (int ow = 0; ow < OW; ++ow) {
          float o = 0.f;
          for (int c = 0; c < C; ++c) {
            for (int r = 0; r < R; ++r) {
              for (int s = 0; s < S; ++s) {
                int h = oh * stride - pad + r * dilation;
                int w = ow * stride - pad + s * dilation;
                // Skip filter taps that land in the zero padding.
                if (h < 0 || h >= H || w < 0 || w >= W) continue;
                float i = input[n * C * H * W + c * H * W + h * W + w];
                float f = filter[k * C * R * S + c * R * S + r * S + s];
                o += i * f;
              }
            }
          }
          output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o;
        }
      }
    }
  }
}
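// convolution_init() caches the problem shape, queries the MPI layout, and
// derives the batch split together with the output size
//   OH = (H + 2*pad - dilation*(R-1) - 1) / stride + 1
// (likewise for OW), the usual formula for strided, dilated, padded convolution.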
void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
  if (mpi_world_size == 2) {
    size[1] = _N / 2;
  } else {
    size[1] = 0;
  }
  size[0] = N - size[1];
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;
}
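// convolution_final() gathers rank 1's half of the output back into rank 0's
// buffer, immediately after rank 0's own size[0] batches.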
void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  if ((mpi_rank == 0) && (mpi_world_size == 2)) {
    MPI_Recv(&output[size[0] * K * OH * OW], size[1] * K * OH * OW, MPI_FLOAT, 1, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  } else if (mpi_world_size == 2) {
    MPI_Send(output, size[1] * K * OH * OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
  }
}