chundoong-lab-ta/SamsungDS22/submissions/final/g.kwak/A_final/convolution.cpp
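// Distributed 2D convolution: the batch dimension N is split across two MPI
// ranks, and each rank computes its slice with an OpenMP-parallel direct
// convolution (mat_conv).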

#include "convolution.h"
#include <mpi.h>
#include <stdio.h>
static float *input, *output, *filter;
static int N, C, H, W;
static int K, R, S;
static int OH, OW;
static int pad;
static int dilation;
static int stride;
static int mpi_rank, mpi_world_size;
#include "util.h"
#define MASTER 0
#define FROM_MASTER 1
#define FROM_WORKER 2
static int Ndiv_rev[2];
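// Direct convolution over this rank's batch slice (Ndiv_rev[mpi_rank] images).
// For each output element (n, k, oh, ow), accumulate input * filter over
// (c, r, s), applying stride, padding, and dilation; positions that fall
// outside the input are skipped.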
static void mat_conv() {
#pragma omp parallel for num_threads(1024) collapse(3) schedule(dynamic)
  for (int n = 0; n < Ndiv_rev[mpi_rank]; ++n) {
    for (int k = 0; k < K; ++k) {
      for (int oh = 0; oh < OH; ++oh) {
        for (int ow = 0; ow < OW; ++ow) {
          float o = 0.f;
          for (int c = 0; c < C; ++c) {
            for (int r = 0; r < R; ++r) {
              for (int s = 0; s < S; ++s) {
                int h = oh * stride - pad + r * dilation;
                int w = ow * stride - pad + s * dilation;
                if (h < 0 || h >= H || w < 0 || w >= W) continue;
                float i = input[n * C * H * W + c * H * W + h * W + w];
                float f = filter[k * C * R * S + c * R * S + r * S + s];
                o += i * f;
              }
            }
          }
          output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o;
        }
      }
    }
  }
}
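// Entry point per layer: with a two-rank job, rank 0 (MASTER) keeps the first
// Ndiv batches, sends the remaining Ndiv + Nextra batches and the full filter
// to the worker, and both ranks run mat_conv(); the master then gathers the
// worker's output slice back into _output.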
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  int Ndiv, Nextra, dest;
  int src;
  int numworkers, mtype;
  MPI_Status status;
  MPI_Request request[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};

  numworkers = mpi_world_size - 1;
  Ndiv = N / mpi_world_size;
  Nextra = N % mpi_world_size;
  // Output spatial dimensions for the given stride, padding, and dilation.
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;
  int CHW = C * H * W;
  int KCRS = K * C * R * S;
  int KOHOW = K * OH * OW;
  Ndiv_rev[1] = Ndiv + Nextra;
  Ndiv_rev[0] = Ndiv;

  // WORKER: allocate local buffers for its batch slice (two-rank layout assumed).
  if (mpi_rank > 0 && mpi_world_size == 2) {
    alloc_tensor(&input, Ndiv_rev[1], C, H, W);
    alloc_tensor(&output, Ndiv_rev[1], K, OH, OW);
    alloc_tensor(&filter, K, C, R, S);
  }

  // 1. MASTER sends the worker's input slice and the full filter.
  mtype = FROM_MASTER;
  if (mpi_rank == 0) {
    input = _input;
    output = _output;
    filter = _filter;
    for (dest = 1; dest <= numworkers; dest++) {
      MPI_Isend(&input[Ndiv_rev[0] * CHW], Ndiv_rev[1] * CHW, MPI_FLOAT, dest, mtype, MPI_COMM_WORLD, &request[0]);
      MPI_Isend(&filter[0], KCRS, MPI_FLOAT, dest, mtype, MPI_COMM_WORLD, &request[1]);
    }
  } else {
    // 2. WORKER receives its slice and the filter, computes, and sends its output back.
    MPI_Recv(&input[0], Ndiv_rev[1] * CHW, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);
    MPI_Recv(&filter[0], KCRS, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &status);
    mat_conv();
    mtype = FROM_WORKER;
    MPI_Isend(&output[0], Ndiv_rev[1] * KOHOW, MPI_FLOAT, MASTER, mtype, MPI_COMM_WORLD, &request[0]);
    MPI_Wait(&request[0], MPI_STATUS_IGNORE);
  }

  // 3. MASTER computes its own slice, then collects the worker's results.
  if (mpi_rank == 0) {
    mat_conv();
    mtype = FROM_WORKER;
    for (src = 1; src <= numworkers; src++) {
      MPI_Recv(&output[Ndiv_rev[0] * KOHOW], Ndiv_rev[1] * KOHOW, MPI_FLOAT, src, mtype, MPI_COMM_WORLD, &status);
    }
    // Complete the outstanding non-blocking sends before returning.
    MPI_Waitall(2, request, MPI_STATUSES_IGNORE);
  }
}
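// Store the layer parameters and query this process's MPI rank and world size.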
void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;
  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
}

void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
}