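// Distributed 2D convolution: the batch (N) dimension is split across MPI
// ranks, and each rank computes its slice with an OpenMP-parallel direct
// convolution. Rank 0 scatters input slices and the filter, every rank runs
// run_convolution() on its slice, and rank 0 gathers the partial outputs.
//
// Assumed call sequence (a sketch; the actual driver lives outside this file):
//   MPI_Init(&argc, &argv);
//   convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
//   convolution(input, output, filter, N, C, H, W, K, R, S, pad, dilation, stride);
//   convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);
//   MPI_Finalize();
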
#include "convolution.h"
#include <mpi.h>
#include <stdio.h>
#include "util.h"

#define MAX_NODE (8)
#define MAX_THREADS (100)

#define MATRIX_SEND_DATA_MSG_ID 1000
#define MATRIX_SEND_RESULT_MSG_ID 1001

// Per-rank tensor pointers: rank 0 aliases the caller's buffers, worker ranks
// use buffers allocated in convolution_init().
static float *input, *output, *filter;
static int N, C, H, W;   // input batch size and shape
static int K, R, S;      // filter count and spatial size
static int OH, OW;       // output spatial size
static int pad;
static int dilation;
static int stride;
static int mpi_rank, mpi_world_size;

static int num_threads;

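// Computes the direct convolution for the first `endM` images of `_input`
// into `_output`, using the rank-local `filter`. The (k, oh, ow) loops are
// collapsed and parallelized with OpenMP; the n loop stays serial, so each
// parallel region works on one image at a time.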
void run_convolution(
    float *_input, float *_output,
    int endM) {

  for (int n = 0; n < endM; ++n) {
#pragma omp parallel for num_threads(MAX_THREADS) collapse(3) schedule(dynamic)
    for (int k = 0; k < K; ++k) {
      for (int oh = 0; oh < OH; ++oh) {
        for (int ow = 0; ow < OW; ++ow) {
          float o = 0.f;
          for (int c = 0; c < C; ++c) {
            for (int r = 0; r < R; ++r) {
              for (int s = 0; s < S; ++s) {
                int h = oh * stride - pad + r * dilation;
                int w = ow * stride - pad + s * dilation;
                // Skip taps that fall into the zero padding.
                if (h < 0 || h >= H || w < 0 || w >= W) continue;
                float i = _input[n * C * H * W + c * H * W + h * W + w];
                float f = filter[k * C * R * S + c * R * S + r * S + s];
                o += i * f;
              }
            }
          }
          _output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o;
        }
      }
    }
  }
}

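// Distributes one convolution call across MPI ranks. Rank 0 splits the batch
// into contiguous slices, sends each worker its slice of the input plus the
// full filter, computes its own slice, then collects the workers' outputs.
// Worker ranks receive their slice, compute it, and send the result back.
// The request arrays assume at most MAX_NODE ranks.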
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {

  int input_size, filter_size, output_size;
  int i, slice = 0, m_size, startM, endM;
  MPI_Status status;
  MPI_Request req1[MAX_NODE], req2[MAX_NODE], req3[MAX_NODE], req4[MAX_NODE];

  // Ranks beyond the batch split chosen in convolution_init() have no work.
  if (mpi_world_size <= mpi_rank)
    return;

  // Element counts for one image of input, one image of output, and the filter.
  input_size = C * H * W;
  filter_size = K * C * R * S;
  output_size = K * OH * OW;

  if (mpi_rank == 0) {
    // Rank 0 works directly on the caller's buffers.
    input = _input;
    output = _output;
    filter = _filter;

    slice = N / mpi_world_size;

    // Scatter: send each worker its slice size, its input slice, and the filter.
    for (i = 1; i < mpi_world_size; i++) {
      startM = i * slice;
      endM = (i == mpi_world_size - 1) ? N : (i + 1) * slice;
      m_size = (endM - startM);
      MPI_Isend(&m_size, 1, MPI_INT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req1[i]);
      MPI_Isend(&input[startM * input_size], m_size * input_size, MPI_FLOAT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req2[i]);
      MPI_Isend(&filter[0], filter_size, MPI_FLOAT, i, MATRIX_SEND_DATA_MSG_ID, MPI_COMM_WORLD, &req3[i]);
    }
    // Rank 0 keeps the first slice for itself.
    startM = 0;
    endM = slice;
  } else {
    // Workers receive their slice size, input slice, and the filter from rank 0.
    startM = 0;
    MPI_Recv(&endM, 1, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    m_size = (endM - startM);
    MPI_Irecv(&input[0], m_size * input_size, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req2[0]);
    MPI_Irecv(&filter[0], filter_size, MPI_FLOAT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &req3[0]);
    //zero_tensor(output, m_size, K, OH, OW);
    // Wait until the input slice and filter have arrived.
    MPI_Wait(&req2[0], &status);
    MPI_Wait(&req3[0], &status);
  }

  // Compute this rank's slice of the batch.
  run_convolution(input, output, endM);

  if (mpi_rank == 0) {
    // Complete the outstanding sends to the workers.
    for (i = 1; i < mpi_world_size; i++) {
      MPI_Wait(&req1[i], &status);
      MPI_Wait(&req2[i], &status);
      MPI_Wait(&req3[i], &status);
    }

    // Gather: receive each worker's output slice into the caller's buffer.
    for (i = 1; i < mpi_world_size; i++) {
      startM = i * slice;
      endM = (i == mpi_world_size - 1) ? N : (i + 1) * slice;
      m_size = (endM - startM) * output_size;
      MPI_Irecv(&output[startM * output_size], m_size, MPI_FLOAT, i, MPI_ANY_TAG, MPI_COMM_WORLD, &req4[i]);
    }

    // Wait for all result slices to arrive.
    for (i = 1; i < mpi_world_size; i++) {
      MPI_Wait(&req4[i], &status);
    }
  } else {
    // Send this worker's output slice back to rank 0.
    m_size = (endM - startM) * output_size;
    MPI_Isend(&output[0], m_size, MPI_FLOAT, 0, MATRIX_SEND_RESULT_MSG_ID, MPI_COMM_WORLD, &req4[0]);
    MPI_Wait(&req4[0], &status);
  }
}

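// Stores the problem shape, queries the MPI rank and world size, derives the
// output spatial dimensions, and allocates the per-rank buffers that workers
// use to receive their input slice, filter copy, and output slice.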
void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;
  int i, slice = 0, m_size, startM, endM;

  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);

  // Output spatial size for a padded, dilated, strided convolution.
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  num_threads = MAX_THREADS;
  slice = N / mpi_world_size;
  //if (num_threads > K) {
  //  num_threads = K;
  //}

  // A single-image batch is not worth splitting; keep all work on rank 0.
  if (N < 2) {
    mpi_world_size = 1;
  }

  // Each worker rank allocates local buffers sized for its own slice of the
  // batch; rank 0 reuses the caller-provided tensors inside convolution().
  for (i = 1; i < mpi_world_size; i++) {
    if (mpi_rank != i) continue;
    startM = i * slice;
    endM = (i == mpi_world_size - 1) ? N : (i + 1) * slice;
    m_size = (endM - startM);
    alloc_tensor(&input, m_size, C, H, W);
    alloc_tensor(&output, m_size, K, OH, OW);
    alloc_tensor(&filter, K, C, R, S);
  }
}

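// Cleanup hook matching convolution_init(). Nothing is released here; the
// worker buffers from alloc_tensor() are left to be reclaimed at process exit.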
void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
}