#include "convolution.h"
#include "util.h"
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>  // malloc, exit
#include <immintrin.h>

static float *input, *input_col, *output, *filter;
static int N, C, H, W;   // batch size, input channels, input height/width
static int K, R, S;      // output channels, filter height/width
static int OH, OW;       // output height/width
static int pad;
static int dilation;
static int stride;
static int mpi_rank, mpi_world_size;

#define MAX_NODES (2)
// Per-rank batch range [ns[rank], ne[rank]); at most MAX_NODES ranks are supported.
static int ns[MAX_NODES], ne[MAX_NODES];

inline int min(int a, int b) { return a < b ? a : b; }

#define ITILESIZE (4)
#define JTILESIZE (4096)
#define KTILESIZE (4096)

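// Tiled matrix multiply C += A * B, where A is MM x KK, B is KK x NN, and C is
// MM x NN, blocked into ITILESIZE x JTILESIZE x KTILESIZE tiles and parallelized
// over row tiles with OpenMP. The kernel accumulates into C, so C must be
// zero-initialized by the caller.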
void mat_mul_omp(const float *A, const float *B,
                 const int MM, const int NN, const int KK,
                 float *C) {
#pragma omp parallel for num_threads(20) schedule(dynamic)
  for (int ii = 0; ii < MM; ii += ITILESIZE) {
    for (int jj = 0; jj < NN; jj += JTILESIZE) {
      for (int kk = 0; kk < KK; kk += KTILESIZE) {

        for (int k = kk; k < min(KK, kk + KTILESIZE); k++) {
          for (int i = ii; i < min(MM, ii + ITILESIZE); i++) {
            float ar = A[i * KK + k];
            for (int j = jj; j < min(NN, jj + JTILESIZE); j++) {
              C[i * NN + j] += ar * B[k * NN + j];
            }
          }
        }
      }
    }
  }
}

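// Expand one image (channels x height x width) into the im2col matrix data_col
// of shape (channels * kernel_h * kernel_w) x (OH * OW), so that the convolution
// reduces to a single matrix multiply with the filter. Padding positions are
// written as 0. The unsigned comparisons fold the "index >= 0" and
// "index < bound" checks into one test.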
void im2col_cpu(const float* data_im, const int channels,
    const int height, const int width, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w,
    const int stride_h, const int stride_w,
    const int dilation_h, const int dilation_w,
    float* data_col) {
  int channels_col = channels * kernel_h * kernel_w;
#pragma omp parallel for num_threads(20) schedule(dynamic)
  for (int c = 0; c < channels_col; ++c) {
    int w_offset = c % kernel_w;
    int h_offset = (c / kernel_w) % kernel_h;
    int c_im = c / kernel_h / kernel_w;

    const int hc0 = h_offset * dilation_h - pad_h;
    const int wc0 = w_offset * dilation_w - pad_w;
    for (int h = 0; h < OH; ++h) {
      int h_pad = h * stride_h + hc0;

      const int row_offset = (c * OH + h) * OW;
      const int srow_offset = (c_im * height + h_pad) * width;
      for (int w = 0; w < OW; ++w) {
        int w_pad = w * stride_w + wc0;
        if (((unsigned)h_pad < (unsigned)height) && ((unsigned)w_pad < (unsigned)width))
          data_col[row_offset + w] = data_im[srow_offset + w_pad];
        else
          data_col[row_offset + w] = 0.f;
      }
    }
  }
}

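// Distributed convolution: rank 0 scatters each rank's slice of the input batch,
// the filter is broadcast, every rank runs im2col + matrix multiply on its batch
// range [ns, ne), and the partial outputs are gathered back to rank 0. Non-root
// ranks allocate their own working buffers, since only rank 0 is assumed to
// receive valid tensors from the caller. The output buffer on rank 0 is assumed
// to arrive zero-filled, because mat_mul_omp accumulates into it (non-root ranks
// zero their own copy explicitly).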
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  input = _input;
  output = _output;
  filter = _filter;

  if (mpi_rank != 0) {
    alloc_tensor(&input, N, C, H, W);
    alloc_tensor(&output, N, K, OH, OW);
    alloc_tensor(&filter, K, C, R, S);
    zero_tensor(output, N, K, OH, OW);
  }

  // Scatter input
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Send(input + ns[i]*C*H*W, (ne[i]-ns[i])*C*H*W, MPI_FLOAT, i, 0,
               MPI_COMM_WORLD);
    }
  } else {
    MPI_Recv(input + ns[mpi_rank]*C*H*W, (ne[mpi_rank]-ns[mpi_rank])*C*H*W,
             MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }

  // Broadcast filter
  MPI_Bcast(filter, K*C*R*S, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // Compute this rank's slice of the batch
  for (int n = ns[mpi_rank]; n < ne[mpi_rank]; n++) {
    im2col_cpu(input + n*C*H*W, C, H, W, R, S, pad, pad,
               stride, stride, dilation, dilation, input_col);
    mat_mul_omp(filter, input_col, K, OH*OW, R*S*C, output + n*K*OH*OW);
  }

  // Gather output
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Recv(output + ns[i]*K*OH*OW, (ne[i]-ns[i])*K*OH*OW, MPI_FLOAT, i, 0,
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
  } else {
    MPI_Send(output + ns[mpi_rank]*K*OH*OW, (ne[mpi_rank]-ns[mpi_rank])*K*OH*OW,
             MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
  }
}

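// One-time setup: cache the problem sizes, query the MPI rank and world size,
// compute the output spatial dimensions, allocate the per-image im2col buffer,
// and split the batch dimension evenly across ranks (the last rank takes the
// remainder).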
void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;

  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);

  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;
  input_col = (float*)malloc((size_t)R * S * C * OH * OW * sizeof(float));
  if (!input_col) {
    fprintf(stderr, "memory alloc failed\n");
    exit(1);
  }

  for (int i = 0; i < mpi_world_size; i++) {
    ns[i] = N / mpi_world_size * i;
    ne[i] = N / mpi_world_size * (i + 1);
  }
  ne[mpi_world_size - 1] = N;
}

void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  // Release the im2col scratch buffer allocated in convolution_init.
  free(input_col);
  input_col = nullptr;
}