#include "convolution.h"

#include <mpi.h>
#include <stdio.h>

#include "util.h"

#define MIN(x, y) (((x) < (y)) ? (x) : (y))

static float *input, *output, *filter, *colbuf;
static int N, C, H, W;
static int K, R, S;
static int OH, OW;
static int pad;
static int dilation;
static int stride;
static int mpi_rank, mpi_world_size;
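
// Tensor shapes: input is (N, C, H, W), filter is (K, C, R, S), and output is
// (N, K, OH, OW). colbuf holds the im2col expansion of this rank's input
// slice, shaped (local batches, C*R*S, OH*OW), so the convolution becomes one
// GEMM per batch element.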

void im2col_cpu(float *data_im, int batches, int channels, int height, int width,
                int kernel_h, int kernel_w, int pad, int stride,
                int dilation, float *data_col) {
  // Effective (dilated) kernel extent and the resulting output spatial size;
  // height_col and width_col equal the OH and OW computed in convolution().
  const long dil_kernel_h = (kernel_h - 1) * dilation + 1;
  const long dil_kernel_w = (kernel_w - 1) * dilation + 1;
  const long height_col = (height + 2 * pad - dil_kernel_h) / stride + 1;
  const long width_col = (width + 2 * pad - dil_kernel_w) / stride + 1;
  const long channels_col = (long)channels * kernel_h * kernel_w;
  // Per-batch strides of the expanded (data_col) and source (data_im) tensors.
  const long batch_stride = channels_col * height_col * width_col;
  const long sbatch_stride = (long)channels * height * width;

#pragma omp parallel for num_threads(40)
  for (int b = 0; b < batches; ++b) {
    for (long c = 0; c < channels_col; ++c) {
      // Decompose each expanded row index into a kernel offset and a source channel.
      long w_offset = c % kernel_w;
      long h_offset = (c / kernel_w) % kernel_h;
      long c_im = c / kernel_h / kernel_w;

      const long hc0 = h_offset * dilation - pad;
      const long wc0 = w_offset * dilation - pad;
      for (long h = 0; h < height_col; ++h) {
        long h_pad = h * stride + hc0;

        const long row_offset = (c * height_col + h) * width_col;
        const long srow_offset = (c_im * height + h_pad) * width;
        for (long w = 0; w < width_col; ++w) {
          long w_pad = w * stride + wc0;
          // The unsigned casts fold each pair of bounds checks
          // (0 <= h_pad < height, 0 <= w_pad < width) into one comparison:
          // negative indices wrap to huge unsigned values and fail the test.
          if ((unsigned long)h_pad < (unsigned long)height &&
              (unsigned long)w_pad < (unsigned long)width) {
            data_col[b * batch_stride + row_offset + w] =
                data_im[b * sbatch_stride + srow_offset + w_pad];
          } else {
            data_col[b * batch_stride + row_offset + w] = 0.f;
          }
        }
      }
    }
  }
}
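
// Example: H = W = 5, R = S = 3, pad = 1, stride = 1, dilation = 1 gives
// dil_kernel_h = 3 and height_col = (5 + 2 - 3) / 1 + 1 = 5, so each input
// channel expands into kernel_h * kernel_w = 9 rows of
// height_col * width_col = 25 columns in data_col.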

#define ITILESIZE (128)
#define JTILESIZE (1024)
#define KTILESIZE (1024)

// Batched SGEMM: for each of the _B batch elements, C += A * B, where A is
// the shared (_M x _K) filter matrix and B, C advance by one batch stride.
// C is assumed zeroed on entry, since the inner loop accumulates.
void sgemm_cpu(float *A, float *B, float *C, int _B, int _M, int _N, int _K) {
  const long batch_stride = (long)_M * _N;
  const long sbatch_stride = (long)_K * _N;

#pragma omp parallel for num_threads(40)
  for (long b = 0; b < _B; b++) {
    // Loop tiling over i/j/k keeps the working set cache-resident; the inner
    // k-i-j order streams rows of B while A[i * _K + k] stays in a register.
    for (long ii = 0; ii < _M; ii += ITILESIZE) {
      for (long jj = 0; jj < _N; jj += JTILESIZE) {
        for (long kk = 0; kk < _K; kk += KTILESIZE) {
          for (long k = kk; k < MIN(_K, kk + KTILESIZE); k++) {
            for (long i = ii; i < MIN(_M, ii + ITILESIZE); i++) {
              float ar = A[i * _K + k];
              for (long j = jj; j < MIN(_N, jj + JTILESIZE); j++) {
                C[b * batch_stride + i * _N + j] +=
                    ar * B[b * sbatch_stride + k * _N + j];
              }
            }
          }
        }
      }
    }
  }
}
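
// im2col turns the convolution into exactly the GEMM shape above: for each
// local batch element, output(K, OH*OW) = filter(K, C*R*S) * colbuf(C*R*S, OH*OW),
// which is how convolution() below sets up the sgemm_cpu call.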

void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  input = _input;
  output = _output;
  filter = _filter;

  N = _N, C = _C, H = _H, W = _W;
  K = _K, R = _R, S = _S;
  pad = _pad, dilation = _dilation, stride = _stride;

  // Standard output-size formula for a padded, dilated, strided convolution.
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;
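
  // e.g., H = 224, pad = 1, dilation = 1, R = 3, stride = 2 gives
  // OH = (224 + 2 - 2 - 1) / 2 + 1 = 112 (integer division).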

  // ws (work-start) and we (work-end): split the batch dimension N across
  // ranks in contiguous blocks; the last rank absorbs any remainder.
  int ws[mpi_world_size], we[mpi_world_size];
  for (int i = 0; i < mpi_world_size; i++) {
    ws[i] = N / mpi_world_size * i;
    we[i] = N / mpi_world_size * (i + 1);
  }
  we[mpi_world_size - 1] = N;
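
  // e.g., N = 10 on 4 ranks gives ws = {0, 2, 4, 6} and we = {2, 4, 6, 10}:
  // ranks 0-2 take 2 batches each, rank 3 takes 4.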

  // Scratch buffer for this rank's im2col expansion:
  // (local batches) x (C*R*S) x (OH*OW) floats.
  alloc_tensor(&colbuf, 1, we[mpi_rank] - ws[mpi_rank], R * S * C, OH * OW);

  // Scatter the input: rank 0 sends each other rank its contiguous batch
  // slice (tag 0); non-root ranks allocate local buffers and receive.
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Send(input + (long)ws[i] * C * H * W, (we[i] - ws[i]) * C * H * W,
               MPI_FLOAT, i, 0, MPI_COMM_WORLD);
    }
  } else {
    alloc_tensor(&input, we[mpi_rank] - ws[mpi_rank], C, H, W);
    alloc_tensor(&filter, K, C, R, S);
    alloc_tensor(&output, we[mpi_rank] - ws[mpi_rank], K, OH, OW);

    MPI_Recv(input, (we[mpi_rank] - ws[mpi_rank]) * C * H * W,
             MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
  }
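
  // (A single collective MPI_Scatterv with per-rank counts could replace the
  // loop above; the explicit sends keep rank 0 reading its slice in place.)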

  // Broadcast the filter weights from rank 0 to every rank.
  MPI_Bcast(filter, K * C * R * S, MPI_FLOAT, 0, MPI_COMM_WORLD);

  // Local computation: expand this rank's input patches into colbuf, then
  // run the batched GEMM against the filter matrix.
  im2col_cpu(input, we[mpi_rank] - ws[mpi_rank], C, H, W, R, S,
             pad, stride, dilation, colbuf);

  sgemm_cpu(filter, colbuf, output,
            we[mpi_rank] - ws[mpi_rank], K, OH * OW, R * S * C);

  // Gather the outputs: rank 0 receives each rank's slice directly into its
  // position in the full output tensor (tag 1).
  if (mpi_rank == 0) {
    for (int i = 1; i < mpi_world_size; i++) {
      MPI_Recv(output + (long)ws[i] * K * OH * OW, (we[i] - ws[i]) * K * OH * OW,
               MPI_FLOAT, i, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
  } else {
    MPI_Send(output, (we[mpi_rank] - ws[mpi_rank]) * K * OH * OW,
             MPI_FLOAT, 0, 1, MPI_COMM_WORLD);
  }
}
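
// A minimal driver sketch (hypothetical; the actual harness lives outside
// this file). Every rank must make these calls, since convolution() itself
// issues the MPI sends, receives, and broadcast:
//
//   MPI_Init(&argc, &argv);
//   convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
//   convolution(input, output, filter, N, C, H, W, K, R, S, pad, dilation, stride);
//   convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);
//   MPI_Finalize();
//
// Only rank 0 needs valid input/output/filter pointers; the other ranks
// allocate their own local slices inside convolution().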

void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;

  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
}

void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  // No cleanup is performed here; the buffers allocated in convolution()
  // (colbuf and the non-root input/filter/output slices) are not freed.
}