#include "convolution.h"
#include "util.h"
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>  // free()
#include <immintrin.h>
#include <omp.h>

static float *input, *output, *filter;
static int N, C, H, W;
static int K, R, S;
static int OH, OW;
static int pad;
static int dilation;
static int stride;
static int mpi_rank, mpi_world_size;

#define min(a, b) ((a) < (b) ? (a) : (b))

// Output tile sizes for the tiled scalar path: full output height, 128-wide column strips.
#define HSLICE OH
#define WSLICE 128

// Number of OpenMP threads requested per parallel region.
int num_threads = 100;

// OpenMP + AVX-512 convolution over the local batch of N images.
// The vectorized path is only taken when pad == 0, stride == 1, dilation == 1
// and both W and S are multiples of 16: each inner iteration then loads 16
// consecutive filter taps and the 16 matching input pixels, and since
// w = ow + s the last lane ends at w = (OW - 1) + (S - 16) + 15 = W - 1,
// so the unaligned loads never run past the end of a row.
void convolution_omp_slc()
{
  if ((dilation == 1) && (stride == 1) && (pad == 0) && (W % 16 == 0) && (S % 16 == 0)) {
    // Vectorized path: accumulate 16 products per FMA, reduce at the end.
    #pragma omp parallel for num_threads(num_threads) collapse(3) schedule(dynamic)
    for (int n = 0; n < N; ++n) {
      for (int k = 0; k < K; ++k) {
        for (int oh = 0; oh < OH; ++oh) {
          for (int ow = 0; ow < OW; ++ow) {
            __m512 vo = _mm512_setzero_ps();
            for (int c = 0; c < C; ++c) {
              for (int r = 0; r < R; ++r) {
                for (int s = 0; s < S; s += 16) {
                  int h = oh * stride - pad + r;  // == oh + r under the guard above
                  int w = ow * stride - pad + s;  // == ow + s under the guard above
                  if (h < 0 || h >= H || w < 0 || w >= W) continue;  // never taken when pad == 0
                  __m512 vi = _mm512_loadu_ps(&input[n * C * H * W + c * H * W + h * W + w]);
                  __m512 vf = _mm512_loadu_ps(&filter[k * C * R * S + c * R * S + r * S + s]);
                  vo = _mm512_fmadd_ps(vi, vf, vo);
                }
              }
            }
            output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = _mm512_reduce_add_ps(vo);
          }
        }
      }
    }
  }
  else {
    // Scalar fallback, tiled over the output plane in HSLICE x WSLICE blocks.
    #pragma omp parallel for num_threads(num_threads) collapse(3) schedule(dynamic)
    for (int n = 0; n < N; ++n) {
      for (int k = 0; k < K; ++k) {
        for (int ohs = 0; ohs < OH; ohs += HSLICE) {
          for (int ows = 0; ows < OW; ows += WSLICE) {
            for (int oh = ohs; oh < min(ohs + HSLICE, OH); ++oh) {
              for (int ow = ows; ow < min(ows + WSLICE, OW); ++ow) {
                float o = 0.f;
                for (int c = 0; c < C; ++c) {
                  for (int r = 0; r < R; ++r) {
                    for (int s = 0; s < S; ++s) {
                      int h = oh * stride - pad + r * dilation;
                      int w = ow * stride - pad + s * dilation;
                      if (h < 0 || h >= H || w < 0 || w >= W) continue;
                      float i = input[n * C * H * W + c * H * W + h * W + w];
                      float f = filter[k * C * R * S + c * R * S + r * S + s];
                      o += i * f;
                    }
                  }
                }
                output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o;
              }
            }
          }
        }
      }
    }
  }
}

// Plain OpenMP convolution over the local batch of N images; handles arbitrary
// pad / dilation / stride. Used by the non-root ranks.
void convolution_omp()
{
  #pragma omp parallel for num_threads(num_threads) collapse(3) schedule(dynamic)
  for (int n = 0; n < N; ++n) {
    for (int k = 0; k < K; ++k) {
      for (int oh = 0; oh < OH; ++oh) {
        for (int ow = 0; ow < OW; ++ow) {
          float o = 0.f;
          for (int c = 0; c < C; ++c) {
            for (int r = 0; r < R; ++r) {
              for (int s = 0; s < S; ++s) {
                int h = oh * stride - pad + r * dilation;
                int w = ow * stride - pad + s * dilation;
                if (h < 0 || h >= H || w < 0 || w >= W) continue;
                float i = input[n * C * H * W + c * H * W + h * W + w];
                float f = filter[k * C * R * S + c * R * S + r * S + s];
                o += i * f;
              }
            }
          }
          output[n * K * OH * OW + k * OH * OW + oh * OW + ow] = o;
        }
      }
    }
  }
}

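/*
 * Optional self-check sketch (an assumption, not part of the original file):
 * one way to confirm that the AVX-512 path in convolution_omp_slc() matches
 * the scalar convolution_omp() on a shape that satisfies the fast-path guard
 * (pad == 0, stride == 1, dilation == 1, W and S multiples of 16). The sizes
 * below are illustrative only.
 */
#if 0
#include <math.h>

static void check_slc_against_scalar() {
  N = 1; C = 3; H = 32; W = 32;
  K = 4; R = 16; S = 16;
  pad = 0; dilation = 1; stride = 1;
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  input  = (float *)malloc(sizeof(float) * N * C * H * W);
  filter = (float *)malloc(sizeof(float) * K * C * R * S);
  output = (float *)malloc(sizeof(float) * N * K * OH * OW);
  float *ref = (float *)malloc(sizeof(float) * N * K * OH * OW);

  for (int i = 0; i < N * C * H * W; ++i) input[i] = (float)rand() / RAND_MAX;
  for (int i = 0; i < K * C * R * S; ++i) filter[i] = (float)rand() / RAND_MAX;

  convolution_omp();                                  // scalar reference
  for (int i = 0; i < N * K * OH * OW; ++i) ref[i] = output[i];
  convolution_omp_slc();                              // vectorized path

  for (int i = 0; i < N * K * OH * OW; ++i) {
    if (fabsf(ref[i] - output[i]) > 1e-3f * fabsf(ref[i]) + 1e-3f) {
      printf("mismatch at %d: %f vs %f\n", i, ref[i], output[i]);
      break;
    }
  }

  free(input); free(filter); free(output); free(ref);
}
#endif
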
void convolution(
    float *_input, float *_output, float *_filter,
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  input = _input;
  output = _output;
  filter = _filter;

  // Standard convolution output size. The shape parameters were already cached
  // as globals by convolution_init(), so the remaining arguments are not read here.
  OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  int default_div_size = N / mpi_world_size;
  MPI_Status status;

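  /*
   * Work decomposition (illustrative example, derived from the code below):
   * the batch dimension N is split evenly across ranks and the last rank
   * absorbs the remainder. E.g. with N = 13 and mpi_world_size = 4,
   * default_div_size = 13 / 4 = 3, so ranks 0-2 each compute 3 images and
   * rank 3 computes 3 + (13 - 3 * 4) = 4 images.
   */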
  if (mpi_rank == 0) {
    // 1. Distribute the batch to the other ranks (non-blocking sends).
    //    The request arrays are sized for at most 4 peer ranks.
    MPI_Request arrA_req[4];
    MPI_Status arrA_status[4];
    for (int target_rank = 1; target_rank < mpi_world_size; target_rank++) {
      int div_start = target_rank * default_div_size;
      int div_size = default_div_size;
      if (target_rank == (mpi_world_size - 1))
        div_size += N - (default_div_size * mpi_world_size);

      MPI_Isend(input + (div_start * C * H * W), div_size * C * H * W, MPI_FLOAT,
                target_rank, 0, MPI_COMM_WORLD, &arrA_req[target_rank - 1]);
    }

    // 2. Broadcast the full filter tensor.
    MPI_Bcast(filter, K * C * R * S, MPI_FLOAT, 0, MPI_COMM_WORLD);

    // 3. Do the convolution on this rank's share of the batch.
    int original_N = N;
    N = default_div_size;
    convolution_omp_slc();
    N = original_N;

    // 4. Receive the results from the other ranks (non-blocking receives).
    MPI_Request arrC_req[4];
    MPI_Status arrC_status[4];
    for (int target_rank = 1; target_rank < mpi_world_size; target_rank++) {
      int div_start = target_rank * default_div_size;
      int div_size = default_div_size;
      if (target_rank == (mpi_world_size - 1))
        div_size += N - (default_div_size * mpi_world_size);

      MPI_Irecv(output + (div_start * K * OH * OW), div_size * K * OH * OW, MPI_FLOAT,
                target_rank, 0, MPI_COMM_WORLD, &arrC_req[target_rank - 1]);
    }
    // Complete both the earlier non-blocking sends and the receives.
    MPI_Waitall(mpi_world_size - 1, arrA_req, arrA_status);
    MPI_Waitall(mpi_world_size - 1, arrC_req, arrC_status);
  } else {
    // 0. Allocate local buffers for this rank's share of the batch.
    int div_size = default_div_size;
    if (mpi_rank == (mpi_world_size - 1))
      div_size += N - (default_div_size * mpi_world_size);

    int original_N = N;
    N = div_size;  // Work on the local batch size from here on.

    alloc_tensor(&input, N, C, H, W);
    alloc_tensor(&filter, K, C, R, S);
    alloc_tensor(&output, N, K, OH, OW);

    // 1. Receive this rank's part of the input batch.
    MPI_Recv(input, N * C * H * W, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, &status);

    // 2. Receive the full filter tensor.
    MPI_Bcast(filter, K * C * R * S, MPI_FLOAT, 0, MPI_COMM_WORLD);

    // 3. Do the convolution on the local batch.
    convolution_omp();

    // 4. Send the local output back to rank 0.
    MPI_Send(output, N * K * OH * OW, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);

    N = original_N;

    free(input);
    free(filter);
    free(output);
  }
}

void convolution_init(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
  N = _N; C = _C; H = _H; W = _W;
  K = _K; R = _R; S = _S;
  pad = _pad;
  dilation = _dilation;
  stride = _stride;

  MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
}

void convolution_final(
    int _N, int _C, int _H, int _W,
    int _K, int _R, int _S,
    int _pad, int _dilation, int _stride) {
}
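
/*
 * Hypothetical driver sketch (an assumption, not part of this project's code):
 * it only illustrates the call sequence this translation unit expects:
 * MPI_Init, convolution_init, convolution, convolution_final, MPI_Finalize.
 * The problem sizes and the calloc-based allocation are made up for the
 * example; the project's real main.cpp may do this differently.
 */
#if 0
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  // Illustrative problem size (assumed values).
  int N = 8, C = 3, H = 64, W = 64, K = 16, R = 3, S = 3;
  int pad = 1, dilation = 1, stride = 1;
  int OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1;
  int OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1;

  // Every rank calls convolution(); only rank 0's buffers are read and written
  // directly, since worker ranks allocate their own local tensors internally.
  float *input  = (float *)calloc((size_t)N * C * H * W, sizeof(float));
  float *filter = (float *)calloc((size_t)K * C * R * S, sizeof(float));
  float *output = (float *)calloc((size_t)N * K * OH * OW, sizeof(float));

  convolution_init(N, C, H, W, K, R, S, pad, dilation, stride);
  convolution(input, output, filter, N, C, H, W, K, R, S, pad, dilation, stride);
  convolution_final(N, C, H, W, K, R, S, pad, dilation, stride);

  free(input);
  free(filter);
  free(output);
  MPI_Finalize();
  return 0;
}
#endif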