#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring> // memcpy

#include "tensor.h"
#include "uNet.h"
#include "util.h"

// Parameters for U-Net
Tensor *inc_double_conv_0_weight;
Tensor *inc_double_conv_1_weight;
Tensor *inc_double_conv_1_bias;
Tensor *inc_double_conv_3_weight;
Tensor *inc_double_conv_4_weight;
Tensor *inc_double_conv_4_bias;
Tensor *down1_maxpool_conv_1_double_conv_0_weight;
Tensor *down1_maxpool_conv_1_double_conv_1_weight;
Tensor *down1_maxpool_conv_1_double_conv_1_bias;
Tensor *down1_maxpool_conv_1_double_conv_3_weight;
Tensor *down1_maxpool_conv_1_double_conv_4_weight;
Tensor *down1_maxpool_conv_1_double_conv_4_bias;
Tensor *down2_maxpool_conv_1_double_conv_0_weight;
Tensor *down2_maxpool_conv_1_double_conv_1_weight;
Tensor *down2_maxpool_conv_1_double_conv_1_bias;
Tensor *down2_maxpool_conv_1_double_conv_3_weight;
Tensor *down2_maxpool_conv_1_double_conv_4_weight;
Tensor *down2_maxpool_conv_1_double_conv_4_bias;
Tensor *up1_up_weight;
Tensor *up1_up_bias;
Tensor *up1_conv_double_conv_0_weight;
Tensor *up1_conv_double_conv_1_weight;
Tensor *up1_conv_double_conv_1_bias;
Tensor *up1_conv_double_conv_3_weight;
Tensor *up1_conv_double_conv_4_weight;
Tensor *up1_conv_double_conv_4_bias;
Tensor *up2_up_weight;
Tensor *up2_up_bias;
Tensor *up2_conv_double_conv_0_weight;
Tensor *up2_conv_double_conv_1_weight;
Tensor *up2_conv_double_conv_1_bias;
Tensor *up2_conv_double_conv_3_weight;
Tensor *up2_conv_double_conv_4_weight;
Tensor *up2_conv_double_conv_4_bias;
Tensor *outc_conv_weight;
Tensor *outc_conv_bias;
Tensor *inc_batchnorm_0_running_mean;
Tensor *inc_batchnorm_0_running_var;
Tensor *down1_batchnorm_0_running_mean;
Tensor *down1_batchnorm_0_running_var;
Tensor *down2_batchnorm_0_running_mean;
Tensor *down2_batchnorm_0_running_var;
Tensor *up1_batchnorm_0_running_mean;
Tensor *up1_batchnorm_0_running_var;
Tensor *up2_batchnorm_0_running_mean;
Tensor *up2_batchnorm_0_running_var;
Tensor *inc_batchnorm_1_running_mean;
Tensor *inc_batchnorm_1_running_var;
Tensor *down1_batchnorm_1_running_mean;
Tensor *down1_batchnorm_1_running_var;
Tensor *down2_batchnorm_1_running_mean;
Tensor *down2_batchnorm_1_running_var;
Tensor *up1_batchnorm_1_running_mean;
Tensor *up1_batchnorm_1_running_var;
Tensor *up2_batchnorm_1_running_mean;
Tensor *up2_batchnorm_1_running_var;

// intermediate features
Tensor *inc_conv_0_output;
Tensor *inc_batchnorm_0_output;
Tensor *inc_conv_1_output;
Tensor *inc_batchnorm_1_output;
Tensor *down1_maxpool2d_0_output;
Tensor *down1_conv_0_output;
Tensor *down1_batchnorm_0_output;
Tensor *down1_conv_1_output;
Tensor *down1_batchnorm_1_output;
Tensor *down2_maxpool2d_0_output;
Tensor *down2_conv_0_output;
Tensor *down2_batchnorm_0_output;
Tensor *down2_conv_1_output;
Tensor *down2_batchnorm_1_output;
Tensor *up1_convt_0_output;
Tensor *up1_concat_0_output;
Tensor *up1_conv_0_output;
Tensor *up1_batchnorm_0_output;
Tensor *up1_conv_1_output;
Tensor *up1_batchnorm_1_output;
Tensor *up2_convt_0_output;
Tensor *up2_concat_0_output;
Tensor *up2_conv_0_output;
Tensor *up2_batchnorm_0_output;
Tensor *up2_conv_1_output;
Tensor *up2_batchnorm_1_output;
Tensor *outc_conv_0_output;

// Forward declarations
void Conv2d(Tensor *input, Tensor *weight, Tensor *bias, Tensor *output,
            int stride, int pad, int dilation, bool has_bias);
void ReLU(Tensor *inout);
void BatchNorm2d(Tensor *input, Tensor *gamma, Tensor *beta,
                 Tensor *running_mean, Tensor *running_var, Tensor *output,
                 const float eps, const float momentum);
void ConvTranspose2d(Tensor *input, Tensor *weight, Tensor *bias,
                     Tensor *output, int stride, int pad);
void MaxPool2d(Tensor *input, Tensor *output);
void Concat(Tensor *input1, Tensor *input2, Tensor *output);
void uNet_initialize(int N, char *parameter_fname);
void uNet(Tensor *inputN, Tensor *outputN, int N);
void uNet_finalize();
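
/*
 * Typical call sequence (a hypothetical driver sketch; the real entry point
 * lives outside this file, and "parameters.bin" is an assumed file name):
 *
 *   int N = 4;                                       // number of input images
 *   uNet_initialize(N, (char *) "parameters.bin");   // load weights once
 *   Tensor *inputN  = new Tensor({N, 3, 128, 191});  // filled from input.bin
 *   Tensor *outputN = new Tensor({N, 2, 128, 191});
 *   uNet(inputN, outputN, N);                        // segment all N images
 *   uNet_finalize();                                 // release weights and activations
 *   delete inputN;
 *   delete outputN;
 */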

/*
 * uNet
 * This model identifies the boundaries of the cars in an image file (input.bin)
 * and removes the background.
 */
void uNet(Tensor *inputN, Tensor *outputN, int N) {
  Tensor *input = new Tensor({1, 3, 128, 191});
  Tensor *output = new Tensor({1, 2, 128, 191});

  for (int idx = 0; idx < N; ++idx) {
    memcpy(input->buf, inputN->buf + (idx * 1 * 3 * 128 * 191),
           sizeof(float) * 1 * 3 * 128 * 191);

    // inc(n_channels, 64)
    Conv2d(input, inc_double_conv_0_weight, NULL, inc_conv_0_output, 1, 1, 1,
           false);
    BatchNorm2d(inc_conv_0_output, inc_double_conv_1_weight,
                inc_double_conv_1_bias, inc_batchnorm_0_running_mean,
                inc_batchnorm_0_running_var, inc_batchnorm_0_output, 1e-5, 0.1);
    ReLU(inc_batchnorm_0_output);
    Conv2d(inc_batchnorm_0_output, inc_double_conv_3_weight, NULL,
           inc_conv_1_output, 1, 1, 1, false);
    BatchNorm2d(inc_conv_1_output, inc_double_conv_4_weight,
                inc_double_conv_4_bias, inc_batchnorm_1_running_mean,
                inc_batchnorm_1_running_var, inc_batchnorm_1_output, 1e-5, 0.1);
    ReLU(inc_batchnorm_1_output);

    // down1(64, 128)
    MaxPool2d(inc_batchnorm_1_output, down1_maxpool2d_0_output);
    Conv2d(down1_maxpool2d_0_output, down1_maxpool_conv_1_double_conv_0_weight,
           NULL, down1_conv_0_output, 1, 1, 1, false);
    BatchNorm2d(down1_conv_0_output, down1_maxpool_conv_1_double_conv_1_weight,
                down1_maxpool_conv_1_double_conv_1_bias,
                down1_batchnorm_0_running_mean, down1_batchnorm_0_running_var,
                down1_batchnorm_0_output, 1e-5, 0.1);
    ReLU(down1_batchnorm_0_output);
    Conv2d(down1_batchnorm_0_output, down1_maxpool_conv_1_double_conv_3_weight,
           NULL, down1_conv_1_output, 1, 1, 1, false);
    BatchNorm2d(down1_conv_1_output, down1_maxpool_conv_1_double_conv_4_weight,
                down1_maxpool_conv_1_double_conv_4_bias,
                down1_batchnorm_1_running_mean, down1_batchnorm_1_running_var,
                down1_batchnorm_1_output, 1e-5, 0.1);
    ReLU(down1_batchnorm_1_output);

    // down2(128, 256)
    MaxPool2d(down1_batchnorm_1_output, down2_maxpool2d_0_output);
    Conv2d(down2_maxpool2d_0_output, down2_maxpool_conv_1_double_conv_0_weight,
           NULL, down2_conv_0_output, 1, 1, 1, false);
    BatchNorm2d(down2_conv_0_output, down2_maxpool_conv_1_double_conv_1_weight,
                down2_maxpool_conv_1_double_conv_1_bias,
                down2_batchnorm_0_running_mean, down2_batchnorm_0_running_var,
                down2_batchnorm_0_output, 1e-5, 0.1);
    ReLU(down2_batchnorm_0_output);
    Conv2d(down2_batchnorm_0_output, down2_maxpool_conv_1_double_conv_3_weight,
           NULL, down2_conv_1_output, 1, 1, 1, false);
    BatchNorm2d(down2_conv_1_output, down2_maxpool_conv_1_double_conv_4_weight,
                down2_maxpool_conv_1_double_conv_4_bias,
                down2_batchnorm_1_running_mean, down2_batchnorm_1_running_var,
                down2_batchnorm_1_output, 1e-5, 0.1);
    ReLU(down2_batchnorm_1_output);

    // up1(256, 128), concat(up1_convt_0_output, down1_batchnorm_1_output)
    ConvTranspose2d(down2_batchnorm_1_output, up1_up_weight, up1_up_bias,
                    up1_convt_0_output, 2, 0);
    Concat(up1_convt_0_output, down1_batchnorm_1_output, up1_concat_0_output);
    Conv2d(up1_concat_0_output, up1_conv_double_conv_0_weight, NULL,
           up1_conv_0_output, 1, 1, 1, false);
    BatchNorm2d(up1_conv_0_output, up1_conv_double_conv_1_weight,
                up1_conv_double_conv_1_bias, up1_batchnorm_0_running_mean,
                up1_batchnorm_0_running_var, up1_batchnorm_0_output, 1e-5, 0.1);
    ReLU(up1_batchnorm_0_output);
    Conv2d(up1_batchnorm_0_output, up1_conv_double_conv_3_weight, NULL,
           up1_conv_1_output, 1, 1, 1, false);
    BatchNorm2d(up1_conv_1_output, up1_conv_double_conv_4_weight,
                up1_conv_double_conv_4_bias, up1_batchnorm_1_running_mean,
                up1_batchnorm_1_running_var, up1_batchnorm_1_output, 1e-5, 0.1);
    ReLU(up1_batchnorm_1_output);

    // up2(128, 64), concat(up2_convt_0_output, inc_batchnorm_1_output)
    ConvTranspose2d(up1_batchnorm_1_output, up2_up_weight, up2_up_bias,
                    up2_convt_0_output, 2, 0);
    Concat(up2_convt_0_output, inc_batchnorm_1_output, up2_concat_0_output);
    Conv2d(up2_concat_0_output, up2_conv_double_conv_0_weight, NULL,
           up2_conv_0_output, 1, 1, 1, false);
    BatchNorm2d(up2_conv_0_output, up2_conv_double_conv_1_weight,
                up2_conv_double_conv_1_bias, up2_batchnorm_0_running_mean,
                up2_batchnorm_0_running_var, up2_batchnorm_0_output, 1e-5, 0.1);
    ReLU(up2_batchnorm_0_output);
    Conv2d(up2_batchnorm_0_output, up2_conv_double_conv_3_weight, NULL,
           up2_conv_1_output, 1, 1, 1, false);
    BatchNorm2d(up2_conv_1_output, up2_conv_double_conv_4_weight,
                up2_conv_double_conv_4_bias, up2_batchnorm_1_running_mean,
                up2_batchnorm_1_running_var, up2_batchnorm_1_output, 1e-5, 0.1);
    ReLU(up2_batchnorm_1_output);

    // outc(64, 2)
    Conv2d(up2_batchnorm_1_output, outc_conv_weight, outc_conv_bias, output, 1,
           0, 1, true);

    memcpy(outputN->buf + (idx * 1 * 2 * 128 * 191), output->buf,
           sizeof(float) * (1 * 2 * 128 * 191));
  }

  delete input;
  delete output;
}
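
// All activation buffers are allocated for batch size 1 in uNet_initialize,
// so the loop above stages one image at a time into `input`, runs the full
// encoder-decoder pipeline, and copies the two-channel result back out.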

/* Operations */

/*
 * Convolution
 * input shape = (N, C, H, W)
 * weight shape = (K, C, R, S)
 * bias shape = (K)
 * output shape = (N, K, OH, OW)
 * where OH = (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1,
 *       OW = (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1
 */
void Conv2d(Tensor *input, Tensor *weight, Tensor *bias, Tensor *output,
            int stride, int pad, int dilation, bool has_bias) {
  int C = input->shape[1], H = input->shape[2], W = input->shape[3];
  int K = weight->shape[0], R = weight->shape[2], S = weight->shape[3];
  int OH = output->shape[2], OW = output->shape[3];

  CHECK_ERROR(OH == (H + 2 * pad - dilation * (R - 1) - 1) / stride + 1,
              "[Conv2d] Output height mismatch");
  CHECK_ERROR(OW == (W + 2 * pad - dilation * (S - 1) - 1) / stride + 1,
              "[Conv2d] Output width mismatch");
  CHECK_ERROR(weight->shape[1] == C && (!has_bias || bias->shape[0] == K) &&
                  output->shape[1] == K,
              "[Conv2d] Channel size mismatch");

#ifdef TEST
#pragma omp parallel for
#endif
  for (int k = 0; k < K; ++k) {
    for (int oh = 0; oh < OH; ++oh) {
      for (int ow = 0; ow < OW; ++ow) {
        float o = has_bias ? bias->buf[k] : 0;
        for (int c = 0; c < C; ++c) {
          for (int r = 0; r < R; ++r) {
            for (int s = 0; s < S; ++s) {
              int h = oh * stride - pad + r * dilation;
              int w = ow * stride - pad + s * dilation;
              if (h < 0 || h >= H || w < 0 || w >= W) continue;
              float i = input->buf[c * H * W + h * W + w];
              float f = weight->buf[k * C * R * S + c * R * S + r * S + s];
              o += i * f;
            }
          }
        }
        output->buf[k * OH * OW + oh * OW + ow] = o;
      }
    }
  }
}
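
// Shape sanity check for this model's convolutions: every double-conv layer
// uses 3x3 kernels with stride = 1, pad = 1, dilation = 1, so the formula
// above gives OH = (H + 2 - 2 - 1) / 1 + 1 = H (and likewise OW = W), i.e.
// the double convs preserve spatial size; only MaxPool2d and ConvTranspose2d
// change it. The 1x1 outc conv (pad = 0) also preserves H and W.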

/*
 * ReLU
 * input shape = (N, C, H, W)
 * output shape = (N, C, H, W)
 * Formula: y = max(x, 0)
 */
void ReLU(Tensor *inout) {
  int C = inout->shape[1], H = inout->shape[2], W = inout->shape[3];

  for (int c = 0; c < C; ++c) {
    for (int h = 0; h < H; ++h) {
      for (int w = 0; w < W; ++w) {
        int idx = c * H * W + h * W + w;
        inout->buf[idx] = inout->buf[idx] > 0 ? inout->buf[idx] : 0;
      }
    }
  }
}

/*
 * Batch Normalization (inference mode: uses running statistics)
 * input shape = (N, C, H, W)
 * gamma shape = (C)
 * beta shape = (C)
 * output shape = (N, C, H, W)
 */
void BatchNorm2d(Tensor *input, Tensor *gamma, Tensor *beta,
                 Tensor *running_mean, Tensor *running_var, Tensor *output,
                 const float eps, const float momentum) {
  int N = input->shape[0], C = input->shape[1], H = input->shape[2],
      W = input->shape[3];

  CHECK_ERROR(gamma->shape[0] == C && beta->shape[0] == C,
              "[BatchNorm2d] gamma, beta shape mismatch");
  CHECK_ERROR(
      output->shape[1] == C && output->shape[2] == H && output->shape[3] == W,
      "[BatchNorm2d] Output shape mismatch");

  for (int c = 0; c < C; ++c) {
    for (int n = 0; n < N; ++n) {
      for (int h = 0; h < H; ++h) {
        for (int w = 0; w < W; ++w) {
          float mean = running_mean->buf[c];
          float variance = running_var->buf[c];
          float x = input->buf[n * C * H * W + c * H * W + h * W + w];
          float x_hat = (x - mean) / sqrt(variance + eps);
          output->buf[n * C * H * W + c * H * W + h * W + w] =
              gamma->buf[c] * x_hat + beta->buf[c];
        }
      }
    }
  }
}
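
// Per-channel formula applied above (inference form, with frozen statistics):
//   x_hat = (x - running_mean[c]) / sqrt(running_var[c] + eps)
//   y     = gamma[c] * x_hat + beta[c]
// The momentum argument only matters during training, when running statistics
// are updated; here it is accepted for signature parity but never read.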

/*
 * Transposed convolution
 * input shape = (N, C, H, W)
 * weight shape = (C, K, R, S)
 * bias shape = (K)
 * output shape = (N, K, OH, OW)
 * where OH = (H - 1) * stride - 2 * pad + R
 *       OW = (W - 1) * stride - 2 * pad + S
 */
void ConvTranspose2d(Tensor *input, Tensor *weight, Tensor *bias,
                     Tensor *output, int stride, int pad) {
  int C = input->shape[1], H = input->shape[2], W = input->shape[3];
  int K = weight->shape[1], R = weight->shape[2], S = weight->shape[3];
  int OH = output->shape[2], OW = output->shape[3];

  CHECK_ERROR(OH == (H - 1) * stride - 2 * pad + R,
              "[ConvT2d] Output height mismatch");
  CHECK_ERROR(OW == (W - 1) * stride - 2 * pad + S,
              "[ConvT2d] Output width mismatch");
  CHECK_ERROR(
      weight->shape[0] == C && bias->shape[0] == K && output->shape[1] == K,
      "[ConvT2d] Channel size mismatch");

  for (int k = 0; k < K; ++k) {
    for (int oh = 0; oh < OH; ++oh) {
      for (int ow = 0; ow < OW; ++ow) {
        float o = bias->buf[k];
        for (int c = 0; c < C; ++c) {
          for (int r = 0; r < R; ++r) {
            for (int s = 0; s < S; ++s) {
              // Only input positions that map exactly onto this output
              // location (i.e. divisible by stride) contribute.
              if ((oh + pad - r) % stride != 0) continue;
              if ((ow + pad - s) % stride != 0) continue;
              int h = (oh + pad - r) / stride;
              int w = (ow + pad - s) / stride;
              if (h < 0 || h >= H || w < 0 || w >= W) continue;
              float i = input->buf[c * H * W + h * W + w];
              float f = weight->buf[c * K * R * S + k * R * S + r * S + s];
              o += i * f;
            }
          }
        }
        output->buf[k * OH * OW + oh * OW + ow] = o;
      }
    }
  }
}
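
// Worked shape example: up1 upsamples the {1, 256, 32, 47} bottleneck with a
// 2x2 kernel, stride = 2, pad = 0, giving
//   OH = (32 - 1) * 2 - 0 + 2 = 64,  OW = (47 - 1) * 2 - 0 + 2 = 94,
// which matches the {1, 128, 64, 94} up1_convt_0_output allocated in
// uNet_initialize.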

float max4(float in0, float in1, float in2, float in3) {
  float max = in0;

  if (in1 > max) max = in1;
  if (in2 > max) max = in2;
  if (in3 > max) max = in3;

  return max;
}

/*
 * MaxPool2d (2x2 window, stride 2)
 * input shape = (N, C, H, W)
 * output shape = (N, OC, OH, OW)
 * where OC = C,
 *       OH = H / 2,
 *       OW = W / 2
 */
void MaxPool2d(Tensor *input, Tensor *output) {
  int C = input->shape[1], H = input->shape[2], W = input->shape[3];
  int OC = output->shape[1], OH = output->shape[2], OW = output->shape[3];

  CHECK_ERROR(OW == W / 2, "[MaxPool2d] Output width mismatch");
  CHECK_ERROR(OH == H / 2, "[MaxPool2d] Output height mismatch");
  CHECK_ERROR(OC == C, "[MaxPool2d] Output channel mismatch");

  for (int oc = 0; oc < OC; ++oc) {
    for (int oh = 0; oh < OH; ++oh) {
      for (int ow = 0; ow < OW; ++ow) {
        float in0 = input->buf[oc * H * W + 2 * oh * W + 2 * ow];
        float in1 = input->buf[oc * H * W + 2 * oh * W + 2 * ow + 1];
        float in2 = input->buf[oc * H * W + (2 * oh + 1) * W + 2 * ow];
        float in3 = input->buf[oc * H * W + (2 * oh + 1) * W + 2 * ow + 1];
        output->buf[oc * OH * OW + oh * OW + ow] = max4(in0, in1, in2, in3);
      }
    }
  }
}
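
// Note on odd sizes: integer division truncates, so the 191-wide input pools
// to OW = 95 and the rightmost column never enters any 2x2 window (likewise
// 95 -> 47 in down2). The zero padding inside Concat below makes the widths
// line up again when the decoder upsamples back.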

/*
 * Concat
 * input1 shape = (N, C1, H1, W1)
 * input2 shape = (N, C2, H2, W2)
 * output shape = (N, OC, OH, OW)
 * where OC = C1 + C2 (C1 = C2),
 *       OH = H1 = H2,
 *       OW = W2 = W1 + 1 (input1 is zero-padded by one column on the right)
 */
void Concat(Tensor *input1, Tensor *input2, Tensor *output) {
  int C1 = input1->shape[1], H1 = input1->shape[2], W1 = input1->shape[3];
  int C2 = input2->shape[1], H2 = input2->shape[2], W2 = input2->shape[3];
  int OC = output->shape[1], OH = output->shape[2], OW = output->shape[3];

  CHECK_ERROR(OC == C1 * 2 && OC == C2 * 2, "[Concat] Output channel mismatch");
  CHECK_ERROR(OW == W1 + 1 && OW == W2, "[Concat] Output width mismatch");
  CHECK_ERROR(OH == H1 && OH == H2, "[Concat] Output height mismatch");

  // First half of the channels: the skip-connection features (input2)
  for (int oc = 0; oc < OC / 2; ++oc) {
    for (int oh = 0; oh < OH; ++oh) {
      for (int ow = 0; ow < OW; ++ow) {
        output->buf[oc * OH * OW + oh * OW + ow] =
            input2->buf[oc * OH * OW + oh * OW + ow];
      }
    }
  }

  // Second half: the upsampled features (input1), zero-padded to width OW
  for (int oc = OC / 2; oc < OC; ++oc) {
    for (int oh = 0; oh < OH; ++oh) {
      for (int ow = 0; ow < OW; ++ow) {
        if (ow == OW - 1)
          output->buf[oc * OH * OW + oh * OW + ow] = 0.0; // zero padding
        else
          output->buf[oc * OH * OW + oh * OW + ow] =
              input1->buf[(oc - OC / 2) * H1 * W1 + oh * W1 + ow];
      }
    }
  }
}
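
// Concrete instance from the decoder: up1 concatenates the upsampled
// {1, 128, 64, 94} map (input1) with the {1, 128, 64, 95} skip features from
// down1 (input2) into {1, 256, 64, 95}; input1's missing 95th column is
// filled with zeros so the widths match.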

/*
 * uNet_initialize
 * Initialize the model. Do input-independent work here.
 */
void uNet_initialize(int N, char *parameter_fname) {
  size_t parameter_binary_size = 0;
  float *parameter =
      (float *) read_binary(parameter_fname, &parameter_binary_size);

  // Parameters
  inc_double_conv_0_weight = new Tensor({64, 3, 3, 3}, parameter + OFFSET0);
  inc_double_conv_1_weight = new Tensor({64}, parameter + OFFSET1);
  inc_double_conv_1_bias = new Tensor({64}, parameter + OFFSET2);
  inc_double_conv_3_weight = new Tensor({64, 64, 3, 3}, parameter + OFFSET3);
  inc_double_conv_4_weight = new Tensor({64}, parameter + OFFSET4);
  inc_double_conv_4_bias = new Tensor({64}, parameter + OFFSET5);
  down1_maxpool_conv_1_double_conv_0_weight =
      new Tensor({128, 64, 3, 3}, parameter + OFFSET6);
  down1_maxpool_conv_1_double_conv_1_weight =
      new Tensor({128}, parameter + OFFSET7);
  down1_maxpool_conv_1_double_conv_1_bias =
      new Tensor({128}, parameter + OFFSET8);
  down1_maxpool_conv_1_double_conv_3_weight =
      new Tensor({128, 128, 3, 3}, parameter + OFFSET9);
  down1_maxpool_conv_1_double_conv_4_weight =
      new Tensor({128}, parameter + OFFSET10);
  down1_maxpool_conv_1_double_conv_4_bias =
      new Tensor({128}, parameter + OFFSET11);
  down2_maxpool_conv_1_double_conv_0_weight =
      new Tensor({256, 128, 3, 3}, parameter + OFFSET12);
  down2_maxpool_conv_1_double_conv_1_weight =
      new Tensor({256}, parameter + OFFSET13);
  down2_maxpool_conv_1_double_conv_1_bias =
      new Tensor({256}, parameter + OFFSET14);
  down2_maxpool_conv_1_double_conv_3_weight =
      new Tensor({256, 256, 3, 3}, parameter + OFFSET15);
  down2_maxpool_conv_1_double_conv_4_weight =
      new Tensor({256}, parameter + OFFSET16);
  down2_maxpool_conv_1_double_conv_4_bias =
      new Tensor({256}, parameter + OFFSET17);
  up1_up_weight = new Tensor({256, 128, 2, 2}, parameter + OFFSET18);
  up1_up_bias = new Tensor({128}, parameter + OFFSET19);
  up1_conv_double_conv_0_weight =
      new Tensor({128, 256, 3, 3}, parameter + OFFSET20);
  up1_conv_double_conv_1_weight = new Tensor({128}, parameter + OFFSET21);
  up1_conv_double_conv_1_bias = new Tensor({128}, parameter + OFFSET22);
  up1_conv_double_conv_3_weight =
      new Tensor({128, 128, 3, 3}, parameter + OFFSET23);
  up1_conv_double_conv_4_weight = new Tensor({128}, parameter + OFFSET24);
  up1_conv_double_conv_4_bias = new Tensor({128}, parameter + OFFSET25);
  up2_up_weight = new Tensor({128, 64, 2, 2}, parameter + OFFSET26);
  up2_up_bias = new Tensor({64}, parameter + OFFSET27);
  up2_conv_double_conv_0_weight =
      new Tensor({64, 128, 3, 3}, parameter + OFFSET28);
  up2_conv_double_conv_1_weight = new Tensor({64}, parameter + OFFSET29);
  up2_conv_double_conv_1_bias = new Tensor({64}, parameter + OFFSET30);
  up2_conv_double_conv_3_weight =
      new Tensor({64, 64, 3, 3}, parameter + OFFSET31);
  up2_conv_double_conv_4_weight = new Tensor({64}, parameter + OFFSET32);
  up2_conv_double_conv_4_bias = new Tensor({64}, parameter + OFFSET33);
  outc_conv_weight = new Tensor({2, 64, 1, 1}, parameter + OFFSET34);
  outc_conv_bias = new Tensor({2}, parameter + OFFSET35);
  inc_batchnorm_0_running_mean = new Tensor({64}, parameter + OFFSET36);
  inc_batchnorm_0_running_var = new Tensor({64}, parameter + OFFSET37);
  inc_batchnorm_1_running_mean = new Tensor({64}, parameter + OFFSET38);
  inc_batchnorm_1_running_var = new Tensor({64}, parameter + OFFSET39);
  down1_batchnorm_0_running_mean = new Tensor({128}, parameter + OFFSET40);
  down1_batchnorm_0_running_var = new Tensor({128}, parameter + OFFSET41);
  down1_batchnorm_1_running_mean = new Tensor({128}, parameter + OFFSET42);
  down1_batchnorm_1_running_var = new Tensor({128}, parameter + OFFSET43);
  down2_batchnorm_0_running_mean = new Tensor({256}, parameter + OFFSET44);
  down2_batchnorm_0_running_var = new Tensor({256}, parameter + OFFSET45);
  down2_batchnorm_1_running_mean = new Tensor({256}, parameter + OFFSET46);
  down2_batchnorm_1_running_var = new Tensor({256}, parameter + OFFSET47);
  up1_batchnorm_0_running_mean = new Tensor({128}, parameter + OFFSET48);
  up1_batchnorm_0_running_var = new Tensor({128}, parameter + OFFSET49);
  up1_batchnorm_1_running_mean = new Tensor({128}, parameter + OFFSET50);
  up1_batchnorm_1_running_var = new Tensor({128}, parameter + OFFSET51);
  up2_batchnorm_0_running_mean = new Tensor({64}, parameter + OFFSET52);
  up2_batchnorm_0_running_var = new Tensor({64}, parameter + OFFSET53);
  up2_batchnorm_1_running_mean = new Tensor({64}, parameter + OFFSET54);
  up2_batchnorm_1_running_var = new Tensor({64}, parameter + OFFSET55);

  // Activations
  inc_conv_0_output = new Tensor({1, 64, 128, 191});
  inc_batchnorm_0_output = new Tensor({1, 64, 128, 191});
  inc_conv_1_output = new Tensor({1, 64, 128, 191});
  inc_batchnorm_1_output = new Tensor({1, 64, 128, 191});

  down1_maxpool2d_0_output = new Tensor({1, 64, 64, 95});
  down1_conv_0_output = new Tensor({1, 128, 64, 95});
  down1_batchnorm_0_output = new Tensor({1, 128, 64, 95});
  down1_conv_1_output = new Tensor({1, 128, 64, 95});
  down1_batchnorm_1_output = new Tensor({1, 128, 64, 95});

  down2_maxpool2d_0_output = new Tensor({1, 128, 32, 47});
  down2_conv_0_output = new Tensor({1, 256, 32, 47});
  down2_batchnorm_0_output = new Tensor({1, 256, 32, 47});
  down2_conv_1_output = new Tensor({1, 256, 32, 47});
  down2_batchnorm_1_output = new Tensor({1, 256, 32, 47});

  up1_convt_0_output = new Tensor({1, 128, 64, 94});
  up1_concat_0_output = new Tensor({1, 256, 64, 95});
  up1_conv_0_output = new Tensor({1, 128, 64, 95});
  up1_batchnorm_0_output = new Tensor({1, 128, 64, 95});
  up1_conv_1_output = new Tensor({1, 128, 64, 95});
  up1_batchnorm_1_output = new Tensor({1, 128, 64, 95});

  up2_convt_0_output = new Tensor({1, 64, 128, 190});
  up2_concat_0_output = new Tensor({1, 128, 128, 191});
  up2_conv_0_output = new Tensor({1, 64, 128, 191});
  up2_batchnorm_0_output = new Tensor({1, 64, 128, 191});
  up2_conv_1_output = new Tensor({1, 64, 128, 191});
  up2_batchnorm_1_output = new Tensor({1, 64, 128, 191});
  outc_conv_0_output = new Tensor({1, 2, 128, 191});
}

/*
 * uNet_finalize
 * Finalize the model.
 */
void uNet_finalize() {
  // delete parameters
  delete inc_double_conv_0_weight;
  delete inc_double_conv_1_weight;
  delete inc_double_conv_1_bias;
  delete inc_double_conv_3_weight;
  delete inc_double_conv_4_weight;
  delete inc_double_conv_4_bias;
  delete down1_maxpool_conv_1_double_conv_0_weight;
  delete down1_maxpool_conv_1_double_conv_1_weight;
  delete down1_maxpool_conv_1_double_conv_1_bias;
  delete down1_maxpool_conv_1_double_conv_3_weight;
  delete down1_maxpool_conv_1_double_conv_4_weight;
  delete down1_maxpool_conv_1_double_conv_4_bias;
  delete down2_maxpool_conv_1_double_conv_0_weight;
  delete down2_maxpool_conv_1_double_conv_1_weight;
  delete down2_maxpool_conv_1_double_conv_1_bias;
  delete down2_maxpool_conv_1_double_conv_3_weight;
  delete down2_maxpool_conv_1_double_conv_4_weight;
  delete down2_maxpool_conv_1_double_conv_4_bias;
  delete up1_up_weight;
  delete up1_up_bias;
  delete up1_conv_double_conv_0_weight;
  delete up1_conv_double_conv_1_weight;
  delete up1_conv_double_conv_1_bias;
  delete up1_conv_double_conv_3_weight;
  delete up1_conv_double_conv_4_weight;
  delete up1_conv_double_conv_4_bias;
  delete up2_up_weight;
  delete up2_up_bias;
  delete up2_conv_double_conv_0_weight;
  delete up2_conv_double_conv_1_weight;
  delete up2_conv_double_conv_1_bias;
  delete up2_conv_double_conv_3_weight;
  delete up2_conv_double_conv_4_weight;
  delete up2_conv_double_conv_4_bias;
  delete outc_conv_weight;
  delete outc_conv_bias;
  delete inc_batchnorm_0_running_mean;
  delete inc_batchnorm_0_running_var;
  delete down1_batchnorm_0_running_mean;
  delete down1_batchnorm_0_running_var;
  delete down2_batchnorm_0_running_mean;
  delete down2_batchnorm_0_running_var;
  delete up1_batchnorm_0_running_mean;
  delete up1_batchnorm_0_running_var;
  delete up2_batchnorm_0_running_mean;
  delete up2_batchnorm_0_running_var;
  delete inc_batchnorm_1_running_mean;
  delete inc_batchnorm_1_running_var;
  delete down1_batchnorm_1_running_mean;
  delete down1_batchnorm_1_running_var;
  delete down2_batchnorm_1_running_mean;
  delete down2_batchnorm_1_running_var;
  delete up1_batchnorm_1_running_mean;
  delete up1_batchnorm_1_running_var;
  delete up2_batchnorm_1_running_mean;
  delete up2_batchnorm_1_running_var;

  // delete activations
  delete inc_conv_0_output;
  delete inc_batchnorm_0_output;
  delete inc_conv_1_output;
  delete inc_batchnorm_1_output;
  delete down1_maxpool2d_0_output;
  delete down1_conv_0_output;
  delete down1_batchnorm_0_output;
  delete down1_conv_1_output;
  delete down1_batchnorm_1_output;
  delete down2_maxpool2d_0_output;
  delete down2_conv_0_output;
  delete down2_batchnorm_0_output;
  delete down2_conv_1_output;
  delete down2_batchnorm_1_output;
  delete up1_convt_0_output;
  delete up1_concat_0_output;
  delete up1_conv_0_output;
  delete up1_batchnorm_0_output;
  delete up1_conv_1_output;
  delete up1_batchnorm_1_output;
  delete up2_convt_0_output;
  delete up2_concat_0_output;
  delete up2_conv_0_output;
  delete up2_batchnorm_0_output;
  delete up2_conv_1_output;
  delete up2_batchnorm_1_output;
  delete outc_conv_0_output;
}