From 481b57a96a9ef29b112caec1bb3e17ffb043ceae Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sun, 25 Sep 2016 06:12:54 +0000
Subject: [PATCH] So I have this new programming paradigm.......
---
src/batchnorm_layer.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/src/batchnorm_layer.c b/src/batchnorm_layer.c
index 6ea4040..510f1b2 100644
--- a/src/batchnorm_layer.c
+++ b/src/batchnorm_layer.c
@@ -28,7 +28,13 @@
layer.rolling_mean = calloc(c, sizeof(float));
layer.rolling_variance = calloc(c, sizeof(float));
+
+ layer.forward = forward_batchnorm_layer;
+ layer.backward = backward_batchnorm_layer;
#ifdef GPU
+ layer.forward_gpu = forward_batchnorm_layer_gpu;
+ layer.backward_gpu = backward_batchnorm_layer_gpu;
+
layer.output_gpu = cuda_make_array(layer.output, h * w * c * batch);
layer.delta_gpu = cuda_make_array(layer.delta, h * w * c * batch);
@@ -135,6 +141,20 @@
}
#ifdef GPU
+
+void pull_batchnorm_layer(layer l)
+{
+ cuda_pull_array(l.scales_gpu, l.scales, l.c);
+ cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+ cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+void push_batchnorm_layer(layer l)
+{
+ cuda_push_array(l.scales_gpu, l.scales, l.c);
+ cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+ cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+
void forward_batchnorm_layer_gpu(layer l, network_state state)
{
if(l.type == BATCHNORM) copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
--
Gitblit v1.10.0