From 08c7cf9c88befd845f00c00d85e40a9eead4b1b3 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sun, 19 Jun 2016 21:28:15 +0000
Subject: [PATCH] no mean on input binarization
---
src/batchnorm_layer.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/src/batchnorm_layer.c b/src/batchnorm_layer.c
index 6ea4040..9b68277 100644
--- a/src/batchnorm_layer.c
+++ b/src/batchnorm_layer.c
@@ -135,6 +135,20 @@
 }
 
 #ifdef GPU
+
+void pull_batchnorm_layer(layer l)
+{
+    cuda_pull_array(l.scales_gpu, l.scales, l.c);
+    cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+    cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+void push_batchnorm_layer(layer l)
+{
+    cuda_push_array(l.scales_gpu, l.scales, l.c);
+    cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+    cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+
 void forward_batchnorm_layer_gpu(layer l, network_state state)
 {
     if(l.type == BATCHNORM) copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
--
Gitblit v1.10.0
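
Note on the change: pull_batchnorm_layer copies a batchnorm layer's learned scales and its rolling mean/variance statistics from GPU memory back to the host, and push_batchnorm_layer uploads them in the other direction. This is the pattern that lets batchnorm parameters be written to and read from a weights file when darknet is built with GPU support: pull before saving so the host arrays are current, push after loading so the device copies match. The sketch below is illustrative only, not part of the patch: save_batchnorm_weights and load_batchnorm_weights are hypothetical names (darknet's real read/write paths live in parser.c), and it assumes darknet's layer.h header for the layer struct.

    /* Illustrative sketch, not part of the patch: wrapping the new pull/push
     * helpers around weight serialization. Function names here are hypothetical. */
    #include <stdio.h>
    #include "layer.h"   /* layer struct: scales, rolling_mean, rolling_variance, c */

    #ifdef GPU
    void pull_batchnorm_layer(layer l);   /* added in batchnorm_layer.c by this patch */
    void push_batchnorm_layer(layer l);
    #endif

    /* Copy the statistics GPU -> host, then write them to an open weights file. */
    void save_batchnorm_weights(layer l, FILE *fp)
    {
    #ifdef GPU
        pull_batchnorm_layer(l);
    #endif
        fwrite(l.scales,           sizeof(float), l.c, fp);
        fwrite(l.rolling_mean,     sizeof(float), l.c, fp);
        fwrite(l.rolling_variance, sizeof(float), l.c, fp);
    }

    /* Read the statistics from a weights file, then copy them host -> GPU. */
    void load_batchnorm_weights(layer l, FILE *fp)
    {
        if(fread(l.scales,           sizeof(float), l.c, fp) != (size_t)l.c) return;
        if(fread(l.rolling_mean,     sizeof(float), l.c, fp) != (size_t)l.c) return;
        if(fread(l.rolling_variance, sizeof(float), l.c, fp) != (size_t)l.c) return;
    #ifdef GPU
        push_batchnorm_layer(l);
    #endif
    }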