From 352ae7e65b6a74bcd768aa88b866a44c713284c8 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Wed, 26 Oct 2016 15:35:44 +0000
Subject: [PATCH] ADAM
---
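Notes (placed after the "---" cut line, so git-am ignores them): this excerpt
of the ADAM commit wires forward/backward function pointers onto the batchnorm
layer and adds pull/push helpers that mirror the layer's learned scales and
rolling statistics between host and device. Once every layer type exposes
l.forward / l.backward (plus the _gpu variants under #ifdef GPU), the network
driver can dispatch layers generically instead of switching on l.type. Below
is a minimal sketch of such a dispatch loop, assuming darknet's layer,
network, and network_state types as of this commit; it is illustrative only,
not code from this patch:

    void forward_network(network net, network_state state)
    {
        int i;
        for(i = 0; i < net.n; ++i){
            state.index = i;          /* tell the layer its position */
            layer l = net.layers[i];
            l.forward(l, state);      /* e.g. forward_batchnorm_layer */
            state.input = l.output;   /* layer i's output feeds layer i+1 */
        }
    }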
src/batchnorm_layer.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/src/batchnorm_layer.c b/src/batchnorm_layer.c
index 6ea4040..510f1b2 100644
--- a/src/batchnorm_layer.c
+++ b/src/batchnorm_layer.c
@@ -28,7 +28,13 @@
 
     layer.rolling_mean = calloc(c, sizeof(float));
     layer.rolling_variance = calloc(c, sizeof(float));
+
+    layer.forward = forward_batchnorm_layer;
+    layer.backward = backward_batchnorm_layer;
 #ifdef GPU
+    layer.forward_gpu = forward_batchnorm_layer_gpu;
+    layer.backward_gpu = backward_batchnorm_layer_gpu;
+
     layer.output_gpu = cuda_make_array(layer.output, h * w * c * batch);
     layer.delta_gpu = cuda_make_array(layer.delta, h * w * c * batch);
 
@@ -135,6 +141,20 @@
 }
 
 #ifdef GPU
+
+void pull_batchnorm_layer(layer l)
+{
+    cuda_pull_array(l.scales_gpu, l.scales, l.c);
+    cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+    cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+void push_batchnorm_layer(layer l)
+{
+    cuda_push_array(l.scales_gpu, l.scales, l.c);
+    cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+    cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+
 void forward_batchnorm_layer_gpu(layer l, network_state state)
 {
     if(l.type == BATCHNORM) copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
--
Gitblit v1.10.0
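
Reviewer note (outside the patch proper): pull_batchnorm_layer and
push_batchnorm_layer follow darknet's pull/push convention for keeping a
layer's parameters coherent across host and device: pull before saving or
inspecting weights on the CPU side, push after loading or editing them. The
cuda_pull_array / cuda_push_array helpers are assumed to be thin cudaMemcpy
wrappers that abort on error; the self-contained sketch below illustrates
that assumption and is not a quote of src/cuda.c:

    #include <stdio.h>
    #include <stdlib.h>
    #include <cuda_runtime.h>

    /* Assumed behavior of cuda_pull_array: copy n floats device to host. */
    void cuda_pull_array(float *x_gpu, float *x, size_t n)
    {
        cudaError_t status = cudaMemcpy(x, x_gpu, sizeof(float)*n,
                                        cudaMemcpyDeviceToHost);
        if(status != cudaSuccess){
            fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(status));
            exit(EXIT_FAILURE);
        }
    }

    /* Assumed behavior of cuda_push_array: copy n floats host to device. */
    void cuda_push_array(float *x_gpu, float *x, size_t n)
    {
        cudaError_t status = cudaMemcpy(x_gpu, x, sizeof(float)*n,
                                        cudaMemcpyHostToDevice);
        if(status != cudaSuccess){
            fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(status));
            exit(EXIT_FAILURE);
        }
    }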