From 132251d72325e1005ef6c47f83d6a4e9b9355d12 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Sun, 03 Jun 2018 21:37:08 +0000
Subject: [PATCH] You can do: fuse_conv_batchnorm(network net) from DLL/SO-library

---
 src/normalization_layer.c |   23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/src/normalization_layer.c b/src/normalization_layer.c
index d080559..069a079 100644
--- a/src/normalization_layer.c
+++ b/src/normalization_layer.c
@@ -21,11 +21,17 @@
     layer.norms = calloc(h * w * c * batch, sizeof(float));
     layer.inputs = w*h*c;
     layer.outputs = layer.inputs;
+
+    layer.forward = forward_normalization_layer;
+    layer.backward = backward_normalization_layer;
     #ifdef GPU
-    layer.output_gpu =  cuda_make_array(0, h * w * c * batch);
-    layer.delta_gpu =   cuda_make_array(0, h * w * c * batch);
-    layer.squared_gpu = cuda_make_array(0, h * w * c * batch);
-    layer.norms_gpu =   cuda_make_array(0, h * w * c * batch);
+    layer.forward_gpu = forward_normalization_layer_gpu;
+    layer.backward_gpu = backward_normalization_layer_gpu;
+
+    layer.output_gpu =  cuda_make_array(layer.output, h * w * c * batch);
+    layer.delta_gpu =   cuda_make_array(layer.delta, h * w * c * batch);
+    layer.squared_gpu = cuda_make_array(layer.squared, h * w * c * batch);
+    layer.norms_gpu =   cuda_make_array(layer.norms, h * w * c * batch);
     #endif
     return layer;
 }
@@ -49,10 +55,10 @@
     cuda_free(layer->delta_gpu); 
     cuda_free(layer->squared_gpu); 
     cuda_free(layer->norms_gpu);   
-    layer->output_gpu =  cuda_make_array(0, h * w * c * batch);
-    layer->delta_gpu =   cuda_make_array(0, h * w * c * batch);
-    layer->squared_gpu = cuda_make_array(0, h * w * c * batch);
-    layer->norms_gpu =   cuda_make_array(0, h * w * c * batch);
+    layer->output_gpu =  cuda_make_array(layer->output, h * w * c * batch);
+    layer->delta_gpu =   cuda_make_array(layer->delta, h * w * c * batch);
+    layer->squared_gpu = cuda_make_array(layer->squared, h * w * c * batch);
+    layer->norms_gpu =   cuda_make_array(layer->norms, h * w * c * batch);
 #endif
 }
 
@@ -90,6 +96,7 @@
 void backward_normalization_layer(const layer layer, network_state state)
 {
     // TODO This is approximate ;-)
+    // Also this should add into delta instead of overwriting it.
 
     int w = layer.w;
     int h = layer.h;

--
Gitblit v1.10.0