From 5a49c1d962895b4b2835ae47201d6b07e669153b Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 10 Apr 2015 06:00:33 +0000
Subject: [PATCH] rotation

---
 src/normalization_layer.c |   19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

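Note (not part of the commit): the forward pass patched below computes a
cross-channel local-response normalization. For each spatial position i and
channel k it divides the input by (kappa + alpha * sum of squared inputs over
a window of roughly `size` neighboring channels) raised to `beta`; the patch
maintains that window sum incrementally via add_square_array/sub_square_array.
The following is a minimal standalone C sketch of the same computation using a
direct window sum instead of the sliding one; the function name lrn_forward and
its exact window clamping are illustrative assumptions, not darknet API.

    #include <math.h>

    /* out[k*h*w + i] = in[k*h*w + i] /
     *     (kappa + alpha * sum over channels j in [k-size/2, k+size/2]
     *                      of in[j*h*w + i]^2) ^ beta
     * Data layout is channel-major (c planes of h*w floats), matching
     * layer.output in the patch. */
    void lrn_forward(const float *in, float *out,
                     int c, int h, int w,
                     int size, float alpha, float beta, float kappa)
    {
        int imsize = h*w;
        for(int k = 0; k < c; ++k){
            /* clamp the channel window to [0, c-1] */
            int lo = k - size/2;
            int hi = k + size/2;
            if(lo < 0)   lo = 0;
            if(hi > c-1) hi = c-1;
            for(int i = 0; i < imsize; ++i){
                float sum = 0;
                for(int j = lo; j <= hi; ++j){
                    float v = in[j*imsize + i];
                    sum += v*v;
                }
                out[k*imsize + i] = in[k*imsize + i]
                                  / powf(kappa + alpha*sum, beta);
            }
        }
    }
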
diff --git a/src/normalization_layer.c b/src/normalization_layer.c
index d82451b..93c2ad9 100644
--- a/src/normalization_layer.c
+++ b/src/normalization_layer.c
@@ -6,7 +6,7 @@
     int h = layer.h;
     int w = layer.w;
     int c = layer.c;
-    return float_to_image(h,w,c,layer.output);
+    return float_to_image(w,h,c,layer.output);
 }
 
 image get_normalization_delta(normalization_layer layer)
@@ -14,7 +14,7 @@
     int h = layer.h;
     int w = layer.w;
     int c = layer.c;
-    return float_to_image(h,w,c,layer.delta);
+    return float_to_image(w,h,c,layer.delta);
 }
 
 normalization_layer *make_normalization_layer(int batch, int h, int w, int c, int size, float alpha, float beta, float kappa)
@@ -59,28 +59,29 @@
     }
 }
 
-void forward_normalization_layer(const normalization_layer layer, float *in)
+void forward_normalization_layer(const normalization_layer layer, network_state state)
 {
     int i,j,k;
     memset(layer.sums, 0, layer.h*layer.w*sizeof(float));
     int imsize = layer.h*layer.w;
     for(j = 0; j < layer.size/2; ++j){
-        if(j < layer.c) add_square_array(in+j*imsize, layer.sums, imsize);
+        if(j < layer.c) add_square_array(state.input+j*imsize, layer.sums, imsize);
     }
     for(k = 0; k < layer.c; ++k){
         int next = k+layer.size/2;
         int prev = k-layer.size/2-1;
-        if(next < layer.c) add_square_array(in+next*imsize, layer.sums, imsize);
-        if(prev > 0)       sub_square_array(in+prev*imsize, layer.sums, imsize);
+        if(next < layer.c) add_square_array(state.input+next*imsize, layer.sums, imsize);
+        if(prev > 0)       sub_square_array(state.input+prev*imsize, layer.sums, imsize);
         for(i = 0; i < imsize; ++i){
-            layer.output[k*imsize + i] = in[k*imsize+i] / pow(layer.kappa + layer.alpha * layer.sums[i], layer.beta);
+            layer.output[k*imsize + i] = state.input[k*imsize+i] / pow(layer.kappa + layer.alpha * layer.sums[i], layer.beta);
         }
     }
 }
 
-void backward_normalization_layer(const normalization_layer layer, float *in, float *delta)
+void backward_normalization_layer(const normalization_layer layer, network_state state)
 {
-    //TODO!
+    // TODO!
+    // OR NOT TODO!!
 }
 
 void visualize_normalization_layer(normalization_layer layer, char *window)

--
Gitblit v1.10.0