From cd8d53df21f3ad2810add2a8cff766c745f55a17 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 09 May 2014 22:14:52 +0000
Subject: [PATCH] So there WAS this huge bug. Gone now

---
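A quick sketch of the cfg keys the new parsing code reads (the [lrnorm] and
[localresponsenormalization] section names come from is_normalization() below;
the numeric values are illustrative placeholders, not this patch's in-code
defaults, which are size=1, alpha=0., beta=1., kappa=1.):

    [lrnorm]
    size=5
    alpha=0.0001
    beta=0.75
    kappa=1

parse_connected() now also reads an optional "dropout" key (default 0.) and
passes it to make_connected_layer(), e.g. dropout=0.5 (again a placeholder
value) inside whichever section is_connected() matches; that section name is
not shown in this diff.
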
 src/parser.c |   39 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/src/parser.c b/src/parser.c
index cf64b55..5d6aa1c 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -7,6 +7,7 @@
 #include "convolutional_layer.h"
 #include "connected_layer.h"
 #include "maxpool_layer.h"
+#include "normalization_layer.h"
 #include "softmax_layer.h"
 #include "list.h"
 #include "option_list.h"
@@ -21,6 +22,7 @@
 int is_connected(section *s);
 int is_maxpool(section *s);
 int is_softmax(section *s);
+int is_normalization(section *s);
 list *read_cfg(char *filename);
 
 void free_section(section *s)
@@ -87,6 +89,7 @@
     int i;
     int input;
     int output = option_find_int(options, "output",1);
+    float dropout = option_find_float(options, "dropout", 0.);
     char *activation_s = option_find_str(options, "activation", "sigmoid");
     ACTIVATION activation = get_activation(activation_s);
     if(count == 0){
@@ -95,7 +98,7 @@
     }else{
         input =  get_network_output_size_layer(net, count-1);
     }
-    connected_layer *layer = make_connected_layer(net.batch, input, output, activation);
+    connected_layer *layer = make_connected_layer(net.batch, input, output, dropout, activation);
     char *data = option_find_str(options, "data", 0);
     if(data){
         char *curr = data;
@@ -152,6 +155,30 @@
     return layer;
 }
 
+normalization_layer *parse_normalization(list *options, network net, int count)
+{
+    int h,w,c;
+    int size = option_find_int(options, "size",1);
+    float alpha = option_find_float(options, "alpha", 0.);
+    float beta = option_find_float(options, "beta", 1.);
+    float kappa = option_find_float(options, "kappa", 1.);
+    if(count == 0){
+        h = option_find_int(options, "height",1);
+        w = option_find_int(options, "width",1);
+        c = option_find_int(options, "channels",1);
+        net.batch = option_find_int(options, "batch",1);
+    }else{
+        image m =  get_network_image_layer(net, count-1);
+        h = m.h;
+        w = m.w;
+        c = m.c;
+        if(h == 0) error("Layer before normalization layer must output image.");
+    }
+    normalization_layer *layer = make_normalization_layer(net.batch, h, w, c, size, alpha, beta, kappa);
+    option_unused(options);
+    return layer;
+}
+
 network parse_network_cfg(char *filename)
 {
     list *sections = read_cfg(filename);
@@ -182,6 +209,11 @@
             net.types[count] = MAXPOOL;
             net.layers[count] = layer;
             net.batch = layer->batch;
+        }else if(is_normalization(s)){
+            normalization_layer *layer = parse_normalization(options, net, count);
+            net.types[count] = NORMALIZATION;
+            net.layers[count] = layer;
+            net.batch = layer->batch;
         }else{
             fprintf(stderr, "Type not recognized: %s\n", s->type);
         }
@@ -216,6 +248,11 @@
     return (strcmp(s->type, "[soft]")==0
             || strcmp(s->type, "[softmax]")==0);
 }
+int is_normalization(section *s)
+{
+    return (strcmp(s->type, "[lrnorm]")==0
+            || strcmp(s->type, "[localresponsenormalization]")==0);
+}
 
 int read_option(char *s, list *options)
 {

--
Gitblit v1.10.0