From 0f1a31648c5292fa49b35eac90a2ee676d6c13e6 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sat, 31 Jan 2015 06:05:23 +0000
Subject: [PATCH] parser: track seen images, add cost layer types, load weights via parse_data, push/pull layers on GPU

---
 src/parser.c |   97 +++++++++++++++++++++++-------------------------
 1 file changed, 47 insertions(+), 50 deletions(-)

diff --git a/src/parser.c b/src/parser.c
index 9bd2eb7..a00feec 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -67,7 +67,6 @@
 
 convolutional_layer *parse_convolutional(list *options, network *net, int count)
 {
-    int i;
     int h,w,c;
     float learning_rate, momentum, decay;
     int n = option_find_int(options, "filters",1);
@@ -87,6 +86,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         learning_rate = option_find_float_quiet(options, "learning_rate", net->learning_rate);
         momentum = option_find_float_quiet(options, "momentum", net->momentum);
@@ -98,34 +98,19 @@
         if(h == 0) error("Layer before convolutional layer must output image.");
     }
     convolutional_layer *layer = make_convolutional_layer(net->batch,h,w,c,n,size,stride,pad,activation,learning_rate,momentum,decay);
-    char *data = option_find_str(options, "data", 0);
-    if(data){
-        char *curr = data;
-        char *next = data;
-        for(i = 0; i < n; ++i){
-            while(*++next !='\0' && *next != ',');
-            *next = '\0';
-            sscanf(curr, "%g", &layer->biases[i]);
-            curr = next+1;
-        }
-        for(i = 0; i < c*n*size*size; ++i){
-            while(*++next !='\0' && *next != ',');
-            *next = '\0';
-            sscanf(curr, "%g", &layer->filters[i]);
-            curr = next+1;
-        }
-    }
     char *weights = option_find_str(options, "weights", 0);
     char *biases = option_find_str(options, "biases", 0);
-    parse_data(biases, layer->biases, n);
     parse_data(weights, layer->filters, c*n*size*size);
+    parse_data(biases, layer->biases, n);
+    #ifdef GPU
+    push_convolutional_layer(*layer);
+    #endif
     option_unused(options);
     return layer;
 }
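The inline loops that split a single data= option are removed in favor of separate weights= and biases= options, both read through parse_data. For reference, a minimal sketch of such a comma-separated float reader (an illustration only; the real parse_data in parser.c may differ in detail):

    #include <stdlib.h>
    #include <string.h>

    /* Fill a[0..n-1] with comma-separated floats from data; a sketch of the
     * parse_data helper the patch relies on, not the exact implementation. */
    void parse_data_sketch(char *data, float *a, int n)
    {
        int i;
        char *curr = data;
        if(!data) return;
        for(i = 0; i < n && curr; ++i){
            a[i] = atof(curr);          /* read one value up to the next comma */
            curr = strchr(curr, ',');   /* find the separator */
            if(curr) ++curr;            /* step past it */
        }
    }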
 
 connected_layer *parse_connected(list *options, network *net, int count)
 {
-    int i;
     int input;
     float learning_rate, momentum, decay;
     int output = option_find_int(options, "output",1);
@@ -147,27 +132,13 @@
         input =  get_network_output_size_layer(*net, count-1);
     }
     connected_layer *layer = make_connected_layer(net->batch, input, output, activation,learning_rate,momentum,decay);
-    char *data = option_find_str(options, "data", 0);
-    if(data){
-        char *curr = data;
-        char *next = data;
-        for(i = 0; i < output; ++i){
-            while(*++next !='\0' && *next != ',');
-            *next = '\0';
-            sscanf(curr, "%g", &layer->biases[i]);
-            curr = next+1;
-        }
-        for(i = 0; i < input*output; ++i){
-            while(*++next !='\0' && *next != ',');
-            *next = '\0';
-            sscanf(curr, "%g", &layer->weights[i]);
-            curr = next+1;
-        }
-    }
     char *weights = option_find_str(options, "weights", 0);
     char *biases = option_find_str(options, "biases", 0);
     parse_data(biases, layer->biases, output);
     parse_data(weights, layer->weights, input*output);
+    #ifdef GPU
+    push_connected_layer(*layer);
+    #endif
     option_unused(options);
     return layer;
 }
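As with the convolutional layer, the connected layer's weights and biases now come from the weights= and biases= keys rather than a single data= blob. A made-up cfg fragment showing the format (here 2 outputs and an implied 4 inputs, so input*output = 8 weight values; other layer options are omitted):

    [connected]
    output=2
    biases=0.5,-0.5
    weights=0.1,0.2,0.3,0.4,-0.1,-0.2,-0.3,-0.4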
@@ -178,6 +149,7 @@
     if(count == 0){
         input = option_find_int(options, "input",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -192,10 +164,13 @@
     if(count == 0){
         input = option_find_int(options, "input",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
-    cost_layer *layer = make_cost_layer(net->batch, input);
+    char *type_s = option_find_str(options, "type", "sse");
+    COST_TYPE type = get_cost_type(type_s);
+    cost_layer *layer = make_cost_layer(net->batch, input, type);
     option_unused(options);
     return layer;
 }
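parse_cost now converts a type= string (defaulting to "sse") into a COST_TYPE before building the layer. A hedged sketch of the string-to-enum mapping this implies; the real enum and helpers live with the cost layer code and may list more cost types:

    #include <stdio.h>
    #include <string.h>

    /* Sketch only: the actual COST_TYPE enum may have additional members. */
    typedef enum { SSE } COST_TYPE;

    COST_TYPE get_cost_type(char *s)
    {
        if(strcmp(s, "sse") == 0) return SSE;
        fprintf(stderr, "Couldn't find cost type %s, using SSE\n", s);
        return SSE;
    }

    char *get_cost_string(COST_TYPE a)
    {
        switch(a){
            case SSE: return "sse";
        }
        return "sse";
    }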
@@ -218,6 +193,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -240,6 +216,7 @@
         w = option_find_int(options, "width",1);
         c = option_find_int(options, "channels",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -252,6 +229,7 @@
     return layer;
 }
 
+/*
 freeweight_layer *parse_freeweight(list *options, network *net, int count)
 {
     int input;
@@ -265,6 +243,7 @@
     option_unused(options);
     return layer;
 }
+*/
 
 dropout_layer *parse_dropout(list *options, network *net, int count)
 {
@@ -273,6 +252,13 @@
     if(count == 0){
         net->batch = option_find_int(options, "batch",1);
         input = option_find_int(options, "input",1);
+        float learning_rate = option_find_float(options, "learning_rate", .001);
+        float momentum = option_find_float(options, "momentum", .9);
+        float decay = option_find_float(options, "decay", .0001);
+        net->learning_rate = learning_rate;
+        net->momentum = momentum;
+        net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
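This mirrors the other parse_* functions: whichever layer section appears first in the cfg also carries the network-wide settings, including the new seen counter (the number of images seen so far, presumably so training progress survives a save/load round trip). A made-up cfg head showing the convention; the keys match the option_find calls above, the values are arbitrary, and layer-specific options are left out:

    [dropout]
    batch=128
    input=1024
    learning_rate=0.01
    momentum=0.9
    decay=0.0005
    seen=0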
@@ -293,6 +279,7 @@
         w = option_find_int(options, "width",1);
         c = option_find_int(options, "channels",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -348,9 +335,10 @@
             net.types[count] = DROPOUT;
             net.layers[count] = layer;
         }else if(is_freeweight(s)){
-            freeweight_layer *layer = parse_freeweight(options, &net, count);
-            net.types[count] = FREEWEIGHT;
-            net.layers[count] = layer;
+            //freeweight_layer *layer = parse_freeweight(options, &net, count);
+            //net.types[count] = FREEWEIGHT;
+            //net.layers[count] = layer;
+            fprintf(stderr, "Layer type no longer supported: %s\n", s->type);

         }else{
             fprintf(stderr, "Type not recognized: %s\n", s->type);
         }
@@ -409,8 +397,8 @@
 
 int read_option(char *s, list *options)
 {
-    int i;
-    int len = strlen(s);
+    size_t i;
+    size_t len = strlen(s);
     char *val = 0;
     for(i = 0; i < len; ++i){
         if(s[i] == '='){
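The switch from int to size_t matches strlen's return type and avoids a signed/unsigned comparison in the loop condition. For reference, a self-contained sketch of the key=value split these lines perform (simplified; the full read_option also records the pair in the options list):

    #include <string.h>

    /* Terminate the key at '=' in place and return a pointer to the value,
     * or 0 if the line contains no '='. Simplified from read_option. */
    char *split_option(char *s)
    {
        size_t i;
        size_t len = strlen(s);
        for(i = 0; i < len; ++i){
            if(s[i] == '='){
                s[i] = '\0';
                return s + i + 1;
            }
        }
        return 0;
    }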
@@ -462,6 +450,9 @@
 
 void print_convolutional_cfg(FILE *fp, convolutional_layer *l, network net, int count)
 {
+    #ifdef GPU
+    if(gpu_index >= 0)  pull_convolutional_layer(*l);
+    #endif
     int i;
     fprintf(fp, "[convolutional]\n");
     if(count == 0) {
@@ -471,8 +462,9 @@
                 "channels=%d\n"
                 "learning_rate=%g\n"
                 "momentum=%g\n"
-                "decay=%g\n",
-                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay);
+                "decay=%g\n"
+                "seen=%d\n",
+                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay, net.seen);
     } else {
         if(l->learning_rate != net.learning_rate)
             fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -516,6 +508,9 @@
 
 void print_connected_cfg(FILE *fp, connected_layer *l, network net, int count)
 {
+    #ifdef GPU
+    if(gpu_index >= 0) pull_connected_layer(*l);
+    #endif
     int i;
     fprintf(fp, "[connected]\n");
     if(count == 0){
@@ -523,8 +518,9 @@
                 "input=%d\n"
                 "learning_rate=%g\n"
                 "momentum=%g\n"
-                "decay=%g\n",
-                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay);
+                "decay=%g\n"
+                "seen=%d\n",
+                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay, net.seen);
     } else {
         if(l->learning_rate != net.learning_rate)
             fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -555,8 +551,9 @@
                 "channels=%d\n"
                 "learning_rate=%g\n"
                 "momentum=%g\n"
-                "decay=%g\n",
-                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay);
+                "decay=%g\n"
+                "seen=%d\n",
+                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay, net.seen);
     }
     fprintf(fp, "crop_height=%d\ncrop_width=%d\nflip=%d\n\n", l->crop_height, l->crop_width, l->flip);
 }
@@ -595,7 +592,7 @@
 
 void print_cost_cfg(FILE *fp, cost_layer *l, network net, int count)
 {
-    fprintf(fp, "[cost]\n");
+    fprintf(fp, "[cost]\ntype=%s\n", get_cost_string(l->type));
     if(count == 0) fprintf(fp, "batch=%d\ninput=%d\n", l->batch, l->inputs);
     fprintf(fp, "\n");
 }
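With the type string included, a saved cost section can be parsed back by parse_cost above. For a cost layer that is not the first layer of the network, the printed section is simply (assuming the default type):

    [cost]
    type=sse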

--
Gitblit v1.10.0