From 913d355ec1cf34aad71fdd75202fc3b0309e63a0 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 28 Jan 2016 20:30:38 +0000
Subject: [PATCH] Add local, RNN, activation, and shortcut layers; version the weight file format

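Add several new layer types and rework weight-file I/O:

 * [local]: locally connected layer (filters/size/stride/pad/activation)
 * [rnn]: recurrent layer built from three connected sub-layers; [net]
   gains a time_steps option for unrolling
 * [activation] and [shortcut] layers; "from" in [shortcut] and the
   entries of [route] "layers" may now be negative, relative to the
   current layer
 * connected layers gain batch_normalize, convolutional layers gain
   binary and flipped, softmax gains temperature
 * weight files now begin with a major/minor/revision header in place of
   the old learning_rate/momentum/decay floats; pre-header files are
   detected on load and their connected weights transposed

An illustrative cfg fragment exercising the new sections (the option
names come from the parsers below; the values are examples only):

    [rnn]
    output=256
    hidden=256
    activation=leaky
    batch_normalize=1

    [shortcut]
    from=-3
    activation=linear
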
---
 src/parser.c |  219 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 203 insertions(+), 16 deletions(-)

diff --git a/src/parser.c b/src/parser.c
index b095294..a48f207 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -7,15 +7,19 @@
 #include "crop_layer.h"
 #include "cost_layer.h"
 #include "convolutional_layer.h"
+#include "activation_layer.h"
 #include "normalization_layer.h"
 #include "deconvolutional_layer.h"
 #include "connected_layer.h"
+#include "rnn_layer.h"
 #include "maxpool_layer.h"
 #include "softmax_layer.h"
 #include "dropout_layer.h"
 #include "detection_layer.h"
 #include "avgpool_layer.h"
+#include "local_layer.h"
 #include "route_layer.h"
+#include "shortcut_layer.h"
 #include "list.h"
 #include "option_list.h"
 #include "utils.h"
@@ -27,14 +31,18 @@
 
 int is_network(section *s);
 int is_convolutional(section *s);
+int is_activation(section *s);
+int is_local(section *s);
 int is_deconvolutional(section *s);
 int is_connected(section *s);
+int is_rnn(section *s);
 int is_maxpool(section *s);
 int is_avgpool(section *s);
 int is_dropout(section *s);
 int is_softmax(section *s);
 int is_normalization(section *s);
 int is_crop(section *s);
+int is_shortcut(section *s);
 int is_cost(section *s);
 int is_detection(section *s);
 int is_route(section *s);
@@ -78,6 +86,8 @@
     int h;
     int w;
     int c;
+    int index;        /* index of the layer being parsed, for relative refs */
+    int time_steps;   /* RNN unroll length, copied from [net] */
 } size_params;
 
 deconvolutional_layer parse_deconvolutional(list *options, size_params params)
@@ -107,6 +117,27 @@
     return layer;
 }
 
+local_layer parse_local(list *options, size_params params)
+{
+    int n = option_find_int(options, "filters",1);
+    int size = option_find_int(options, "size",1);
+    int stride = option_find_int(options, "stride",1);
+    int pad = option_find_int(options, "pad",0);
+    char *activation_s = option_find_str(options, "activation", "logistic");
+    ACTIVATION activation = get_activation(activation_s);
+
+    int batch,h,w,c;
+    h = params.h;
+    w = params.w;
+    c = params.c;
+    batch=params.batch;
+    if(!(h && w && c)) error("Layer before local layer must output image.");
+
+    local_layer layer = make_local_layer(batch,h,w,c,n,size,stride,pad,activation);
+
+    return layer;
+}
+
 convolutional_layer parse_convolutional(list *options, size_params params)
 {
     int n = option_find_int(options, "filters",1);
@@ -123,8 +154,10 @@
     batch=params.batch;
     if(!(h && w && c)) error("Layer before convolutional layer must output image.");
     int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+    int binary = option_find_int_quiet(options, "binary", 0);
 
-    convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation, batch_normalize);
+    convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation, batch_normalize, binary);
+    layer.flipped = option_find_int_quiet(options, "flipped", 0);
 
     char *weights = option_find_str(options, "weights", 0);
     char *biases = option_find_str(options, "biases", 0);
@@ -136,13 +169,27 @@
     return layer;
 }
 
+layer parse_rnn(list *options, size_params params)
+{
+    int output = option_find_int(options, "output",1);
+    int hidden = option_find_int(options, "hidden",1);
+    char *activation_s = option_find_str(options, "activation", "logistic");
+    ACTIVATION activation = get_activation(activation_s);
+    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
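+    /* params.batch already includes the time_steps factor applied in
+     * parse_net_options, so each mini-batch carries whole unrolled sequences */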
+    layer l = make_rnn_layer(params.batch, params.inputs, hidden, output, params.time_steps, activation, batch_normalize);
+
+    return l;
+}
+
 connected_layer parse_connected(list *options, size_params params)
 {
     int output = option_find_int(options, "output",1);
     char *activation_s = option_find_str(options, "activation", "logistic");
     ACTIVATION activation = get_activation(activation_s);
+    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
 
-    connected_layer layer = make_connected_layer(params.batch, params.inputs, output, activation);
+    connected_layer layer = make_connected_layer(params.batch, params.inputs, output, activation, batch_normalize);
 
     char *weights = option_find_str(options, "weights", 0);
     char *biases = option_find_str(options, "biases", 0);
@@ -156,8 +203,9 @@
 
 softmax_layer parse_softmax(list *options, size_params params)
 {
-    int groups = option_find_int(options, "groups",1);
+    int groups = option_find_int_quiet(options, "groups",1);
     softmax_layer layer = make_softmax_layer(params.batch, params.inputs, groups);
+    layer.temperature = option_find_float_quiet(options, "temperature", 1);
     return layer;
 }
 
@@ -264,6 +312,41 @@
     return l;
 }
 
+layer parse_shortcut(list *options, size_params params, network net)
+{
+    char *l = option_find(options, "from");
+    int index = atoi(l);
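+    /* a negative "from" is relative: -1 means the layer immediately before this one */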
+    if(index < 0) index = params.index + index;
+
+    int batch = params.batch;
+    layer from = net.layers[index];
+
+    layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
+
+    char *activation_s = option_find_str(options, "activation", "linear");
+    ACTIVATION activation = get_activation(activation_s);
+    s.activation = activation;
+    return s;
+}
+
+
+layer parse_activation(list *options, size_params params)
+{
+    char *activation_s = option_find_str(options, "activation", "linear");
+    ACTIVATION activation = get_activation(activation_s);
+
+    layer l = make_activation_layer(params.batch, params.inputs, activation);
+
+    l.out_h = params.h;
+    l.out_w = params.w;
+    l.out_c = params.c;
+    l.h = params.h;
+    l.w = params.w;
+    l.c = params.c;
+
+    return l;
+}
+
 route_layer parse_route(list *options, size_params params, network net)
 {
     char *l = option_find(options, "layers");   
@@ -280,13 +363,14 @@
     for(i = 0; i < n; ++i){
         int index = atoi(l);
         l = strchr(l, ',')+1;
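+        /* as in [shortcut], negative indices are relative to the current layer */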
+        if(index < 0) index = params.index + index;
         layers[i] = index;
         sizes[i] = net.layers[index].outputs;
     }
     int batch = params.batch;
 
     route_layer layer = make_route_layer(batch, n, layers, sizes);
-    
+
     convolutional_layer first = net.layers[layers[0]];
     layer.out_w = first.out_w;
     layer.out_h = first.out_h;
@@ -323,7 +407,9 @@
     net->momentum = option_find_float(options, "momentum", .9);
     net->decay = option_find_float(options, "decay", .0001);
     int subdivs = option_find_int(options, "subdivisions",1);
+    net->time_steps = option_find_int_quiet(options, "time_steps",1);
     net->batch /= subdivs;
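+    /* scale the batch back up so each mini-batch holds time_steps sequential frames */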
+    net->batch *= net->time_steps;
     net->subdivisions = subdivs;
 
     net->h = option_find_int_quiet(options, "height",0);
@@ -391,19 +477,27 @@
     params.c = net.c;
     params.inputs = net.inputs;
     params.batch = net.batch;
+    params.time_steps = net.time_steps;
 
     n = n->next;
     int count = 0;
     free_section(s);
     while(n){
+        params.index = count;
         fprintf(stderr, "%d: ", count);
         s = (section *)n->val;
         options = s->options;
         layer l = {0};
         if(is_convolutional(s)){
             l = parse_convolutional(options, params);
+        }else if(is_local(s)){
+            l = parse_local(options, params);
+        }else if(is_activation(s)){
+            l = parse_activation(options, params);
         }else if(is_deconvolutional(s)){
             l = parse_deconvolutional(options, params);
+        }else if(is_rnn(s)){
+            l = parse_rnn(options, params);
         }else if(is_connected(s)){
             l = parse_connected(options, params);
         }else if(is_crop(s)){
@@ -422,6 +516,8 @@
             l = parse_avgpool(options, params);
         }else if(is_route(s)){
             l = parse_route(options, params, net);
+        }else if(is_shortcut(s)){
+            l = parse_shortcut(options, params, net);
         }else if(is_dropout(s)){
             l = parse_dropout(options, params);
             l.output = net.layers[count-1].output;
@@ -439,13 +535,13 @@
         net.layers[count] = l;
         free_section(s);
         n = n->next;
+        ++count;
         if(n){
             params.h = l.out_h;
             params.w = l.out_w;
             params.c = l.out_c;
             params.inputs = l.outputs;
         }
-        ++count;
     }   
     free_list(sections);
     net.outputs = get_network_output_size(net);
@@ -453,6 +549,10 @@
     return net;
 }
 
+int is_shortcut(section *s)
+{
+    return (strcmp(s->type, "[shortcut]")==0);
+}
 int is_crop(section *s)
 {
     return (strcmp(s->type, "[crop]")==0);
@@ -465,6 +565,10 @@
 {
     return (strcmp(s->type, "[detection]")==0);
 }
+int is_local(section *s)
+{
+    return (strcmp(s->type, "[local]")==0);
+}
 int is_deconvolutional(section *s)
 {
     return (strcmp(s->type, "[deconv]")==0
@@ -475,11 +579,19 @@
     return (strcmp(s->type, "[conv]")==0
             || strcmp(s->type, "[convolutional]")==0);
 }
+int is_activation(section *s)
+{
+    return (strcmp(s->type, "[activation]")==0);
+}
 int is_network(section *s)
 {
     return (strcmp(s->type, "[net]")==0
             || strcmp(s->type, "[network]")==0);
 }
+int is_rnn(section *s)
+{
+    return (strcmp(s->type, "[rnn]")==0);
+}
 int is_connected(section *s)
 {
     return (strcmp(s->type, "[conn]")==0
@@ -590,15 +702,34 @@
     fclose(fp);
 }
 
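+/* Write one connected layer's parameters; shared by CONNECTED layers and the
+ * three connected sub-layers that make up an RNN layer. */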
+void save_connected_weights(layer l, FILE *fp)
+{
+#ifdef GPU
+    if(gpu_index >= 0){
+        pull_connected_layer(l);
+    }
+#endif
+    fwrite(l.biases, sizeof(float), l.outputs, fp);
+    fwrite(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+    if (l.batch_normalize){
+        fwrite(l.scales, sizeof(float), l.outputs, fp);
+        fwrite(l.rolling_mean, sizeof(float), l.outputs, fp);
+        fwrite(l.rolling_variance, sizeof(float), l.outputs, fp);
+    }
+}
+
 void save_weights_upto(network net, char *filename, int cutoff)
 {
     fprintf(stderr, "Saving weights to %s\n", filename);
     FILE *fp = fopen(filename, "w");
     if(!fp) file_error(filename);
 
-    fwrite(&net.learning_rate, sizeof(float), 1, fp);
-    fwrite(&net.momentum, sizeof(float), 1, fp);
-    fwrite(&net.decay, sizeof(float), 1, fp);
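+    /* replace the old learning_rate/momentum/decay floats with a
+     * major/minor/revision version header */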
+    int major = 0;
+    int minor = 1;
+    int revision = 0;
+    fwrite(&major, sizeof(int), 1, fp);
+    fwrite(&minor, sizeof(int), 1, fp);
+    fwrite(&revision, sizeof(int), 1, fp);
     fwrite(net.seen, sizeof(int), 1, fp);
 
     int i;
@@ -619,13 +750,21 @@
             }
             fwrite(l.filters, sizeof(float), num, fp);
         } if(l.type == CONNECTED){
+            save_connected_weights(l, fp);
+        } if(l.type == RNN){
+            save_connected_weights(*(l.input_layer), fp);
+            save_connected_weights(*(l.self_layer), fp);
+            save_connected_weights(*(l.output_layer), fp);
+        } if(l.type == LOCAL){
 #ifdef GPU
             if(gpu_index >= 0){
-                pull_connected_layer(l);
+                pull_local_layer(l);
             }
 #endif
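+            /* a locally connected layer learns a separate filter for every
+             * output location, hence the locations factor in the weight count */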
+            int locations = l.out_w*l.out_h;
+            int size = l.size*l.size*l.c*l.n*locations;
             fwrite(l.biases, sizeof(float), l.outputs, fp);
-            fwrite(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+            fwrite(l.filters, sizeof(float), size, fp);
         }
     }
     fclose(fp);
@@ -635,6 +774,38 @@
     save_weights_upto(net, filename, net.n);
 }
 
+void transpose_matrix(float *a, int rows, int cols)
+{
+    float *transpose = calloc(rows*cols, sizeof(float));
+    int x, y;
+    for(x = 0; x < rows; ++x){
+        for(y = 0; y < cols; ++y){
+            transpose[y*rows + x] = a[x*cols + y];
+        }
+    }
+    memcpy(a, transpose, rows*cols*sizeof(float));
+    free(transpose);
+}
+
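+/* Counterpart to save_connected_weights; transposes weight matrices written
+ * by pre-versioning builds when the caller requests it. */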
+void load_connected_weights(layer l, FILE *fp, int transpose)
+{
+    fread(l.biases, sizeof(float), l.outputs, fp);
+    fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+    if(transpose){
+        transpose_matrix(l.weights, l.inputs, l.outputs);
+    }
+    if (l.batch_normalize && (!l.dontloadscales)){
+        fread(l.scales, sizeof(float), l.outputs, fp);
+        fread(l.rolling_mean, sizeof(float), l.outputs, fp);
+        fread(l.rolling_variance, sizeof(float), l.outputs, fp);
+    }
+#ifdef GPU
+    if(gpu_index >= 0){
+        push_connected_layer(l);
+    }
+#endif
+}
+
 void load_weights_upto(network *net, char *filename, int cutoff)
 {
     fprintf(stderr, "Loading weights from %s...", filename);
@@ -642,11 +813,14 @@
     FILE *fp = fopen(filename, "r");
     if(!fp) file_error(filename);
 
-    float garbage;
-    fread(&garbage, sizeof(float), 1, fp);
-    fread(&garbage, sizeof(float), 1, fp);
-    fread(&garbage, sizeof(float), 1, fp);
+    int major;
+    int minor;
+    int revision;
+    fread(&major, sizeof(int), 1, fp);
+    fread(&minor, sizeof(int), 1, fp);
+    fread(&revision, sizeof(int), 1, fp);
     fread(net->seen, sizeof(int), 1, fp);
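+    /* files saved before the version header stored three floats here, which
+     * decode to implausibly large ints; use that to spot old files whose
+     * connected-layer weights were written transposed */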
+    int transpose = (major > 1000) || (minor > 1000);
 
     int i;
     for(i = 0; i < net->n && i < cutoff; ++i){
@@ -661,6 +835,9 @@
                 fread(l.rolling_variance, sizeof(float), l.n, fp);
             }
             fread(l.filters, sizeof(float), num, fp);
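+            /* the "flipped" cfg option marks filters stored transposed on disk */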
+            if (l.flipped) {
+                transpose_matrix(l.filters, l.c*l.size*l.size, l.n);
+            }
 #ifdef GPU
             if(gpu_index >= 0){
                 push_convolutional_layer(l);
@@ -678,11 +855,21 @@
 #endif
         }
         if(l.type == CONNECTED){
+            load_connected_weights(l, fp, transpose);
+        }
+        if(l.type == RNN){
+            load_connected_weights(*(l.input_layer), fp, transpose);
+            load_connected_weights(*(l.self_layer), fp, transpose);
+            load_connected_weights(*(l.output_layer), fp, transpose);
+        }
+        if(l.type == LOCAL){
+            int locations = l.out_w*l.out_h;
+            int size = l.size*l.size*l.c*l.n*locations;
             fread(l.biases, sizeof(float), l.outputs, fp);
-            fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+            fread(l.filters, sizeof(float), size, fp);
 #ifdef GPU
             if(gpu_index >= 0){
-                push_connected_layer(l);
+                push_local_layer(l);
             }
 #endif
         }

--
Gitblit v1.10.0