From 2f62fe33c913cd9484fe7f2486889d12292c66e0 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sat, 07 Feb 2015 02:53:53 +0000
Subject: [PATCH] saving weight files as binaries, hell yeah

---
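Notes (not part of the commit message): the new save_weights()/load_weights()
in this patch define the binary weight-file layout as a small header of three
floats (learning_rate, momentum, decay) and one int (seen), followed by each
CONVOLUTIONAL or CONNECTED layer's biases and then its weights as raw floats,
in the order the layers appear in the network. A minimal standalone C sketch
of reading just that header (the file name here is hypothetical):

    #include <stdio.h>

    int main()
    {
        /* Header written by save_weights(): three floats then one int,
         * in native byte order. */
        FILE *fp = fopen("net.weights", "rb");
        if(!fp) return 1;
        float learning_rate, momentum, decay;
        int seen;
        fread(&learning_rate, sizeof(float), 1, fp);
        fread(&momentum, sizeof(float), 1, fp);
        fread(&decay, sizeof(float), 1, fp);
        fread(&seen, sizeof(int), 1, fp);
        printf("lr=%g momentum=%g decay=%g seen=%d\n",
                learning_rate, momentum, decay, seen);
        /* Per-layer data follows: for a convolutional layer, n biases then
         * n*c*size*size filter values; for a connected layer, outputs
         * biases then outputs*inputs weights (all float). */
        fclose(fp);
        return 0;
    }
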
 src/parser.c |  114 ++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 101 insertions(+), 13 deletions(-)

diff --git a/src/parser.c b/src/parser.c
index 768f48b..6a107cc 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -16,7 +16,6 @@
 #include "list.h"
 #include "option_list.h"
 #include "utils.h"
-#include "opencl.h"
 
 typedef struct{
     char *type;
@@ -87,6 +86,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         learning_rate = option_find_float_quiet(options, "learning_rate", net->learning_rate);
         momentum = option_find_float_quiet(options, "momentum", net->momentum);
@@ -103,7 +103,7 @@
     parse_data(weights, layer->filters, c*n*size*size);
     parse_data(biases, layer->biases, n);
     #ifdef GPU
-    push_convolutional_layer(*layer);
+    if(weights || biases) push_convolutional_layer(*layer);
     #endif
     option_unused(options);
     return layer;
@@ -137,7 +137,7 @@
     parse_data(biases, layer->biases, output);
     parse_data(weights, layer->weights, input*output);
     #ifdef GPU
-    push_connected_layer(*layer);
+    if(weights || biases) push_connected_layer(*layer);
     #endif
     option_unused(options);
     return layer;
@@ -149,6 +149,7 @@
     if(count == 0){
         input = option_find_int(options, "input",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -163,6 +164,7 @@
     if(count == 0){
         input = option_find_int(options, "input",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -191,6 +193,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -213,6 +216,7 @@
         w = option_find_int(options, "width",1);
         c = option_find_int(options, "channels",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -225,6 +229,7 @@
     return layer;
 }
 
+/*
 freeweight_layer *parse_freeweight(list *options, network *net, int count)
 {
     int input;
@@ -238,6 +243,7 @@
     option_unused(options);
     return layer;
 }
+*/
 
 dropout_layer *parse_dropout(list *options, network *net, int count)
 {
@@ -252,6 +258,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -272,6 +279,7 @@
         w = option_find_int(options, "width",1);
         c = option_find_int(options, "channels",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -327,9 +335,10 @@
             net.types[count] = DROPOUT;
             net.layers[count] = layer;
         }else if(is_freeweight(s)){
-            freeweight_layer *layer = parse_freeweight(options, &net, count);
-            net.types[count] = FREEWEIGHT;
-            net.layers[count] = layer;
+            //freeweight_layer *layer = parse_freeweight(options, &net, count);
+            //net.types[count] = FREEWEIGHT;
+            //net.layers[count] = layer;
+            fprintf(stderr, "Type not recognized: %s\n", s->type);
         }else{
             fprintf(stderr, "Type not recognized: %s\n", s->type);
         }
@@ -442,7 +451,7 @@
 void print_convolutional_cfg(FILE *fp, convolutional_layer *l, network net, int count)
 {
     #ifdef GPU
-    if(gpu_index >= 0) pull_convolutional_layer(*l);
+    if(gpu_index >= 0)  pull_convolutional_layer(*l);
     #endif
     int i;
     fprintf(fp, "[convolutional]\n");
@@ -453,8 +462,9 @@
                 "channels=%d\n"
                 "learning_rate=%g\n"
                 "momentum=%g\n"
-                "decay=%g\n",
-                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay);
+                "decay=%g\n"
+                "seen=%d\n",
+                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay, net.seen);
     } else {
         if(l->learning_rate != net.learning_rate)
             fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -508,8 +518,9 @@
                 "input=%d\n"
                 "learning_rate=%g\n"
                 "momentum=%g\n"
-                "decay=%g\n",
-                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay);
+                "decay=%g\n"
+                "seen=%d\n",
+                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay, net.seen);
     } else {
         if(l->learning_rate != net.learning_rate)
             fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -540,8 +551,9 @@
                 "channels=%d\n"
                 "learning_rate=%g\n"
                 "momentum=%g\n"
-                "decay=%g\n",
-                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay);
+                "decay=%g\n"
+                "seen=%d\n",
+                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay, net.seen);
     }
     fprintf(fp, "crop_height=%d\ncrop_width=%d\nflip=%d\n\n", l->crop_height, l->crop_width, l->flip);
 }
@@ -585,6 +597,82 @@
     fprintf(fp, "\n");
 }
 
+void save_weights(network net, char *filename)
+{
+    printf("Saving weights to %s\n", filename);
+    FILE *fp = fopen(filename, "w");
+    if(!fp) file_error(filename);
+
+    fwrite(&net.learning_rate, sizeof(float), 1, fp);
+    fwrite(&net.momentum, sizeof(float), 1, fp);
+    fwrite(&net.decay, sizeof(float), 1, fp);
+    fwrite(&net.seen, sizeof(int), 1, fp);
+
+    int i;
+    for(i = 0; i < net.n; ++i){
+        if(net.types[i] == CONVOLUTIONAL){
+            convolutional_layer layer = *(convolutional_layer *) net.layers[i];
+            #ifdef GPU
+            if(gpu_index >= 0){
+                pull_convolutional_layer(layer);
+            }
+            #endif
+            int num = layer.n*layer.c*layer.size*layer.size;
+            fwrite(layer.biases, sizeof(float), layer.n, fp);
+            fwrite(layer.filters, sizeof(float), num, fp);
+        }
+        if(net.types[i] == CONNECTED){
+            connected_layer layer = *(connected_layer *) net.layers[i];
+            #ifdef GPU
+            if(gpu_index >= 0){
+                pull_connected_layer(layer);
+            }
+            #endif
+            fwrite(layer.biases, sizeof(float), layer.outputs, fp);
+            fwrite(layer.weights, sizeof(float), layer.outputs*layer.inputs, fp);
+        }
+    }
+    fclose(fp);
+}
+
+void load_weights(network *net, char *filename)
+{
+    printf("Loading weights from %s\n", filename);
+    FILE *fp = fopen(filename, "r");
+    if(!fp) file_error(filename);
+
+    fread(&net->learning_rate, sizeof(float), 1, fp);
+    fread(&net->momentum, sizeof(float), 1, fp);
+    fread(&net->decay, sizeof(float), 1, fp);
+    fread(&net->seen, sizeof(int), 1, fp);
+    set_learning_network(net, net->learning_rate, net->momentum, net->decay);
+    
+    int i;
+    for(i = 0; i < net->n; ++i){
+        if(net->types[i] == CONVOLUTIONAL){
+            convolutional_layer layer = *(convolutional_layer *) net->layers[i];
+            int num = layer.n*layer.c*layer.size*layer.size;
+            fread(layer.biases, sizeof(float), layer.n, fp);
+            fread(layer.filters, sizeof(float), num, fp);
+            #ifdef GPU
+            if(gpu_index >= 0){
+                push_convolutional_layer(layer);
+            }
+            #endif
+        }
+        if(net->types[i] == CONNECTED){
+            connected_layer layer = *(connected_layer *) net->layers[i];
+            fread(layer.biases, sizeof(float), layer.outputs, fp);
+            fread(layer.weights, sizeof(float), layer.outputs*layer.inputs, fp);
+            #ifdef GPU
+            if(gpu_index >= 0){
+                push_connected_layer(layer);
+            }
+            #endif
+        }
+    }
+    fclose(fp);
+}
 
 void save_network(network net, char *filename)
 {

--
Gitblit v1.10.0