From 23955b9fa0a29465ad2a2d13c445b49e6d5adef2 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 08 Feb 2016 19:50:45 +0000
Subject: [PATCH] read weight files in binary mode; parse RNN and activation layers
---
src/parser.c | 132 +++++++++++++++++++++++++++++++++++++-------
1 file changed, 111 insertions(+), 21 deletions(-)
diff --git a/src/parser.c b/src/parser.c
index 8efafad..8051fd7 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -7,9 +7,11 @@
#include "crop_layer.h"
#include "cost_layer.h"
#include "convolutional_layer.h"
+#include "activation_layer.h"
#include "normalization_layer.h"
#include "deconvolutional_layer.h"
#include "connected_layer.h"
+#include "rnn_layer.h"
#include "maxpool_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
@@ -29,9 +31,11 @@
int is_network(section *s);
int is_convolutional(section *s);
+int is_activation(section *s);
int is_local(section *s);
int is_deconvolutional(section *s);
int is_connected(section *s);
+int is_rnn(section *s);
int is_maxpool(section *s);
int is_avgpool(section *s);
int is_dropout(section *s);
@@ -83,6 +87,7 @@
int w;
int c;
int index;
+ int time_steps;
} size_params;
deconvolutional_layer parse_deconvolutional(list *options, size_params params)
@@ -149,8 +154,9 @@
batch=params.batch;
if(!(h && w && c)) error("Layer before convolutional layer must output image.");
int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+ int binary = option_find_int_quiet(options, "binary", 0);
- convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation, batch_normalize);
+ convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation, batch_normalize, binary);
layer.flipped = option_find_int_quiet(options, "flipped", 0);
char *weights = option_find_str(options, "weights", 0);
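
The `binary` key is read with `option_find_int_quiet`, so it defaults to 0 and existing cfg files parse unchanged; when set, it is passed as the new final argument to `make_convolutional_layer`. A minimal cfg section exercising the flag might look like this (the filter sizes are illustrative, not taken from this patch):

    [convolutional]
    batch_normalize=1
    # new in this patch; omitted means 0
    binary=1
    filters=64
    size=3
    stride=1
    pad=1
    activation=leaky
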
@@ -163,13 +169,30 @@
return layer;
}
+layer parse_rnn(list *options, size_params params)
+{
+ int output = option_find_int(options, "output",1);
+ int hidden = option_find_int(options, "hidden",1);
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+ int logistic = option_find_int_quiet(options, "logistic", 0);
+
+ layer l = make_rnn_layer(params.batch, params.inputs, hidden, output, params.time_steps, activation, batch_normalize, logistic);
+
+ l.shortcut = option_find_int_quiet(options, "shortcut", 0);
+
+ return l;
+}
+
connected_layer parse_connected(list *options, size_params params)
{
int output = option_find_int(options, "output",1);
char *activation_s = option_find_str(options, "activation", "logistic");
ACTIVATION activation = get_activation(activation_s);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- connected_layer layer = make_connected_layer(params.batch, params.inputs, output, activation);
+ connected_layer layer = make_connected_layer(params.batch, params.inputs, output, activation, batch_normalize);
char *weights = option_find_str(options, "weights", 0);
char *biases = option_find_str(options, "biases", 0);
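
`parse_rnn` reads `output`, `hidden`, `activation`, and the quiet flags `batch_normalize`, `logistic`, and `shortcut` from its own section, while the unroll length comes from `params.time_steps`, which is parsed from the `[net]` section further down. A sketch of a cfg pairing the two, with illustrative sizes:

    [net]
    batch=128
    subdivisions=1
    # unroll each stream over 3 steps
    time_steps=3

    [rnn]
    batch_normalize=1
    output=256
    hidden=256
    activation=tanh
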
@@ -183,8 +206,9 @@
softmax_layer parse_softmax(list *options, size_params params)
{
- int groups = option_find_int(options, "groups",1);
+ int groups = option_find_int_quiet(options, "groups",1);
softmax_layer layer = make_softmax_layer(params.batch, params.inputs, groups);
+ layer.temperature = option_find_float_quiet(options, "temperature", 1);
return layer;
}
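
`temperature` divides the logits before exponentiation: values above 1 flatten the output distribution, values below 1 sharpen it, and 1 (the quiet default) reproduces plain softmax. A minimal sketch of the arithmetic, separate from darknet's own implementation in softmax_layer.c:

    #include <math.h>

    /* Temperature-scaled softmax over n logits. Subtracting the largest
     * logit first keeps expf() from overflowing; t = 1 gives plain softmax. */
    void softmax_temp(const float *input, int n, float t, float *output)
    {
        int i;
        float largest = input[0];
        for(i = 1; i < n; ++i) if(input[i] > largest) largest = input[i];
        float sum = 0;
        for(i = 0; i < n; ++i){
            output[i] = expf((input[i] - largest)/t);
            sum += output[i];
        }
        for(i = 0; i < n; ++i) output[i] /= sum;
    }
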
@@ -301,10 +325,31 @@
layer from = net.layers[index];
layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
+
+ char *activation_s = option_find_str(options, "activation", "linear");
+ ACTIVATION activation = get_activation(activation_s);
+ s.activation = activation;
return s;
}
+layer parse_activation(list *options, size_params params)
+{
+ char *activation_s = option_find_str(options, "activation", "linear");
+ ACTIVATION activation = get_activation(activation_s);
+
+ layer l = make_activation_layer(params.batch, params.inputs, activation);
+
+ l.out_h = params.h;
+ l.out_w = params.w;
+ l.out_c = params.c;
+ l.h = params.h;
+ l.w = params.w;
+ l.c = params.c;
+
+ return l;
+}
+
route_layer parse_route(list *options, size_params params, network net)
{
char *l = option_find(options, "layers");
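
`parse_activation` builds a layer that applies a nonlinearity to its input without changing its size; copying `h`, `w`, and `c` into both the input and output dimensions lets spatial layers follow it as if it were transparent. The key falls back to `linear` when absent, so the only field a cfg section needs is the activation itself:

    [activation]
    activation=leaky
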
@@ -365,7 +410,9 @@
net->momentum = option_find_float(options, "momentum", .9);
net->decay = option_find_float(options, "decay", .0001);
int subdivs = option_find_int(options, "subdivisions",1);
+ net->time_steps = option_find_int_quiet(options, "time_steps",1);
net->batch /= subdivs;
+ net->batch *= net->time_steps;
net->subdivisions = subdivs;
net->h = option_find_int_quiet(options, "height",0);
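
After this hunk `net->batch` counts rows per forward pass rather than sequences: `batch/subdivisions` independent streams, each unrolled `time_steps` steps. With illustrative numbers:

    /* batch = 128, subdivisions = 2, time_steps = 3 (not from the patch):
     * net->batch = 128/2 = 64   independent streams per minibatch
     * net->batch = 64*3  = 192  rows pushed through the network, i.e.
     *                           64 sequences of 3 consecutive steps */
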
@@ -433,6 +480,7 @@
params.c = net.c;
params.inputs = net.inputs;
params.batch = net.batch;
+ params.time_steps = net.time_steps;
n = n->next;
int count = 0;
@@ -447,8 +495,12 @@
l = parse_convolutional(options, params);
}else if(is_local(s)){
l = parse_local(options, params);
+ }else if(is_activation(s)){
+ l = parse_activation(options, params);
}else if(is_deconvolutional(s)){
l = parse_deconvolutional(options, params);
+ }else if(is_rnn(s)){
+ l = parse_rnn(options, params);
}else if(is_connected(s)){
l = parse_connected(options, params);
}else if(is_crop(s)){
@@ -530,11 +582,19 @@
return (strcmp(s->type, "[conv]")==0
|| strcmp(s->type, "[convolutional]")==0);
}
+int is_activation(section *s)
+{
+ return (strcmp(s->type, "[activation]")==0);
+}
int is_network(section *s)
{
return (strcmp(s->type, "[net]")==0
|| strcmp(s->type, "[network]")==0);
}
+int is_rnn(section *s)
+{
+ return (strcmp(s->type, "[rnn]")==0);
+}
int is_connected(section *s)
{
return (strcmp(s->type, "[conn]")==0
@@ -645,6 +705,22 @@
fclose(fp);
}
+void save_connected_weights(layer l, FILE *fp)
+{
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_connected_layer(l);
+ }
+#endif
+ fwrite(l.biases, sizeof(float), l.outputs, fp);
+ fwrite(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+ if (l.batch_normalize){
+ fwrite(l.scales, sizeof(float), l.outputs, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.outputs, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.outputs, fp);
+ }
+}
+
void save_weights_upto(network net, char *filename, int cutoff)
{
fprintf(stderr, "Saving weights to %s\n", filename);
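
`save_connected_weights` pins down the on-disk record for one connected layer, and the RNN case below reuses it three times. The layout it writes, assuming 32-bit floats throughout:

    /* One connected-layer record, all 32-bit floats, in write order:
     *   biases              l.outputs values
     *   weights             l.outputs * l.inputs values
     *   if l.batch_normalize:
     *     scales            l.outputs values
     *     rolling_mean      l.outputs values
     *     rolling_variance  l.outputs values
     * An RNN layer is three such records back to back:
     * input_layer, then self_layer, then output_layer. */
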
@@ -677,13 +753,11 @@
}
fwrite(l.filters, sizeof(float), num, fp);
} if(l.type == CONNECTED){
-#ifdef GPU
- if(gpu_index >= 0){
- pull_connected_layer(l);
- }
-#endif
- fwrite(l.biases, sizeof(float), l.outputs, fp);
- fwrite(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+ save_connected_weights(l, fp);
+ } if(l.type == RNN){
+ save_connected_weights(*(l.input_layer), fp);
+ save_connected_weights(*(l.self_layer), fp);
+ save_connected_weights(*(l.output_layer), fp);
} if(l.type == LOCAL){
#ifdef GPU
if(gpu_index >= 0){
@@ -716,11 +790,30 @@
free(transpose);
}
+void load_connected_weights(layer l, FILE *fp, int transpose)
+{
+ fread(l.biases, sizeof(float), l.outputs, fp);
+ fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+ if(transpose){
+ transpose_matrix(l.weights, l.inputs, l.outputs);
+ }
+ if (l.batch_normalize && (!l.dontloadscales)){
+ fread(l.scales, sizeof(float), l.outputs, fp);
+ fread(l.rolling_mean, sizeof(float), l.outputs, fp);
+ fread(l.rolling_variance, sizeof(float), l.outputs, fp);
+ }
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_connected_layer(l);
+ }
+#endif
+}
+
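
The `transpose` flag (derived from the version header just below) marks weight files written before connected layers switched weight storage order; `transpose_matrix` rewrites the `inputs x outputs` matrix in place. A sketch consistent with the `free(transpose)` tail visible at the top of this hunk:

    #include <stdlib.h>
    #include <string.h>

    /* In-place transpose of a rows x cols matrix via a scratch buffer,
     * matching the call transpose_matrix(l.weights, l.inputs, l.outputs). */
    void transpose_matrix(float *a, int rows, int cols)
    {
        float *transpose = calloc(rows*cols, sizeof(float));
        int x, y;
        for(x = 0; x < rows; ++x){
            for(y = 0; y < cols; ++y){
                transpose[y*rows + x] = a[x*cols + y];
            }
        }
        memcpy(a, transpose, rows*cols*sizeof(float));
        free(transpose);
    }
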
void load_weights_upto(network *net, char *filename, int cutoff)
{
fprintf(stderr, "Loading weights from %s...", filename);
fflush(stdout);
- FILE *fp = fopen(filename, "r");
+ FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
int major;
@@ -730,6 +823,7 @@
fread(&minor, sizeof(int), 1, fp);
fread(&revision, sizeof(int), 1, fp);
fread(net->seen, sizeof(int), 1, fp);
+ int transpose = (major > 1000) || (minor > 1000);
int i;
for(i = 0; i < net->n && i < cutoff; ++i){
@@ -764,16 +858,12 @@
#endif
}
if(l.type == CONNECTED){
- fread(l.biases, sizeof(float), l.outputs, fp);
- fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
- if(major > 1000 || minor > 1000){
- transpose_matrix(l.weights, l.inputs, l.outputs);
- }
-#ifdef GPU
- if(gpu_index >= 0){
- push_connected_layer(l);
- }
-#endif
+ load_connected_weights(l, fp, transpose);
+ }
+ if(l.type == RNN){
+ load_connected_weights(*(l.input_layer), fp, transpose);
+ load_connected_weights(*(l.self_layer), fp, transpose);
+ load_connected_weights(*(l.output_layer), fp, transpose);
}
if(l.type == LOCAL){
int locations = l.out_w*l.out_h;
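
The `"rb"` mode is the fix the subject line refers to. On POSIX systems `"r"` and `"rb"` open identical streams, but on platforms that distinguish text from binary (notably Windows), text mode collapses CR/LF pairs and treats byte 0x1A as end-of-file, silently corrupting or truncating raw float data. A minimal sketch of the corrected read path:

    #include <stdio.h>

    /* Read n floats previously written with fwrite. Opening with "rb"
     * guarantees the bytes come back exactly as stored; with "r" on
     * Windows, any 0x0D 0x0A pair inside the float data is mangled. */
    int read_floats(const char *filename, float *buf, size_t n)
    {
        FILE *fp = fopen(filename, "rb");
        if(!fp) return 0;
        size_t got = fread(buf, sizeof(float), n, fp);
        fclose(fp);
        return got == n;
    }
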
--
Gitblit v1.10.0