From 8c5364f58569eaeb5582a4915b36b24fc5570c76 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 09 Nov 2015 19:31:39 +0000
Subject: [PATCH] New YOLO
---
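Notes:

* [convolutional] gains a batch_normalize option; when set, scales,
  rolling_mean, and rolling_variance are saved to and loaded from the weight
  file alongside the biases and filters.
* [detection] drops joint, objectness, and background in favor of num, side,
  softmax, sqrt, forced, jitter, and the loss weights coord_scale,
  object_scale, noobject_scale, and class_scale.
* New layer types: [avgpool] (also [avg]) and [lrn] (also [normalization]).
* [net] gains learning-rate policies (constant, step, steps, exp, sigmoid,
  poly) with their parameters, plus max_batches; the seen option is removed
  and net.seen is now written and read through a pointer.
* [cost] gains scale, [crop] gains shift and noadjust, and dropout layers now
  carry output dimensions.
* Any layer can set dontload/dontloadscales to skip its weights when loading;
  option_unused() is now called once per section in parse_network_cfg().
* save_weights() becomes a wrapper around save_weights_upto(), the
  deconvolutional save branch is dropped, and save_weights_double() writes a
  width-doubled copy of each convolutional layer; load_weights_upto() now
  discards the learning_rate/momentum/decay header fields instead of loading
  them.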
src/parser.c | 242 +++++++++++++++++++++++++++++++++++++-----------
1 file changed, 186 insertions(+), 56 deletions(-)
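For reference, a minimal standalone sketch of the comma-separated
steps/scales parsing that parse_net_options() gains in this patch. The cfg
values and the printed interpretation are hypothetical, and the guard on
strchr() is mine: the patch itself advances the pointers unconditionally and
relies on the loop ending before the NULL+1 result is used.

    /* Standalone sketch (not part of the patch) of the "steps"/"scales"
     * parsing added to parse_net_options() below. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char steps_s[]  = "100,20000,30000"; /* e.g. cfg line: steps=100,20000,30000 */
        char scales_s[] = ".5,.1,.1";        /* e.g. cfg line: scales=.5,.1,.1 */
        char *l = steps_s;
        char *p = scales_s;

        /* number of entries = number of commas + 1, as in the patch */
        int n = 1;
        for (int i = 0; l[i]; ++i) if (l[i] == ',') ++n;

        int   *steps  = calloc(n, sizeof(int));
        float *scales = calloc(n, sizeof(float));
        for (int i = 0; i < n; ++i) {
            steps[i]  = atoi(l);  /* atoi/atof stop reading at the next comma */
            scales[i] = atof(p);
            /* advance past the comma; guarded here, unguarded in the patch */
            char *lc = strchr(l, ','), *pc = strchr(p, ',');
            if (!lc || !pc) break;
            l = lc + 1;
            p = pc + 1;
        }
        for (int i = 0; i < n; ++i)
            printf("after batch %d, multiply learning rate by %g\n",
                   steps[i], scales[i]);
        free(steps);
        free(scales);
        return 0;
    }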
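Likewise, a hypothetical reader illustrating the weight-file layout produced
by save_weights_upto() after this patch, for one convolutional layer with
batch_normalize set. The layer geometry (16 filters, 3 channels, 3x3 kernel)
is made up; in darknet it would come from the .cfg via parse_network_cfg().

    /* Standalone sketch (not part of the patch) of the on-disk layout
     * written by save_weights_upto() below. */
    #include <stdio.h>
    #include <stdlib.h>

    static void skip_conv(FILE *fp, int n, int c, int size, int batch_normalize)
    {
        int num = n*c*size*size;
        float *buf = malloc(num * sizeof(float));
        fread(buf, sizeof(float), n, fp);        /* biases: n floats */
        if (batch_normalize) {
            fread(buf, sizeof(float), n, fp);    /* scales */
            fread(buf, sizeof(float), n, fp);    /* rolling_mean */
            fread(buf, sizeof(float), n, fp);    /* rolling_variance */
        }
        fread(buf, sizeof(float), num, fp);      /* filters: n*c*size*size floats */
        free(buf);
    }

    int main(int argc, char **argv)
    {
        if (argc < 2) return 1;
        FILE *fp = fopen(argv[1], "rb");
        if (!fp) return 1;

        /* header: learning_rate, momentum, decay (floats), then seen (int);
         * load_weights_upto() below now discards the three floats */
        float header[3];
        int seen;
        fread(header, sizeof(float), 3, fp);
        fread(&seen, sizeof(int), 1, fp);
        printf("seen %d images\n", seen);

        /* hypothetical first layer: 16 filters, 3 channels, 3x3, BN on */
        skip_conv(fp, 16, 3, 3, 1);

        fclose(fp);
        return 0;
    }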
diff --git a/src/parser.c b/src/parser.c
index 240c6ee..b095294 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -7,12 +7,14 @@
#include "crop_layer.h"
#include "cost_layer.h"
#include "convolutional_layer.h"
+#include "normalization_layer.h"
#include "deconvolutional_layer.h"
#include "connected_layer.h"
#include "maxpool_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "detection_layer.h"
+#include "avgpool_layer.h"
#include "route_layer.h"
#include "list.h"
#include "option_list.h"
@@ -28,8 +30,10 @@
int is_deconvolutional(section *s);
int is_connected(section *s);
int is_maxpool(section *s);
+int is_avgpool(section *s);
int is_dropout(section *s);
int is_softmax(section *s);
+int is_normalization(section *s);
int is_crop(section *s);
int is_cost(section *s);
int is_detection(section *s);
@@ -100,7 +104,6 @@
#ifdef GPU
if(weights || biases) push_deconvolutional_layer(layer);
#endif
- option_unused(options);
return layer;
}
@@ -119,8 +122,9 @@
c = params.c;
batch=params.batch;
if(!(h && w && c)) error("Layer before convolutional layer must output image.");
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
- convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation);
+ convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation, batch_normalize);
char *weights = option_find_str(options, "weights", 0);
char *biases = option_find_str(options, "biases", 0);
@@ -129,7 +133,6 @@
#ifdef GPU
if(weights || biases) push_convolutional_layer(layer);
#endif
- option_unused(options);
return layer;
}
@@ -148,7 +151,6 @@
#ifdef GPU
if(weights || biases) push_connected_layer(layer);
#endif
- option_unused(options);
return layer;
}
@@ -156,7 +158,6 @@
{
int groups = option_find_int(options, "groups",1);
softmax_layer layer = make_softmax_layer(params.batch, params.inputs, groups);
- option_unused(options);
return layer;
}
@@ -165,11 +166,19 @@
int coords = option_find_int(options, "coords", 1);
int classes = option_find_int(options, "classes", 1);
int rescore = option_find_int(options, "rescore", 0);
- int joint = option_find_int(options, "joint", 0);
- int objectness = option_find_int(options, "objectness", 0);
- int background = option_find_int(options, "background", 1);
- detection_layer layer = make_detection_layer(params.batch, params.inputs, classes, coords, joint, rescore, background, objectness);
- option_unused(options);
+ int num = option_find_int(options, "num", 1);
+ int side = option_find_int(options, "side", 7);
+ detection_layer layer = make_detection_layer(params.batch, params.inputs, num, side, classes, coords, rescore);
+
+ layer.softmax = option_find_int(options, "softmax", 0);
+ layer.sqrt = option_find_int(options, "sqrt", 0);
+
+ layer.coord_scale = option_find_float(options, "coord_scale", 1);
+ layer.forced = option_find_int(options, "forced", 0);
+ layer.object_scale = option_find_float(options, "object_scale", 1);
+ layer.noobject_scale = option_find_float(options, "noobject_scale", 1);
+ layer.class_scale = option_find_float(options, "class_scale", 1);
+ layer.jitter = option_find_float(options, "jitter", .2);
return layer;
}
@@ -177,8 +186,8 @@
{
char *type_s = option_find_str(options, "type", "sse");
COST_TYPE type = get_cost_type(type_s);
- cost_layer layer = make_cost_layer(params.batch, params.inputs, type);
- option_unused(options);
+ float scale = option_find_float_quiet(options, "scale",1);
+ cost_layer layer = make_cost_layer(params.batch, params.inputs, type, scale);
return layer;
}
@@ -198,8 +207,11 @@
batch=params.batch;
if(!(h && w && c)) error("Layer before crop layer must output image.");
+ int noadjust = option_find_int_quiet(options, "noadjust",0);
+
crop_layer l = make_crop_layer(batch,h,w,c,crop_height,crop_width,flip, angle, saturation, exposure);
- option_unused(options);
+ l.shift = option_find_float(options, "shift", 0);
+ l.noadjust = noadjust;
return l;
}
@@ -216,7 +228,19 @@
if(!(h && w && c)) error("Layer before maxpool layer must output image.");
maxpool_layer layer = make_maxpool_layer(batch,h,w,c,size,stride);
- option_unused(options);
+ return layer;
+}
+
+avgpool_layer parse_avgpool(list *options, size_params params)
+{
+ int batch,w,h,c;
+ w = params.w;
+ h = params.h;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before avgpool layer must output image.");
+
+ avgpool_layer layer = make_avgpool_layer(batch,w,h,c);
return layer;
}
@@ -224,10 +248,22 @@
{
float probability = option_find_float(options, "probability", .5);
dropout_layer layer = make_dropout_layer(params.batch, params.inputs, probability);
- option_unused(options);
+ layer.out_w = params.w;
+ layer.out_h = params.h;
+ layer.out_c = params.c;
return layer;
}
+layer parse_normalization(list *options, size_params params)
+{
+ float alpha = option_find_float(options, "alpha", .0001);
+ float beta = option_find_float(options, "beta" , .75);
+ float kappa = option_find_float(options, "kappa", 1);
+ int size = option_find_int(options, "size", 5);
+ layer l = make_normalization_layer(params.batch, params.w, params.h, params.c, size, alpha, beta, kappa);
+ return l;
+}
+
route_layer parse_route(list *options, size_params params, network net)
{
char *l = option_find(options, "layers");
@@ -265,17 +301,27 @@
}
}
- option_unused(options);
return layer;
}
+learning_rate_policy get_policy(char *s)
+{
+ if (strcmp(s, "poly")==0) return POLY;
+ if (strcmp(s, "constant")==0) return CONSTANT;
+ if (strcmp(s, "step")==0) return STEP;
+ if (strcmp(s, "exp")==0) return EXP;
+ if (strcmp(s, "sigmoid")==0) return SIG;
+ if (strcmp(s, "steps")==0) return STEPS;
+ fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
+ return CONSTANT;
+}
+
void parse_net_options(list *options, network *net)
{
net->batch = option_find_int(options, "batch",1);
net->learning_rate = option_find_float(options, "learning_rate", .001);
net->momentum = option_find_float(options, "momentum", .9);
net->decay = option_find_float(options, "decay", .0001);
- net->seen = option_find_int(options, "seen",0);
int subdivs = option_find_int(options, "subdivisions",1);
net->batch /= subdivs;
net->subdivisions = subdivs;
@@ -284,8 +330,47 @@
net->w = option_find_int_quiet(options, "width",0);
net->c = option_find_int_quiet(options, "channels",0);
net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
+
if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
- option_unused(options);
+
+ char *policy_s = option_find_str(options, "policy", "constant");
+ net->policy = get_policy(policy_s);
+ if(net->policy == STEP){
+ net->step = option_find_int(options, "step", 1);
+ net->scale = option_find_float(options, "scale", 1);
+ } else if (net->policy == STEPS){
+ char *l = option_find(options, "steps");
+ char *p = option_find(options, "scales");
+ if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
+
+ int len = strlen(l);
+ int n = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (l[i] == ',') ++n;
+ }
+ int *steps = calloc(n, sizeof(int));
+ float *scales = calloc(n, sizeof(float));
+ for(i = 0; i < n; ++i){
+ int step = atoi(l);
+ float scale = atof(p);
+ l = strchr(l, ',')+1;
+ p = strchr(p, ',')+1;
+ steps[i] = step;
+ scales[i] = scale;
+ }
+ net->scales = scales;
+ net->steps = steps;
+ net->num_steps = n;
+ } else if (net->policy == EXP){
+ net->gamma = option_find_float(options, "gamma", 1);
+ } else if (net->policy == SIG){
+ net->gamma = option_find_float(options, "gamma", 1);
+ net->step = option_find_int(options, "step", 1);
+ } else if (net->policy == POLY){
+ net->power = option_find_float(options, "power", 1);
+ }
+ net->max_batches = option_find_int(options, "max_batches", 0);
}
network parse_network_cfg(char *filename)
@@ -309,6 +394,7 @@
n = n->next;
int count = 0;
+ free_section(s);
while(n){
fprintf(stderr, "%d: ", count);
s = (section *)n->val;
@@ -328,21 +414,28 @@
l = parse_detection(options, params);
}else if(is_softmax(s)){
l = parse_softmax(options, params);
+ }else if(is_normalization(s)){
+ l = parse_normalization(options, params);
}else if(is_maxpool(s)){
l = parse_maxpool(options, params);
+ }else if(is_avgpool(s)){
+ l = parse_avgpool(options, params);
}else if(is_route(s)){
l = parse_route(options, params, net);
}else if(is_dropout(s)){
l = parse_dropout(options, params);
l.output = net.layers[count-1].output;
l.delta = net.layers[count-1].delta;
- #ifdef GPU
+#ifdef GPU
l.output_gpu = net.layers[count-1].output_gpu;
l.delta_gpu = net.layers[count-1].delta_gpu;
- #endif
+#endif
}else{
fprintf(stderr, "Type not recognized: %s\n", s->type);
}
+ l.dontload = option_find_int_quiet(options, "dontload", 0);
+ l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
+ option_unused(options);
net.layers[count] = l;
free_section(s);
n = n->next;
@@ -397,11 +490,22 @@
return (strcmp(s->type, "[max]")==0
|| strcmp(s->type, "[maxpool]")==0);
}
+int is_avgpool(section *s)
+{
+ return (strcmp(s->type, "[avg]")==0
+ || strcmp(s->type, "[avgpool]")==0);
+}
int is_dropout(section *s)
{
return (strcmp(s->type, "[dropout]")==0);
}
+int is_normalization(section *s)
+{
+ return (strcmp(s->type, "[lrn]")==0
+ || strcmp(s->type, "[normalization]")==0);
+}
+
int is_softmax(section *s)
{
return (strcmp(s->type, "[soft]")==0
@@ -412,24 +516,6 @@
return (strcmp(s->type, "[route]")==0);
}
-int read_option(char *s, list *options)
-{
- size_t i;
- size_t len = strlen(s);
- char *val = 0;
- for(i = 0; i < len; ++i){
- if(s[i] == '='){
- s[i] = '\0';
- val = s+i+1;
- break;
- }
- }
- if(i == len-1) return 0;
- char *key = s;
- option_insert(options, key, val);
- return 1;
-}
-
list *read_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
@@ -465,7 +551,46 @@
return sections;
}
-void save_weights(network net, char *filename)
+void save_weights_double(network net, char *filename)
+{
+ fprintf(stderr, "Saving doubled weights to %s\n", filename);
+ FILE *fp = fopen(filename, "w");
+ if(!fp) file_error(filename);
+
+ fwrite(&net.learning_rate, sizeof(float), 1, fp);
+ fwrite(&net.momentum, sizeof(float), 1, fp);
+ fwrite(&net.decay, sizeof(float), 1, fp);
+ fwrite(net.seen, sizeof(int), 1, fp);
+
+ int i,j,k;
+ for(i = 0; i < net.n; ++i){
+ layer l = net.layers[i];
+ if(l.type == CONVOLUTIONAL){
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_convolutional_layer(l);
+ }
+#endif
+ float zero = 0;
+ fwrite(l.biases, sizeof(float), l.n, fp);
+ fwrite(l.biases, sizeof(float), l.n, fp);
+
+ for (j = 0; j < l.n; ++j){
+ int index = j*l.c*l.size*l.size;
+ fwrite(l.filters+index, sizeof(float), l.c*l.size*l.size, fp);
+ for (k = 0; k < l.c*l.size*l.size; ++k) fwrite(&zero, sizeof(float), 1, fp);
+ }
+ for (j = 0; j < l.n; ++j){
+ int index = j*l.c*l.size*l.size;
+ for (k = 0; k < l.c*l.size*l.size; ++k) fwrite(&zero, sizeof(float), 1, fp);
+ fwrite(l.filters+index, sizeof(float), l.c*l.size*l.size, fp);
+ }
+ }
+ }
+ fclose(fp);
+}
+
+void save_weights_upto(network net, char *filename, int cutoff)
{
fprintf(stderr, "Saving weights to %s\n", filename);
FILE *fp = fopen(filename, "w");
@@ -474,10 +599,10 @@
fwrite(&net.learning_rate, sizeof(float), 1, fp);
fwrite(&net.momentum, sizeof(float), 1, fp);
fwrite(&net.decay, sizeof(float), 1, fp);
- fwrite(&net.seen, sizeof(int), 1, fp);
+ fwrite(net.seen, sizeof(int), 1, fp);
int i;
- for(i = 0; i < net.n; ++i){
+ for(i = 0; i < net.n && i < cutoff; ++i){
layer l = net.layers[i];
if(l.type == CONVOLUTIONAL){
#ifdef GPU
@@ -487,19 +612,13 @@
#endif
int num = l.n*l.c*l.size*l.size;
fwrite(l.biases, sizeof(float), l.n, fp);
- fwrite(l.filters, sizeof(float), num, fp);
- }
- if(l.type == DECONVOLUTIONAL){
-#ifdef GPU
- if(gpu_index >= 0){
- pull_deconvolutional_layer(l);
+ if (l.batch_normalize){
+ fwrite(l.scales, sizeof(float), l.n, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.n, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.n, fp);
}
-#endif
- int num = l.n*l.c*l.size*l.size;
- fwrite(l.biases, sizeof(float), l.n, fp);
fwrite(l.filters, sizeof(float), num, fp);
- }
- if(l.type == CONNECTED){
+ } if(l.type == CONNECTED){
#ifdef GPU
if(gpu_index >= 0){
pull_connected_layer(l);
@@ -511,6 +630,10 @@
}
fclose(fp);
}
+void save_weights(network net, char *filename)
+{
+ save_weights_upto(net, filename, net.n);
+}
void load_weights_upto(network *net, char *filename, int cutoff)
{
@@ -519,17 +642,24 @@
FILE *fp = fopen(filename, "r");
if(!fp) file_error(filename);
- fread(&net->learning_rate, sizeof(float), 1, fp);
- fread(&net->momentum, sizeof(float), 1, fp);
- fread(&net->decay, sizeof(float), 1, fp);
- fread(&net->seen, sizeof(int), 1, fp);
+ float garbage;
+ fread(&garbage, sizeof(float), 1, fp);
+ fread(&garbage, sizeof(float), 1, fp);
+ fread(&garbage, sizeof(float), 1, fp);
+ fread(net->seen, sizeof(int), 1, fp);
int i;
for(i = 0; i < net->n && i < cutoff; ++i){
layer l = net->layers[i];
+ if (l.dontload) continue;
if(l.type == CONVOLUTIONAL){
int num = l.n*l.c*l.size*l.size;
fread(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize && (!l.dontloadscales)){
+ fread(l.scales, sizeof(float), l.n, fp);
+ fread(l.rolling_mean, sizeof(float), l.n, fp);
+ fread(l.rolling_variance, sizeof(float), l.n, fp);
+ }
fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
if(gpu_index >= 0){
push_convolutional_layer(l);
}
--
Gitblit v1.10.0