From 8f1b4e0962857d402f9d017fcbf387ef0eceb7c4 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 01 Sep 2016 23:48:41 +0000
Subject: [PATCH] parser: add region/reorg layers, batchnorm weight I/O,
 convolutional/maxpool padding options, and a RANDOM LR policy

Adds cfg parsing for the new [region] and [reorg] layer types, save/load
routines for standalone batchnorm layer weights, explicit padding options
for convolutional and maxpool layers (with pad=1 meaning padding=size/2),
a RANDOM learning-rate policy, network-level augmentation options (angle,
saturation, exposure, hue) plus burn_in, and a shared net.workspace buffer
sized to the largest per-layer workspace. The old inline weights/biases
cfg options for convolutional, deconvolutional, and connected layers are
removed.
---
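Notes (not part of the commit message):

The diff adds parse_region() and parse_reorg(), so cfg files can declare
[region] and [reorg] sections. The fragment below is only a hypothetical
sketch using the option names those parsers read; the values are
placeholders, not recommended settings, and the region layer's output
count must match the preceding layer's outputs (enforced by the new
assert in parse_region()).

    [reorg]
    stride=2

    [region]
    coords=4
    classes=20
    num=5
    softmax=1
    jitter=.2
    rescore=1
    object_scale=5
    noobject_scale=1
    class_scale=1
    coord_scale=1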
src/parser.c | 156 ++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 127 insertions(+), 29 deletions(-)
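A note on the padding changes in the hunks below: [convolutional] now
accepts an explicit padding option, and pad=1 is shorthand for
padding = size/2 (integer division), e.g. size=3 with pad=1 gives
padding=1; [maxpool] gains a padding option defaulting to (size-1)/2.
The fragment below only illustrates the new options, with placeholder
values:

    [convolutional]
    filters=64
    size=3
    stride=1
    # pad=1 here is equivalent to padding=1 (size/2 with size=3)
    pad=1
    activation=leaky

    [maxpool]
    size=2
    stride=2
    # default padding is (size-1)/2 = 0; set padding= to override

The shared workspace is sized to the largest l.workspace_size across
layers; on GPU it is allocated as (workspace_size-1)/sizeof(float)+1
floats, i.e. the byte count rounded up to whole floats.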
diff --git a/src/parser.c b/src/parser.c
index 6c88fd5..626f510 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -2,7 +2,9 @@
#include <string.h>
#include <stdlib.h>
+#include "blas.h"
#include "parser.h"
+#include "assert.h"
#include "activations.h"
#include "crop_layer.h"
#include "cost_layer.h"
@@ -16,9 +18,11 @@
#include "gru_layer.h"
#include "crnn_layer.h"
#include "maxpool_layer.h"
+#include "reorg_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "detection_layer.h"
+#include "region_layer.h"
#include "avgpool_layer.h"
#include "local_layer.h"
#include "route_layer.h"
@@ -42,6 +46,7 @@
int is_gru(section *s);
int is_crnn(section *s);
int is_maxpool(section *s);
+int is_reorg(section *s);
int is_avgpool(section *s);
int is_dropout(section *s);
int is_softmax(section *s);
@@ -51,6 +56,7 @@
int is_shortcut(section *s);
int is_cost(section *s);
int is_detection(section *s);
+int is_region(section *s);
int is_route(section *s);
list *read_cfg(char *filename);
@@ -113,13 +119,6 @@
deconvolutional_layer layer = make_deconvolutional_layer(batch,h,w,c,n,size,stride,activation);
- char *weights = option_find_str(options, "weights", 0);
- char *biases = option_find_str(options, "biases", 0);
- parse_data(weights, layer.filters, c*n*size*size);
- parse_data(biases, layer.biases, n);
- #ifdef GPU
- if(weights || biases) push_deconvolutional_layer(layer);
- #endif
return layer;
}
@@ -149,7 +148,10 @@
int n = option_find_int(options, "filters",1);
int size = option_find_int(options, "size",1);
int stride = option_find_int(options, "stride",1);
- int pad = option_find_int(options, "pad",0);
+ int pad = option_find_int_quiet(options, "pad",0);
+ int padding = option_find_int_quiet(options, "padding",0);
+ if(pad) padding = size/2;
+
char *activation_s = option_find_str(options, "activation", "logistic");
ACTIVATION activation = get_activation(activation_s);
@@ -163,17 +165,10 @@
int binary = option_find_int_quiet(options, "binary", 0);
int xnor = option_find_int_quiet(options, "xnor", 0);
- convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,pad,activation, batch_normalize, binary, xnor);
+ convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,size,stride,padding,activation, batch_normalize, binary, xnor);
layer.flipped = option_find_int_quiet(options, "flipped", 0);
layer.dot = option_find_float_quiet(options, "dot", 0);
- char *weights = option_find_str(options, "weights", 0);
- char *biases = option_find_str(options, "biases", 0);
- parse_data(weights, layer.filters, c*n*size*size);
- parse_data(biases, layer.biases, n);
- #ifdef GPU
- if(weights || biases) push_convolutional_layer(layer);
- #endif
return layer;
}
@@ -227,13 +222,6 @@
connected_layer layer = make_connected_layer(params.batch, params.inputs, output, activation, batch_normalize);
- char *weights = option_find_str(options, "weights", 0);
- char *biases = option_find_str(options, "biases", 0);
- parse_data(biases, layer.biases, output);
- parse_data(weights, layer.weights, params.inputs*output);
- #ifdef GPU
- if(weights || biases) push_connected_layer(layer);
- #endif
return layer;
}
@@ -245,6 +233,32 @@
return layer;
}
+layer parse_region(list *options, size_params params)
+{
+ int coords = option_find_int(options, "coords", 4);
+ int classes = option_find_int(options, "classes", 20);
+ int num = option_find_int(options, "num", 1);
+
+ params.w = option_find_int(options, "side", params.w);
+ params.h = option_find_int(options, "side", params.h);
+
+ layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
+ assert(l.outputs == params.inputs);
+
+ l.log = option_find_int_quiet(options, "log", 0);
+ l.sqrt = option_find_int_quiet(options, "sqrt", 0);
+
+ l.softmax = option_find_int(options, "softmax", 0);
+ l.max_boxes = option_find_int_quiet(options, "max",30);
+ l.jitter = option_find_float(options, "jitter", .2);
+ l.rescore = option_find_int_quiet(options, "rescore",0);
+
+ l.coord_scale = option_find_float(options, "coord_scale", 1);
+ l.object_scale = option_find_float(options, "object_scale", 1);
+ l.noobject_scale = option_find_float(options, "noobject_scale", 1);
+ l.class_scale = option_find_float(options, "class_scale", 1);
+ return l;
+}
detection_layer parse_detection(list *options, size_params params)
{
int coords = option_find_int(options, "coords", 1);
@@ -257,12 +271,15 @@
layer.softmax = option_find_int(options, "softmax", 0);
layer.sqrt = option_find_int(options, "sqrt", 0);
+ layer.max_boxes = option_find_int_quiet(options, "max",30);
layer.coord_scale = option_find_float(options, "coord_scale", 1);
layer.forced = option_find_int(options, "forced", 0);
layer.object_scale = option_find_float(options, "object_scale", 1);
layer.noobject_scale = option_find_float(options, "noobject_scale", 1);
layer.class_scale = option_find_float(options, "class_scale", 1);
layer.jitter = option_find_float(options, "jitter", .2);
+ layer.random = option_find_int_quiet(options, "random", 0);
+ layer.reorg = option_find_int_quiet(options, "reorg", 0);
return layer;
}
@@ -272,6 +289,7 @@
COST_TYPE type = get_cost_type(type_s);
float scale = option_find_float_quiet(options, "scale",1);
cost_layer layer = make_cost_layer(params.batch, params.inputs, type, scale);
+ layer.ratio = option_find_float_quiet(options, "ratio",0);
return layer;
}
@@ -299,10 +317,26 @@
return l;
}
+layer parse_reorg(list *options, size_params params)
+{
+ int stride = option_find_int(options, "stride",1);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before reorg layer must output image.");
+
+ layer layer = make_reorg_layer(batch,w,h,c,stride);
+ return layer;
+}
+
maxpool_layer parse_maxpool(list *options, size_params params)
{
int stride = option_find_int(options, "stride",1);
int size = option_find_int(options, "size",stride);
+ int padding = option_find_int_quiet(options, "padding", (size-1)/2);
int batch,h,w,c;
h = params.h;
@@ -311,7 +345,7 @@
batch=params.batch;
if(!(h && w && c)) error("Layer before maxpool layer must output image.");
- maxpool_layer layer = make_maxpool_layer(batch,h,w,c,size,stride);
+ maxpool_layer layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
return layer;
}
@@ -432,6 +466,7 @@
learning_rate_policy get_policy(char *s)
{
+ if (strcmp(s, "random")==0) return RANDOM;
if (strcmp(s, "poly")==0) return POLY;
if (strcmp(s, "constant")==0) return CONSTANT;
if (strcmp(s, "step")==0) return STEP;
@@ -461,10 +496,16 @@
net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
+ net->angle = option_find_float_quiet(options, "angle", 0);
+ net->saturation = option_find_float_quiet(options, "saturation", 1);
+ net->exposure = option_find_float_quiet(options, "exposure", 1);
+ net->hue = option_find_float_quiet(options, "hue", 0);
+
if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
char *policy_s = option_find_str(options, "policy", "constant");
net->policy = get_policy(policy_s);
+ net->burn_in = option_find_int_quiet(options, "burn_in", 0);
if(net->policy == STEP){
net->step = option_find_int(options, "step", 1);
net->scale = option_find_float(options, "scale", 1);
@@ -497,7 +538,7 @@
} else if (net->policy == SIG){
net->gamma = option_find_float(options, "gamma", 1);
net->step = option_find_int(options, "step", 1);
- } else if (net->policy == POLY){
+ } else if (net->policy == POLY || net->policy == RANDOM){
net->power = option_find_float(options, "power", 1);
}
net->max_batches = option_find_int(options, "max_batches", 0);
@@ -523,6 +564,7 @@
params.batch = net.batch;
params.time_steps = net.time_steps;
+ size_t workspace_size = 0;
n = n->next;
int count = 0;
free_section(s);
@@ -552,6 +594,8 @@
l = parse_crop(options, params);
}else if(is_cost(s)){
l = parse_cost(options, params);
+ }else if(is_region(s)){
+ l = parse_region(options, params);
}else if(is_detection(s)){
l = parse_detection(options, params);
}else if(is_softmax(s)){
@@ -562,6 +606,8 @@
l = parse_batchnorm(options, params);
}else if(is_maxpool(s)){
l = parse_maxpool(options, params);
+ }else if(is_reorg(s)){
+ l = parse_reorg(options, params);
}else if(is_avgpool(s)){
l = parse_avgpool(options, params);
}else if(is_route(s)){
@@ -583,6 +629,7 @@
l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
option_unused(options);
net.layers[count] = l;
+ if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
free_section(s);
n = n->next;
++count;
@@ -596,6 +643,18 @@
free_list(sections);
net.outputs = get_network_output_size(net);
net.output = get_network_output(net);
+ if(workspace_size){
+ //printf("%ld\n", workspace_size);
+#ifdef GPU
+ if(gpu_index >= 0){
+ net.workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
+ }else {
+ net.workspace = calloc(1, workspace_size);
+ }
+#else
+ net.workspace = calloc(1, workspace_size);
+#endif
+ }
return net;
}
@@ -606,6 +665,7 @@
if (strcmp(type, "[crop]")==0) return CROP;
if (strcmp(type, "[cost]")==0) return COST;
if (strcmp(type, "[detection]")==0) return DETECTION;
+ if (strcmp(type, "[region]")==0) return REGION;
if (strcmp(type, "[local]")==0) return LOCAL;
if (strcmp(type, "[deconv]")==0
|| strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL;
@@ -621,6 +681,7 @@
|| strcmp(type, "[connected]")==0) return CONNECTED;
if (strcmp(type, "[max]")==0
|| strcmp(type, "[maxpool]")==0) return MAXPOOL;
+ if (strcmp(type, "[reorg]")==0) return REORG;
if (strcmp(type, "[avg]")==0
|| strcmp(type, "[avgpool]")==0) return AVGPOOL;
if (strcmp(type, "[dropout]")==0) return DROPOUT;
@@ -645,6 +706,10 @@
{
return (strcmp(s->type, "[cost]")==0);
}
+int is_region(section *s)
+{
+ return (strcmp(s->type, "[region]")==0);
+}
int is_detection(section *s)
{
return (strcmp(s->type, "[detection]")==0);
@@ -689,6 +754,10 @@
return (strcmp(s->type, "[conn]")==0
|| strcmp(s->type, "[connected]")==0);
}
+int is_reorg(section *s)
+{
+ return (strcmp(s->type, "[reorg]")==0);
+}
int is_maxpool(section *s)
{
return (strcmp(s->type, "[max]")==0
@@ -852,6 +921,18 @@
fwrite(l.filters, sizeof(float), num, fp);
}
+void save_batchnorm_weights(layer l, FILE *fp)
+{
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_batchnorm_layer(l);
+ }
+#endif
+ fwrite(l.scales, sizeof(float), l.c, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.c, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.c, fp);
+}
+
void save_connected_weights(layer l, FILE *fp)
{
#ifdef GPU
@@ -889,6 +970,8 @@
save_convolutional_weights(l, fp);
} if(l.type == CONNECTED){
save_connected_weights(l, fp);
+ } if(l.type == BATCHNORM){
+ save_batchnorm_weights(l, fp);
} if(l.type == RNN){
save_connected_weights(*(l.input_layer), fp);
save_connected_weights(*(l.self_layer), fp);
@@ -943,8 +1026,8 @@
if(transpose){
transpose_matrix(l.weights, l.inputs, l.outputs);
}
- //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs));
- //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs));
+ //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs));
+ //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs));
if (l.batch_normalize && (!l.dontloadscales)){
fread(l.scales, sizeof(float), l.outputs, fp);
fread(l.rolling_mean, sizeof(float), l.outputs, fp);
@@ -960,6 +1043,18 @@
#endif
}
+void load_batchnorm_weights(layer l, FILE *fp)
+{
+ fread(l.scales, sizeof(float), l.c, fp);
+ fread(l.rolling_mean, sizeof(float), l.c, fp);
+ fread(l.rolling_variance, sizeof(float), l.c, fp);
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_batchnorm_layer(l);
+ }
+#endif
+}
+
void load_convolutional_weights_binary(layer l, FILE *fp)
{
fread(l.biases, sizeof(float), l.n, fp);
@@ -983,7 +1078,6 @@
}
}
}
- binarize_filters2(l.filters, l.n, l.c*l.size*l.size, l.cfilters, l.scales);
#ifdef GPU
if(gpu_index >= 0){
push_convolutional_layer(l);
@@ -1005,10 +1099,11 @@
fread(l.rolling_variance, sizeof(float), l.n, fp);
}
fread(l.filters, sizeof(float), num, fp);
+ //if(l.c == 3) scal_cpu(num, 1./256, l.filters, 1);
if (l.flipped) {
transpose_matrix(l.filters, l.c*l.size*l.size, l.n);
}
- if (l.binary) binarize_filters(l.filters, l.n, l.c*l.size*l.size, l.filters);
+ //if (l.binary) binarize_filters(l.filters, l.n, l.c*l.size*l.size, l.filters);
#ifdef GPU
if(gpu_index >= 0){
push_convolutional_layer(l);
@@ -1053,6 +1148,9 @@
if(l.type == CONNECTED){
load_connected_weights(l, fp, transpose);
}
+ if(l.type == BATCHNORM){
+ load_batchnorm_weights(l, fp);
+ }
if(l.type == CRNN){
load_convolutional_weights(*(l.input_layer), fp);
load_convolutional_weights(*(l.self_layer), fp);
--
Gitblit v1.10.0