From 1b5afb45838e603fa6780762eb8cc59246dc2d81 Mon Sep 17 00:00:00 2001
From: IlyaOvodov <b@ovdv.ru>
Date: Tue, 08 May 2018 11:09:35 +0000
Subject: [PATCH] Output improvements for detector results

When printing detector results, output was emitted in a random order,
which made the results hard to interpret. Now:

1. Text output includes the coordinates of each rect (left, right, top,
   bottom, in pixels) along with its label and score.
2. Text output is sorted by each rect's left edge, to simplify matching
   rects to the image.
3. If several class probs exceed thresh for one detection, the most
   probable class is written first and the coordinates are not repeated
   for the others.
4. Rects are drawn on the image ordered by their best class prob, so the
   most probable rects are always on top and are not overlaid by less
   probable ones.
5. The most probable label for a rect is always written first.

Also:

6. The low-GPU-memory message now includes the required amount.

---
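Notes (commentary, not part of the commit):

The network.c hunks below add a small detection API: load_network,
network_predict_image, num_detections/make_network_boxes,
get_network_boxes and free_detections. A minimal usage sketch, assuming
these declarations are visible to the caller and that load_image_color()
exists as in upstream darknet; the file names and surrounding code are
illustrative only:

	network *net = load_network("yolov3.cfg", "yolov3.weights", 0);
	image im = load_image_color("dog.jpg", 0, 0);
	network_predict_image(net, im);  /* letterboxes im to net->w x net->h */
	int nboxes = 0;
	/* thresh = .5, hier = .5, map = NULL, relative = 1, letter = 1 */
	detection *dets = get_network_boxes(net, im.w, im.h, .5f, .5f, 0, 1, &nboxes, 1);
	/* ... NMS, printing and drawing happen in the detector code ... */
	free_detections(dets, nboxes);
	free_image(im);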
 src/network.c |  203 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 202 insertions(+), 1 deletion(-)
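The left-to-right ordering of the text output (item 2 in the message) is
implemented in the detector/drawing code rather than in network.c, so it
does not appear in this diff. Conceptually it is a qsort of the
detections by the left edge of each box; a hypothetical comparator
(compare_by_left is not part of this patch):

	/* bbox.x is the box center and bbox.w its width, so the
	   left edge of the box is x - w/2 */
	static int compare_by_left(const void *pa, const void *pb)
	{
		const detection *a = (const detection *)pa;
		const detection *b = (const detection *)pb;
		float la = a->bbox.x - a->bbox.w / 2.f;
		float lb = b->bbox.x - b->bbox.w / 2.f;
		return (la > lb) - (la < lb);
	}
	/* qsort(dets, nboxes, sizeof(detection), compare_by_left); */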

diff --git a/src/network.c b/src/network.c
index 10d1ead..ce6d28a 100644
--- a/src/network.c
+++ b/src/network.c
@@ -27,6 +27,20 @@
 #include "dropout_layer.h"
 #include "route_layer.h"
 #include "shortcut_layer.h"
+#include "yolo_layer.h"
+#include "parser.h"
+
+network *load_network(char *cfg, char *weights, int clear)
+{
+	printf(" Try to load cfg: %s, weights: %s, clear = %d \n", cfg, weights, clear);
+	network *net = calloc(1, sizeof(network));
+	*net = parse_network_cfg(cfg);
+	if (weights && weights[0] != 0) {
+		load_weights(net, weights);
+	}
+	if (clear) (*net->seen) = 0;
+	return net;
+}
 
 int get_current_batch(network net)
 {
@@ -45,6 +59,27 @@
     #endif
 }
 
+void reset_network_state(network *net, int b)
+{
+	int i;
+	for (i = 0; i < net->n; ++i) {
+#ifdef GPU
+		layer l = net->layers[i];
+		if (l.state_gpu) {
+			fill_ongpu(l.outputs, 0, l.state_gpu + l.outputs*b, 1);
+		}
+		if (l.h_gpu) {
+			fill_ongpu(l.outputs, 0, l.h_gpu + l.outputs*b, 1);
+		}
+#endif
+	}
+}
+
+void reset_rnn(network *net)
+{
+	reset_network_state(net, 0);
+}
+
 float get_current_rate(network net)
 {
     int batch_num = get_current_batch(net);
@@ -140,6 +175,11 @@
     #ifdef GPU
     net.input_gpu = calloc(1, sizeof(float *));
     net.truth_gpu = calloc(1, sizeof(float *));
+
+	net.input16_gpu = calloc(1, sizeof(float *));
+	net.output16_gpu = calloc(1, sizeof(float *));
+	net.max_input16_size = calloc(1, sizeof(size_t));
+	net.max_output16_size = calloc(1, sizeof(size_t));
     #endif
     return net;
 }
@@ -316,6 +356,8 @@
         net->layers[i].batch = b;
 #ifdef CUDNN
         if(net->layers[i].type == CONVOLUTIONAL){
+			cudnn_convolutional_setup(net->layers + i, cudnn_fastest);
+			/*
 			layer *l = net->layers + i;
             cudnn_convolutional_setup(l, cudnn_fastest);
 			// check for excessive memory consumption 
@@ -327,6 +369,7 @@
 				cudnn_convolutional_setup(l, cudnn_smallest);
 				l->workspace_size = get_workspace_size(*l);
 			}
+			*/
         }
 #endif
     }
@@ -356,7 +399,7 @@
     //fflush(stderr);
     for (i = 0; i < net->n; ++i){
         layer l = net->layers[i];
-		printf(" %d: layer = %d,", i, l.type);
+		//printf(" %d: layer = %d,", i, l.type);
         if(l.type == CONVOLUTIONAL){
             resize_convolutional_layer(&l, w, h);
         }else if(l.type == CROP){
@@ -365,8 +408,14 @@
             resize_maxpool_layer(&l, w, h);
         }else if(l.type == REGION){
             resize_region_layer(&l, w, h);
+		}else if (l.type == YOLO) {
+			resize_yolo_layer(&l, w, h);
         }else if(l.type == ROUTE){
             resize_route_layer(&l, net);
+		}else if (l.type == SHORTCUT) {
+			resize_shortcut_layer(&l, w, h);
+		}else if (l.type == UPSAMPLE) {
+			resize_upsample_layer(&l, w, h);
         }else if(l.type == REORG){
             resize_reorg_layer(&l, w, h);
         }else if(l.type == AVGPOOL){
@@ -489,6 +538,112 @@
     return out;
 }
 
+int num_detections(network *net, float thresh)
+{
+	int i;
+	int s = 0;
+	for (i = 0; i < net->n; ++i) {
+		layer l = net->layers[i];
+		if (l.type == YOLO) {
+			s += yolo_num_detections(l, thresh);
+		}
+		if (l.type == DETECTION || l.type == REGION) {
+			s += l.w*l.h*l.n;
+		}
+	}
+	return s;
+}
+
+detection *make_network_boxes(network *net, float thresh, int *num)
+{
+	layer l = net->layers[net->n - 1];
+	int i;
+	int nboxes = num_detections(net, thresh);
+	if (num) *num = nboxes;
+	detection *dets = calloc(nboxes, sizeof(detection));
+	for (i = 0; i < nboxes; ++i) {
+		dets[i].prob = calloc(l.classes, sizeof(float));
+		if (l.coords > 4) {
+			dets[i].mask = calloc(l.coords - 4, sizeof(float));
+		}
+	}
+	return dets;
+}
+
+
+void custom_get_region_detections(layer l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, detection *dets, int letter)
+{
+	box *boxes = calloc(l.w*l.h*l.n, sizeof(box));
+	float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
+	int i, j;
+	for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes, sizeof(float *));
+	get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, map);
+	for (j = 0; j < l.w*l.h*l.n; ++j) {
+		dets[j].classes = l.classes;
+		dets[j].bbox = boxes[j];
+		dets[j].objectness = 1;
+		for (i = 0; i < l.classes; ++i) {
+			dets[j].prob[i] = probs[j][i];
+		}
+	}
+
+	free(boxes);
+	free_ptrs((void **)probs, l.w*l.h*l.n);
+
+	//correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative);
+	correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter);
+}
+
+void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter)
+{
+	int j;
+	for (j = 0; j < net->n; ++j) {
+		layer l = net->layers[j];
+		if (l.type == YOLO) {
+			int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter);
+			dets += count;
+		}
+		if (l.type == REGION) {
+			custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter);
+			//get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
+			dets += l.w*l.h*l.n;
+		}
+		if (l.type == DETECTION) {
+			get_detection_detections(l, w, h, thresh, dets);
+			dets += l.w*l.h*l.n;
+		}
+	}
+}
+
+detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter)
+{
+	detection *dets = make_network_boxes(net, thresh, num);
+	fill_network_boxes(net, w, h, thresh, hier, map, relative, dets, letter);
+	return dets;
+}
+
+void free_detections(detection *dets, int n)
+{
+	int i;
+	for (i = 0; i < n; ++i) {
+		free(dets[i].prob);
+		if (dets[i].mask) free(dets[i].mask);
+	}
+	free(dets);
+}
+
+float *network_predict_image(network *net, image im)
+{
+	image imr = letterbox_image(im, net->w, net->h);
+	set_batch_network(net, 1);
+	float *p = network_predict(*net, imr.data);
+	free_image(imr);
+	return p;
+}
+
+int network_width(network *net) { return net->w; }
+int network_height(network *net) { return net->h; }
+
 matrix network_predict_data_multi(network net, data test, int n)
 {
     int i,j,b,m;
@@ -619,7 +774,53 @@
 	if (*net.truth_gpu) cuda_free(*net.truth_gpu);
 	if (net.input_gpu) free(net.input_gpu);
 	if (net.truth_gpu) free(net.truth_gpu);
+
+	if (*net.input16_gpu) cuda_free(*net.input16_gpu);
+	if (*net.output16_gpu) cuda_free(*net.output16_gpu);
+	if (net.input16_gpu) free(net.input16_gpu);
+	if (net.output16_gpu) free(net.output16_gpu);
+	if (net.max_input16_size) free(net.max_input16_size);
+	if (net.max_output16_size) free(net.max_output16_size);
 #else
 	free(net.workspace);
 #endif
 }
+
+
+void fuse_conv_batchnorm(network net)
+{
+	int j;
+	for (j = 0; j < net.n; ++j) {
+		layer *l = &net.layers[j];
+
+		if (l->type == CONVOLUTIONAL) {
+			//printf(" Merges Convolutional-%d and batch_norm \n", j);
+
+			if (l->batch_normalize) {
+				int f;
+				for (f = 0; f < l->n; ++f)
+				{
+					l->biases[f] = l->biases[f] - (double)l->scales[f] * l->rolling_mean[f] / (sqrt((double)l->rolling_variance[f]) + .000001f);
+
+					const size_t filter_size = l->size*l->size*l->c;
+					size_t i;
+					for (i = 0; i < filter_size; ++i) {
+						const size_t w_index = f*filter_size + i;
+
+						l->weights[w_index] = (double)l->weights[w_index] * l->scales[f] / (sqrt((double)l->rolling_variance[f]) + .000001f);
+					}
+				}
+
+				l->batch_normalize = 0;
+#ifdef GPU
+				if (gpu_index >= 0) {
+					push_convolutional_layer(*l);
+				}
+#endif
+			}
+		}
+		else {
+			//printf(" Fusion skip layer type: %d \n", l->type);
+		}
+	}
+}

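P.S. For reference, fuse_conv_batchnorm (last hunk) folds batch
normalization into the preceding convolution. With scale g, rolling mean
m and rolling variance v, it rewrites, per filter f:

	b' = b - g*m / (sqrt(v) + 1e-6)
	w' = w * g / (sqrt(v) + 1e-6)

after which batch_normalize is cleared and inference skips the batch-norm
pass. Note the epsilon is added after the square root, not inside it as
in the usual sqrt(v + eps) formulation, so fused output can differ from
the unfused path by a tiny amount. Typical use would be a single call
such as fuse_conv_batchnorm(*net) after load_network(), before inference.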
--
Gitblit v1.10.0