From 028696bf15efeca3acb3db8c42a96f7b9e0f55ff Mon Sep 17 00:00:00 2001
From: iovodov <b@ovdv.ru>
Date: Thu, 03 May 2018 13:33:46 +0000
Subject: [PATCH] Output improvements for detector results

When printing detector results, the output was previously produced in an
arbitrary order, which made the results hard to interpret. Now:

1. Text output includes the coordinates of each rect (left, right, top,
   bottom, in pixels) along with its label and score.
2. Text output is sorted by the rects' left coordinate, to make it easier
   to find the corresponding rects in the image.
3. If several class probabilities exceed the threshold for a detection,
   the most probable class is written first and the coordinates are not
   repeated for the others.
4. Rects are drawn on the image in order of their best class probability,
   so the most probable rects are always on top and are not overlaid by
   less probable ones.
5. The most probable label for a rect is always written first.

Also:

6. The message about low GPU memory now includes the required amount.
---
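Note (illustration only, not part of the commit below): a minimal sketch of
the text-output ordering described in points 2-5 above, using a hypothetical
det_box struct and helper names; the actual patch operates on darknet's own
detection arrays and drawing code.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        int left, right, top, bottom;   /* box coordinates in pixels */
        int best_class;                 /* index of the most probable class */
        float best_prob;                /* probability of that class */
    } det_box;

    /* Sort boxes by their left coordinate so they are easy to locate on the image. */
    static int cmp_left(const void *a, const void *b)
    {
        const det_box *da = (const det_box *)a;
        const det_box *db = (const det_box *)b;
        return (da->left > db->left) - (da->left < db->left);
    }

    /* Print each box once: most probable label first, coordinates not repeated. */
    static void print_boxes(det_box *boxes, int n, char **names)
    {
        int i;
        qsort(boxes, n, sizeof(det_box), cmp_left);
        for (i = 0; i < n; ++i) {
            printf("%s: %.0f%%  (left=%d, right=%d, top=%d, bottom=%d)\n",
                   names[boxes[i].best_class], boxes[i].best_prob * 100,
                   boxes[i].left, boxes[i].right,
                   boxes[i].top, boxes[i].bottom);
        }
    }

    int main(void)
    {
        char *names[] = { "person", "car" };
        det_box boxes[] = {
            { 350, 500, 40, 300, 1, 0.72f },
            {  20, 180, 60, 310, 0, 0.91f },
        };
        print_boxes(boxes, 2, names);
        return 0;
    }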
src/network_kernels.cu | 161 +++++++++++++++--------------------------------------
 1 file changed, 46 insertions(+), 115 deletions(-)
diff --git a/src/network_kernels.cu b/src/network_kernels.cu
index b7d1d2b..a11d61f 100644
--- a/src/network_kernels.cu
+++ b/src/network_kernels.cu
@@ -22,7 +22,6 @@
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
-#include "deconvolutional_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
@@ -37,6 +36,10 @@
#include "blas.h"
}
+#ifdef OPENCV
+#include "opencv2/highgui/highgui_c.h"
+#endif
+
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
@@ -51,50 +54,25 @@
if(l.delta_gpu){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
- if(l.type == CONVOLUTIONAL){
- forward_convolutional_layer_gpu(l, state);
- } else if(l.type == DECONVOLUTIONAL){
- forward_deconvolutional_layer_gpu(l, state);
- } else if(l.type == ACTIVE){
- forward_activation_layer_gpu(l, state);
- } else if(l.type == LOCAL){
- forward_local_layer_gpu(l, state);
- } else if(l.type == DETECTION){
- forward_detection_layer_gpu(l, state);
- } else if(l.type == REGION){
- forward_region_layer_gpu(l, state);
- } else if(l.type == CONNECTED){
- forward_connected_layer_gpu(l, state);
- } else if(l.type == RNN){
- forward_rnn_layer_gpu(l, state);
- } else if(l.type == GRU){
- forward_gru_layer_gpu(l, state);
- } else if(l.type == CRNN){
- forward_crnn_layer_gpu(l, state);
- } else if(l.type == CROP){
- forward_crop_layer_gpu(l, state);
- } else if(l.type == COST){
- forward_cost_layer_gpu(l, state);
- } else if(l.type == SOFTMAX){
- forward_softmax_layer_gpu(l, state);
- } else if(l.type == NORMALIZATION){
- forward_normalization_layer_gpu(l, state);
- } else if(l.type == BATCHNORM){
- forward_batchnorm_layer_gpu(l, state);
- } else if(l.type == MAXPOOL){
- forward_maxpool_layer_gpu(l, state);
- } else if(l.type == REORG){
- forward_reorg_layer_gpu(l, state);
- } else if(l.type == AVGPOOL){
- forward_avgpool_layer_gpu(l, state);
- } else if(l.type == DROPOUT){
- forward_dropout_layer_gpu(l, state);
- } else if(l.type == ROUTE){
- forward_route_layer_gpu(l, net);
- } else if(l.type == SHORTCUT){
- forward_shortcut_layer_gpu(l, state);
- }
+ l.forward_gpu(l, state);
+ if(net.wait_stream)
+ cudaStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
+/*
+ cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
+ if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
+ int j;
+ for (j = 0; j < l.out_c; ++j) {
+ image img = make_image(l.out_w, l.out_h, 3);
+ memcpy(img.data, l.output+ l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
+ char buff[256];
+ sprintf(buff, "layer-%d slice-%d", i, j);
+ show_image(img, buff);
+ }
+        cvWaitKey(0); // wait for a key press in the console
+ cvDestroyAllWindows();
+ }
+*/
}
}
@@ -107,6 +85,7 @@
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
+ if (l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
@@ -115,71 +94,21 @@
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
- if(l.type == CONVOLUTIONAL){
- backward_convolutional_layer_gpu(l, state);
- } else if(l.type == DECONVOLUTIONAL){
- backward_deconvolutional_layer_gpu(l, state);
- } else if(l.type == ACTIVE){
- backward_activation_layer_gpu(l, state);
- } else if(l.type == LOCAL){
- backward_local_layer_gpu(l, state);
- } else if(l.type == MAXPOOL){
- if(i != 0) backward_maxpool_layer_gpu(l, state);
- } else if(l.type == REORG){
- backward_reorg_layer_gpu(l, state);
- } else if(l.type == AVGPOOL){
- if(i != 0) backward_avgpool_layer_gpu(l, state);
- } else if(l.type == DROPOUT){
- backward_dropout_layer_gpu(l, state);
- } else if(l.type == DETECTION){
- backward_detection_layer_gpu(l, state);
- } else if(l.type == REGION){
- backward_region_layer_gpu(l, state);
- } else if(l.type == NORMALIZATION){
- backward_normalization_layer_gpu(l, state);
- } else if(l.type == BATCHNORM){
- backward_batchnorm_layer_gpu(l, state);
- } else if(l.type == SOFTMAX){
- if(i != 0) backward_softmax_layer_gpu(l, state);
- } else if(l.type == CONNECTED){
- backward_connected_layer_gpu(l, state);
- } else if(l.type == RNN){
- backward_rnn_layer_gpu(l, state);
- } else if(l.type == GRU){
- backward_gru_layer_gpu(l, state);
- } else if(l.type == CRNN){
- backward_crnn_layer_gpu(l, state);
- } else if(l.type == COST){
- backward_cost_layer_gpu(l, state);
- } else if(l.type == ROUTE){
- backward_route_layer_gpu(l, net);
- } else if(l.type == SHORTCUT){
- backward_shortcut_layer_gpu(l, state);
- }
+ l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
+ cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
- if(l.type == CONVOLUTIONAL){
- update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == DECONVOLUTIONAL){
- update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
- } else if(l.type == CONNECTED){
- update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == GRU){
- update_gru_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == RNN){
- update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == CRNN){
- update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == LOCAL){
- update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
+ l.t = get_current_batch(net);
+ if(l.update_gpu){
+ l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
@@ -203,7 +132,15 @@
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
+#ifdef CUDNN_HALF
+ int i;
+ for (i = 0; i < net.n; ++i) {
+ layer l = net.layers[i];
+ cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
+ }
+#endif
forward_network_gpu(net, state);
+ //cudaStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
}
@@ -271,20 +208,9 @@
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
- if(l.type == CONVOLUTIONAL){
- update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == DECONVOLUTIONAL){
- update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
- } else if(l.type == CONNECTED){
- update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == RNN){
- update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == GRU){
- update_gru_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == CRNN){
- update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
- } else if(l.type == LOCAL){
- update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
+ l.t = get_current_batch(net);
+ if(l.update_gpu){
+ l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
@@ -463,14 +389,17 @@
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
- printf("%f\n", errors[i]);
+ //printf("%f\n", errors[i]);
sum += errors[i];
}
+ //cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
+ fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
+ //cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
@@ -479,7 +408,7 @@
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
- cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
+ if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
@@ -492,6 +421,8 @@
float *network_predict_gpu(network net, float *input)
{
+ if (net.gpu_index != cuda_get_device())
+ cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
--
Gitblit v1.10.0