From 1b5afb45838e603fa6780762eb8cc59246dc2d81 Mon Sep 17 00:00:00 2001
From: IlyaOvodov <b@ovdv.ru>
Date: Tue, 08 May 2018 11:09:35 +0000
Subject: [PATCH] Output improvements for detector results

When printing detector results, output was produced in a random order,
making the results hard to interpret. Now:

1. Text output includes the coordinates of each rect (left, right, top,
   bottom, in pixels) along with its label and score.
2. Text output is sorted by the rects' left edges, to simplify finding
   the corresponding rects on the image.
3. If several class probabilities exceed the threshold for a detection,
   the most probable class is written first, and the coordinates are not
   repeated for the others.
4. Rects are imprinted in the image in order of their best class
   probability, so the most probable rects are always on top and are not
   overlaid by less probable ones (see the sketch below).
5. The most probable label for a rect is always written first.

Also:

6. The message about low GPU memory now includes the required amount.
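The ordering changes (items 2 and 4) come down to sorting the detections
before printing and drawing. A minimal sketch of the idea in C, using a
hypothetical detection_t struct and helper names that are not part of
this patch:

    #include <stdlib.h>

    typedef struct {
        float left, right, top, bottom;  /* box coordinates in pixels */
        float *prob;                     /* per-class probabilities */
        int classes;
    } detection_t;

    /* Highest class probability of a detection. */
    static float best_prob(const detection_t *d)
    {
        float best = 0;
        int j;
        for (j = 0; j < d->classes; ++j)
            if (d->prob[j] > best) best = d->prob[j];
        return best;
    }

    /* Ascending by best prob: weak boxes are drawn first, strong ones
       last, so the strongest end up on top. */
    static int by_best_prob(const void *a, const void *b)
    {
        float pa = best_prob(a), pb = best_prob(b);
        return (pa > pb) - (pa < pb);
    }

    /* Ascending by left edge, for the sorted text listing. */
    static int by_left(const void *a, const void *b)
    {
        float la = ((const detection_t *)a)->left;
        float lb = ((const detection_t *)b)->left;
        return (la > lb) - (la < lb);
    }

    /* Usage: qsort(dets, n, sizeof(detection_t), by_left) before the
       text output, qsort(..., by_best_prob) before drawing. */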
---
src/normalization_layer.c | 195 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 125 insertions(+), 70 deletions(-)
diff --git a/src/normalization_layer.c b/src/normalization_layer.c
index d82451b..069a079 100644
--- a/src/normalization_layer.c
+++ b/src/normalization_layer.c
@@ -1,95 +1,150 @@
#include "normalization_layer.h"
+#include "blas.h"
#include <stdio.h>
-image get_normalization_image(normalization_layer layer)
+layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa)
{
- int h = layer.h;
- int w = layer.w;
- int c = layer.c;
- return float_to_image(h,w,c,layer.output);
-}
+ fprintf(stderr, "Local Response Normalization Layer: %d x %d x %d image, %d size\n", w,h,c,size);
+ layer layer = {0};
+ layer.type = NORMALIZATION;
+ layer.batch = batch;
+ layer.h = layer.out_h = h;
+ layer.w = layer.out_w = w;
+ layer.c = layer.out_c = c;
+ layer.kappa = kappa;
+ layer.size = size;
+ layer.alpha = alpha;
+ layer.beta = beta;
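+ /* alpha, beta and kappa parameterize the LRN denominator:
+    output = input / (kappa + alpha * sum of squared inputs over `size` channels)^beta */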
+ layer.output = calloc(h * w * c * batch, sizeof(float));
+ layer.delta = calloc(h * w * c * batch, sizeof(float));
+ layer.squared = calloc(h * w * c * batch, sizeof(float));
+ layer.norms = calloc(h * w * c * batch, sizeof(float));
+ layer.inputs = w*h*c;
+ layer.outputs = layer.inputs;
-image get_normalization_delta(normalization_layer layer)
-{
- int h = layer.h;
- int w = layer.w;
- int c = layer.c;
- return float_to_image(h,w,c,layer.delta);
-}
+ layer.forward = forward_normalization_layer;
+ layer.backward = backward_normalization_layer;
+ #ifdef GPU
+ layer.forward_gpu = forward_normalization_layer_gpu;
+ layer.backward_gpu = backward_normalization_layer_gpu;
-normalization_layer *make_normalization_layer(int batch, int h, int w, int c, int size, float alpha, float beta, float kappa)
-{
- fprintf(stderr, "Local Response Normalization Layer: %d x %d x %d image, %d size\n", h,w,c,size);
- normalization_layer *layer = calloc(1, sizeof(normalization_layer));
- layer->batch = batch;
- layer->h = h;
- layer->w = w;
- layer->c = c;
- layer->kappa = kappa;
- layer->size = size;
- layer->alpha = alpha;
- layer->beta = beta;
- layer->output = calloc(h * w * c * batch, sizeof(float));
- layer->delta = calloc(h * w * c * batch, sizeof(float));
- layer->sums = calloc(h*w, sizeof(float));
+ layer.output_gpu = cuda_make_array(layer.output, h * w * c * batch);
+ layer.delta_gpu = cuda_make_array(layer.delta, h * w * c * batch);
+ layer.squared_gpu = cuda_make_array(layer.squared, h * w * c * batch);
+ layer.norms_gpu = cuda_make_array(layer.norms, h * w * c * batch);
+ #endif
return layer;
}
-void resize_normalization_layer(normalization_layer *layer, int h, int w)
+void resize_normalization_layer(layer *layer, int w, int h)
{
+ int c = layer->c;
+ int batch = layer->batch;
layer->h = h;
layer->w = w;
- layer->output = realloc(layer->output, h * w * layer->c * layer->batch * sizeof(float));
- layer->delta = realloc(layer->delta, h * w * layer->c * layer->batch * sizeof(float));
- layer->sums = realloc(layer->sums, h*w * sizeof(float));
+ layer->out_h = h;
+ layer->out_w = w;
+ layer->inputs = w*h*c;
+ layer->outputs = layer->inputs;
+ layer->output = realloc(layer->output, h * w * c * batch * sizeof(float));
+ layer->delta = realloc(layer->delta, h * w * c * batch * sizeof(float));
+ layer->squared = realloc(layer->squared, h * w * c * batch * sizeof(float));
+ layer->norms = realloc(layer->norms, h * w * c * batch * sizeof(float));
+#ifdef GPU
+ cuda_free(layer->output_gpu);
+ cuda_free(layer->delta_gpu);
+ cuda_free(layer->squared_gpu);
+ cuda_free(layer->norms_gpu);
+ layer->output_gpu = cuda_make_array(layer->output, h * w * c * batch);
+ layer->delta_gpu = cuda_make_array(layer->delta, h * w * c * batch);
+ layer->squared_gpu = cuda_make_array(layer->squared, h * w * c * batch);
+ layer->norms_gpu = cuda_make_array(layer->norms, h * w * c * batch);
+#endif
}
-void add_square_array(float *src, float *dest, int n)
+void forward_normalization_layer(const layer layer, network_state state)
{
- int i;
- for(i = 0; i < n; ++i){
- dest[i] += src[i]*src[i];
- }
-}
-void sub_square_array(float *src, float *dest, int n)
-{
- int i;
- for(i = 0; i < n; ++i){
- dest[i] -= src[i]*src[i];
- }
-}
+ int k,b;
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
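+ /* Zero the scratch buffer of squared activations for the whole batch. */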
+ scal_cpu(w*h*c*layer.batch, 0, layer.squared, 1);
-void forward_normalization_layer(const normalization_layer layer, float *in)
-{
- int i,j,k;
- memset(layer.sums, 0, layer.h*layer.w*sizeof(float));
- int imsize = layer.h*layer.w;
- for(j = 0; j < layer.size/2; ++j){
- if(j < layer.c) add_square_array(in+j*imsize, layer.sums, imsize);
- }
- for(k = 0; k < layer.c; ++k){
- int next = k+layer.size/2;
- int prev = k-layer.size/2-1;
- if(next < layer.c) add_square_array(in+next*imsize, layer.sums, imsize);
- if(prev > 0) sub_square_array(in+prev*imsize, layer.sums, imsize);
- for(i = 0; i < imsize; ++i){
- layer.output[k*imsize + i] = in[k*imsize+i] / pow(layer.kappa + layer.alpha * layer.sums[i], layer.beta);
+ for(b = 0; b < layer.batch; ++b){
+ float *squared = layer.squared + w*h*c*b;
+ float *norms = layer.norms + w*h*c*b;
+ float *input = state.input + w*h*c*b;
+ pow_cpu(w*h*c, 2, input, 1, squared, 1);
+
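+ /* Seed channel 0's denominator: kappa plus alpha times the squared
+    activations of the first size/2 channels. */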
+ const_cpu(w*h, layer.kappa, norms, 1);
+ for(k = 0; k < layer.size/2; ++k){
+ axpy_cpu(w*h, layer.alpha, squared + w*h*k, 1, norms, 1);
+ }
+
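+ /* Slide the window one channel at a time: reuse the previous channel's
+    sum, subtract the channel leaving the window, add the one entering. */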
+ for(k = 1; k < layer.c; ++k){
+ copy_cpu(w*h, norms + w*h*(k-1), 1, norms + w*h*k, 1);
+ int prev = k - ((layer.size-1)/2) - 1;
+ int next = k + (layer.size/2);
+ if(prev >= 0) axpy_cpu(w*h, -layer.alpha, squared + w*h*prev, 1, norms + w*h*k, 1);
+ if(next < layer.c) axpy_cpu(w*h, layer.alpha, squared + w*h*next, 1, norms + w*h*k, 1);
}
}
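+ /* Finish in one pass over the whole batch: output = input * norms^(-beta). */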
+ pow_cpu(w*h*c*layer.batch, -layer.beta, layer.norms, 1, layer.output, 1);
+ mul_cpu(w*h*c*layer.batch, state.input, 1, layer.output, 1);
}
-void backward_normalization_layer(const normalization_layer layer, float *in, float *delta)
+void backward_normalization_layer(const layer layer, network_state state)
{
- //TODO!
+ // TODO This is approximate ;-)
+ // Also this should add into delta instead of overwriting.
+
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
+ pow_cpu(w*h*c*layer.batch, -layer.beta, layer.norms, 1, state.delta, 1);
+ mul_cpu(w*h*c*layer.batch, layer.delta, 1, state.delta, 1);
}
-void visualize_normalization_layer(normalization_layer layer, char *window)
+#ifdef GPU
+void forward_normalization_layer_gpu(const layer layer, network_state state)
{
- image delta = get_normalization_image(layer);
- image dc = collapse_image_layers(delta, 1);
- char buff[256];
- sprintf(buff, "%s: Output", window);
- show_image(dc, buff);
- save_image(dc, buff);
- free_image(dc);
+ int k,b;
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
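+ /* Same sliding-window algorithm as forward_normalization_layer, using
+    the GPU BLAS wrappers. */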
+ scal_ongpu(w*h*c*layer.batch, 0, layer.squared_gpu, 1);
+
+ for(b = 0; b < layer.batch; ++b){
+ float *squared = layer.squared_gpu + w*h*c*b;
+ float *norms = layer.norms_gpu + w*h*c*b;
+ float *input = state.input + w*h*c*b;
+ pow_ongpu(w*h*c, 2, input, 1, squared, 1);
+
+ const_ongpu(w*h, layer.kappa, norms, 1);
+ for(k = 0; k < layer.size/2; ++k){
+ axpy_ongpu(w*h, layer.alpha, squared + w*h*k, 1, norms, 1);
+ }
+
+ for(k = 1; k < layer.c; ++k){
+ copy_ongpu(w*h, norms + w*h*(k-1), 1, norms + w*h*k, 1);
+ int prev = k - ((layer.size-1)/2) - 1;
+ int next = k + (layer.size/2);
+ if(prev >= 0) axpy_ongpu(w*h, -layer.alpha, squared + w*h*prev, 1, norms + w*h*k, 1);
+ if(next < layer.c) axpy_ongpu(w*h, layer.alpha, squared + w*h*next, 1, norms + w*h*k, 1);
+ }
+ }
+ pow_ongpu(w*h*c*layer.batch, -layer.beta, layer.norms_gpu, 1, layer.output_gpu, 1);
+ mul_ongpu(w*h*c*layer.batch, state.input, 1, layer.output_gpu, 1);
}
+
+void backward_normalization_layer_gpu(const layer layer, network_state state)
+{
+ // TODO This is approximate ;-)
+
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
+ pow_ongpu(w*h*c*layer.batch, -layer.beta, layer.norms_gpu, 1, state.delta, 1);
+ mul_ongpu(w*h*c*layer.batch, layer.delta_gpu, 1, state.delta, 1);
+}
+#endif
--
Gitblit v1.10.0