From 028696bf15efeca3acb3db8c42a96f7b9e0f55ff Mon Sep 17 00:00:00 2001
From: iovodov <b@ovdv.ru>
Date: Thu, 03 May 2018 13:33:46 +0000
Subject: [PATCH] Output improvements for detector results: Detector results were previously printed in random order, making them hard to interpret. Now: 1. Text output includes the coordinates of each rect (left, right, top, bottom, in pixels) along with its label and score. 2. Text output is sorted by the rects' left coordinates, to simplify matching rects to the image. 3. If several class probabilities exceed the threshold for a detection, the most probable class is written first, and the coordinates are not repeated for the others. 4. Rects are drawn onto the image ordered by their best class probability, so the most probable rects are always on top and are not overlaid by less probable ones. 5. The most probable label for a rect is always written first. Also: 6. The message about low GPU memory now includes the required amount.

---
 src/maxpool_layer.c |  216 +++++++++++++++++++++--------------------------------
 1 files changed, 86 insertions(+), 130 deletions(-)

diff --git a/src/maxpool_layer.c b/src/maxpool_layer.c
index c05e939..031d116 100644
--- a/src/maxpool_layer.c
+++ b/src/maxpool_layer.c
@@ -1,171 +1,127 @@
 #include "maxpool_layer.h"
+#include "cuda.h"
 #include <stdio.h>
 
-image get_maxpool_image(maxpool_layer layer)
+image get_maxpool_image(maxpool_layer l)
 {
-    int h = (layer.h-1)/layer.stride + 1;
-    int w = (layer.w-1)/layer.stride + 1;
-    int c = layer.c;
-    return float_to_image(h,w,c,layer.output);
+    int h = l.out_h;
+    int w = l.out_w;
+    int c = l.c;
+    return float_to_image(w,h,c,l.output);
 }
 
-image get_maxpool_delta(maxpool_layer layer)
+image get_maxpool_delta(maxpool_layer l)
 {
-    int h = (layer.h-1)/layer.stride + 1;
-    int w = (layer.w-1)/layer.stride + 1;
-    int c = layer.c;
-    return float_to_image(h,w,c,layer.delta);
+    int h = l.out_h;
+    int w = l.out_w;
+    int c = l.c;
+    return float_to_image(w,h,c,l.delta);
 }
 
-maxpool_layer *make_maxpool_layer(int batch, int h, int w, int c, int size, int stride)
+maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
 {
-    fprintf(stderr, "Maxpool Layer: %d x %d x %d image, %d size, %d stride\n", h,w,c,size,stride);
-    maxpool_layer *layer = calloc(1, sizeof(maxpool_layer));
-    layer->batch = batch;
-    layer->h = h;
-    layer->w = w;
-    layer->c = c;
-    layer->size = size;
-    layer->stride = stride;
-    int output_size = ((h-1)/stride+1) * ((w-1)/stride+1) * c * batch;
-    layer->indexes = calloc(output_size, sizeof(int));
-    layer->output =  calloc(output_size, sizeof(float));
-    layer->delta =   calloc(output_size, sizeof(float));
+    maxpool_layer l = {0};
+    l.type = MAXPOOL;
+    l.batch = batch;
+    l.h = h;
+    l.w = w;
+    l.c = c;
+    l.pad = padding;
+    l.out_w = (w + 2*padding)/stride;
+    l.out_h = (h + 2*padding)/stride;
+    l.out_c = c;
+    l.outputs = l.out_h * l.out_w * l.out_c;
+    l.inputs = h*w*c;
+    l.size = size;
+    l.stride = stride;
+    int output_size = l.out_h * l.out_w * l.out_c * batch;
+    l.indexes = calloc(output_size, sizeof(int));
+    l.output =  calloc(output_size, sizeof(float));
+    l.delta =   calloc(output_size, sizeof(float));
+    l.forward = forward_maxpool_layer;
+    l.backward = backward_maxpool_layer;
     #ifdef GPU
-    layer->indexes_cl = cl_make_int_array(layer->indexes, output_size);
-    layer->output_cl  = cl_make_array(layer->output, output_size);
-    layer->delta_cl   = cl_make_array(layer->delta, output_size);
+    l.forward_gpu = forward_maxpool_layer_gpu;
+    l.backward_gpu = backward_maxpool_layer_gpu;
+    l.indexes_gpu = cuda_make_int_array(output_size);
+    l.output_gpu  = cuda_make_array(l.output, output_size);
+    l.delta_gpu   = cuda_make_array(l.delta, output_size);
     #endif
-    return layer;
+    fprintf(stderr, "max          %d x %d / %d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
+    return l;
 }
 
-void resize_maxpool_layer(maxpool_layer *layer, int h, int w, int c)
+void resize_maxpool_layer(maxpool_layer *l, int w, int h)
 {
-    layer->h = h;
-    layer->w = w;
-    layer->c = c;
-    layer->output = realloc(layer->output, ((h-1)/layer->stride+1) * ((w-1)/layer->stride+1) * c * layer->batch* sizeof(float));
-    layer->delta = realloc(layer->delta, ((h-1)/layer->stride+1) * ((w-1)/layer->stride+1) * c * layer->batch*sizeof(float));
+    l->h = h;
+    l->w = w;
+    l->inputs = h*w*l->c;
+
+    l->out_w = (w + 2*l->pad)/l->stride;
+    l->out_h = (h + 2*l->pad)/l->stride;
+    l->outputs = l->out_w * l->out_h * l->c;
+    int output_size = l->outputs * l->batch;
+
+    l->indexes = realloc(l->indexes, output_size * sizeof(int));
+    l->output = realloc(l->output, output_size * sizeof(float));
+    l->delta = realloc(l->delta, output_size * sizeof(float));
+
+    #ifdef GPU
+    cuda_free((float *)l->indexes_gpu);
+    cuda_free(l->output_gpu);
+    cuda_free(l->delta_gpu);
+    l->indexes_gpu = cuda_make_int_array(output_size);
+    l->output_gpu  = cuda_make_array(l->output, output_size);
+    l->delta_gpu   = cuda_make_array(l->delta,  output_size);
+    #endif
 }
 
-void forward_maxpool_layer(const maxpool_layer layer, float *input)
+void forward_maxpool_layer(const maxpool_layer l, network_state state)
 {
-    int b,i,j,k,l,m;
-    int w_offset = (-layer.size-1)/2 + 1;
-    int h_offset = (-layer.size-1)/2 + 1;
+    int b,i,j,k,m,n;
+    int w_offset = -l.pad;
+    int h_offset = -l.pad;
 
-    int h = (layer.h-1)/layer.stride + 1;
-    int w = (layer.w-1)/layer.stride + 1;
-    int c = layer.c;
+    int h = l.out_h;
+    int w = l.out_w;
+    int c = l.c;
 
-    for(b = 0; b < layer.batch; ++b){
+    for(b = 0; b < l.batch; ++b){
         for(k = 0; k < c; ++k){
             for(i = 0; i < h; ++i){
                 for(j = 0; j < w; ++j){
                     int out_index = j + w*(i + h*(k + c*b));
                     float max = -FLT_MAX;
                     int max_i = -1;
-                    for(l = 0; l < layer.size; ++l){
-                        for(m = 0; m < layer.size; ++m){
-                            int cur_h = h_offset + i*layer.stride + l;
-                            int cur_w = w_offset + j*layer.stride + m;
-                            int index = cur_w + layer.w*(cur_h + layer.h*(k + b*layer.c));
-                            int valid = (cur_h >= 0 && cur_h < layer.h &&
-                                         cur_w >= 0 && cur_w < layer.w);
-                            float val = (valid != 0) ? input[index] : -FLT_MAX;
+                    for(n = 0; n < l.size; ++n){
+                        for(m = 0; m < l.size; ++m){
+                            int cur_h = h_offset + i*l.stride + n;
+                            int cur_w = w_offset + j*l.stride + m;
+                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
+                            int valid = (cur_h >= 0 && cur_h < l.h &&
+                                         cur_w >= 0 && cur_w < l.w);
+                            float val = (valid != 0) ? state.input[index] : -FLT_MAX;
                             max_i = (val > max) ? index : max_i;
                             max   = (val > max) ? val   : max;
                         }
                     }
-                    layer.output[out_index] = max;
-                    layer.indexes[out_index] = max_i;
+                    l.output[out_index] = max;
+                    l.indexes[out_index] = max_i;
                 }
             }
         }
     }
 }
 
-void backward_maxpool_layer(const maxpool_layer layer, float *delta)
+void backward_maxpool_layer(const maxpool_layer l, network_state state)
 {
     int i;
-    int h = (layer.h-1)/layer.stride + 1;
-    int w = (layer.w-1)/layer.stride + 1;
-    int c = layer.c;
-    memset(delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
-    for(i = 0; i < h*w*c*layer.batch; ++i){
-        int index = layer.indexes[i];
-        delta[index] += layer.delta[i];
+    int h = l.out_h;
+    int w = l.out_w;
+    int c = l.c;
+    for(i = 0; i < h*w*c*l.batch; ++i){
+        int index = l.indexes[i];
+        state.delta[index] += l.delta[i];
     }
 }
 
-#ifdef GPU
-cl_kernel get_forward_kernel()
-{
-    static int init = 0;
-    static cl_kernel kernel;
-    if(!init){
-        kernel = get_kernel("src/maxpool_layer.cl", "forward", 0);
-        init = 1;
-    }
-    return kernel;
-}
-
-void forward_maxpool_layer_gpu(maxpool_layer layer, cl_mem input)
-{
-    int h = (layer.h-1)/layer.stride + 1;
-    int w = (layer.w-1)/layer.stride + 1;
-    int c = layer.c;
-    cl_kernel kernel = get_forward_kernel();
-    cl_command_queue queue = cl.queue;
-
-    cl_uint i = 0;
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.h), (void*) &layer.h);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.w), (void*) &layer.w);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.c), (void*) &layer.c);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.stride), (void*) &layer.stride);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.size), (void*) &layer.size);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(input), (void*) &input);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.output_cl), (void*) &layer.output_cl);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.indexes_cl), (void*) &layer.indexes_cl);
-    check_error(cl);
-
-    const size_t global_size[] = {h*w*c*layer.batch};
-
-    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
-    check_error(cl);
-}
-
-cl_kernel get_backward_kernel()
-{
-    static int init = 0;
-    static cl_kernel kernel;
-    if(!init){
-        kernel = get_kernel("src/maxpool_layer.cl", "backward", 0);
-        init = 1;
-    }
-    return kernel;
-}
-
-void backward_maxpool_layer_gpu(maxpool_layer layer, cl_mem delta)
-{
-    cl_kernel kernel = get_backward_kernel();
-    cl_command_queue queue = cl.queue;
-
-    cl_uint i = 0;
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.h), (void*) &layer.h);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.w), (void*) &layer.w);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.c), (void*) &layer.c);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.stride), (void*) &layer.stride);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.size), (void*) &layer.size);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.delta_cl), (void*) &layer.delta_cl);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*) &delta);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.indexes_cl), (void*) &layer.indexes_cl);
-    check_error(cl);
-
-    const size_t global_size[] = {layer.h*layer.w*layer.c*layer.batch};
-
-    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
-    check_error(cl);
-}
-
-#endif

--
Gitblit v1.10.0