From 1b5afb45838e603fa6780762eb8cc59246dc2d81 Mon Sep 17 00:00:00 2001
From: IlyaOvodov <b@ovdv.ru>
Date: Tue, 08 May 2018 11:09:35 +0000
Subject: [PATCH] Output improvements for detector results: When printing detector results, output was done in random order, making results hard to interpret. Now: 1. Text output includes coordinates of rects (left,right,top,bottom in pixels) along with label and score 2. Text output is sorted by rect lefts to simplify finding the appropriate rects on the image 3. If several class probs are > thresh for some detection, the most probable is written first and coordinates for the others are not repeated 4. Rects are imprinted in the image in order of their best class prob, so the most probable rects are always on top and not overlaid by less probable ones 5. The most probable label for a rect is always written first Also: 6. The message about low GPU memory includes the required amount

---
 src/cost_layer.c |  148 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 101 insertions(+), 47 deletions(-)

diff --git a/src/cost_layer.c b/src/cost_layer.c
index 8158275..39d2398 100644
--- a/src/cost_layer.c
+++ b/src/cost_layer.c
@@ -10,7 +10,9 @@
 COST_TYPE get_cost_type(char *s)
 {
     if (strcmp(s, "sse")==0) return SSE;
-    fprintf(stderr, "Couldn't find activation function %s, going with SSE\n", s);
+    if (strcmp(s, "masked")==0) return MASKED;
+    if (strcmp(s, "smooth")==0) return SMOOTH;
+    fprintf(stderr, "Couldn't find cost type %s, going with SSE\n", s);
     return SSE;
 }
 
@@ -19,76 +21,128 @@
     switch(a){
         case SSE:
             return "sse";
+        case MASKED:
+            return "masked";
+        case SMOOTH:
+            return "smooth";
     }
     return "sse";
 }
 
-cost_layer *make_cost_layer(int batch, int inputs, COST_TYPE type)
+cost_layer make_cost_layer(int batch, int inputs, COST_TYPE cost_type, float scale)
 {
-    fprintf(stderr, "Cost Layer: %d inputs\n", inputs);
-    cost_layer *layer = calloc(1, sizeof(cost_layer));
-    layer->batch = batch;
-    layer->inputs = inputs;
-    layer->type = type;
-    layer->delta = calloc(inputs*batch, sizeof(float));
-    layer->output = calloc(1, sizeof(float));
+    fprintf(stderr, "cost                                           %4d\n",  inputs);
+    cost_layer l = {0};
+    l.type = COST;
+
+    l.scale = scale;
+    l.batch = batch;
+    l.inputs = inputs;
+    l.outputs = inputs;
+    l.cost_type = cost_type;
+    l.delta = calloc(inputs*batch, sizeof(float));
+    l.output = calloc(inputs*batch, sizeof(float));
+    l.cost = calloc(1, sizeof(float));
+
+    l.forward = forward_cost_layer;
+    l.backward = backward_cost_layer;
     #ifdef GPU
-    layer->delta_gpu = cuda_make_array(layer->delta, inputs*batch);
+    l.forward_gpu = forward_cost_layer_gpu;
+    l.backward_gpu = backward_cost_layer_gpu;
+
+    l.delta_gpu = cuda_make_array(l.output, inputs*batch);
+    l.output_gpu = cuda_make_array(l.delta, inputs*batch);
     #endif
-    return layer;
+    return l;
 }
 
-void pull_cost_layer(cost_layer layer)
+void resize_cost_layer(cost_layer *l, int inputs)
 {
-    cuda_pull_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs);
-}
-void push_cost_layer(cost_layer layer)
-{
-    cuda_push_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs);
+    l->inputs = inputs;
+    l->outputs = inputs;
+    l->delta = realloc(l->delta, inputs*l->batch*sizeof(float));
+    l->output = realloc(l->output, inputs*l->batch*sizeof(float));
+#ifdef GPU
+    cuda_free(l->delta_gpu);
+    cuda_free(l->output_gpu);
+    l->delta_gpu = cuda_make_array(l->delta, inputs*l->batch);
+    l->output_gpu = cuda_make_array(l->output, inputs*l->batch);
+#endif
 }
 
-void forward_cost_layer(cost_layer layer, float *input, float *truth)
+void forward_cost_layer(cost_layer l, network_state state)
 {
-    if (!truth) return;
-    copy_cpu(layer.batch*layer.inputs, truth, 1, layer.delta, 1);
-    axpy_cpu(layer.batch*layer.inputs, -1, input, 1, layer.delta, 1);
-    *(layer.output) = dot_cpu(layer.batch*layer.inputs, layer.delta, 1, layer.delta, 1);
-    //printf("cost: %f\n", *layer.output);
+    if (!state.truth) return;
+    if(l.cost_type == MASKED){
+        int i;
+        for(i = 0; i < l.batch*l.inputs; ++i){
+            if(state.truth[i] == SECRET_NUM) state.input[i] = SECRET_NUM;
+        }
+    }
+    if(l.cost_type == SMOOTH){
+        smooth_l1_cpu(l.batch*l.inputs, state.input, state.truth, l.delta, l.output);
+    } else {
+        l2_cpu(l.batch*l.inputs, state.input, state.truth, l.delta, l.output);
+    }
+    l.cost[0] = sum_array(l.output, l.batch*l.inputs);
 }
 
-void backward_cost_layer(const cost_layer layer, float *input, float *delta)
+void backward_cost_layer(const cost_layer l, network_state state)
 {
-    copy_cpu(layer.batch*layer.inputs, layer.delta, 1, delta, 1);
+    axpy_cpu(l.batch*l.inputs, l.scale, l.delta, 1, state.delta, 1);
 }
 
 #ifdef GPU
 
-void forward_cost_layer_gpu(cost_layer layer, float * input, float * truth)
+void pull_cost_layer(cost_layer l)
 {
-    if (!truth) return;
-    
-    /*
-    float *in = calloc(layer.inputs*layer.batch, sizeof(float));
-    float *t = calloc(layer.inputs*layer.batch, sizeof(float));
-    cuda_pull_array(input, in, layer.batch*layer.inputs);
-    cuda_pull_array(truth, t, layer.batch*layer.inputs);
-    forward_cost_layer(layer, in, t);
-    cuda_push_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs);
-    free(in);
-    free(t);
-    */
-
-    copy_ongpu(layer.batch*layer.inputs, truth, 1, layer.delta_gpu, 1);
-    axpy_ongpu(layer.batch*layer.inputs, -1, input, 1, layer.delta_gpu, 1);
-
-    cuda_pull_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs);
-    *(layer.output) = dot_cpu(layer.batch*layer.inputs, layer.delta, 1, layer.delta, 1);
-    //printf("cost: %f\n", *layer.output);
+    cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
 }
 
-void backward_cost_layer_gpu(const cost_layer layer, float * input, float * delta)
+void push_cost_layer(cost_layer l)
 {
-    copy_ongpu(layer.batch*layer.inputs, layer.delta_gpu, 1, delta, 1);
+    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.inputs);
+}
+
+int float_abs_compare (const void * a, const void * b)
+{
+    float fa = *(const float*) a;
+    if(fa < 0) fa = -fa;
+    float fb = *(const float*) b;
+    if(fb < 0) fb = -fb;
+    return (fa > fb) - (fa < fb);
+}
+
+void forward_cost_layer_gpu(cost_layer l, network_state state)
+{
+    if (!state.truth) return;
+    if (l.cost_type == MASKED) {
+        mask_ongpu(l.batch*l.inputs, state.input, SECRET_NUM, state.truth);
+    }
+
+    if(l.cost_type == SMOOTH){
+        smooth_l1_gpu(l.batch*l.inputs, state.input, state.truth, l.delta_gpu, l.output_gpu);
+    } else {
+        l2_gpu(l.batch*l.inputs, state.input, state.truth, l.delta_gpu, l.output_gpu);
+    }
+
+    if(l.ratio){
+        cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
+        qsort(l.delta, l.batch*l.inputs, sizeof(float), float_abs_compare);
+        int n = (1-l.ratio) * l.batch*l.inputs;
+        float thresh = l.delta[n];
+        thresh = 0;
+        printf("%f\n", thresh);
+        supp_ongpu(l.batch*l.inputs, thresh, l.delta_gpu, 1);
+    }
+
+    cuda_pull_array(l.output_gpu, l.output, l.batch*l.inputs);
+    l.cost[0] = sum_array(l.output, l.batch*l.inputs);
+}
+
+void backward_cost_layer_gpu(const cost_layer l, network_state state)
+{
+    axpy_ongpu(l.batch*l.inputs, l.scale, l.delta_gpu, 1, state.delta, 1);
 }
 #endif
 

--
Gitblit v1.10.0