From 028696bf15efeca3acb3db8c42a96f7b9e0f55ff Mon Sep 17 00:00:00 2001
From: iovodov <b@ovdv.ru>
Date: Thu, 03 May 2018 13:33:46 +0000
Subject: [PATCH] Output improvements for detector results

Previously, detector results were printed in arbitrary order, which made them
hard to interpret. Now:

1. Text output includes the coordinates of each rect (left, right, top, bottom,
   in pixels) along with its label and score.
2. Text output is sorted by the rects' left coordinates, to make it easier to
   match each line to its rect on the image.
3. If several class probabilities exceed the threshold for one detection, the
   most probable class is written first and the coordinates are not repeated
   for the others.
4. Rects are drawn on the image in order of their best class probability, so
   the most probable rects are always on top and are not overlaid by less
   probable ones.
5. The most probable label for a rect is always written first.

Also:

6. The message about low GPU memory now includes the required amount.
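As a minimal sketch of the ordering described in points 1, 2 and 5 (the detector
output code is not part of the hunk below; detection_rect, names and
print_sorted_detections are hypothetical names used only for illustration):

/* Hedged sketch, not part of this patch: sort detections by their left edge
 * and print the most probable label first, with coordinates in pixels. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int left, right, top, bottom;   /* box coordinates in pixels */
    int best_class;                 /* index of the most probable class */
    float best_prob;                /* probability of that class */
} detection_rect;

/* qsort comparator: ascending by left edge */
static int cmp_by_left(const void *a, const void *b)
{
    const detection_rect *ra = (const detection_rect *)a;
    const detection_rect *rb = (const detection_rect *)b;
    return (ra->left > rb->left) - (ra->left < rb->left);
}

void print_sorted_detections(detection_rect *rects, int n, char **names)
{
    qsort(rects, n, sizeof(detection_rect), cmp_by_left);
    for (int i = 0; i < n; ++i) {
        /* most probable label first, then the box coordinates */
        printf("%s: %.0f%%  (left=%d, right=%d, top=%d, bottom=%d)\n",
               names[rects[i].best_class], rects[i].best_prob * 100,
               rects[i].left, rects[i].right, rects[i].top, rects[i].bottom);
    }
}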

---
 src/connected_layer.c |  393 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 256 insertions(+), 137 deletions(-)

diff --git a/src/connected_layer.c b/src/connected_layer.c
index 254d39e..e6dc759 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -1,4 +1,5 @@
 #include "connected_layer.h"
+#include "batchnorm_layer.h"
 #include "utils.h"
 #include "cuda.h"
 #include "blas.h"
@@ -9,203 +10,321 @@
 #include <stdlib.h>
 #include <string.h>
 
-connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, float learning_rate, float momentum, float decay)
+connected_layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize)
 {
     int i;
-    connected_layer *layer = calloc(1, sizeof(connected_layer));
+    connected_layer l = {0};
+    l.type = CONNECTED;
 
-    layer->learning_rate = learning_rate;
-    layer->momentum = momentum;
-    layer->decay = decay;
+    l.inputs = inputs;
+    l.outputs = outputs;
+    l.batch=batch;
+    l.batch_normalize = batch_normalize;
+    l.h = 1;
+    l.w = 1;
+    l.c = inputs;
+    l.out_h = 1;
+    l.out_w = 1;
+    l.out_c = outputs;
 
-    layer->inputs = inputs;
-    layer->outputs = outputs;
-    layer->batch=batch;
+    l.output = calloc(batch*outputs, sizeof(float));
+    l.delta = calloc(batch*outputs, sizeof(float));
 
-    layer->output = calloc(batch*outputs, sizeof(float*));
-    layer->delta = calloc(batch*outputs, sizeof(float*));
+    l.weight_updates = calloc(inputs*outputs, sizeof(float));
+    l.bias_updates = calloc(outputs, sizeof(float));
 
-    layer->weight_updates = calloc(inputs*outputs, sizeof(float));
-    layer->bias_updates = calloc(outputs, sizeof(float));
+    l.weights = calloc(outputs*inputs, sizeof(float));
+    l.biases = calloc(outputs, sizeof(float));
 
-    layer->weight_prev = calloc(inputs*outputs, sizeof(float));
-    layer->bias_prev = calloc(outputs, sizeof(float));
+    l.forward = forward_connected_layer;
+    l.backward = backward_connected_layer;
+    l.update = update_connected_layer;
 
-    layer->weights = calloc(inputs*outputs, sizeof(float));
-    layer->biases = calloc(outputs, sizeof(float));
-
-
-    float scale = 1./sqrt(inputs);
-    //scale = .01;
-    for(i = 0; i < inputs*outputs; ++i){
-        layer->weights[i] = scale*rand_normal();
+    //float scale = 1./sqrt(inputs);
+    float scale = sqrt(2./inputs);
+    for(i = 0; i < outputs*inputs; ++i){
+        l.weights[i] = scale*rand_uniform(-1, 1);
     }
 
     for(i = 0; i < outputs; ++i){
-        layer->biases[i] = scale;
+        l.biases[i] = 0;
+    }
+
+    if(batch_normalize){
+        l.scales = calloc(outputs, sizeof(float));
+        l.scale_updates = calloc(outputs, sizeof(float));
+        for(i = 0; i < outputs; ++i){
+            l.scales[i] = 1;
+        }
+
+        l.mean = calloc(outputs, sizeof(float));
+        l.mean_delta = calloc(outputs, sizeof(float));
+        l.variance = calloc(outputs, sizeof(float));
+        l.variance_delta = calloc(outputs, sizeof(float));
+
+        l.rolling_mean = calloc(outputs, sizeof(float));
+        l.rolling_variance = calloc(outputs, sizeof(float));
+
+        l.x = calloc(batch*outputs, sizeof(float));
+        l.x_norm = calloc(batch*outputs, sizeof(float));
     }
 
 #ifdef GPU
-    layer->weights_gpu = cuda_make_array(layer->weights, inputs*outputs);
-    layer->biases_gpu = cuda_make_array(layer->biases, outputs);
+    l.forward_gpu = forward_connected_layer_gpu;
+    l.backward_gpu = backward_connected_layer_gpu;
+    l.update_gpu = update_connected_layer_gpu;
 
-    layer->weight_updates_gpu = cuda_make_array(layer->weight_updates, inputs*outputs);
-    layer->bias_updates_gpu = cuda_make_array(layer->bias_updates, outputs);
+    l.weights_gpu = cuda_make_array(l.weights, outputs*inputs);
+    l.biases_gpu = cuda_make_array(l.biases, outputs);
 
-    layer->output_gpu = cuda_make_array(layer->output, outputs*batch);
-    layer->delta_gpu = cuda_make_array(layer->delta, outputs*batch);
+    l.weight_updates_gpu = cuda_make_array(l.weight_updates, outputs*inputs);
+    l.bias_updates_gpu = cuda_make_array(l.bias_updates, outputs);
+
+    l.output_gpu = cuda_make_array(l.output, outputs*batch);
+    l.delta_gpu = cuda_make_array(l.delta, outputs*batch);
+    if(batch_normalize){
+        l.scales_gpu = cuda_make_array(l.scales, outputs);
+        l.scale_updates_gpu = cuda_make_array(l.scale_updates, outputs);
+
+        l.mean_gpu = cuda_make_array(l.mean, outputs);
+        l.variance_gpu = cuda_make_array(l.variance, outputs);
+
+        l.rolling_mean_gpu = cuda_make_array(l.mean, outputs);
+        l.rolling_variance_gpu = cuda_make_array(l.variance, outputs);
+
+        l.mean_delta_gpu = cuda_make_array(l.mean, outputs);
+        l.variance_delta_gpu = cuda_make_array(l.variance, outputs);
+
+        l.x_gpu = cuda_make_array(l.output, l.batch*outputs);
+        l.x_norm_gpu = cuda_make_array(l.output, l.batch*outputs);
+#ifdef CUDNN
+        cudnnCreateTensorDescriptor(&l.normTensorDesc);
+        cudnnCreateTensorDescriptor(&l.dstTensorDesc);
+        cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
+        cudnnSetTensor4dDescriptor(l.normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l.out_c, 1, 1);
 #endif
-    layer->activation = activation;
-    fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
-    return layer;
+    }
+#endif
+    l.activation = activation;
+    fprintf(stderr, "connected                            %4d  ->  %4d\n", inputs, outputs);
+    return l;
 }
 
-void secret_update_connected_layer(connected_layer *layer)
+void update_connected_layer(connected_layer l, int batch, float learning_rate, float momentum, float decay)
 {
-    int n = layer->outputs*layer->inputs;
-    float dot = dot_cpu(n, layer->weight_updates, 1, layer->weight_prev, 1);
-    float mag = sqrt(dot_cpu(n, layer->weight_updates, 1, layer->weight_updates, 1))
-                * sqrt(dot_cpu(n, layer->weight_prev, 1, layer->weight_prev, 1));
-    float cos = dot/mag;
-    if(cos > .3) layer->learning_rate *= 1.1;
-    else if (cos < -.3) layer-> learning_rate /= 1.1;
+    axpy_cpu(l.outputs, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+    scal_cpu(l.outputs, momentum, l.bias_updates, 1);
 
-    scal_cpu(n, layer->momentum, layer->weight_prev, 1);
-    axpy_cpu(n, 1, layer->weight_updates, 1, layer->weight_prev, 1);
-    scal_cpu(n, 0, layer->weight_updates, 1);
+    if(l.batch_normalize){
+        axpy_cpu(l.outputs, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
+        scal_cpu(l.outputs, momentum, l.scale_updates, 1);
+    }
 
-    scal_cpu(layer->outputs, layer->momentum, layer->bias_prev, 1);
-    axpy_cpu(layer->outputs, 1, layer->bias_updates, 1, layer->bias_prev, 1);
-    scal_cpu(layer->outputs, 0, layer->bias_updates, 1);
-
-    //printf("rate:   %f\n", layer->learning_rate);
-
-    axpy_cpu(layer->outputs, layer->learning_rate, layer->bias_prev, 1, layer->biases, 1);
-
-    axpy_cpu(layer->inputs*layer->outputs, -layer->decay, layer->weights, 1, layer->weight_prev, 1);
-    axpy_cpu(layer->inputs*layer->outputs, layer->learning_rate, layer->weight_prev, 1, layer->weights, 1);
+    axpy_cpu(l.inputs*l.outputs, -decay*batch, l.weights, 1, l.weight_updates, 1);
+    axpy_cpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
+    scal_cpu(l.inputs*l.outputs, momentum, l.weight_updates, 1);
 }
 
-void update_connected_layer(connected_layer layer)
-{
-    axpy_cpu(layer.outputs, layer.learning_rate, layer.bias_updates, 1, layer.biases, 1);
-    scal_cpu(layer.outputs, layer.momentum, layer.bias_updates, 1);
-
-    axpy_cpu(layer.inputs*layer.outputs, -layer.decay, layer.weights, 1, layer.weight_updates, 1);
-    axpy_cpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates, 1, layer.weights, 1);
-    scal_cpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates, 1);
-}
-
-void forward_connected_layer(connected_layer layer, float *input)
+void forward_connected_layer(connected_layer l, network_state state)
 {
     int i;
-    for(i = 0; i < layer.batch; ++i){
-        copy_cpu(layer.outputs, layer.biases, 1, layer.output + i*layer.outputs, 1);
+    fill_cpu(l.outputs*l.batch, 0, l.output, 1);
+    int m = l.batch;
+    int k = l.inputs;
+    int n = l.outputs;
+    float *a = state.input;
+    float *b = l.weights;
+    float *c = l.output;
+    gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
+    if(l.batch_normalize){
+        if(state.train){
+            mean_cpu(l.output, l.batch, l.outputs, 1, l.mean);
+            variance_cpu(l.output, l.mean, l.batch, l.outputs, 1, l.variance);
+
+            scal_cpu(l.outputs, .95, l.rolling_mean, 1);
+            axpy_cpu(l.outputs, .05, l.mean, 1, l.rolling_mean, 1);
+            scal_cpu(l.outputs, .95, l.rolling_variance, 1);
+            axpy_cpu(l.outputs, .05, l.variance, 1, l.rolling_variance, 1);
+
+            copy_cpu(l.outputs*l.batch, l.output, 1, l.x, 1);
+            normalize_cpu(l.output, l.mean, l.variance, l.batch, l.outputs, 1);   
+            copy_cpu(l.outputs*l.batch, l.output, 1, l.x_norm, 1);
+        } else {
+            normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.outputs, 1);
+        }
+        scale_bias(l.output, l.scales, l.batch, l.outputs, 1);
     }
-    int m = layer.batch;
-    int k = layer.inputs;
-    int n = layer.outputs;
-    float *a = input;
-    float *b = layer.weights;
-    float *c = layer.output;
-    gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
-    activate_array(layer.output, layer.outputs*layer.batch, layer.activation);
+    for(i = 0; i < l.batch; ++i){
+        axpy_cpu(l.outputs, 1, l.biases, 1, l.output + i*l.outputs, 1);
+    }
+    activate_array(l.output, l.outputs*l.batch, l.activation);
 }
 
-void backward_connected_layer(connected_layer layer, float *input, float *delta)
+void backward_connected_layer(connected_layer l, network_state state)
 {
     int i;
-    gradient_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.delta);
-    for(i = 0; i < layer.batch; ++i){
-        axpy_cpu(layer.outputs, 1, layer.delta + i*layer.outputs, 1, layer.bias_updates, 1);
+    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+    for(i = 0; i < l.batch; ++i){
+        axpy_cpu(l.outputs, 1, l.delta + i*l.outputs, 1, l.bias_updates, 1);
     }
-    int m = layer.inputs;
-    int k = layer.batch;
-    int n = layer.outputs;
-    float *a = input;
-    float *b = layer.delta;
-    float *c = layer.weight_updates;
+    if(l.batch_normalize){
+        backward_scale_cpu(l.x_norm, l.delta, l.batch, l.outputs, 1, l.scale_updates);
+
+        scale_bias(l.delta, l.scales, l.batch, l.outputs, 1);
+
+        mean_delta_cpu(l.delta, l.variance, l.batch, l.outputs, 1, l.mean_delta);
+        variance_delta_cpu(l.x, l.delta, l.mean, l.variance, l.batch, l.outputs, 1, l.variance_delta);
+        normalize_delta_cpu(l.x, l.mean, l.variance, l.mean_delta, l.variance_delta, l.batch, l.outputs, 1, l.delta);
+    }
+
+    int m = l.outputs;
+    int k = l.batch;
+    int n = l.inputs;
+    float *a = l.delta;
+    float *b = state.input;
+    float *c = l.weight_updates;
     gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
 
-    m = layer.batch;
-    k = layer.outputs;
-    n = layer.inputs;
+    m = l.batch;
+    k = l.outputs;
+    n = l.inputs;
 
-    a = layer.delta;
-    b = layer.weights;
-    c = delta;
+    a = l.delta;
+    b = l.weights;
+    c = state.delta;
 
-    if(c) gemm(0,1,m,n,k,1,a,k,b,k,0,c,n);
+    if(c) gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
+}
+
+
+void denormalize_connected_layer(layer l)
+{
+    int i, j;
+    for(i = 0; i < l.outputs; ++i){
+        float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .000001);
+        for(j = 0; j < l.inputs; ++j){
+            l.weights[i*l.inputs + j] *= scale;
+        }
+        l.biases[i] -= l.rolling_mean[i] * scale;
+        l.scales[i] = 1;
+        l.rolling_mean[i] = 0;
+        l.rolling_variance[i] = 1;
+    }
+}
+
+
+void statistics_connected_layer(layer l)
+{
+    if(l.batch_normalize){
+        printf("Scales ");
+        print_statistics(l.scales, l.outputs);
+        /*
+        printf("Rolling Mean ");
+        print_statistics(l.rolling_mean, l.outputs);
+        printf("Rolling Variance ");
+        print_statistics(l.rolling_variance, l.outputs);
+        */
+    }
+    printf("Biases ");
+    print_statistics(l.biases, l.outputs);
+    printf("Weights ");
+    print_statistics(l.weights, l.outputs);
 }
 
 #ifdef GPU
 
-void pull_connected_layer(connected_layer layer)
+void pull_connected_layer(connected_layer l)
 {
-    cuda_pull_array(layer.weights_gpu, layer.weights, layer.inputs*layer.outputs);
-    cuda_pull_array(layer.biases_gpu, layer.biases, layer.outputs);
-    cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.inputs*layer.outputs);
-    cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.outputs);
+    cuda_pull_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
+    cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
+    cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
+    cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
+    if (l.batch_normalize){
+        cuda_pull_array(l.scales_gpu, l.scales, l.outputs);
+        cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
+        cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
+    }
 }
 
-void push_connected_layer(connected_layer layer)
+void push_connected_layer(connected_layer l)
 {
-    cuda_push_array(layer.weights_gpu, layer.weights, layer.inputs*layer.outputs);
-    cuda_push_array(layer.biases_gpu, layer.biases, layer.outputs);
-    cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.inputs*layer.outputs);
-    cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.outputs);
+    cuda_push_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
+    cuda_push_array(l.biases_gpu, l.biases, l.outputs);
+    cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
+    cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
+    if (l.batch_normalize){
+        cuda_push_array(l.scales_gpu, l.scales, l.outputs);
+        cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
+        cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
+    }
 }
 
-void update_connected_layer_gpu(connected_layer layer)
+void update_connected_layer_gpu(connected_layer l, int batch, float learning_rate, float momentum, float decay)
 {
-    axpy_ongpu(layer.outputs, layer.learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
-    scal_ongpu(layer.outputs, layer.momentum, layer.bias_updates_gpu, 1);
+    axpy_ongpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
+    scal_ongpu(l.outputs, momentum, l.bias_updates_gpu, 1);
 
-    axpy_ongpu(layer.inputs*layer.outputs, -layer.decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
-    axpy_ongpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
-    scal_ongpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates_gpu, 1);
-    //pull_connected_layer(layer);
+    if(l.batch_normalize){
+        axpy_ongpu(l.outputs, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
+        scal_ongpu(l.outputs, momentum, l.scale_updates_gpu, 1);
+    }
+
+    axpy_ongpu(l.inputs*l.outputs, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
+    axpy_ongpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
+    scal_ongpu(l.inputs*l.outputs, momentum, l.weight_updates_gpu, 1);
 }
 
-void forward_connected_layer_gpu(connected_layer layer, float * input)
+void forward_connected_layer_gpu(connected_layer l, network_state state)
 {
     int i;
-    for(i = 0; i < layer.batch; ++i){
-        copy_ongpu_offset(layer.outputs, layer.biases_gpu, 0, 1, layer.output_gpu, i*layer.outputs, 1);
-    }
-    int m = layer.batch;
-    int k = layer.inputs;
-    int n = layer.outputs;
-    float * a = input;
-    float * b = layer.weights_gpu;
-    float * c = layer.output_gpu;
-    gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
-    activate_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation);
+    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
+
+    int m = l.batch;
+    int k = l.inputs;
+    int n = l.outputs;
+    float * a = state.input;
+    float * b = l.weights_gpu;
+    float * c = l.output_gpu;
+    gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
+    if (l.batch_normalize) {
+        forward_batchnorm_layer_gpu(l, state);
+    }
+    else {
+        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.outputs, 1);
+    }
+    //for(i = 0; i < l.batch; ++i) axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
+    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
 }
 
-void backward_connected_layer_gpu(connected_layer layer, float * input, float * delta)
+void backward_connected_layer_gpu(connected_layer l, network_state state)
 {
     int i;
-    gradient_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation, layer.delta_gpu);
-    for(i = 0; i < layer.batch; ++i){
-        axpy_ongpu_offset(layer.outputs, 1, layer.delta_gpu, i*layer.outputs, 1, layer.bias_updates_gpu, 0, 1);
+    constrain_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
+    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+    for(i = 0; i < l.batch; ++i){
+        axpy_ongpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1);
     }
-    int m = layer.inputs;
-    int k = layer.batch;
-    int n = layer.outputs;
-    float * a = input;
-    float * b = layer.delta_gpu;
-    float * c = layer.weight_updates_gpu;
+
+    if(l.batch_normalize){
+        backward_batchnorm_layer_gpu(l, state);
+    }
+
+    int m = l.outputs;
+    int k = l.batch;
+    int n = l.inputs;
+    float * a = l.delta_gpu;
+    float * b = state.input;
+    float * c = l.weight_updates_gpu;
     gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
 
-    m = layer.batch;
-    k = layer.outputs;
-    n = layer.inputs;
+    m = l.batch;
+    k = l.outputs;
+    n = l.inputs;
 
-    a = layer.delta_gpu;
-    b = layer.weights_gpu;
-    c = delta;
+    a = l.delta_gpu;
+    b = l.weights_gpu;
+    c = state.delta;
 
-    if(c) gemm_ongpu(0,1,m,n,k,1,a,k,b,k,0,c,n);
+    if(c) gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
 }
 #endif
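
For reference, the new update_connected_layer() applies momentum SGD with L2
weight decay through BLAS-style helpers (axpy_cpu/scal_cpu). A minimal
element-wise sketch of the same weight update, under the assumption that it is
equivalent to those calls (update_weights_elementwise is a hypothetical helper
name, not part of the patch):

/* Sketch only: what the axpy_cpu/scal_cpu sequence computes per weight. */
void update_weights_elementwise(float *weights, float *weight_updates, int n,
                                int batch, float learning_rate,
                                float momentum, float decay)
{
    for (int i = 0; i < n; ++i) {
        /* axpy_cpu(n, -decay*batch, weights, 1, weight_updates, 1): add L2 decay */
        weight_updates[i] += -decay * batch * weights[i];
        /* axpy_cpu(n, learning_rate/batch, weight_updates, 1, weights, 1): step */
        weights[i] += (learning_rate / batch) * weight_updates[i];
        /* scal_cpu(n, momentum, weight_updates, 1): keep momentum for next batch */
        weight_updates[i] *= momentum;
    }
}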

--
Gitblit v1.10.0