From 989ab8c38a02fa7ea9c25108151736c62e81c972 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 24 Apr 2015 17:27:50 +0000
Subject: [PATCH] IOU loss function

---
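A quick note on the initialization change in make_connected_layer(): weights now
come from a uniform draw in [-scale, scale] with scale = 1/sqrt(inputs) instead of
scale*rand_normal(). A minimal standalone sketch of the same scheme, assuming
rand_uniform() returns a float in [0,1] (the helper name is taken from the diff;
its exact range is an assumption):

    #include <math.h>
    #include <stdlib.h>

    /* Stand-in for rand_uniform(): uniform float in [0,1]. */
    static float rand_uniform01(void) { return (float)rand() / (float)RAND_MAX; }

    static void init_uniform(float *weights, int inputs, int outputs)
    {
        float scale = 1.f / sqrtf((float)inputs);
        for(int i = 0; i < inputs*outputs; ++i){
            /* map [0,1] to [-scale, scale], as in the hunk below */
            weights[i] = 2*scale*rand_uniform01() - scale;
        }
    }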
 src/connected_layer.c |   80 ++++++++++++----------------------------
 1 file changed, 24 insertions(+), 56 deletions(-)
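The headline change is update_connected_layer(): learning_rate, momentum and decay
are no longer stored on the layer but are passed in per call, and the accumulated
gradients are scaled by 1/batch. A per-weight view of what the axpy/scal sequence
computes (sgd_step is a hypothetical helper, shown only to spell out the
arithmetic; the bias update is the same minus the decay term):

    /* Sketch of one SGD step with momentum and L2 weight decay. */
    static void sgd_step(float *weight, float *weight_update, int batch,
                         float learning_rate, float momentum, float decay)
    {
        *weight_update += -decay * batch * (*weight);                  /* weight decay        */
        *weight        += (learning_rate / batch) * (*weight_update);  /* gradient step       */
        *weight_update *= momentum;                                    /* momentum carry-over */
    }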

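forward_connected_layer() and backward_connected_layer() now take a network_state
instead of raw input/delta pointers, but the math is unchanged. Roughly, the
gemm(0,0,...) call in the forward pass adds, for each batch row,
sum_i input[b][i] * weights[i][o] on top of whatever the per-batch loop above it
put in the output buffer (presumably the biases; that loop body is elided between
hunks). An equivalent scalar loop, as a sketch only, assuming the row-major
layouts implied by the leading dimensions:

    /* Scalar equivalent of gemm(0,0,m,n,k,1,input,k,weights,n,1,output,n). */
    for(int b = 0; b < batch; ++b){
        for(int o = 0; o < outputs; ++o){
            float sum = output[b*outputs + o];   /* beta = 1: accumulate into output */
            for(int i = 0; i < inputs; ++i){
                sum += input[b*inputs + i] * weights[i*outputs + o];
            }
            output[b*outputs + o] = sum;
        }
    }

The activation is then applied over the whole batch by activate_array().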
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 254d39e..bdab6d8 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -9,15 +9,11 @@
 #include <stdlib.h>
 #include <string.h>
 
-connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, float learning_rate, float momentum, float decay)
+connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation)
 {
     int i;
     connected_layer *layer = calloc(1, sizeof(connected_layer));
 
-    layer->learning_rate = learning_rate;
-    layer->momentum = momentum;
-    layer->decay = decay;
-
     layer->inputs = inputs;
     layer->outputs = outputs;
     layer->batch=batch;
@@ -36,9 +32,8 @@
 
 
     float scale = 1./sqrt(inputs);
-    //scale = .01;
     for(i = 0; i < inputs*outputs; ++i){
-        layer->weights[i] = scale*rand_normal();
+        layer->weights[i] = 2*scale*rand_uniform() - scale;
     }
 
     for(i = 0; i < outputs; ++i){
@@ -60,43 +55,17 @@
     return layer;
 }
 
-void secret_update_connected_layer(connected_layer *layer)
+void update_connected_layer(connected_layer layer, int batch, float learning_rate, float momentum, float decay)
 {
-    int n = layer->outputs*layer->inputs;
-    float dot = dot_cpu(n, layer->weight_updates, 1, layer->weight_prev, 1);
-    float mag = sqrt(dot_cpu(n, layer->weight_updates, 1, layer->weight_updates, 1))
-                * sqrt(dot_cpu(n, layer->weight_prev, 1, layer->weight_prev, 1));
-    float cos = dot/mag;
-    if(cos > .3) layer->learning_rate *= 1.1;
-    else if (cos < -.3) layer-> learning_rate /= 1.1;
+    axpy_cpu(layer.outputs, learning_rate/batch, layer.bias_updates, 1, layer.biases, 1);
+    scal_cpu(layer.outputs, momentum, layer.bias_updates, 1);
 
-    scal_cpu(n, layer->momentum, layer->weight_prev, 1);
-    axpy_cpu(n, 1, layer->weight_updates, 1, layer->weight_prev, 1);
-    scal_cpu(n, 0, layer->weight_updates, 1);
-
-    scal_cpu(layer->outputs, layer->momentum, layer->bias_prev, 1);
-    axpy_cpu(layer->outputs, 1, layer->bias_updates, 1, layer->bias_prev, 1);
-    scal_cpu(layer->outputs, 0, layer->bias_updates, 1);
-
-    //printf("rate:   %f\n", layer->learning_rate);
-
-    axpy_cpu(layer->outputs, layer->learning_rate, layer->bias_prev, 1, layer->biases, 1);
-
-    axpy_cpu(layer->inputs*layer->outputs, -layer->decay, layer->weights, 1, layer->weight_prev, 1);
-    axpy_cpu(layer->inputs*layer->outputs, layer->learning_rate, layer->weight_prev, 1, layer->weights, 1);
+    axpy_cpu(layer.inputs*layer.outputs, -decay*batch, layer.weights, 1, layer.weight_updates, 1);
+    axpy_cpu(layer.inputs*layer.outputs, learning_rate/batch, layer.weight_updates, 1, layer.weights, 1);
+    scal_cpu(layer.inputs*layer.outputs, momentum, layer.weight_updates, 1);
 }
 
-void update_connected_layer(connected_layer layer)
-{
-    axpy_cpu(layer.outputs, layer.learning_rate, layer.bias_updates, 1, layer.biases, 1);
-    scal_cpu(layer.outputs, layer.momentum, layer.bias_updates, 1);
-
-    axpy_cpu(layer.inputs*layer.outputs, -layer.decay, layer.weights, 1, layer.weight_updates, 1);
-    axpy_cpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates, 1, layer.weights, 1);
-    scal_cpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates, 1);
-}
-
-void forward_connected_layer(connected_layer layer, float *input)
+void forward_connected_layer(connected_layer layer, network_state state)
 {
     int i;
     for(i = 0; i < layer.batch; ++i){
@@ -105,14 +74,14 @@
     int m = layer.batch;
     int k = layer.inputs;
     int n = layer.outputs;
-    float *a = input;
+    float *a = state.input;
     float *b = layer.weights;
     float *c = layer.output;
     gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
     activate_array(layer.output, layer.outputs*layer.batch, layer.activation);
 }
 
-void backward_connected_layer(connected_layer layer, float *input, float *delta)
+void backward_connected_layer(connected_layer layer, network_state state)
 {
     int i;
     gradient_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.delta);
@@ -122,7 +91,7 @@
     int m = layer.inputs;
     int k = layer.batch;
     int n = layer.outputs;
-    float *a = input;
+    float *a = state.input;
     float *b = layer.delta;
     float *c = layer.weight_updates;
     gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
@@ -133,7 +102,7 @@
 
     a = layer.delta;
     b = layer.weights;
-    c = delta;
+    c = state.delta;
 
     if(c) gemm(0,1,m,n,k,1,a,k,b,k,0,c,n);
 }
@@ -156,18 +125,17 @@
     cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.outputs);
 }
 
-void update_connected_layer_gpu(connected_layer layer)
+void update_connected_layer_gpu(connected_layer layer, int batch, float learning_rate, float momentum, float decay)
 {
-    axpy_ongpu(layer.outputs, layer.learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
-    scal_ongpu(layer.outputs, layer.momentum, layer.bias_updates_gpu, 1);
+    axpy_ongpu(layer.outputs, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
+    scal_ongpu(layer.outputs, momentum, layer.bias_updates_gpu, 1);
 
-    axpy_ongpu(layer.inputs*layer.outputs, -layer.decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
-    axpy_ongpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
-    scal_ongpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates_gpu, 1);
-    //pull_connected_layer(layer);
+    axpy_ongpu(layer.inputs*layer.outputs, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
+    axpy_ongpu(layer.inputs*layer.outputs, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
+    scal_ongpu(layer.inputs*layer.outputs, momentum, layer.weight_updates_gpu, 1);
 }
 
-void forward_connected_layer_gpu(connected_layer layer, float * input)
+void forward_connected_layer_gpu(connected_layer layer, network_state state)
 {
     int i;
     for(i = 0; i < layer.batch; ++i){
@@ -176,14 +144,14 @@
     int m = layer.batch;
     int k = layer.inputs;
     int n = layer.outputs;
-    float * a = input;
+    float * a = state.input;
     float * b = layer.weights_gpu;
     float * c = layer.output_gpu;
     gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
     activate_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation);
 }
 
-void backward_connected_layer_gpu(connected_layer layer, float * input, float * delta)
+void backward_connected_layer_gpu(connected_layer layer, network_state state)
 {
     int i;
     gradient_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation, layer.delta_gpu);
@@ -193,7 +161,7 @@
     int m = layer.inputs;
     int k = layer.batch;
     int n = layer.outputs;
-    float * a = input;
+    float * a = state.input;
     float * b = layer.delta_gpu;
     float * c = layer.weight_updates_gpu;
     gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
@@ -204,7 +172,7 @@
 
     a = layer.delta_gpu;
     b = layer.weights_gpu;
-    c = delta;
+    c = state.delta;
 
     if(c) gemm_ongpu(0,1,m,n,k,1,a,k,b,k,0,c,n);
 }

--
Gitblit v1.10.0