From 2db9fbef2bd7d35a547d0018a9850f6b249c524f Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Wed, 13 Nov 2013 18:50:38 +0000
Subject: [PATCH] Parsing, image loading, lots of stuff

---
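Notes: make_connected_layer() now heap-allocates the layer and returns a
pointer, run_connected_layer() becomes forward_connected_layer(), the old
calculate_update/backpropagate pair is folded into learn_connected_layer()
and backward_connected_layer(), and update_connected_layer() gains momentum
and weight-decay terms. A minimal sketch of the new calling convention
(the sizes, hyperparameters, and the loss-derivative step below are
illustrative, not part of this patch):

    double input[784] = {0};
    double prev_delta[784] = {0};
    connected_layer *layer = make_connected_layer(784, 10, RELU);

    forward_connected_layer(*layer, input);         /* fills layer->output */
    /* ... set layer->delta to the error of each output ... */
    learn_connected_layer(*layer, input);           /* accumulate updates */
    backward_connected_layer(*layer, input, prev_delta);
    update_connected_layer(*layer, .01, .9, .0005); /* step, momentum, decay */
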
 src/connected_layer.c |   86 ++++++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 39 deletions(-)

diff --git a/src/connected_layer.c b/src/connected_layer.c
index 11143b9..d77a10c 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -1,43 +1,52 @@
 #include "connected_layer.h"
 
 #include <math.h>
+#include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
-connected_layer make_connected_layer(int inputs, int outputs, ACTIVATOR_TYPE activator)
+connected_layer *make_connected_layer(int inputs, int outputs, ACTIVATION activator)
 {
+    printf("Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
     int i;
-    connected_layer layer;
-    layer.inputs = inputs;
-    layer.outputs = outputs;
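+    // allocate the layer on the heap and return it by pointer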
+    connected_layer *layer = calloc(1, sizeof(connected_layer));
+    layer->inputs = inputs;
+    layer->outputs = outputs;
 
-    layer.output = calloc(outputs, sizeof(double*));
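+    // per-output activations and error terms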
+    layer->output = calloc(outputs, sizeof(double));
+    layer->delta = calloc(outputs, sizeof(double));
 
-    layer.weight_updates = calloc(inputs*outputs, sizeof(double));
-    layer.weights = calloc(inputs*outputs, sizeof(double));
+    layer->weight_updates = calloc(inputs*outputs, sizeof(double));
+    layer->weight_momentum = calloc(inputs*outputs, sizeof(double));
+    layer->weights = calloc(inputs*outputs, sizeof(double));
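+    // initialize weights to small random values in [-.005, .005]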
     for(i = 0; i < inputs*outputs; ++i)
-        layer.weights[i] = .5 - (double)rand()/RAND_MAX;
+        layer->weights[i] = .01*(.5 - (double)rand()/RAND_MAX);
 
-    layer.bias_updates = calloc(outputs, sizeof(double));
-    layer.biases = calloc(outputs, sizeof(double));
+    layer->bias_updates = calloc(outputs, sizeof(double));
+    layer->bias_momentum = calloc(outputs, sizeof(double));
+    layer->biases = calloc(outputs, sizeof(double));
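+    // initialize all biases to 1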
     for(i = 0; i < outputs; ++i)
-        layer.biases[i] = (double)rand()/RAND_MAX;
+        layer->biases[i] = 1;
 
     if(activator == SIGMOID){
-        layer.activation = sigmoid_activation;
-        layer.gradient = sigmoid_gradient;
+        layer->activation = sigmoid_activation;
+        layer->gradient = sigmoid_gradient;
     }else if(activator == RELU){
-        layer.activation = relu_activation;
-        layer.gradient = relu_gradient;
+        layer->activation = relu_activation;
+        layer->gradient = relu_gradient;
     }else if(activator == IDENTITY){
-        layer.activation = identity_activation;
-        layer.gradient = identity_gradient;
+        layer->activation = identity_activation;
+        layer->gradient = identity_gradient;
     }
 
     return layer;
 }
 
-void run_connected_layer(double *input, connected_layer layer)
+void forward_connected_layer(connected_layer layer, double *input)
 {
     int i, j;
     for(i = 0; i < layer.outputs; ++i){
@@ -49,48 +58,47 @@
     }
 }
 
-void learn_connected_layer(double *input, connected_layer layer)
+void learn_connected_layer(connected_layer layer, double *input)
 {
-    calculate_update_connected_layer(input, layer);
-    backpropagate_connected_layer(input, layer);
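+    // accumulate parameter gradients: bias gets delta, weight gets delta*input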
+    int i, j;
+    for(i = 0; i < layer.outputs; ++i){
+        layer.bias_updates[i] += layer.delta[i];
+        for(j = 0; j < layer.inputs; ++j){
+            layer.weight_updates[i*layer.inputs + j] += layer.delta[i]*input[j];
+        }
+    }
 }
 
-void update_connected_layer(connected_layer layer, double step)
+void update_connected_layer(connected_layer layer, double step, double momentum, double decay)
 {
     int i,j;
     for(i = 0; i < layer.outputs; ++i){
-        layer.biases[i] += step*layer.bias_updates[i];
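+        // momentum update with weight decay: v = step*(grad - decay*param) + momentum*v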
+        layer.bias_momentum[i] = step*(layer.bias_updates[i] - decay*layer.biases[i]) + momentum*layer.bias_momentum[i];
+        layer.biases[i] += layer.bias_momentum[i];
         for(j = 0; j < layer.inputs; ++j){
             int index = i*layer.inputs+j;
-            layer.weights[index] += step*layer.weight_updates[index];
+            layer.weight_momentum[index] = step*(layer.weight_updates[index] - decay*layer.weights[index]) + momentum*layer.weight_momentum[index];
+            layer.weights[index] += layer.weight_momentum[index];
         }
     }
     memset(layer.bias_updates, 0, layer.outputs*sizeof(double));
     memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(double));
 }
 
-void calculate_update_connected_layer(double *input, connected_layer layer)
-{
-    int i, j;
-    for(i = 0; i < layer.outputs; ++i){
-        layer.bias_updates[i] += layer.output[i];
-        for(j = 0; j < layer.inputs; ++j){
-            layer.weight_updates[i*layer.inputs + j] += layer.output[i]*input[j];
-        }
-    }
-}
-
-void backpropagate_connected_layer(double *input, connected_layer layer)
+void backward_connected_layer(connected_layer layer, double *input, double *delta)
 {
     int i, j;
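+    // backward pass: delta[j] = gradient(input[j]) * sum_i delta[i]*weights[i*inputs + j]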
 
     for(j = 0; j < layer.inputs; ++j){
         double grad = layer.gradient(input[j]);
-        input[j] = 0;
+        delta[j] = 0;
         for(i = 0; i < layer.outputs; ++i){
-            input[j] += layer.output[i]*layer.weights[i*layer.inputs + j];
+            delta[j] += layer.delta[i]*layer.weights[i*layer.inputs + j];
         }
-        input[j] *= grad;
+        delta[j] *= grad;
     }
 }
 

--
Gitblit v1.10.0