From d9f1b0b16edeb59281355a855e18a8be343fc33c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 08 Aug 2014 19:04:15 +0000
Subject: [PATCH] probably how maxpool layers should be

---
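Notes (not part of the commit message): this change moves the connected
layer from per-example double-precision loops to batched single-precision
math. The learning rate, momentum, and decay now live on the layer struct
instead of being passed to update_connected_layer(), the old
learn_connected_layer() is folded into backward_connected_layer(), and the
forward and backward passes go through gemm() from mini_blas. The update
step is momentum SGD with weight decay: weight_momentum =
learning_rate*(weight_updates - decay*weights) + momentum*weight_momentum,
then weights += weight_momentum.

As a rough sketch only, assuming mini_blas takes arguments in the order
gemm(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc) and that the
weights are laid out input-major (weights[in*outputs + out]), the forward
call is equivalent to the naive loops below; forward_naive() and its
parameters are hypothetical names that just mirror the layer fields:

    /* Hypothetical loop form of the batched forward pass: output is
     * preloaded with the biases, then output += input * weights,
     * one row per example. */
    void forward_naive(int batch, int inputs, int outputs, float *input,
                       float *weights, float *biases, float *output)
    {
        int b, o, in;
        for(b = 0; b < batch; ++b){
            for(o = 0; o < outputs; ++o){
                float sum = biases[o];
                for(in = 0; in < inputs; ++in){
                    sum += input[b*inputs + in] * weights[in*outputs + o];
                }
                output[b*outputs + o] = sum;   /* activation applied afterwards */
            }
        }
    }
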
 src/connected_layer.c |  124 +++++++++++++++++++++++------------------
 1 files changed, 69 insertions(+), 55 deletions(-)
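
A similar note on the backward pass: layer.delta is first scaled in place
by the activation gradient and summed per output into bias_updates, then
the first gemm() accumulates the weight gradient (input^T * delta, BETA=1)
and the second overwrites the previous layer's delta with delta * weights^T
(BETA=0), skipped when that pointer is NULL. Under the same assumed gemm
convention, a hypothetical loop equivalent (backward_naive() is not part of
the patch):

    #include <string.h>

    /* Hypothetical loop form of the two backward gemm calls;
     * prev_delta may be NULL when there is no previous layer. */
    void backward_naive(int batch, int inputs, int outputs, float *input,
                        float *delta, float *weights, float *weight_updates,
                        float *prev_delta)
    {
        int b, o, in;
        /* BETA=0 in the second gemm overwrites the previous delta, so clear it. */
        if(prev_delta) memset(prev_delta, 0, batch*inputs*sizeof(float));
        for(b = 0; b < batch; ++b){
            for(o = 0; o < outputs; ++o){
                float d = delta[b*outputs + o];   /* already scaled by activation gradient */
                for(in = 0; in < inputs; ++in){
                    weight_updates[in*outputs + o] += input[b*inputs + in] * d;
                    if(prev_delta) prev_delta[b*inputs + in] += d * weights[in*outputs + o];
                }
            }
        }
    }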

diff --git a/src/connected_layer.c b/src/connected_layer.c
index 99f146b..368fb63 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -1,32 +1,42 @@
 #include "connected_layer.h"
 #include "utils.h"
+#include "mini_blas.h"
 
 #include <math.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
-connected_layer *make_connected_layer(int inputs, int outputs, ACTIVATION activation)
+connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, float learning_rate, float momentum, float decay)
 {
-    printf("Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
+    fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
     int i;
     connected_layer *layer = calloc(1, sizeof(connected_layer));
+
+    layer->learning_rate = learning_rate;
+    layer->momentum = momentum;
+    layer->decay = decay;
+
     layer->inputs = inputs;
     layer->outputs = outputs;
+    layer->batch = batch;
 
-    layer->output = calloc(outputs, sizeof(double*));
-    layer->delta = calloc(outputs, sizeof(double*));
+    layer->output = calloc(batch*outputs, sizeof(float));
+    layer->delta = calloc(batch*outputs, sizeof(float));
 
-    layer->weight_updates = calloc(inputs*outputs, sizeof(double));
-    layer->weight_momentum = calloc(inputs*outputs, sizeof(double));
-    layer->weights = calloc(inputs*outputs, sizeof(double));
-    double scale = 2./inputs;
+    layer->weight_updates = calloc(inputs*outputs, sizeof(float));
+    layer->weight_adapt = calloc(inputs*outputs, sizeof(float));
+    layer->weight_momentum = calloc(inputs*outputs, sizeof(float));
+    layer->weights = calloc(inputs*outputs, sizeof(float));
+    float scale = 1./inputs;
+    //scale = .01;
     for(i = 0; i < inputs*outputs; ++i)
-        layer->weights[i] = rand_normal()*scale;
+        layer->weights[i] = scale*(rand_uniform()-.5);
 
-    layer->bias_updates = calloc(outputs, sizeof(double));
-    layer->bias_momentum = calloc(outputs, sizeof(double));
-    layer->biases = calloc(outputs, sizeof(double));
+    layer->bias_updates = calloc(outputs, sizeof(float));
+    layer->bias_adapt = calloc(outputs, sizeof(float));
+    layer->bias_momentum = calloc(outputs, sizeof(float));
+    layer->biases = calloc(outputs, sizeof(float));
     for(i = 0; i < outputs; ++i)
         //layer->biases[i] = rand_normal()*scale + scale;
         layer->biases[i] = 1;
@@ -35,56 +45,60 @@
     return layer;
 }
 
-void forward_connected_layer(connected_layer layer, double *input)
+void update_connected_layer(connected_layer layer)
 {
-    int i, j;
+    int i;
     for(i = 0; i < layer.outputs; ++i){
-        layer.output[i] = layer.biases[i];
-        for(j = 0; j < layer.inputs; ++j){
-            layer.output[i] += input[j]*layer.weights[i*layer.inputs + j];
-        }
-        layer.output[i] = activate(layer.output[i], layer.activation);
-    }
-}
-
-void learn_connected_layer(connected_layer layer, double *input)
-{
-    int i, j;
-    for(i = 0; i < layer.outputs; ++i){
-        layer.delta[i] *= gradient(layer.output[i], layer.activation);
-        layer.bias_updates[i] += layer.delta[i];
-        for(j = 0; j < layer.inputs; ++j){
-            layer.weight_updates[i*layer.inputs + j] += layer.delta[i]*input[j];
-        }
-    }
-}
-
-void update_connected_layer(connected_layer layer, double step, double momentum, double decay)
-{
-    int i,j;
-    for(i = 0; i < layer.outputs; ++i){
-        layer.bias_momentum[i] = step*(layer.bias_updates[i]) + momentum*layer.bias_momentum[i];
+        layer.bias_momentum[i] = layer.learning_rate*(layer.bias_updates[i]) + layer.momentum*layer.bias_momentum[i];
         layer.biases[i] += layer.bias_momentum[i];
-        for(j = 0; j < layer.inputs; ++j){
-            int index = i*layer.inputs+j;
-            layer.weight_momentum[index] = step*(layer.weight_updates[index] - decay*layer.weights[index]) + momentum*layer.weight_momentum[index];
-            layer.weights[index] += layer.weight_momentum[index];
-            //layer.weights[index] = constrain(layer.weights[index], 100.);
-        }
     }
-    memset(layer.bias_updates, 0, layer.outputs*sizeof(double));
-    memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(double));
+    for(i = 0; i < layer.outputs*layer.inputs; ++i){
+        layer.weight_momentum[i] = layer.learning_rate*(layer.weight_updates[i] - layer.decay*layer.weights[i]) + layer.momentum*layer.weight_momentum[i];
+        layer.weights[i] += layer.weight_momentum[i];
+    }
+    memset(layer.bias_updates, 0, layer.outputs*sizeof(float));
+    memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(float));
 }
 
-void backward_connected_layer(connected_layer layer, double *input, double *delta)
+void forward_connected_layer(connected_layer layer, float *input)
 {
-    int i, j;
-
-    for(j = 0; j < layer.inputs; ++j){
-        delta[j] = 0;
-        for(i = 0; i < layer.outputs; ++i){
-            delta[j] += layer.delta[i]*layer.weights[i*layer.inputs + j];
-        }
+    int i;
+    for(i = 0; i < layer.batch; ++i){
+        memcpy(layer.output+i*layer.outputs, layer.biases, layer.outputs*sizeof(float));
     }
+    int m = layer.batch;
+    int k = layer.inputs;
+    int n = layer.outputs;
+    float *a = input;
+    float *b = layer.weights;
+    float *c = layer.output;
+    gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
+    activate_array(layer.output, layer.outputs*layer.batch, layer.activation);
+}
+
+void backward_connected_layer(connected_layer layer, float *input, float *delta)
+{
+    int i;
+    for(i = 0; i < layer.outputs*layer.batch; ++i){
+        layer.delta[i] *= gradient(layer.output[i], layer.activation);
+        layer.bias_updates[i%layer.outputs] += layer.delta[i];
+    }
+    int m = layer.inputs;
+    int k = layer.batch;
+    int n = layer.outputs;
+    float *a = input;
+    float *b = layer.delta;
+    float *c = layer.weight_updates;
+    gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
+
+    m = layer.batch;
+    k = layer.outputs;
+    n = layer.inputs;
+
+    a = layer.delta;
+    b = layer.weights;
+    c = delta;
+
+    if(c) gemm(0,1,m,n,k,1,a,k,b,k,0,c,n);
 }
 
