From 9b1774bd39d65614cdbd2d4e3815086298008911 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Wed, 06 Nov 2013 18:37:37 +0000
Subject: [PATCH] Connected layers work forward and backward!
---
src/connected_layer.c | 86 ++++++++++++++++++++++--------------------
1 file changed, 45 insertions(+), 41 deletions(-)
diff --git a/src/connected_layer.c b/src/connected_layer.c
index fe904ba..11143b9 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -1,19 +1,10 @@
#include "connected_layer.h"
+#include <math.h>
#include <stdlib.h>
#include <string.h>
-double activation(double x)
-{
- return x*(x>0);
-}
-
-double gradient(double x)
-{
- return (x>=0);
-}
-
-connected_layer make_connected_layer(int inputs, int outputs)
+connected_layer make_connected_layer(int inputs, int outputs, ACTIVATOR_TYPE activator)
{
int i;
connected_layer layer;
@@ -32,6 +23,17 @@
for(i = 0; i < outputs; ++i)
layer.biases[i] = (double)rand()/RAND_MAX;
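+ // bind the layer's activation/gradient function pointers to the requested
+ // activator; sigmoid_activation, relu_activation, and friends are presumably
+ // declared alongside ACTIVATOR_TYPE in connected_layer.h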
+ if(activator == SIGMOID){
+ layer.activation = sigmoid_activation;
+ layer.gradient = sigmoid_gradient;
+ }else if(activator == RELU){
+ layer.activation = relu_activation;
+ layer.gradient = relu_gradient;
+ }else if(activator == IDENTITY){
+ layer.activation = identity_activation;
+ layer.gradient = identity_gradient;
+ }
+
return layer;
}
@@ -41,39 +43,16 @@
for(i = 0; i < layer.outputs; ++i){
layer.output[i] = layer.biases[i];
for(j = 0; j < layer.inputs; ++j){
- layer.output[i] += input[j]*layer.weights[i*layer.outputs + j];
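+ // weights are row-major: row i holds the layer.inputs weights feeding
+ // output i, so the correct stride is layer.inputs, not layer.outputs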
+ layer.output[i] += input[j]*layer.weights[i*layer.inputs + j];
}
- layer.output[i] = activation(layer.output[i]);
+ layer.output[i] = layer.activation(layer.output[i]);
}
}
-void backpropagate_connected_layer(double *input, connected_layer layer)
+void learn_connected_layer(double *input, connected_layer layer)
{
- int i, j;
- double *old_input = calloc(layer.inputs, sizeof(double));
- memcpy(old_input, input, layer.inputs*sizeof(double));
- memset(input, 0, layer.inputs*sizeof(double));
-
- for(i = 0; i < layer.outputs; ++i){
- for(j = 0; j < layer.inputs; ++j){
- input[j] += layer.output[i]*layer.weights[i*layer.outputs + j];
- }
- }
- for(j = 0; j < layer.inputs; ++j){
- input[j] = input[j]*gradient(old_input[j]);
- }
- free(old_input);
-}
-
-void calculate_updates_connected_layer(double *input, connected_layer layer)
-{
- int i, j;
- for(i = 0; i < layer.outputs; ++i){
- layer.bias_updates[i] += layer.output[i];
- for(j = 0; j < layer.inputs; ++j){
- layer.weight_updates[i*layer.outputs + j] += layer.output[i]*input[j];
- }
- }
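+ // accumulate the weight/bias updates first: the backpropagation call below
+ // overwrites input with the deltas for the previous layer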
+ calculate_update_connected_layer(input, layer);
+ backpropagate_connected_layer(input, layer);
}
void update_connected_layer(connected_layer layer, double step)
@@ -82,11 +61,36 @@
for(i = 0; i < layer.outputs; ++i){
layer.biases[i] += step*layer.bias_updates[i];
for(j = 0; j < layer.inputs; ++j){
- int index = i*layer.outputs+j;
- layer.weights[index] = layer.weight_updates[index];
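+ // take a gradient step on the accumulated update, indexed with the same
+ // i*layer.inputs + j stride as the forward pass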
+ int index = i*layer.inputs+j;
+ layer.weights[index] += step*layer.weight_updates[index];
}
}
memset(layer.bias_updates, 0, layer.outputs*sizeof(double));
memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(double));
}
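+// accumulate gradients for one example; layer.output is assumed to already
+// hold this layer's error terms (deltas) by the time this is called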
+void calculate_update_connected_layer(double *input, connected_layer layer)
+{
+ int i, j;
+ for(i = 0; i < layer.outputs; ++i){
+ layer.bias_updates[i] += layer.output[i];
+ for(j = 0; j < layer.inputs; ++j){
+ layer.weight_updates[i*layer.inputs + j] += layer.output[i]*input[j];
+ }
+ }
+}
+
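+// compute the previous layer's deltas in place over input, scaling each
+// entry by the activation gradient at that input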
+void backpropagate_connected_layer(double *input, connected_layer layer)
+{
+ int i, j;
+
+ for(j = 0; j < layer.inputs; ++j){
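+ // evaluate the gradient before input[j] is reused as an accumulator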
+ double grad = layer.gradient(input[j]);
+ input[j] = 0;
+ for(i = 0; i < layer.outputs; ++i){
+ input[j] += layer.output[i]*layer.weights[i*layer.inputs + j];
+ }
+ input[j] *= grad;
+ }
+}
+
--
Gitblit v1.10.0