From ace5aeb0f59fdceb99e607af9780added20da37c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 24 Jan 2014 22:51:17 +0000
Subject: [PATCH] MNIST connected network showing off matrices
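
Replace the hand-rolled loops in the connected layer's forward, learn,
and backward passes with single calls to gemm() from the new mini_blas
module. Assuming gemm() follows the usual row-major convention
gemm(TA, TB, M, N, K, ALPHA, A, lda, B, ldb, BETA, C, ldc), computing
C = ALPHA*A*B + BETA*C, the three passes map onto one routine:

    forward:  output(1 x outs)           += input(1 x ins)      * weights(ins x outs)
    learn:    weight_updates(ins x outs) += input(ins x 1)      * delta(1 x outs)
    backward: delta(ins x 1)             += weights(ins x outs) * layer.delta(outs x 1)

forward preloads output with the biases via memcpy() and backward
zeroes delta via memset(), so BETA=1 gives the right result in every
case. Note the weight layout this implies: weights[in*outputs + out]
rather than the old weights[out*inputs + in]. Weights are randomly
initialized, so the relabeling is harmless as long as all three
passes agree, which they do.

Also: initialize biases to 0 instead of 1, print layer creation info
to stderr so it stays out of stdout, move update_connected_layer()
above the other functions, and keep the old loop implementations in a
comment block at the end of the file for reference.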
---
src/connected_layer.c | 118 ++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 83 insertions(+), 35 deletions(-)
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 99f146b..6871b2e 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -1,5 +1,6 @@
#include "connected_layer.h"
#include "utils.h"
+#include "mini_blas.h"
#include <math.h>
#include <stdio.h>
@@ -8,7 +9,7 @@
connected_layer *make_connected_layer(int inputs, int outputs, ACTIVATION activation)
{
- printf("Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
+ fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
int i;
connected_layer *layer = calloc(1, sizeof(connected_layer));
layer->inputs = inputs;
@@ -29,62 +30,109 @@
layer->biases = calloc(outputs, sizeof(double));
for(i = 0; i < outputs; ++i)
//layer->biases[i] = rand_normal()*scale + scale;
- layer->biases[i] = 1;
+ layer->biases[i] = 0;
layer->activation = activation;
return layer;
}
+void update_connected_layer(connected_layer layer, double step, double momentum, double decay)
+{
+ int i;
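+ /* SGD with momentum; the decay term applies L2 weight decay to the weights but not the biases. */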
+ for(i = 0; i < layer.outputs; ++i){
+ layer.bias_momentum[i] = step*(layer.bias_updates[i]) + momentum*layer.bias_momentum[i];
+ layer.biases[i] += layer.bias_momentum[i];
+ }
+ for(i = 0; i < layer.outputs*layer.inputs; ++i){
+ layer.weight_momentum[i] = step*(layer.weight_updates[i] - decay*layer.weights[i]) + momentum*layer.weight_momentum[i];
+ layer.weights[i] += layer.weight_momentum[i];
+ }
+ memset(layer.bias_updates, 0, layer.outputs*sizeof(double));
+ memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(double));
+}
+
void forward_connected_layer(connected_layer layer, double *input)
{
- int i, j;
+ int i;
+ memcpy(layer.output, layer.biases, layer.outputs*sizeof(double));
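+ /* One row-major gemm computes the whole layer: output(1 x outputs) += input(1 x inputs) * weights(inputs x outputs), with output preloaded with the biases. */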
+ int m = 1;
+ int k = layer.inputs;
+ int n = layer.outputs;
+ double *a = input;
+ double *b = layer.weights;
+ double *c = layer.output;
+ gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
for(i = 0; i < layer.outputs; ++i){
- layer.output[i] = layer.biases[i];
- for(j = 0; j < layer.inputs; ++j){
- layer.output[i] += input[j]*layer.weights[i*layer.inputs + j];
- }
layer.output[i] = activate(layer.output[i], layer.activation);
}
}
void learn_connected_layer(connected_layer layer, double *input)
{
- int i, j;
+ int i;
for(i = 0; i < layer.outputs; ++i){
layer.delta[i] *= gradient(layer.output[i], layer.activation);
layer.bias_updates[i] += layer.delta[i];
- for(j = 0; j < layer.inputs; ++j){
- layer.weight_updates[i*layer.inputs + j] += layer.delta[i]*input[j];
- }
}
-}
-
-void update_connected_layer(connected_layer layer, double step, double momentum, double decay)
-{
- int i,j;
- for(i = 0; i < layer.outputs; ++i){
- layer.bias_momentum[i] = step*(layer.bias_updates[i]) + momentum*layer.bias_momentum[i];
- layer.biases[i] += layer.bias_momentum[i];
- for(j = 0; j < layer.inputs; ++j){
- int index = i*layer.inputs+j;
- layer.weight_momentum[index] = step*(layer.weight_updates[index] - decay*layer.weights[index]) + momentum*layer.weight_momentum[index];
- layer.weights[index] += layer.weight_momentum[index];
- //layer.weights[index] = constrain(layer.weights[index], 100.);
- }
- }
- memset(layer.bias_updates, 0, layer.outputs*sizeof(double));
- memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(double));
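+ /* Outer product accumulates the weight gradient: weight_updates(inputs x outputs) += input(inputs x 1) * delta(1 x outputs). */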
+ int m = layer.inputs;
+ int k = 1;
+ int n = layer.outputs;
+ double *a = input;
+ double *b = layer.delta;
+ double *c = layer.weight_updates;
+ gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
void backward_connected_layer(connected_layer layer, double *input, double *delta)
{
- int i, j;
+ memset(delta, 0, layer.inputs*sizeof(double));
- for(j = 0; j < layer.inputs; ++j){
- delta[j] = 0;
- for(i = 0; i < layer.outputs; ++i){
- delta[j] += layer.delta[i]*layer.weights[i*layer.inputs + j];
- }
- }
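+ /* Propagate error to the previous layer: delta(inputs x 1) = weights(inputs x outputs) * layer.delta(outputs x 1). */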
+ int m = layer.inputs;
+ int k = layer.outputs;
+ int n = 1;
+
+ double *a = layer.weights;
+ double *b = layer.delta;
+ double *c = delta;
+
+ gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
+/*
+ void forward_connected_layer(connected_layer layer, double *input)
+ {
+ int i, j;
+ for(i = 0; i < layer.outputs; ++i){
+ layer.output[i] = layer.biases[i];
+ for(j = 0; j < layer.inputs; ++j){
+ layer.output[i] += input[j]*layer.weights[i*layer.inputs + j];
+ }
+ layer.output[i] = activate(layer.output[i], layer.activation);
+ }
+ }
+ void learn_connected_layer(connected_layer layer, double *input)
+ {
+ int i, j;
+ for(i = 0; i < layer.outputs; ++i){
+ layer.delta[i] *= gradient(layer.output[i], layer.activation);
+ layer.bias_updates[i] += layer.delta[i];
+ for(j = 0; j < layer.inputs; ++j){
+ layer.weight_updates[i*layer.inputs + j] += layer.delta[i]*input[j];
+ }
+ }
+ }
+ void backward_connected_layer(connected_layer layer, double *input, double *delta)
+ {
+ int i, j;
+ for(j = 0; j < layer.inputs; ++j){
+ delta[j] = 0;
+ for(i = 0; i < layer.outputs; ++i){
+ delta[j] += layer.delta[i]*layer.weights[i*layer.inputs + j];
+ }
+ }
+ }
+ */
--
Gitblit v1.10.0