From b4b729a15e577c68f64e0ac69fb299de6f5f706c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 17 Apr 2014 16:58:24 +0000
Subject: [PATCH] Merge branch 'master' of pjreddie.com:jnet
---
src/connected_layer.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 07fad69..16a39be 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -7,16 +7,17 @@
#include <stdlib.h>
#include <string.h>
-connected_layer *make_connected_layer(int inputs, int outputs, ACTIVATION activation)
+connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation)
{
fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
int i;
connected_layer *layer = calloc(1, sizeof(connected_layer));
layer->inputs = inputs;
layer->outputs = outputs;
+ layer->batch = batch;
- layer->output = calloc(outputs, sizeof(float*));
- layer->delta = calloc(outputs, sizeof(float*));
+ layer->output = calloc(batch*outputs, sizeof(float));
+ layer->delta = calloc(batch*outputs, sizeof(float));
layer->weight_updates = calloc(inputs*outputs, sizeof(float));
layer->weight_adapt = calloc(inputs*outputs, sizeof(float));
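For reference, a minimal sketch of the batch-major allocation this hunk sets up. The struct and names below are illustrative and carry only the fields visible in the patch; output and delta hold one row of outputs floats per batch element, so both are sized batch*outputs (and hold floats, hence sizeof(float)).

    #include <stdlib.h>

    typedef struct {
        int batch, inputs, outputs;
        float *output;   /* batch*outputs activations, row b = batch element b */
        float *delta;    /* batch*outputs error terms, same layout */
        float *weights;  /* inputs*outputs, row-major */
        float *biases;   /* outputs */
    } toy_connected_layer;

    toy_connected_layer make_toy_connected_layer(int batch, int inputs, int outputs)
    {
        toy_connected_layer l = {0};
        l.batch = batch;
        l.inputs = inputs;
        l.outputs = outputs;
        l.output  = calloc((size_t)batch*outputs, sizeof(float));
        l.delta   = calloc((size_t)batch*outputs, sizeof(float));
        l.weights = calloc((size_t)inputs*outputs, sizeof(float));
        l.biases  = calloc(outputs, sizeof(float));
        return l;
    }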
@@ -78,14 +79,14 @@
{
int i;
- memcpy(layer.output, layer.biases, layer.outputs*sizeof(float));
+ for(i = 0; i < layer.batch; ++i) memcpy(layer.output + i*layer.outputs, layer.biases, layer.outputs*sizeof(float));
- int m = 1;
+ int m = layer.batch;
int k = layer.inputs;
int n = layer.outputs;
float *a = input;
float *b = layer.weights;
float *c = layer.output;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
- for(i = 0; i < layer.outputs; ++i){
+ for(i = 0; i < layer.outputs*layer.batch; ++i){
layer.output[i] = activate(layer.output[i], layer.activation);
}
//for(i = 0; i < layer.outputs; ++i) if(i%(layer.outputs/10+1)==0) printf("%f, ", layer.output[i]); printf("\n");
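With m = layer.batch, the single gemm(0,0,...) call now computes every batch row at once: C (batch x outputs) += A (batch x inputs) * B (inputs x outputs), all row-major. The sketch below spells that out with a naive triple loop standing in for gemm; the logistic function is a stand-in for activate(), whose definition is outside this patch. Note that each output row must start from the biases.

    #include <math.h>
    #include <string.h>

    void toy_forward(int batch, int inputs, int outputs,
                     const float *input,   /* batch x inputs,   gemm A */
                     const float *weights, /* inputs x outputs, gemm B */
                     const float *biases,  /* outputs */
                     float *output)        /* batch x outputs,  gemm C */
    {
        for(int b = 0; b < batch; ++b){
            /* seed every row with the biases, not just row 0 */
            memcpy(output + b*outputs, biases, outputs*sizeof(float));
            for(int j = 0; j < inputs; ++j){      /* gemm k */
                float x = input[b*inputs + j];
                for(int o = 0; o < outputs; ++o){ /* gemm n */
                    output[b*outputs + o] += x * weights[j*outputs + o];
                }
            }
        }
        for(int i = 0; i < batch*outputs; ++i){
            output[i] = 1.f/(1.f + expf(-output[i])); /* stand-in activation */
        }
    }

Keeping the whole batch in one contiguous buffer is what lets a single gemm call replace a per-example loop.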
@@ -94,12 +95,12 @@
void learn_connected_layer(connected_layer layer, float *input)
{
int i;
- for(i = 0; i < layer.outputs; ++i){
+ for(i = 0; i < layer.outputs*layer.batch; ++i){
layer.delta[i] *= gradient(layer.output[i], layer.activation);
- layer.bias_updates[i] += layer.delta[i];
+ layer.bias_updates[i%layer.outputs] += layer.delta[i]/layer.batch;
}
int m = layer.inputs;
- int k = 1;
+ int k = layer.batch;
int n = layer.outputs;
float *a = input;
float *b = layer.delta;
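The loop above turns layer.delta into the gradient with respect to the pre-activations and accumulates per-unit bias gradients averaged over the batch; with the batch-major layout, i % layer.outputs selects the output unit. The gemm call that consumes m = inputs, k = batch, n = outputs sits outside the hunk, but those dimensions imply weight_updates (inputs x outputs) += input^T (inputs x batch) * delta (batch x outputs). A sketch under those assumptions, with the logistic gradient standing in for gradient() and sum (not average) accumulation for the weights assumed from gemm's usual alpha = 1:

    void toy_learn(int batch, int inputs, int outputs,
                   const float *input,    /* batch x inputs */
                   const float *output,   /* batch x outputs, post-activation */
                   float *delta,          /* batch x outputs */
                   float *bias_updates,   /* outputs */
                   float *weight_updates) /* inputs x outputs */
    {
        for(int i = 0; i < batch*outputs; ++i){
            delta[i] *= output[i]*(1 - output[i]);   /* stand-in gradient */
            bias_updates[i % outputs] += delta[i]/batch;
        }
        for(int j = 0; j < inputs; ++j){        /* gemm m */
            for(int o = 0; o < outputs; ++o){   /* gemm n */
                for(int b = 0; b < batch; ++b){ /* gemm k: reduce over batch */
                    weight_updates[j*outputs + o] += input[b*inputs + j]*delta[b*outputs + o];
                }
            }
        }
    }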
@@ -113,7 +114,7 @@
int m = layer.inputs;
int k = layer.outputs;
- int n = 1;
+ int n = layer.batch;
float *a = layer.weights;
float *b = layer.delta;
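The backward hunk only resizes the gemm to n = layer.batch; the destination buffer and transpose flags are outside the patch, so the sketch below just shows the math those dimensions imply: each batch row of the propagated error is delta * W^T.

    void toy_backward(int batch, int inputs, int outputs,
                      const float *weights, /* inputs x outputs */
                      const float *delta,   /* batch x outputs */
                      float *input_delta)   /* batch x inputs, dL/dinput */
    {
        for(int b = 0; b < batch; ++b){
            for(int j = 0; j < inputs; ++j){      /* gemm m */
                float sum = 0;
                for(int o = 0; o < outputs; ++o){ /* gemm k */
                    sum += delta[b*outputs + o]*weights[j*outputs + o];
                }
                input_delta[b*inputs + j] = sum;
            }
        }
    }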
--
Gitblit v1.10.0