From 7ee45082f103dac7037ddf4a2c7d598b73d47016 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 09 Feb 2015 21:24:32 +0000
Subject: [PATCH] doing some testing w/o rand
---
src/convolutional_layer.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/convolutional_layer.c b/src/convolutional_layer.c
index 6848511..6a172aa 100644
--- a/src/convolutional_layer.c
+++ b/src/convolutional_layer.c
@@ -66,10 +66,8 @@
layer->biases = calloc(n, sizeof(float));
layer->bias_updates = calloc(n, sizeof(float));
float scale = 1./sqrt(size*size*c);
- //scale = .05;
for(i = 0; i < c*n*size*size; ++i) layer->filters[i] = scale*rand_normal();
for(i = 0; i < n; ++i){
- //layer->biases[i] = rand_normal()*scale + scale;
layer->biases[i] = scale;
}
int out_h = convolutional_out_height(*layer);
@@ -155,18 +153,20 @@

void learn_bias_convolutional_layer(convolutional_layer layer)
{
+ float alpha = 1./layer.batch;
int i,b;
int size = convolutional_out_height(layer)
*convolutional_out_width(layer);
for(b = 0; b < layer.batch; ++b){
for(i = 0; i < layer.n; ++i){
- layer.bias_updates[i] += sum_array(layer.delta+size*(i+b*layer.n), size);
+ layer.bias_updates[i] += alpha * sum_array(layer.delta+size*(i+b*layer.n), size);
}
}
}

void backward_convolutional_layer(convolutional_layer layer, float *in, float *delta)
{
+ float alpha = 1./layer.batch;
int i;
int m = layer.n;
int n = layer.size*layer.size*layer.c;
@@ -188,7 +188,7 @@
im2col_cpu(im, layer.c, layer.h, layer.w,
layer.size, layer.stride, layer.pad, b);
- gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
+ gemm(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(delta){
a = layer.filters;
--
Gitblit v1.10.0
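
Note (below the signature, not part of the patch): a minimal, self-contained C sketch of what the alpha = 1./layer.batch change does. The per-filter bias gradient (and, through the gemm alpha argument, the filter gradient) is accumulated as a mean over the batch rather than a raw sum. The [batch][n][spatial] layout of delta mirrors layer.delta + size*(i + b*layer.n) above; the function and helper names here are illustrative assumptions, not darknet's actual API.

/* Sum a contiguous block of floats, in the spirit of darknet's sum_array(). */
static float sum_block(const float *a, int len)
{
    float s = 0;
    int i;
    for (i = 0; i < len; ++i) s += a[i];
    return s;
}

/* Accumulate dL/db for each of n filters, averaged over the batch.
 * delta is assumed to be laid out as [batch][n][spatial]. */
void bias_gradient_batch_mean(const float *delta, float *bias_updates,
                              int batch, int n, int spatial)
{
    float alpha = 1.f / batch;   /* the patch's 1./layer.batch */
    int b, i;
    for (b = 0; b < batch; ++b) {
        for (i = 0; i < n; ++i) {
            bias_updates[i] += alpha * sum_block(delta + spatial*(i + b*n), spatial);
        }
    }
}

The gemm change in the last hunk applies the same factor to the filter updates: assuming the usual C = alpha*op(A)*op(B) + beta*C convention, gemm(0,1,m,n,k,alpha,a,k,b,k,1,c,n) scales each batch element's contribution to layer.filter_updates by 1/batch as well.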