From e6c97a53a7b5ac4014d30d236ea2bf5adb4bb521 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Tue, 07 Aug 2018 20:19:50 +0000
Subject: [PATCH] Dropout layer fixes
---
src/dropout_layer.c | 72 +++++++++++++++++++++++++++-------
1 file changed, 57 insertions(+), 15 deletions(-)
diff --git a/src/dropout_layer.c b/src/dropout_layer.c
index fcad7b9..b1381e6 100644
--- a/src/dropout_layer.c
+++ b/src/dropout_layer.c
@@ -1,26 +1,68 @@
#include "dropout_layer.h"
-#include "stdlib.h"
-#include "stdio.h"
+#include "utils.h"
+#include "cuda.h"
+#include <stdlib.h>
+#include <stdio.h>
-dropout_layer *make_dropout_layer(int batch, int inputs, float probability)
+dropout_layer make_dropout_layer(int batch, int inputs, float probability)
{
- fprintf(stderr, "Dropout Layer: %d inputs, %f probability\n", inputs, probability);
- dropout_layer *layer = calloc(1, sizeof(dropout_layer));
- layer->probability = probability;
- layer->inputs = inputs;
- layer->batch = batch;
- return layer;
+ dropout_layer l = {0};
+ l.type = DROPOUT;
+ l.probability = probability;
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.batch = batch;
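+ // one random draw per input element, kept so the backward pass can replay the identical mask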
+ l.rand = calloc(inputs*batch, sizeof(float));
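+ // inverted dropout: kept activations are scaled by 1/(1-p) during training, so inference needs no rescaling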
+ l.scale = 1./(1.-probability);
+ l.forward = forward_dropout_layer;
+ l.backward = backward_dropout_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_dropout_layer_gpu;
+ l.backward_gpu = backward_dropout_layer_gpu;
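+ // mirror the host buffer on the device (cuda_make_array allocates a device array and copies l.rand into it)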
+ l.rand_gpu = cuda_make_array(l.rand, inputs*batch);
+ #endif
+ fprintf(stderr, "dropout p = %.2f %4d -> %4d\n", probability, inputs, inputs);
+ return l;
}
-void forward_dropout_layer(dropout_layer layer, float *input)
+void resize_dropout_layer(dropout_layer *l, int inputs)
+{
+ // size both buffers from the new input count and keep the layer's bookkeeping in sync
+ l->inputs = inputs;
+ l->outputs = inputs;
+ l->rand = realloc(l->rand, inputs*l->batch*sizeof(float));
+ #ifdef GPU
+ cuda_free(l->rand_gpu);
+
+ l->rand_gpu = cuda_make_array(l->rand, inputs*l->batch);
+ #endif
+}
+
+void forward_dropout_layer(dropout_layer l, network_state state)
{
int i;
- for(i = 0; i < layer.batch * layer.inputs; ++i){
- if((float)rand()/RAND_MAX < layer.probability) input[i] = 0;
- else input[i] /= (1-layer.probability);
+ if (!state.train) return;
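+ // training only: draw a fresh mask each pass; rand_uniform(0, 1) yields a uniform value in [0, 1]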
+ for(i = 0; i < l.batch * l.inputs; ++i){
+ float r = rand_uniform(0, 1);
+ l.rand[i] = r;
+ if(r < l.probability) state.input[i] = 0;
+ else state.input[i] *= l.scale;
}
}
-void backward_dropout_layer(dropout_layer layer, float *input, float *delta)
+
+void backward_dropout_layer(dropout_layer l, network_state state)
{
- // Don't do shit LULZ
+ int i;
+ if(!state.delta) return;
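+ // replay the mask recorded in forward so each gradient is zeroed or scaled exactly like its activation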
+ for(i = 0; i < l.batch * l.inputs; ++i){
+ float r = l.rand[i];
+ if(r < l.probability) state.delta[i] = 0;
+ else state.delta[i] *= l.scale;
+ }
}
+
--
Gitblit v1.10.0