From 6553b3f0e3e55fc30a99c7d4b5798aa86d18a114 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 30 Mar 2015 02:31:47 +0000
Subject: [PATCH] dropout layer: port from OpenCL to CUDA and implement backward pass

Drop the OpenCL path (rand_cl, get_dropout_kernel, the clSetKernelArg
launch) in favor of a CUDA buffer allocated with cuda_make_array.
Precompute scale = 1/(1-probability) once, record each element's
random draw in layer->rand so the backward pass can apply the same
mask (it was previously an empty stub), pass a network_state into
forward/backward, skip dropout entirely when not training, and add
resize_dropout_layer.
---
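Notes:

The new forward pass uses standard "inverted dropout" scaling: each
activation survives with probability (1-p), and survivors are
multiplied by scale = 1/(1-p), so the expected value of every unit is
unchanged. That is why inference needs no rescaling and forward can
simply return when !state.train. A minimal standalone sketch of the
same scheme (illustrative names only, not from this patch):

    #include <stdio.h>
    #include <stdlib.h>

    /* Zero each element with probability p; scale survivors by
     * 1/(1-p) so the expected sum of x is unchanged. */
    static void dropout_inplace(float *x, int n, float p)
    {
        float scale = 1.f/(1.f - p);
        for(int i = 0; i < n; ++i){
            float r = rand()/(float)RAND_MAX; /* uniform in [0,1] */
            if(r < p) x[i] = 0;
            else x[i] *= scale;
        }
    }

    int main()
    {
        float x[8] = {1,1,1,1,1,1,1,1};
        dropout_inplace(x, 8, 0.5f);
        float sum = 0;
        for(int i = 0; i < 8; ++i) sum += x[i];
        /* Each survivor contributes 2, so the expectation of sum
         * is 8, matching the pre-dropout sum. */
        printf("sum = %f\n", sum);
        return 0;
    }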
src/dropout_layer.c | 67 ++++++++++++++-------------------
1 file changed, 28 insertions(+), 39 deletions(-)
diff --git a/src/dropout_layer.c b/src/dropout_layer.c
index ad13034..7fbf8ff 100644
--- a/src/dropout_layer.c
+++ b/src/dropout_layer.c
@@ -1,5 +1,7 @@
#include "dropout_layer.h"
+#include "params.h"
#include "utils.h"
+#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
 
@@ -10,57 +12,44 @@
layer->probability = probability;
layer->inputs = inputs;
layer->batch = batch;
- #ifdef GPU
layer->rand = calloc(inputs*batch, sizeof(float));
- layer->rand_cl = cl_make_array(layer->rand, inputs*batch);
+ layer->scale = 1./(1.-probability);
+ #ifdef GPU
+ layer->rand_gpu = cuda_make_array(layer->rand, inputs*batch);
#endif
return layer;
}
 
-void forward_dropout_layer(dropout_layer layer, float *input)
+void resize_dropout_layer(dropout_layer *layer, int inputs)
+{
+ layer->inputs = inputs;
+ layer->rand = realloc(layer->rand, inputs*layer->batch*sizeof(float));
+ #ifdef GPU
+ cuda_free(layer->rand_gpu);
+ layer->rand_gpu = cuda_make_array(layer->rand, inputs*layer->batch);
+ #endif
+}
+
+void forward_dropout_layer(dropout_layer layer, network_state state)
{
int i;
+ if (!state.train) return;
for(i = 0; i < layer.batch * layer.inputs; ++i){
- if(rand_uniform() < layer.probability) input[i] = 0;
- else input[i] /= (1-layer.probability);
+ float r = rand_uniform();
+ layer.rand[i] = r;
+ if(r < layer.probability) state.input[i] = 0;
+ else state.input[i] *= layer.scale;
}
}
 
-void backward_dropout_layer(dropout_layer layer, float *input, float *delta)
-{
- // Don't do shit LULZ
-}
-#ifdef GPU
-cl_kernel get_dropout_kernel()
+void backward_dropout_layer(dropout_layer layer, network_state state)
{
- static int init = 0;
- static cl_kernel kernel;
- if(!init){
- kernel = get_kernel("src/dropout_layer.cl", "forward", 0);
- init = 1;
+ int i;
+ if(!state.delta) return;
+ for(i = 0; i < layer.batch * layer.inputs; ++i){
+ float r = layer.rand[i];
+ if(r < layer.probability) state.delta[i] = 0;
+ else state.delta[i] *= layer.scale;
}
- return kernel;
}
 
-void forward_dropout_layer_gpu(dropout_layer layer, cl_mem input)
-{
- int j;
- int size = layer.inputs*layer.batch;
- for(j = 0; j < size; ++j) layer.rand[j] = rand_uniform();
- cl_write_array(layer.rand_cl, layer.rand, layer.inputs*layer.batch);
-
- cl_kernel kernel = get_dropout_kernel();
- cl_command_queue queue = cl.queue;
-
- cl_uint i = 0;
- cl.error = clSetKernelArg(kernel, i++, sizeof(input), (void*) &input);
- cl.error = clSetKernelArg(kernel, i++, sizeof(layer.rand_cl), (void*) &layer.rand_cl);
- cl.error = clSetKernelArg(kernel, i++, sizeof(layer.probability), (void*) &layer.probability);
- check_error(cl);
-
- const size_t global_size[] = {size};
-
- cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
- check_error(cl);
-}
-#endif
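
Usage sketch (hypothetical caller; assumes the make_dropout_layer
signature implied by this file and the network_state fields from
params.h -- state.train, state.input, state.delta):

    dropout_layer *layer = make_dropout_layer(batch, inputs, 0.5);

    network_state state = {0};
    state.train = 1;            /* forward is a no-op when !state.train */
    state.input = activations;  /* zeroed/scaled in place */
    forward_dropout_layer(*layer, state);

    /* Backward replays the draws saved in layer->rand, so the same
     * elements are zeroed in the gradient. */
    state.delta = gradients;    /* backward returns early if NULL */
    backward_dropout_layer(*layer, state);

The GPU side of this patch only allocates rand_gpu; the matching
forward/backward CUDA kernels are presumably added in a companion
file not shown here.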
--
Gitblit v1.10.0