From ae43c2bc32fbb838bfebeeaf2c2b058ccab5c83c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@burninator.cs.washington.edu>
Date: Thu, 23 Jun 2016 05:31:14 +0000
Subject: [PATCH] Generate dropout masks on the GPU and apply dropout in place

Replace the CPU-side rand_uniform() loop and cuda_push_array() call with
cuda_random() so the dropout mask is generated directly on the GPU. The
dropout kernel now writes its result in place instead of into a separate
output buffer, both GPU dropout functions take a network_state, and the
forward pass returns early when the network is not training.
---
src/dropout_layer_kernels.cu | 32 ++++++++++++++++++++------------
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/src/dropout_layer_kernels.cu b/src/dropout_layer_kernels.cu
index 371f0dc..7e51bd5 100644
--- a/src/dropout_layer_kernels.cu
+++ b/src/dropout_layer_kernels.cu
@@ -1,33 +1,41 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
extern "C" {
#include "dropout_layer.h"
#include "cuda.h"
#include "utils.h"
}
-__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale, float *output)
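+// Dropout applied in place: zero each element whose random draw is below prob, scale the rest.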
+__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
- if(id < size) output[id] = (rand[id] < prob) ? 0 : input[id]*scale;
+ if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
-extern "C" void forward_dropout_layer_gpu(dropout_layer layer, float * input)
+void forward_dropout_layer_gpu(dropout_layer layer, network_state state)
{
- int j;
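+ // Dropout is only applied while training.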
+ if (!state.train) return;
int size = layer.inputs*layer.batch;
- for(j = 0; j < size; ++j) layer.rand[j] = rand_uniform();
- cuda_push_array(layer.rand_gpu, layer.rand, layer.inputs*layer.batch);
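+ // Generate the mask's random values directly on the GPU (old CPU path kept commented out below).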
+ cuda_random(layer.rand_gpu, size);
+ /*
+ int i;
+ for(i = 0; i < size; ++i){
+ layer.rand[i] = rand_uniform();
+ }
+ cuda_push_array(layer.rand_gpu, layer.rand, size);
+ */
- yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(input, size, layer.rand_gpu, layer.probability,
- layer.scale, layer.output_gpu);
+ yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(state.input, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(cudaPeekAtLastError());
}
-extern "C" void backward_dropout_layer_gpu(dropout_layer layer, float *delta)
+void backward_dropout_layer_gpu(dropout_layer layer, network_state state)
{
- if(!delta) return;
+ if(!state.delta) return;
int size = layer.inputs*layer.batch;
- yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(delta, size, layer.rand_gpu, layer.probability,
- layer.scale, delta);
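+ // Reuse the same mask (layer.rand_gpu) so gradients are dropped and scaled like the activations, in place on state.delta.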
+ yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(state.delta, size, layer.rand_gpu, layer.probability, layer.scale);
check_error(cudaPeekAtLastError());
}
--
Gitblit v1.10.0