From d0b9326a352ed2fbc3ae66fdef40b4533a2f211d Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Tue, 11 Aug 2015 06:22:27 +0000
Subject: [PATCH] Hacks to get nightmare to not break gridsizing

---
 src/softmax_layer_kernels.cu |   18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/src/softmax_layer_kernels.cu b/src/softmax_layer_kernels.cu
index 61dc607..7e13387 100644
--- a/src/softmax_layer_kernels.cu
+++ b/src/softmax_layer_kernels.cu
@@ -4,8 +4,6 @@
 #include "blas.h"
 }
 
-#define BLOCK 256
-
 __global__ void forward_softmax_layer_kernel(int n, int batch, float *input, float *output)
 {
     int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
@@ -32,21 +30,17 @@
     cuda_pull_array(layer.output_gpu, layer.output, layer.inputs*layer.batch);
 }
 
-extern "C" void forward_softmax_layer_gpu(const softmax_layer layer, float *input)
+extern "C" void forward_softmax_layer_gpu(const softmax_layer layer, network_state state)
 {
-    forward_softmax_layer_kernel<<<cuda_gridsize(layer.batch), BLOCK>>>(layer.inputs, layer.batch, input, layer.output_gpu);
+    int inputs = layer.inputs / layer.groups;
+    int batch = layer.batch * layer.groups;
+    forward_softmax_layer_kernel<<<cuda_gridsize(batch), BLOCK>>>(inputs, batch, state.input, layer.output_gpu);
     check_error(cudaPeekAtLastError());
-
-    /*
-    cl_read_array(layer.output_cl, layer.output, layer.inputs*layer.batch);
-    int z;
-    for(z = 0; z < layer.inputs*layer.batch; ++z) printf("%f,",layer.output[z]);
-    */
 }
 
-extern "C" void backward_softmax_layer_gpu(const softmax_layer layer, float *delta)
+extern "C" void backward_softmax_layer_gpu(const softmax_layer layer, network_state state)
 {
-    copy_ongpu(layer.batch*layer.inputs, layer.delta_gpu, 1, delta, 1);
+    axpy_ongpu(layer.batch*layer.inputs, 1, layer.delta_gpu, 1, state.delta, 1);
 }
 
 /* This is if you want softmax w/o log-loss classification. You probably don't.

--
Gitblit v1.10.0
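
Note on the forward launch: the patch drops the local "#define BLOCK 256" so the shared block size from darknet's CUDA header is used, and the kernel is now launched over batch*groups rows of inputs/groups elements each, so the grid is sized from the grouped batch count. Below is a minimal host-side sketch of what a grid-sizing helper in the spirit of cuda_gridsize computes; the helper name gridsize_sketch and the BLOCK value of 512 are assumptions for illustration, not darknet's actual definitions.

#include <cuda_runtime.h>
#include <math.h>

#define BLOCK 512   /* assumed value of the shared block size */

/* Map n work items onto a launch grid, spilling into gridDim.y once
 * gridDim.x would exceed the 65535 per-axis limit (the "gridsizing"
 * the commit subject refers to). */
static dim3 gridsize_sketch(size_t n)
{
    size_t k = (n - 1) / BLOCK + 1;          /* blocks needed for n threads */
    size_t x = k;
    size_t y = 1;
    if (x > 65535) {                         /* too wide for one grid axis  */
        x = (size_t)ceil(sqrt((double)k));
        y = (n - 1) / (x * BLOCK) + 1;
    }
    return dim3((unsigned int)x, (unsigned int)y, 1);
}

/* Inside the kernel the flat index is recovered exactly as the patch keeps it:
 *     int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; */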
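
Note on the backward change: replacing copy_ongpu with axpy_ongpu means the softmax layer now accumulates its gradient into state.delta (y += alpha*x with alpha = 1) rather than overwriting it, so gradient contributions already written into the same delta buffer are preserved. A minimal sketch of the accumulate semantics assumed here; the kernel name is illustrative, not darknet's implementation.

__global__ void axpy_sketch_kernel(int n, float alpha, const float *x, float *y)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i < n) y[i] += alpha * x[i];   /* accumulate rather than overwrite */
}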