From ae43c2bc32fbb838bfebeeaf2c2b058ccab5c83c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@burninator.cs.washington.edu>
Date: Thu, 23 Jun 2016 05:31:14 +0000
Subject: [PATCH] hi

---
 src/softmax_layer_kernels.cu |   14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/softmax_layer_kernels.cu b/src/softmax_layer_kernels.cu
index 7e13387..8feaf89 100644
--- a/src/softmax_layer_kernels.cu
+++ b/src/softmax_layer_kernels.cu
@@ -1,10 +1,14 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
 extern "C" {
 #include "softmax_layer.h"
 #include "cuda.h"
 #include "blas.h"
 }
 
-__global__ void forward_softmax_layer_kernel(int n, int batch, float *input, float *output)
+__global__ void forward_softmax_layer_kernel(int n, int batch, float *input, float temp, float *output)
 {
     int b = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
     if(b >= batch) return;
@@ -17,11 +21,11 @@
         largest = (val>largest) ? val : largest;
     }
     for(i = 0; i < n; ++i){
-        sum += exp(input[i+b*n]-largest);
+        sum += exp(input[i+b*n]/temp-largest/temp);
     }
-    sum = (sum != 0) ? largest+log(sum) : largest-100;
+    sum = (sum != 0) ? largest/temp+log(sum) : largest-100;
     for(i = 0; i < n; ++i){
-        output[i+b*n] = exp(input[i+b*n]-sum);
+        output[i+b*n] = exp(input[i+b*n]/temp-sum);
     }
 }
 
@@ -34,7 +38,7 @@
 {
     int inputs = layer.inputs / layer.groups;
     int batch = layer.batch * layer.groups;
-    forward_softmax_layer_kernel<<<cuda_gridsize(batch), BLOCK>>>(inputs, batch, state.input, layer.output_gpu);
+    forward_softmax_layer_kernel<<<cuda_gridsize(batch), BLOCK>>>(inputs, batch, state.input, layer.temperature, layer.output_gpu);
     check_error(cudaPeekAtLastError());
 }
 

--
Gitblit v1.10.0