From af4e4f92dc9e5da160eb6c6870a7b38b863f1c6c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Tue, 28 Oct 2014 02:45:06 +0000
Subject: [PATCH] Get rid of sub_arrays; work around NVIDIA driver memory leak

---
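Note: the GPU forward pass added here loads a kernel named "forward" from
src/softmax_layer.cl, which is not included in this patch. Judging from the
clSetKernelArg order in forward_softmax_layer_gpu and from the CPU forward
pass, that kernel presumably looks roughly like the sketch below, one
work-item per image in the batch (an assumption, not the actual kernel
source):

    __kernel void forward(int n, __global float *input, __global float *output)
    {
        int b = get_global_id(0);          /* one work-item per batch image */
        int i;
        float sum = 0;
        float largest = -FLT_MAX;
        for(i = 0; i < n; ++i){
            if(input[i+b*n] > largest) largest = input[i+b*n];
        }
        for(i = 0; i < n; ++i){
            sum += exp(input[i+b*n] - largest);
        }
        sum = (sum != 0) ? largest + log(sum) : largest - 100;
        for(i = 0; i < n; ++i){
            output[i+b*n] = exp(input[i+b*n] - sum);
        }
    }

The (n, input, output) argument order mirrors the three clSetKernelArg calls;
FLT_MAX, exp, and log are OpenCL C built-ins.
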
 src/softmax_layer.c |  103 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 64 insertions(+), 39 deletions(-)

diff --git a/src/softmax_layer.c b/src/softmax_layer.c
index b6e9fe9..dae332e 100644
--- a/src/softmax_layer.c
+++ b/src/softmax_layer.c
@@ -1,5 +1,6 @@
 #include "softmax_layer.h"
 #include "mini_blas.h"
+#include <float.h>
 #include <math.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -13,36 +14,25 @@
     layer->output = calloc(inputs*batch, sizeof(float));
     layer->delta = calloc(inputs*batch, sizeof(float));
     layer->jacobian = calloc(inputs*inputs*batch, sizeof(float));
+    #ifdef GPU
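+    // allocate the GPU output/delta buffers once up front; the sub_arrays
+    // they replace leaked memory in the NVIDIA driver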
+    layer->output_cl = cl_make_array(layer->output, inputs*batch); 
+    layer->delta_cl = cl_make_array(layer->delta, inputs*batch); 
+    #endif
     return layer;
 }
 
-/* UNSTABLE!
-void forward_softmax_layer(const softmax_layer layer, float *input)
-{
-    int i;
-    float sum = 0;
-    for(i = 0; i < layer.inputs; ++i){
-        sum += exp(input[i]);
-    }
-    for(i = 0; i < layer.inputs; ++i){
-        layer.output[i] = exp(input[i])/sum;
-    }
-}
-*/
 void forward_softmax_layer(const softmax_layer layer, float *input)
 {
     int i,b;
     for(b = 0; b < layer.batch; ++b){
         float sum = 0;
-        float largest = 0;
+        float largest = -FLT_MAX;
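+        // (was 0; -FLT_MAX keeps the max correct when every input is negative)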
         for(i = 0; i < layer.inputs; ++i){
             if(input[i+b*layer.inputs] > largest) largest = input[i+b*layer.inputs];
         }
         for(i = 0; i < layer.inputs; ++i){
             sum += exp(input[i+b*layer.inputs]-largest);
-            //printf("%f, ", input[i]);
         }
-        //printf("\n");
         if(sum) sum = largest+log(sum);
         else sum = largest-100;
         for(i = 0; i < layer.inputs; ++i){
@@ -51,33 +41,68 @@
     }
 }
 
-void backward_softmax_layer(const softmax_layer layer, float *input, float *delta)
+void backward_softmax_layer(const softmax_layer layer, float *delta)
 {
-/*
-    int i,j,b;
-    for(b = 0; b < layer.batch; ++b){
-        for(i = 0; i < layer.inputs; ++i){
-            for(j = 0; j < layer.inputs; ++j){
-                int d = (i==j);
-                layer.jacobian[b*layer.inputs*layer.inputs + i*layer.inputs + j] = 
-                        layer.output[b*layer.inputs + i] * (d - layer.output[b*layer.inputs + j]);
-            }
-        }
-    }
-    for(b = 0; b < layer.batch; ++b){
-        int M = layer.inputs;
-        int N = 1;
-        int K = layer.inputs;
-        float *A = layer.jacobian + b*layer.inputs*layer.inputs;
-        float *B = layer.delta + b*layer.inputs;
-        float *C = delta + b*layer.inputs;
-        gemm(0,0,M,N,K,1,A,K,B,N,0,C,N);
-    }
-    */
-
     int i;
     for(i = 0; i < layer.inputs*layer.batch; ++i){
         delta[i] = layer.delta[i];
     }
 }
 
+#ifdef GPU
+cl_kernel get_softmax_forward_kernel()
+{
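+    // build the OpenCL kernel on first use and reuse the cached handle afterwards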
+    static int init = 0;
+    static cl_kernel kernel;
+    if(!init){
+        kernel = get_kernel("src/softmax_layer.cl", "forward", 0);
+        init = 1;
+    }
+    return kernel;
+}
+
+void forward_softmax_layer_gpu(const softmax_layer layer, cl_mem input)
+{
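+    // enqueue the softmax kernel with one work-item per image in the batch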
+    cl_setup();
+    cl_kernel kernel = get_softmax_forward_kernel();
+    cl_command_queue queue = cl.queue;
+
+    cl_uint i = 0;
+    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.inputs), (void*) &layer.inputs);
+    cl.error = clSetKernelArg(kernel, i++, sizeof(input), (void*) &input);
+    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.output_cl), (void*) &layer.output_cl);
+    check_error(cl);
+
+    const size_t global_size[] = {layer.batch};
+
+    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
+    check_error(cl);
+}
+
+void backward_softmax_layer_gpu(const softmax_layer layer, cl_mem delta)
+{
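+    // the loss gradient is already stored in layer.delta_cl; copy it to the
+    // caller's delta buffer without leaving the GPU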
+    copy_ongpu(layer.batch*layer.inputs, layer.delta_cl, 1, delta, 1);
+}
+#endif
+
+/* This is if you want softmax w/o log-loss classification. You probably don't.
+   int i,j,b;
+   for(b = 0; b < layer.batch; ++b){
+       for(i = 0; i < layer.inputs; ++i){
+           for(j = 0; j < layer.inputs; ++j){
+               int d = (i==j);
+               layer.jacobian[b*layer.inputs*layer.inputs + i*layer.inputs + j] =
+                   layer.output[b*layer.inputs + i] * (d - layer.output[b*layer.inputs + j]);
+           }
+       }
+   }
+   for(b = 0; b < layer.batch; ++b){
+       int M = layer.inputs;
+       int N = 1;
+       int K = layer.inputs;
+       float *A = layer.jacobian + b*layer.inputs*layer.inputs;
+       float *B = layer.delta + b*layer.inputs;
+       float *C = delta + b*layer.inputs;
+       gemm(0,0,M,N,K,1,A,K,B,N,0,C,N);
+   }
+ */

--
Gitblit v1.10.0