From f98bf6bbdb5ed81f2ea2071ad8e705130f7ba596 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sat, 28 Mar 2015 23:11:37 +0000
Subject: [PATCH] We do our OWN resizing!

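Also rework the softmax layer: drop the commented-out unstable
softmax and make the remaining implementation numerically stable by
shifting inputs by their maximum and normalizing through log-sum-exp,
factor the per-vector loop into softmax_array(), add a groups
parameter so one layer can run several independent softmaxes per
batch item, allocate GPU buffers when built with GPU, and take a
network_state instead of raw input/delta pointers.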
---
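A quick numerical sanity check of the max-shift trick, as a minimal
standalone sketch (not part of the patch; plain C against the
standard library, mirroring the new softmax_array()):

    #include <float.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        /* the naive exp(x)/sum(exp(x)) overflows here: exp(1000) is inf */
        float in[3] = {1000.f, 1000.f, 2.f};
        float out[3];
        float largest = -FLT_MAX;
        float sum = 0;
        int i, n = 3;
        for(i = 0; i < n; ++i) if(in[i] > largest) largest = in[i];
        for(i = 0; i < n; ++i) sum += exp(in[i] - largest);
        sum = largest + log(sum);               /* log-sum-exp */
        for(i = 0; i < n; ++i) out[i] = exp(in[i] - sum);
        for(i = 0; i < n; ++i) printf("%f ", out[i]);
        printf("\n");                           /* 0.500000 0.500000 0.000000 */
        return 0;
    }

With groups > 1, forward_softmax_layer() runs batch*groups of these,
each over a consecutive block of inputs/groups values.
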
 src/softmax_layer.c |   68 +++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 30 deletions(-)

diff --git a/src/softmax_layer.c b/src/softmax_layer.c
index 1268423..e344d16 100644
--- a/src/softmax_layer.c
+++ b/src/softmax_layer.c
@@ -1,59 +1,67 @@
 #include "softmax_layer.h"
+#include "blas.h"
+#include "cuda.h"
+#include <float.h>
 #include <math.h>
 #include <stdlib.h>
 #include <stdio.h>
+#include <assert.h>
 
-softmax_layer *make_softmax_layer(int batch, int inputs)
+softmax_layer *make_softmax_layer(int batch, int inputs, int groups)
 {
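+    /* inputs must split evenly into groups; each group is softmaxed separately */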
+    assert(inputs%groups == 0);
     fprintf(stderr, "Softmax Layer: %d inputs\n", inputs);
     softmax_layer *layer = calloc(1, sizeof(softmax_layer));
     layer->batch = batch;
+    layer->groups = groups;
     layer->inputs = inputs;
     layer->output = calloc(inputs*batch, sizeof(float));
     layer->delta = calloc(inputs*batch, sizeof(float));
+    #ifdef GPU
+    layer->output_gpu = cuda_make_array(layer->output, inputs*batch);
+    layer->delta_gpu = cuda_make_array(layer->delta, inputs*batch);
+    #endif
     return layer;
 }
 
-/* UNSTABLE!
-void forward_softmax_layer(const softmax_layer layer, float *input)
+void softmax_array(float *input, int n, float *output)
 {
     int i;
     float sum = 0;
-    for(i = 0; i < layer.inputs; ++i){
-        sum += exp(input[i]);
+    float largest = -FLT_MAX;
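+    /* shift by the largest input so exp() never overflows */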
+    for(i = 0; i < n; ++i){
+        if(input[i] > largest) largest = input[i];
     }
-    for(i = 0; i < layer.inputs; ++i){
-        layer.output[i] = exp(input[i])/sum;
+    for(i = 0; i < n; ++i){
+        sum += exp(input[i]-largest);
     }
-}
-*/
-void forward_softmax_layer(const softmax_layer layer, float *input)
-{
-    int i,b;
-    for(b = 0; b < layer.batch; ++b){
-        float sum = 0;
-        float largest = 0;
-        for(i = 0; i < layer.inputs; ++i){
-            if(input[i+b*layer.inputs] > largest) largest = input[i+b*layer.inputs];
-        }
-        for(i = 0; i < layer.inputs; ++i){
-            sum += exp(input[i+b*layer.inputs]-largest);
-            //printf("%f, ", input[i]);
-        }
-        //printf("\n");
-        if(sum) sum = largest+log(sum);
-        else sum = largest-100;
-        for(i = 0; i < layer.inputs; ++i){
-            layer.output[i+b*layer.inputs] = exp(input[i+b*layer.inputs]-sum);
-        }
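+    /* log-sum-exp: output[i] = exp(input[i] - (largest + log(sum))) */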
+    if(sum) sum = largest+log(sum);
+    else sum = largest-100;
+    for(i = 0; i < n; ++i){
+        output[i] = exp(input[i]-sum);
     }
 }
 
-void backward_softmax_layer(const softmax_layer layer, float *input, float *delta)
+void forward_softmax_layer(const softmax_layer layer, network_state state)
+{
+    int b;
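+    /* each group is an independent softmax over inputs/groups values */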
+    int inputs = layer.inputs / layer.groups;
+    int batch = layer.batch * layer.groups;
+    for(b = 0; b < batch; ++b){
+        softmax_array(state.input+b*inputs, inputs, layer.output+b*inputs);
+    }
+}
+
+void backward_softmax_layer(const softmax_layer layer, network_state state)
 {
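+    /* copy this layer's stored delta straight back to the previous layer */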
     int i;
     for(i = 0; i < layer.inputs*layer.batch; ++i){
-        delta[i] = layer.delta[i];
+        state.delta[i] = layer.delta[i];
     }
 }
 

--
Gitblit v1.10.0