From b32a287e38f4c6a41828f18b4669dec9f3af4943 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 17 Jul 2014 17:17:52 +0000
Subject: [PATCH] Merge branch 'master' of pjreddie.com:jnet

---
 src/activations.c |   97 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 90 insertions(+), 7 deletions(-)

diff --git a/src/activations.c b/src/activations.c
index b8bb79d..3b11716 100644
--- a/src/activations.c
+++ b/src/activations.c
@@ -2,8 +2,28 @@
 
 #include <math.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <string.h>
 
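+/* Inverse of get_activation() below: map an ACTIVATION enum back to the
+ * name used in config files. Unknown values fall back to "relu". */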
+char *get_activation_string(ACTIVATION a)
+{
+    switch(a){
+        case SIGMOID:
+            return "sigmoid";
+        case RELU:
+            return "relu";
+        case RAMP:
+            return "ramp";
+        case LINEAR:
+            return "linear";
+        case TANH:
+            return "tanh";
+        default:
+            break;
+    }
+    return "relu";
+}
+
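+/* Parse an activation name from a config file; unrecognized names fall
+ * back to RELU. */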
 ACTIVATION get_activation(char *s)
 {
     if (strcmp(s, "sigmoid")==0) return SIGMOID;
@@ -15,22 +35,40 @@
     return RELU;
 }
 
-double activate(double x, ACTIVATION a){
+float linear_activate(float x){return x;}
+float sigmoid_activate(float x){return 1./(1. + exp(-x));}
+float relu_activate(float x){return x*(x>0);}
+float ramp_activate(float x){return x*(x>0)+.1*x;}
+/* numerically stable tanh: (exp(2*x)-1)/(exp(2*x)+1) overflows for large x */
+float tanh_activate(float x){return 2./(1. + exp(-2*x)) - 1;}
+
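+/* Apply activation a to x. With dropout > 0, each unit is zeroed with
+ * probability `dropout` and the survivors are scaled by 1/(1-dropout), so
+ * the expected value of the output is unchanged (inverted dropout). */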
+float activate(float x, ACTIVATION a, float dropout)
+{
+    if(dropout && (float)rand()/RAND_MAX < dropout) return 0;
     switch(a){
         case LINEAR:
-            return x;
+            return linear_activate(x)/(1-dropout);
         case SIGMOID:
-            return 1./(1.+exp(-x));
+            return sigmoid_activate(x)/(1-dropout);
         case RELU:
-            return x*(x>0);
+            return relu_activate(x)/(1-dropout);
         case RAMP:
-            return x*(x>0) + .1*x;
+            return ramp_activate(x)/(1-dropout);
         case TANH:
-            return (exp(2*x)-1)/(exp(2*x)+1);
+            return tanh_activate(x)/(1-dropout);
     }
     return 0;
 }
-double gradient(double x, ACTIVATION a){
+
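+/* Apply the activation (and dropout) in place to each of the n entries of x. */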
+void activate_array(float *x, const int n, const ACTIVATION a, float dropout)
+{
+    int i;
+    for(i = 0; i < n; ++i){
+        x[i] = activate(x[i], a, dropout);
+    }
+}
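+/* Example of activating a layer's outputs in a forward pass (hypothetical
+ * caller; the field names are illustrative, not part of this file):
+ *
+ *     activate_array(layer.output, layer.outputs, layer.activation, .5);
+ *
+ * activates every output and drops roughly half the units. */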
+
+
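+/* Derivative of activation a, used to chain deltas during backprop. */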
+float gradient(float x, ACTIVATION a){
     switch(a){
         case LINEAR:
             return 1;
@@ -46,3 +84,48 @@
     return 0;
 }
 
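+/* In-place chain rule: scale each incoming delta by the activation
+ * gradient at the matching unit. */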
+void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
+{
+    int i;
+    for(i = 0; i < n; ++i){
+        delta[i] *= gradient(x[i], a);
+    }
+}
+
+#ifdef GPU
+
+#include "opencl.h"
+#include <math.h>
+
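+/* Compile the OpenCL activation kernel once on first use and cache it. */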
+cl_kernel get_activation_kernel()
+{
+    static int init = 0;
+    static cl_kernel kernel;
+    if(!init){
+        kernel = get_kernel("src/activations.cl", "activate_array", 0);
+        init = 1;
+    }
+    return kernel;
+}
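+/* A minimal sketch of the matching kernel in src/activations.cl (the .cl
+ * source is not part of this patch, so this is an assumption); the argument
+ * order must line up with the clSetKernelArg() calls below:
+ *
+ *     __kernel void activate_array(__global float *x, int n, int a, float dropout)
+ *     {
+ *         int i = get_global_id(0);
+ *         if(i < n) x[i] = ...; // apply activation a, as in activate() above
+ *     }
+ */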
+
+
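+/* GPU version of activate_array(): launch one work-item per element of x. */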
+void activate_array_ongpu(cl_mem x, int n, ACTIVATION a, float dropout)
+{
+    cl_setup();
+    cl_kernel kernel = get_activation_kernel();
+    cl_command_queue queue = cl.queue;
+
+    cl_uint i = 0;
+    cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*) &x);
+    cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*) &n);
+    cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*) &a);
+    cl.error = clSetKernelArg(kernel, i++, sizeof(dropout), 
+        (void*) &dropout);
+    check_error(cl);
+
+    size_t gsize = n;
+
+    /* capture the enqueue status so check_error() below can see it */
+    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
+    check_error(cl);
+}
+#endif

--
Gitblit v1.10.0