From cb1f33c6ae840e8dc0f43518daf76e6ed01034f0 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 08 Dec 2014 19:48:57 +0000
Subject: [PATCH] Refactor activation functions, add TANH and GPU kernels
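
Replace the IDENTITY activation with LINEAR, add TANH, split each
activation and its gradient into its own helper function, add
activate_array()/gradient_array() for whole-layer updates, and add
OpenCL launch paths for both when built with GPU support.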
---
src/activations.c | 174 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 138 insertions(+), 36 deletions(-)
diff --git a/src/activations.c b/src/activations.c
index a255f0f..4232efa 100644
--- a/src/activations.c
+++ b/src/activations.c
@@ -2,70 +2,172 @@
#include <math.h>
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
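+/* Map an ACTIVATION enum to its string name (inverse of get_activation below); unknown values fall back to "relu". */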
+char *get_activation_string(ACTIVATION a)
+{
+ switch(a){
+ case SIGMOID:
+ return "sigmoid";
+ case RELU:
+ return "relu";
+ case RAMP:
+ return "ramp";
+ case LINEAR:
+ return "linear";
+ case TANH:
+ return "tanh";
+ default:
+ break;
+ }
+ return "relu";
+}
+
ACTIVATION get_activation(char *s)
{
if (strcmp(s, "sigmoid")==0) return SIGMOID;
if (strcmp(s, "relu")==0) return RELU;
- if (strcmp(s, "identity")==0) return IDENTITY;
+ if (strcmp(s, "linear")==0) return LINEAR;
if (strcmp(s, "ramp")==0) return RAMP;
+ if (strcmp(s, "tanh")==0) return TANH;
fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
return RELU;
}
-double activate(double x, ACTIVATION a){
+float linear_activate(float x){return x;}
+float sigmoid_activate(float x){return 1./(1. + exp(-x));}
+float relu_activate(float x){return x*(x>0);}
+float ramp_activate(float x){return x*(x>0)+.1*x;}
+float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
+//float tanh_activate(float x){return x - (x*x*x)/3;}
+
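+/* The *_gradient helpers take the activation *output* y = f(x), not the raw
+ * input; e.g. for sigmoid, dy/dx = y*(1-y), which is what (1-x)*x computes. */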
+float linear_gradient(float x){return 1;}
+float sigmoid_gradient(float x){return (1-x)*x;}
+float relu_gradient(float x){return (x>0);}
+float ramp_gradient(float x){return (x>0)+.1;}
+float tanh_gradient(float x){return 1-x*x;}
+
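+/* Dispatch to the per-activation helper above; unhandled enum values return 0. */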
+float activate(float x, ACTIVATION a)
+{
switch(a){
- case IDENTITY:
- return x;
+ case LINEAR:
+ return linear_activate(x);
case SIGMOID:
- return 1./(1.+exp(-x));
+ return sigmoid_activate(x);
case RELU:
- return x*(x>0);
+ return relu_activate(x);
case RAMP:
- return x*(x>0) + .1*x;
- }
- return 0;
-}
-double gradient(double x, ACTIVATION a){
- switch(a){
- case IDENTITY:
- return 1;
- case SIGMOID:
- return (1.-x)*x;
- case RELU:
- return (x>0);
- case RAMP:
- return (x>0) + .1;
+ return ramp_activate(x);
+ case TANH:
+ return tanh_activate(x);
}
return 0;
}
-double identity_activation(double x)
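+/* Apply activation a to each of the n entries of x, in place. */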
+void activate_array(float *x, const int n, const ACTIVATION a)
{
- return x;
-}
-double identity_gradient(double x)
-{
- return 1;
+ int i;
+ for(i = 0; i < n; ++i){
+ x[i] = activate(x[i], a);
+ }
}
-double relu_activation(double x)
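+/* Gradient of activation a, evaluated at the activation's output x. */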
+float gradient(float x, ACTIVATION a)
{
- return x*(x>0);
-}
-double relu_gradient(double x)
-{
- return (x>0);
+ switch(a){
+ case LINEAR:
+ return linear_gradient(x);
+ case SIGMOID:
+ return sigmoid_gradient(x);
+ case RELU:
+ return relu_gradient(x);
+ case RAMP:
+ return ramp_gradient(x);
+ case TANH:
+ return tanh_gradient(x);
+ }
+ return 0;
}
-double sigmoid_activation(double x)
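+/* Backward pass through an activation: scale each delta[i] by the local gradient at output x[i]. */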
+void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
{
- return 1./(1.+exp(-x));
+ int i;
+ for(i = 0; i < n; ++i){
+ delta[i] *= gradient(x[i], a);
+ }
+}
+
+#ifdef GPU
+
+#include "opencl.h"
+#include <math.h>
+
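+/* Build the OpenCL kernel once on first use and cache it in a static (lazy init; not thread-safe). */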
+cl_kernel get_activation_kernel()
+{
+ static int init = 0;
+ static cl_kernel kernel;
+ if(!init){
+ kernel = get_kernel("src/activations.cl", "activate_array", 0);
+ init = 1;
+ }
+ return kernel;
}
-double sigmoid_gradient(double x)
+void activate_array_ongpu(cl_mem x, int n, ACTIVATION a)
{
- return x*(1.-x);
+ cl_kernel kernel = get_activation_kernel();
+ cl_command_queue queue = cl.queue;
+
+ cl_uint i = 0;
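+ /* Bind buffer, length, and activation type; each call overwrites cl.error, so check_error sees only the last status. */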
+ cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*) &x);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*) &n);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*) &a);
+ check_error(cl);
+
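+ /* One work-item per array element; the local work size (NULL) is left to the OpenCL runtime. */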
+ size_t gsize = n;
+
+ cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
+ check_error(cl);
}
+cl_kernel get_gradient_kernel()
+{
+ static int init = 0;
+ static cl_kernel kernel;
+ if(!init){
+ kernel = get_kernel("src/activations.cl", "gradient_array", 0);
+ init = 1;
+ }
+ return kernel;
+}
+
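+/* Same launch pattern as activate_array_ongpu, with the delta buffer as a fourth kernel argument. */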
+void gradient_array_ongpu(cl_mem x, int n, ACTIVATION a, cl_mem delta)
+{
+ cl_kernel kernel = get_gradient_kernel();
+ cl_command_queue queue = cl.queue;
+
+ cl_uint i = 0;
+ cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*) &x);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*) &n);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*) &a);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*) &delta);
+ check_error(cl);
+
+ size_t gsize = n;
+
+ cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
+ check_error(cl);
+}
+#endif
--
Gitblit v1.10.0