char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case SIGMOID:
            return "sigmoid";
        case LOGISTIC:
            return "logistic";
        case RELU:
            return "relu";
        case RELIE:
            return "relie";
        case RAMP:
            return "ramp";
        case LINEAR:
            return "linear";
        case TANH:
            return "tanh";
        case PLSE:
            return "plse";
        default:
            break;
    }
    return "relu";
}

ACTIVATION get_activation(char *s)
{
    if (strcmp(s, "sigmoid")==0) return SIGMOID;
    if (strcmp(s, "logistic")==0) return LOGISTIC;
    if (strcmp(s, "relu")==0) return RELU;
    if (strcmp(s, "relie")==0) return RELIE;
    if (strcmp(s, "plse")==0) return PLSE;
    if (strcmp(s, "linear")==0) return LINEAR;
    if (strcmp(s, "ramp")==0) return RAMP;
    if (strcmp(s, "tanh")==0) return TANH;

    return RELU;
}

float linear_activate(float x){return x;}
float sigmoid_activate(float x){return 1./(1. + exp(-x));}
float relu_activate(float x){return x*(x>0);}
float ramp_activate(float x){return x*(x>0)+.1*x;}
float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}

float linear_gradient(float x){return 1;}
float sigmoid_gradient(float x){return (1-x)*x;}
float relu_gradient(float x){return (x>0);}
float ramp_gradient(float x){return (x>0)+.1;}
float tanh_gradient(float x){return 1-x*x;}
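
/*
 * Note: activate() and gradient() below also dispatch to logistic_*, relie_*,
 * and plse_* helpers that are not part of this excerpt. The definitions below
 * are a minimal sketch consistent with the surrounding code, not necessarily
 * the original implementations.
 */
float logistic_activate(float x){return 1./(1. + exp(-x));}
float relie_activate(float x){return x > 0 ? x : .01*x;}
float plse_activate(float x)
{
    if(x < -4) return .01 * (x + 4);
    if(x >  4) return .01 * (x - 4) + 1;
    return .125*x + .5;
}

float logistic_gradient(float x){return (1-x)*x;}
float relie_gradient(float x){return (x > 0) ? 1 : .01;}
float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}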

float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate(x);
        case SIGMOID:
            return sigmoid_activate(x);
        case LOGISTIC:
            return logistic_activate(x);
        case RELU:
            return relu_activate(x);
        case RELIE:
            return relie_activate(x);
        case RAMP:
            return ramp_activate(x);
        case TANH:
            return tanh_activate(x);
        case PLSE:
            return plse_activate(x);
    }
    return 0;
}

float gradient(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_gradient(x);
        case SIGMOID:
            return sigmoid_gradient(x);
        case LOGISTIC:
            return logistic_gradient(x);
        case RELU:
            return relu_gradient(x);
        case RELIE:
            return relie_gradient(x);
        case RAMP:
            return ramp_gradient(x);
        case TANH:
            return tanh_gradient(x);
        case PLSE:
            return plse_gradient(x);
    }
    return 0;
}
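
/*
 * The CPU-side array wrappers are not present in this excerpt. The versions
 * below are an assumed sketch that mirrors the GPU entry points further down:
 * they apply activate()/gradient() elementwise over a buffer.
 */
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int i;
    for(i = 0; i < n; ++i){
        x[i] = activate(x[i], a);
    }
}

void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
{
    int i;
    for(i = 0; i < n; ++i){
        delta[i] *= gradient(x[i], a);
    }
}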

#ifdef GPU

#include "opencl.h"
#include <math.h>

cl_kernel get_activation_kernel()
{
    static int init = 0;
    static cl_kernel kernel;
    if(!init){
        kernel = get_kernel("src/activations.cl", "activate_array", 0);
        init = 1;
    }
    return kernel;
}

void activate_array_ongpu(cl_mem x, int n, ACTIVATION a)
{
    cl_setup();
    cl_kernel kernel = get_activation_kernel();
    cl_command_queue queue = cl.queue;

    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*) &x);
    cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*) &n);
    cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*) &a);
    check_error(cl);

    size_t gsize = n;

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
    check_error(cl);
}

cl_kernel get_gradient_kernel()
{
    static int init = 0;
    static cl_kernel kernel;
    if(!init){
        kernel = get_kernel("src/activations.cl", "gradient_array", 0);
        init = 1;
    }
    return kernel;
}

void gradient_array_ongpu(cl_mem x, int n, ACTIVATION a, cl_mem delta)
{
    cl_setup();
    cl_kernel kernel = get_gradient_kernel();
    cl_command_queue queue = cl.queue;

    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*) &x);
    cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*) &n);
    cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*) &a);
    cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*) &delta);
    check_error(cl);

    size_t gsize = n;

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
    check_error(cl);
}
#endif
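
/*
 * For reference, the "activate_array" kernel loaded from src/activations.cl
 * would have roughly the following shape. This is an illustrative sketch only,
 * not the project's actual kernel source, and it assumes the ACTIVATION enum
 * and an activate() helper are also compiled into the .cl program. Each work
 * item handles one element of the buffer:
 *
 *   __kernel void activate_array(__global float *x, int n, ACTIVATION a)
 *   {
 *       int i = get_global_id(0);
 *       if(i < n) x[i] = activate(x[i], a);
 *   }
 */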