float relu_activate(float x){return x*(x>0);}
float ramp_activate(float x){return x*(x>0)+.1*x;}
float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
/* cheaper cubic (Taylor) approximation of tanh, kept disabled: */
//float tanh_activate(float x){return x - (x*x*x)/3;}

/* Gradients take the layer's *activated output* as input, which is why
   sigmoid_gradient computes s*(1-s) directly from the stored output. */
float linear_gradient(float x){return 1;}
float sigmoid_gradient(float x){return (1-x)*x;}
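
/* A sketch of the remaining gradients in the same output-based
   convention; the names follow the *_gradient pattern above and are
   assumptions for illustration, not necessarily what the rest of this
   file defines. For relu, d/dx max(0,x) = (x>0), and the output is
   positive iff the input was; for tanh, 1 - tanh(x)^2 = 1 - y^2 where
   y is the stored output. */
float relu_gradient(float x){return (x>0);}
float ramp_gradient(float x){return (x>0)+.1;}
float tanh_gradient(float x){return 1-x*x;}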

void activate_array_ongpu(cl_mem x, int n, ACTIVATION a)
{
    cl_setup();
    cl_kernel kernel = get_activation_kernel();
    cl_command_queue queue = cl.queue;

    /* Bind the kernel arguments before launch; the order (x, n, a) is
       assumed to match the kernel's parameter list. */
    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*)&x);
    cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*)&n);
    cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*)&a);
    check_error(cl);

    /* one work-item per array element */
    size_t gsize = n;

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
    check_error(cl);
}
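
/* For reference, a minimal sketch of the device-side kernel that
   get_activation_kernel() is assumed to build; the real kernel source
   lives elsewhere in the repo, so the kernel name, signature, and the
   device-side activate() helper here are illustrative assumptions only.

   __kernel void activate_array(__global float *x, int n, ACTIVATION a)
   {
       int i = get_global_id(0);
       if(i < n) x[i] = activate(x[i], a);
   }
*/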

void gradient_array_ongpu(cl_mem x, int n, ACTIVATION a, cl_mem delta)
{
    cl_setup();
    cl_kernel kernel = get_gradient_kernel();
    cl_command_queue queue = cl.queue;

    /* Bind the kernel arguments before launch; the order
       (x, n, a, delta) is assumed to match the kernel's parameter
       list. */
    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*)&x);
    cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*)&n);
    cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*)&a);
    cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*)&delta);
    check_error(cl);

    /* one work-item per array element */
    size_t gsize = n;

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, &gsize, 0, 0, 0, 0);
    check_error(cl);
}
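
/* Usage sketch (the layer fields named here are hypothetical, for
   illustration only): apply the nonlinearity to a layer's outputs on
   the device, then scale the incoming deltas by the gradient during
   backprop.

   activate_array_ongpu(l.output_cl, l.outputs*l.batch, l.activation);
   ...
   gradient_array_ongpu(l.output_cl, l.outputs*l.batch, l.activation, l.delta_cl);
*/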
#endif