| | |
// Scaled sigmoid ("loggy"): maps x into (-1, 1) via 2*sigmoid(x) - 1.
// Uses expf and float literals — the previous double-precision exp/literals
// silently promoted the whole expression to fp64 on the device.
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1.f;}
// ReLU: pass x through when positive, otherwise 0 (branchless gate form).
__device__ float relu_activate_kernel(float x)
{
    float gate = (x > 0);   // 1.0f when x is positive, else 0.0f
    return gate * x;        // same products as the original x*(x>0)
}
// ELU: identity for x >= 0, exp(x) - 1 for x < 0, selected branchlessly.
// expf/1.f replace the double-precision exp/1 that promoted the math to fp64.
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x) - 1.f);}
// RelIE: x for x > 0, otherwise a small 0.01*x slope (leaky-style).
// The file previously defined this function twice — a redefinition compile
// error; the stray duplicate that copied relu's body has been dropped and
// the double literal .01 made a float literal.
__device__ float relie_activate_kernel(float x){return (x > 0) ? x : .01f*x;}
// Ramp: x + 0.1*x for positive x, 0.1*x otherwise (branchless).
// .1f replaces the double literal .1 that promoted the product to fp64.
__device__ float ramp_activate_kernel(float x){return x*(x > 0) + .1f*x;}
// Leaky ReLU: x for x > 0, otherwise 0.1*x.
// .1f replaces the double literal .1 that promoted the product to fp64.
__device__ float leaky_activate_kernel(float x){return (x > 0) ? x : .1f*x;}
// tanh via the logistic identity: tanh(x) = 2/(1 + exp(-2x)) - 1.
// expf and f-suffixed literals keep the computation in single precision;
// the previous exp/2/1 forms promoted every operation to fp64.
__device__ float tanh_activate_kernel(float x){return 2.f/(1.f + expf(-2.f*x)) - 1.f;}
| | |
| | | |
// Host wrapper: applies activation `a` element-wise to the device array
// x[0..n) using a 1D launch of cuda_gridsize(n) blocks of BLOCK threads on
// the library's CUDA stream (get_cuda_stream()). Asynchronous with respect
// to the host; only the launch status is checked, not kernel completion.
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
    // The previous code launched the kernel twice — once on the legacy
    // default stream and once on get_cuda_stream() (a merge artifact that
    // doubled the work and serialized against the default stream). Keep
    // only the stream-aware launch.
    activate_array_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>(x, n, a);
    // cudaPeekAtLastError reads the launch status without clearing it.
    check_error(cudaPeekAtLastError());
}
| | | |