| | |
| | | #include "cuda.h" |
| | | } |
| | | |
| | | |
// Leaky hard-tanh activation: identity on [0,1], small slope (.001)
// outside the band so gradients never vanish completely.
// Float-suffixed literals avoid silent promotion to double on device.
__device__ float lhtan_activate_kernel(float x)
{
    if(x < 0.f) return .001f*x;
    if(x > 1.f) return .001f*(x - 1.f) + 1.f;
    return x;
}
// Gradient of lhtan w.r.t. its INPUT x: 1 inside the open interval (0,1),
// .001 elsewhere (including the boundary points themselves).
// Float-suffixed literal avoids double promotion on device.
__device__ float lhtan_gradient_kernel(float x)
{
    if(x > 0.f && x < 1.f) return 1.f;
    return .001f;
}
| | | |
// Hard tanh: clamp the input to the interval [-1, 1].
// (Branchy clamp rather than fminf/fmaxf so NaN inputs pass through
// unchanged, exactly as the comparison-based form behaves.)
__device__ float hardtan_activate_kernel(float x)
{
    return (x < -1) ? -1 : ((x > 1) ? 1 : x);
}
// Identity activation: output equals input.
__device__ float linear_activate_kernel(float x)
{
    return x;
}
// Logistic sigmoid: 1 / (1 + e^-x), mapping R -> (0, 1).
// Uses expf and float literals; the original's exp()/1. performed the
// whole computation in double on the device.
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
// Scaled/shifted sigmoid: 2*sigmoid(x) - 1, mapping R -> (-1, 1).
// Uses expf and float literals to keep the math single-precision.
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1.f;}
// ReLU: negative inputs are zeroed via multiplication by the
// (x > 0) predicate; positive inputs pass through unchanged.
__device__ float relu_activate_kernel(float x)
{
    return x * (x > 0);
}
// ELU: x for x >= 0, e^x - 1 for x < 0 (smooth, bounded-below negative side).
// Branchless predicate-select form preserved; expf/1.f keep it in float,
// where the original's exp()/1 promoted to double.
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x) - 1.f);}
// ReLIE (leaky-style ReLU variant): x for positive inputs, .01*x otherwise.
// The source contained TWO definitions of this function (a redefinition
// compile error, evidently a merge artifact); the leaky variant is kept,
// and .01f avoids double promotion.
__device__ float relie_activate_kernel(float x){return (x > 0) ? x : .01f*x;}
// Ramp: ReLU plus a small linear term (.1*x) so the negative side keeps
// a nonzero slope. .1f keeps the arithmetic single-precision.
__device__ float ramp_activate_kernel(float x){return x*(x > 0) + .1f*x;}
// Leaky ReLU: x for positive inputs, .1*x otherwise.
// .1f keeps the arithmetic single-precision on the device.
__device__ float leaky_activate_kernel(float x){return (x > 0) ? x : .1f*x;}
// Hyperbolic tangent via the sigmoid identity: tanh(x) = 2/(1 + e^-2x) - 1.
// expf and float-suffixed literals keep the whole expression in float,
// where the original's exp() and int/double literals promoted to double.
__device__ float tanh_activate_kernel(float x){return (2.f/(1.f + expf(-2.f*x)) - 1.f);}
| | |
| | | else return (x - n) + floor(x/2.); |
| | | } |
| | | |
| | | |
// Gradient of hard tanh w.r.t. its input: 1 strictly inside (-1, 1),
// 0 elsewhere (the clamped regions, including the boundary points).
__device__ float hardtan_gradient_kernel(float x)
{
    return (x > -1 && x < 1) ? 1 : 0;
}
// Gradient of the identity activation: constant 1 for every input.
__device__ float linear_gradient_kernel(float x)
{
    return 1;
}
// Gradient of the logistic sigmoid expressed in terms of its OUTPUT:
// given y = sigmoid(z), dy/dz = y*(1 - y). The argument here is the
// already-activated value, not the pre-activation input.
__device__ float logistic_gradient_kernel(float x)
{
    return x * (1 - x);
}
| | | __device__ float loggy_gradient_kernel(float x) |
| | |
| | | return plse_activate_kernel(x); |
| | | case STAIR: |
| | | return stair_activate_kernel(x); |
| | | case HARDTAN: |
| | | return hardtan_activate_kernel(x); |
| | | case LHTAN: |
| | | return lhtan_activate_kernel(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | return plse_gradient_kernel(x); |
| | | case STAIR: |
| | | return stair_gradient_kernel(x); |
| | | case HARDTAN: |
| | | return hardtan_gradient_kernel(x); |
| | | case LHTAN: |
| | | return lhtan_gradient_kernel(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | |
/*
** Applies activation `a` element-wise, in place, to the device array
** x[0..n-1]. Grid size comes from cuda_gridsize(n) with BLOCK threads
** per block, launched on the library's CUDA stream.
**
** NOTE: the original issued the SAME launch twice — once on the default
** stream and once on get_cuda_stream() — doubling the work (a merge
** artifact). Only the stream-aware launch is kept.
*/
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
    activate_array_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>(x, n, a);
    // Peek (not Get) so the sticky error flag is reported without being cleared.
    check_error(cudaPeekAtLastError());
}
| | | |