| | |
| | | #include "cuda.h" |
| | | } |
| | | |
| | | |
// Leaky hard-tanh ("lhtan") activation: identity on [0, 1], small slope
// (0.001) below 0 and above 1 so gradients never vanish entirely.
// Scalar per-element device helper.
__device__ float lhtan_activate_kernel(float x)
{
    // Float literals (.001f, 1.f) keep the math in single precision;
    // the bare double literals in the original promoted every operation
    // to double, which is dramatically slower on most GPUs.
    if(x < 0) return .001f*x;
    if(x > 1) return .001f*(x-1.f) + 1.f;
    return x;
}
// Derivative of the leaky hard-tanh activation: 1 inside the open
// interval (0, 1), 0.001 (the leak slope) everywhere else.
// NOTE(review): x here appears to be the activation input, matching
// lhtan_activate_kernel's piecewise regions — confirm against callers.
__device__ float lhtan_gradient_kernel(float x)
{
    if(x > 0 && x < 1) return 1;
    // .001f (float literal) avoids returning a double that the caller
    // immediately truncates; keeps the kernel single-precision throughout.
    return .001f;
}
| | | |
// Hard tanh activation: clamp the input to the range [-1, 1].
// NaN input falls through both comparisons and is returned unchanged,
// exactly as in the early-return formulation.
__device__ float hardtan_activate_kernel(float x)
{
    // Single nested conditional expression instead of sequential returns.
    return (x < -1) ? -1 : ((x > 1) ? 1 : x);
}
// Identity ("linear") activation: returns the input unchanged.
__device__ float linear_activate_kernel(float x)
{
    return x;
}
// Logistic (sigmoid) activation: 1/(1+e^-x), mapping R onto (0, 1).
// Uses expf and float literals — the original's exp(-x) with double
// literals promoted the whole expression to double precision, which is
// far slower on most GPUs for no accuracy benefit here.
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
// "Loggy" activation: scaled sigmoid 2/(1+e^-x) - 1, mapping R onto (-1, 1).
// expf and float literals keep the computation single-precision (the
// original's exp/double literals forced a slow double-precision path).
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1.f;}
| | |
| | | else return (x - n) + floor(x/2.); |
| | | } |
| | | |
| | | |
// Derivative of the hard-tanh activation: 1 inside the open interval
// (-1, 1), 0 everywhere else (the clamped regions are flat).
__device__ float hardtan_gradient_kernel(float x)
{
    // Conditional expression in place of the guarded early return.
    return (x > -1 && x < 1) ? 1 : 0;
}
// Derivative of the identity activation: constant 1 for every input.
__device__ float linear_gradient_kernel(float x)
{
    return 1;
}
// Derivative of the logistic activation. NOTE(review): the formula
// x*(1-x) suggests x is the activation's OUTPUT (sigma' = s*(1-s)), not
// its input — confirm against the dispatching gradient kernel.
__device__ float logistic_gradient_kernel(float x)
{
    return x*(1-x);
}
| | | __device__ float loggy_gradient_kernel(float x) |
| | |
| | | return plse_activate_kernel(x); |
| | | case STAIR: |
| | | return stair_activate_kernel(x); |
| | | case HARDTAN: |
| | | return hardtan_activate_kernel(x); |
| | | case LHTAN: |
| | | return lhtan_activate_kernel(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | return plse_gradient_kernel(x); |
| | | case STAIR: |
| | | return stair_gradient_kernel(x); |
| | | case HARDTAN: |
| | | return hardtan_gradient_kernel(x); |
| | | case LHTAN: |
| | | return lhtan_gradient_kernel(x); |
| | | } |
| | | return 0; |
| | | } |