| | |
| | | GPU=0 |
| | | CUDNN=0 |
| | | OPENCV=0 |
| | | GPU=1 |
| | | CUDNN=1 |
| | | OPENCV=1 |
| | | DEBUG=0 |
| | | |
| | | ARCH= --gpu-architecture=compute_52 --gpu-code=compute_52 |
| | |
| | | } |
| | | |
| | | |
/* Device-side leaky hard tanh: identity on [0,1], slope .001 outside the band. */
__device__ float lhtan_activate_kernel(float x)
{
    if(x > 1) return .001*(x-1) + 1;
    return (x < 0) ? .001*x : x;
}
/* Device-side lhtan derivative: 1 on the open interval (0,1), .001 elsewhere. */
__device__ float lhtan_gradient_kernel(float x)
{
    return (x > 0 && x < 1) ? 1 : .001;
}
| | | |
| | | __device__ float hardtan_activate_kernel(float x) |
| | | { |
| | | if (x < -1) return -1; |
| | |
| | | return stair_activate_kernel(x); |
| | | case HARDTAN: |
| | | return hardtan_activate_kernel(x); |
| | | case LHTAN: |
| | | return lhtan_activate_kernel(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | return stair_gradient_kernel(x); |
| | | case HARDTAN: |
| | | return hardtan_gradient_kernel(x); |
| | | case LHTAN: |
| | | return lhtan_gradient_kernel(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | return "stair"; |
| | | case HARDTAN: |
| | | return "hardtan"; |
| | | case LHTAN: |
| | | return "lhtan"; |
| | | default: |
| | | break; |
| | | } |
| | |
| | | if (strcmp(s, "relie")==0) return RELIE; |
| | | if (strcmp(s, "plse")==0) return PLSE; |
| | | if (strcmp(s, "hardtan")==0) return HARDTAN; |
| | | if (strcmp(s, "lhtan")==0) return LHTAN; |
| | | if (strcmp(s, "linear")==0) return LINEAR; |
| | | if (strcmp(s, "ramp")==0) return RAMP; |
| | | if (strcmp(s, "leaky")==0) return LEAKY; |
| | |
| | | return stair_activate(x); |
| | | case HARDTAN: |
| | | return hardtan_activate(x); |
| | | case LHTAN: |
| | | return lhtan_activate(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | return stair_gradient(x); |
| | | case HARDTAN: |
| | | return hardtan_gradient(x); |
| | | case LHTAN: |
| | | return lhtan_gradient(x); |
| | | } |
| | | return 0; |
| | | } |
| | |
| | | #include "math.h" |
| | | |
/*
 * Every activation function the framework supports.
 * Defect fixed: the enumerator list appeared twice (merge/diff residue) —
 * once without LHTAN and without a trailing comma, once with it — which is
 * a syntax error. Only the complete list is kept; LHTAN stays last so the
 * integer values of the earlier members are unchanged.
 */
typedef enum{
    LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
}ACTIVATION;
| | | |
| | | ACTIVATION get_activation(char *s); |
| | |
| | | return .125*x + .5; |
| | | } |
| | | |
/* Leaky hard tanh: identity inside [0,1]; a small slope (.001) leaks
 * gradient outside the band instead of saturating flat. */
static inline float lhtan_activate(float x)
{
    if(x > 1) return .001*(x-1) + 1;
    return (x < 0) ? .001*x : x;
}
/* Derivative of lhtan_activate: 1 on the open interval (0,1), .001 elsewhere
 * (including the endpoints x == 0 and x == 1, matching the original). */
static inline float lhtan_gradient(float x)
{
    return (x > 0 && x < 1) ? 1 : .001;
}
| | | |
| | | static inline float hardtan_gradient(float x) |
| | | { |
| | | if (x > -1 && x < 1) return 1; |
| | |
| | | case EXP: |
| | | return net.learning_rate * pow(net.gamma, batch_num); |
| | | case POLY: |
| | | if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power); |
| | | return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power); |
| | | case RANDOM: |
| | | return net.learning_rate * pow(rand_uniform(0,1), net.power); |
| | |
| | | float *scales; |
| | | int *steps; |
| | | int num_steps; |
| | | int burn_in; |
| | | |
| | | int inputs; |
| | | int h, w, c; |
| | |
| | | |
| | | char *policy_s = option_find_str(options, "policy", "constant"); |
| | | net->policy = get_policy(policy_s); |
| | | net->burn_in = option_find_int_quiet(options, "burn_in", 0); |
| | | if(net->policy == STEP){ |
| | | net->step = option_find_int(options, "step", 1); |
| | | net->scale = option_find_float(options, "scale", 1); |