| | |
| | | /* Tail of a learning-rate schedule switch (the function header and the
| | |    switch opener are outside this chunk; earlier policy cases not visible). */
| | | case POLY: |
| | | /* Polynomial decay: lr * (1 - batch/max_batches)^power. */
| | | return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power); |
| | | case SIG: |
| | | /* Sigmoid decay centered at net.step; net.gamma controls steepness. */
| | | return net.learning_rate * (1/(1+exp(net.gamma*(batch_num - net.step)))); |
| | | /* NOTE(review): UNREACHABLE — the return above always fires first. This
| | |    duplicate (double-precision literal variant, 1./(1.+...)) is dead code;
| | |    keep one of the two and delete the other. */
| | | return net.learning_rate * (1./(1.+exp(net.gamma*(batch_num - net.step)))); |
| | | default: |
| | | /* Unknown policy value: warn and fall back to the base learning rate. */
| | | fprintf(stderr, "Policy is weird!\n"); |
| | | return net.learning_rate; |
| | |
| | | /* Interior of a network-resize routine (function header, loop-body start,
| | |    and closing braces lie outside this chunk). Records the new input
| | |    dimensions, then resizes each layer in order, threading each layer's
| | |    output size into the next layer's input size. */
| | | //if(w == net->w && h == net->h) return 0; |
| | | net->w = w; |
| | | net->h = h; |
| | | int inputs = 0; |
| | | //fprintf(stderr, "Resizing to %d x %d...", w, h); |
| | | //fflush(stderr); |
| | | for (i = 0; i < net->n; ++i){ |
| | |
| | | /* NOTE(review): the branch chain that precedes this break (where layer l
| | |    is fetched and the earlier layer-type cases are handled) is not visible
| | |    in this chunk — confirm against the full file before editing. */
| | | break; |
| | | }else if(l.type == NORMALIZATION){ |
| | | resize_normalization_layer(&l, w, h); |
| | | }else if(l.type == COST){ |
| | | resize_cost_layer(&l, inputs); |
| | | }else{ |
| | | /* Unsupported layer type; error() presumably aborts — TODO confirm. */
| | | error("Cannot resize this type of layer"); |
| | | } |
| | | /* Propagate this layer's geometry forward: its output count and spatial
| | |    dimensions become the next layer's input count and w/h. */
| | | inputs = l.outputs; |
| | | net->layers[i] = l; |
| | | w = l.out_w; |
| | | h = l.out_h; |