float get_current_rate(network net)
{
    int batch_num = get_current_batch(net);
    int i;
    float rate;
    switch (net.policy) {
        case CONSTANT:
            return net.learning_rate;
        case STEP:
            return net.learning_rate * pow(net.scale, batch_num/net.step);
        case STEPS:
            rate = net.learning_rate;
            for(i = 0; i < net.num_steps; ++i){
                if(net.steps[i] > batch_num) return rate;
                rate *= net.scales[i];
            }
            return rate;
        case EXP:
            return net.learning_rate * pow(net.gamma, batch_num);
        case POLY:
            return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
        case SIG:
            return net.learning_rate * (1./(1.+exp(net.gamma*(batch_num - net.step))));
        default:
            fprintf(stderr, "Policy is weird!\n");
            return net.learning_rate;
    }
}
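The STEPS branch above multiplies the base rate by scales[i] each time
training passes steps[i]: with learning_rate = .01, steps = {100, 500}
and scales = {.1, .1}, the rate is .01 through batch 99, .001 from
batch 100, and .0001 from batch 500 onward. Below is a minimal sketch
of driving the function directly. It is not part of the original
source; it assumes Darknet's network struct fields (batch,
subdivisions, seen) and that get_current_batch() computes
*net.seen / (net.batch * net.subdivisions).

/* sketch: print the scheduled rate at a few training checkpoints */
#include <stdio.h>
#include "network.h"    /* darknet header: network, get_current_rate() */

int main(void)
{
    network net = {0};
    int seen = 0;
    int steps[] = {100, 500};
    float scales[] = {.1, .1};

    net.policy = STEPS;         /* darknet learning_rate_policy value */
    net.learning_rate = .01;
    net.num_steps = 2;
    net.steps = steps;
    net.scales = scales;
    net.batch = 1;              /* batch*subdivisions == 1, so batch_num == seen */
    net.subdivisions = 1;
    net.seen = &seen;           /* assumed: get_current_batch() reads this */

    int checkpoints[] = {0, 99, 100, 500, 2000};
    int i;
    for(i = 0; i < 5; ++i){
        seen = checkpoints[i];
        printf("batch %5d -> rate %g\n", checkpoints[i], get_current_rate(net));
    }
    return 0;
}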
int resize_network(network *net, int w, int h)
{
    int i;
    //if(w == net->w && h == net->h) return 0;
    net->w = w;
    net->h = h;
    int inputs = 0;
    //fprintf(stderr, "Resizing to %d x %d...", w, h);
    //fflush(stderr);
    for (i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
            break;
        }else if(l.type == NORMALIZATION){
            resize_normalization_layer(&l, w, h);
        }else if(l.type == COST){
            resize_cost_layer(&l, inputs);
        }else{
            error("Cannot resize this type of layer");
        }
        inputs = l.outputs;
        net->layers[i] = l;
        w = l.out_w;
        h = l.out_h;