    net.momentum = 0;
    net.decay = 0;
    #ifdef GPU
        if(gpu_index >= 0) update_network_gpu(net);
        //if(net.gpu_index >= 0) update_network_gpu(net);
    #endif
}

    int batch_num = get_current_batch(net);
    int i;
    float rate;
    // warm-up: ramp the rate from 0 up to net.learning_rate over the first burn_in batches
    if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power);
    switch (net.policy) {
        case CONSTANT:
            return net.learning_rate;

            for(i = 0; i < net.num_steps; ++i){
                if(net.steps[i] > batch_num) return rate;
                rate *= net.scales[i];
                if(net.steps[i] > batch_num - 1) reset_momentum(net);
                //if(net.steps[i] > batch_num - 1 && net.scales[i] > 1) reset_momentum(net);
            }
            return rate;
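            /* Worked example for the loop above (illustrative numbers; assumes
             * the elided STEPS header initialized rate to net.learning_rate):
             * with learning_rate = 0.001, steps = {1000, 2000} and
             * scales = {0.1, 0.1}, a batch_num of 1500 has passed only the
             * first step, so the loop returns 0.001 * 0.1 = 1e-4; from batch
             * 2000 onward both scales apply and it returns 1e-5. */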
        case EXP:
            return net.learning_rate * pow(net.gamma, batch_num);
        case POLY:
            return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
            //if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power);
            //return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
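            /* Illustrative POLY arithmetic (values assumed, not taken from this
             * file): with power = 4 and batch_num = max_batches / 2, the decay
             * factor is (1 - 0.5)^4 = 0.0625, so the returned rate is
             * learning_rate / 16; it falls smoothly to 0 as batch_num
             * approaches max_batches. */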
        case RANDOM:
            return net.learning_rate * pow(rand_uniform(0,1), net.power);
        case SIG:

            state.delta = prev.delta;
        }
        layer l = net.layers[i];
        if (l.stopbackward) break;   // stop here: this layer and everything before it stays frozen
        l.backward(l, state);
    }
}
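/* A minimal sketch of how backward_network() fits into one training step,
 * assuming the usual darknet entry points (forward_network, update_network)
 * and that X/y are batches prepared by the caller; illustrative only, not a
 * verbatim copy of the training loop:
 *
 *     network_state state = {0};
 *     state.net = net;
 *     state.input = X;
 *     state.truth = y;
 *     state.train = 1;
 *     state.delta = 0;
 *     forward_network(net, state);   // fills each layer's output (and cost)
 *     backward_network(net, state);  // walks the layers in reverse, as above
 *     update_network(net);           // applies get_current_rate(net) to the weights
 */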
int resize_network(network *net, int w, int h)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    if(gpu_index >= 0){
        // the old workspace is freed here; a buffer sized for the new dimensions is allocated at the end
        cuda_free(net->workspace);
    }
#endif
    int i;
    //if(w == net->w && h == net->h) return 0;
    net->w = w;

            resize_crop_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if(l.type == REGION){
            resize_region_layer(&l, w, h);
        }else if(l.type == ROUTE){
            resize_route_layer(&l, net);
        }else if(l.type == REORG){
            resize_reorg_layer(&l, w, h);
        }else if(l.type == AVGPOOL){

        }else if(l.type == COST){
            resize_cost_layer(&l, inputs);
        }else{
            fprintf(stderr, "Resizing type %d \n", (int)l.type);
            error("Cannot resize this type of layer");
        }
        // track the largest per-layer requirement so one shared buffer can serve every layer
        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;

    }
#ifdef GPU
    if(gpu_index >= 0){
        // net->workspace was already freed at the top of this function
        if(net->input_gpu) {
            cuda_free(*net->input_gpu);
            *net->input_gpu = 0;
            cuda_free(*net->truth_gpu);
            *net->truth_gpu = 0;
        }
        net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
    }else {
        free(net->workspace);

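/* Hedged usage sketch: multi-scale training in darknet-style detectors picks a
 * new input resolution every few iterations and resizes the network in place.
 * The range and the 32-pixel multiple below are assumptions (the usual
 * downsampling factor), not something this function dictates:
 *
 *     int dim = (rand() % 10 + 10) * 32;   // 320, 352, ..., 608
 *     resize_network(&net, dim, dim);
 */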
void free_network(network net)
{
    int i;
    for(i = 0; i < net.n; ++i){
        free_layer(net.layers[i]);
    }
    free(net.layers);
#ifdef GPU
    // free the device-side input/truth buffers, then the host pointers that held them
    if (net.input_gpu && *net.input_gpu) cuda_free(*net.input_gpu);
    if (net.truth_gpu && *net.truth_gpu) cuda_free(*net.truth_gpu);
    if (net.input_gpu) free(net.input_gpu);
    if (net.truth_gpu) free(net.truth_gpu);
    if (gpu_index >= 0) cuda_free(net.workspace);
    else free(net.workspace);
#else
    free(net.workspace);
#endif
}
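/* Hedged end-to-end sketch (parse_network_cfg and load_weights are the usual
 * darknet loaders; the file names are placeholders):
 *
 *     network net = parse_network_cfg("cfg/yolo.cfg");
 *     load_weights(&net, "yolo.weights");
 *     // ... run training or inference ...
 *     free_network(net);   // releases every layer, the layer array, and the workspace
 */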