#include "route_layer.h"
#include "shortcut_layer.h"
#include "yolo_layer.h"
#include "parser.h"

/* Build a network from a cfg file with the given batch size and, if a weights
   path is supplied, load the pretrained weights; clear resets the seen-images counter. */
network *load_network_custom(char *cfg, char *weights, int clear, int batch)
{
    printf(" Try to load cfg: %s, weights: %s, clear = %d \n", cfg, weights, clear);
    network *net = calloc(1, sizeof(network));
    *net = parse_network_cfg_custom(cfg, batch);
    if (weights && weights[0] != 0) {
        load_weights(net, weights);
    }
    if (clear) (*net->seen) = 0;
    return net;
}

network *load_network(char *cfg, char *weights, int clear)
{
    return load_network_custom(cfg, weights, clear, 0);
}
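/* Usage sketch (not part of the original source): how the loaders above are
   typically driven for single-image inference. It assumes the usual darknet
   API (network_predict(), free_network()) and placeholder cfg/weights paths. */
static void example_load_for_inference(void)
{
    /* batch = 1 so the parser sizes its buffers for inference */
    network *net = load_network_custom("yolov3.cfg", "yolov3.weights", 0, 1);

    float *input = calloc(net->w * net->h * net->c, sizeof(float));
    float *prediction = network_predict(*net, input);  /* raw output of the last layer */
    (void)prediction;

    free(input);
    free_network(*net);
    free(net);
}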

int get_current_batch(network net)
{
    /* completed iterations = images seen so far / (batch * subdivisions) */
    int batch_num = (*net.seen) / (net.batch * net.subdivisions);
    return batch_num;
}

/* Zero the recurrent state for batch entry b; in GPU builds the state of
   RNN/CRNN/LSTM layers is held in l.state_gpu and l.h_gpu. */
void reset_network_state(network *net, int b)
{
    int i;
    for (i = 0; i < net->n; ++i) {
#ifdef GPU
        layer l = net->layers[i];
        if (l.state_gpu) {
            fill_ongpu(l.outputs, 0, l.state_gpu + l.outputs*b, 1);
        }
        if (l.h_gpu) {
            fill_ongpu(l.outputs, 0, l.h_gpu + l.outputs*b, 1);
        }
#endif
    }
}

void reset_rnn(network *net)
{
    reset_network_state(net, 0);
}
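/* Usage sketch (an assumption, not from the original file): reset_rnn() is called
   between independent sequences so recurrent layers do not carry hidden state
   from one video clip into the next. */
static void example_reset_between_clips(network *net, int num_clips)
{
    int c;
    for (c = 0; c < num_clips; ++c) {
        /* ... run network_predict() frame by frame on clip c ... */
        reset_rnn(net);  /* drop the recurrent state before the next clip */
    }
}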

float get_current_rate(network net)
{
    int batch_num = get_current_batch(net);


    free(boxes);
    free_ptrs((void **)probs, l.w*l.h*l.n);

    //correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative);
    correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter);
}
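/* Caller-side sketch (assumed darknet API; the exact get_network_boxes() signature
   differs between forks): the usual flow that leads into fill_network_boxes() below
   and the box-correction call above. */
static void example_detect(network *net, image im, int classes)
{
    int nboxes = 0;
    network_predict(*net, im.data);
    /* letter = 1 when the input was letterboxed rather than plainly resized */
    detection *dets = get_network_boxes(net, im.w, im.h, 0.25f, 0.5f, 0, 1, &nboxes, 1);
    do_nms_sort(dets, nboxes, classes, 0.45f);
    /* ... draw or report dets[0..nboxes-1] ... */
    free_detections(dets, nboxes);
}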

void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter)

        free_layer(net.layers[i]);
    }
    free(net.layers);

    free(net.scales);
    free(net.steps);
    free(net.seen);

#ifdef GPU
    if (gpu_index >= 0) cuda_free(net.workspace);
    else free(net.workspace);

        layer *l = &net.layers[j];

        if (l->type == CONVOLUTIONAL) {
            printf(" Fuse Convolutional layer \t\t l->size = %d \n", l->size);
            //printf(" Merges Convolutional-%d and batch_norm \n", j);

            if (l->batch_normalize) {
                int f;
                for (f = 0; f < l->n; ++f)
                {
                    /* fold the batch-norm shift into the bias:
                       b' = b - scale * mean / (sqrt(variance) + eps) */
                    l->biases[f] = l->biases[f] - (double)l->scales[f] * l->rolling_mean[f] / (sqrt((double)l->rolling_variance[f]) + .000001f);

                    const size_t filter_size = l->size*l->size*l->c;
                    int i;
                    for (i = 0; i < filter_size; ++i) {
                        int w_index = f*filter_size + i;

                        /* fold the batch-norm scale into the weights:
                           w' = w * scale / (sqrt(variance) + eps) */
                        l->weights[w_index] = (double)l->weights[w_index] * l->scales[f] / (sqrt((double)l->rolling_variance[f]) + .000001f);
                    }
                }

                l->batch_normalize = 0;  /* the BN transform is now baked into weights and biases */
            }
        }
        else {
            printf(" Skip layer: %d \n", l->type);
            //printf(" Fusion skip layer type: %d \n", l->type);
        }
    }
}
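/* Numeric sketch (self-contained, not part of darknet): verifies that folding the
   batch-norm parameters into the convolution, as the fusion loop above does, gives
   the same output as running batch norm after the convolution for one scalar value. */
#include <stdio.h>
#include <math.h>

static void example_check_fusion(void)
{
    double x = 0.7;                      /* one convolution input value */
    double w = 1.3, bias = 0.2;          /* conv weight and bias (BN beta) */
    double scale = 0.9;                  /* BN gamma */
    double mean = 0.4, variance = 0.25;  /* rolling statistics */
    double eps = .000001;

    /* unfused: convolve, then normalize, scale and shift */
    double conv = x * w;
    double bn = scale * (conv - mean) / (sqrt(variance) + eps) + bias;

    /* fused, mirroring the loop above */
    double w_fused = w * scale / (sqrt(variance) + eps);
    double b_fused = bias - scale * mean / (sqrt(variance) + eps);
    double fused = x * w_fused + b_fused;

    printf(" bn = %f, fused = %f \n", bn, fused);  /* the two values agree */
}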