    float **probs = calloc(l.w*l.h*l.n, sizeof(float *));
    int i, j;
    /* one float[classes] row of class probabilities per candidate box */
    for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes, sizeof(float));
    get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, map);

    /* copy the raw boxes into the detection structs */
    for (j = 0; j < l.w*l.h*l.n; ++j) {
        dets[j].classes = l.classes;
        dets[j].bbox = boxes[j];
    }

    free(net.workspace);
#endif
}
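For context, a minimal standalone sketch (not Darknet code; the sizes are made-up stand-ins for l.w*l.h*l.n and l.classes) of the probs allocation pattern used above, together with the matching cleanup a caller is expected to perform:

#include <stdlib.h>

int main(void)
{
    /* stand-ins for l.w*l.h*l.n and l.classes from the fragment above */
    int num_boxes = 13 * 13 * 5;
    int classes = 80;

    /* one float[classes] row of class probabilities per candidate box */
    float **probs = calloc(num_boxes, sizeof(float *));
    int j;
    for (j = 0; j < num_boxes; ++j) probs[j] = calloc(classes, sizeof(float));

    /* ... probs would be filled by get_region_boxes() and consumed here ... */

    /* matching cleanup: free each row, then the pointer table itself */
    for (j = 0; j < num_boxes; ++j) free(probs[j]);
    free(probs);
    return 0;
}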
void fuse_conv_batchnorm(network net)
{
    int j;
    for (j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];

        if (l->type == CONVOLUTIONAL) {
            printf(" Fuse Convolutional layer \t\t l->size = %d \n", l->size);

            if (l->batch_normalize) {
                int f;
                for (f = 0; f < l->n; ++f)
                {
                    /* fold the batch-norm shift into the bias:
                       b' = b - gamma * mean / (sqrt(variance) + eps) */
                    l->biases[f] = l->biases[f] - l->scales[f] * l->rolling_mean[f] / (sqrtf(l->rolling_variance[f]) + .000001f);

                    /* fold the batch-norm scale into every weight of this filter:
                       w' = w * gamma / (sqrt(variance) + eps) */
                    const size_t filter_size = l->size*l->size*l->c;
                    size_t i;
                    for (i = 0; i < filter_size; ++i) {
                        size_t w_index = f*filter_size + i;

                        l->weights[w_index] = l->weights[w_index] * l->scales[f] / (sqrtf(l->rolling_variance[f]) + .000001f);
                    }
                }

                /* batch norm is now baked into the weights/biases; disable it
                   and push the updated parameters to the layer's GPU copy */
                l->batch_normalize = 0;
                push_convolutional_layer(*l);
            }
        }
        else {
            printf(" Skip layer: %d \n", l->type);
        }
    }
}
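The folding above relies on the identity gamma * (w*x - mean) / (sqrt(variance) + eps) + bias == x * w' + b', with w' = w * gamma / (sqrt(variance) + eps) and b' = bias - gamma * mean / (sqrt(variance) + eps), which matches how Darknet applies batch norm at inference. A standalone sketch (not Darknet code; scalar values are made up for illustration) that checks the equivalence numerically:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* made-up values for one output filter and one input activation */
    float x = 0.5f;                 /* input value hit by the weight      */
    float w = 1.25f;                /* convolutional weight               */
    float bias = 0.1f;              /* conv bias / BN beta (l->biases[f]) */
    float scale = 0.9f;             /* BN gamma            (l->scales[f]) */
    float mean = 0.4f;              /* BN rolling mean                    */
    float variance = 0.25f;         /* BN rolling variance                */
    float denom = sqrtf(variance) + .000001f;

    /* inference with separate conv + batch norm */
    float y_bn = scale * (x * w - mean) / denom + bias;

    /* fused parameters, computed the same way fuse_conv_batchnorm() does */
    float w_fused = w * scale / denom;
    float b_fused = bias - scale * mean / denom;
    float y_fused = x * w_fused + b_fused;

    printf("conv+bn: %f  fused: %f\n", y_bn, y_fused);  /* should be equal */
    return 0;
}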