| | |
| | | #include "dropout_layer.h" |
| | | #include "route_layer.h" |
| | | #include "shortcut_layer.h" |
| | | #include "yolo_layer.h" |
| | | #include "upsample_layer.h" |
| | | #include "parser.h" |
| | | |
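| | | // Allocates a network, parses the cfg with the requested batch size, loads |
| | | // weights when a path is given, and zeroes the seen-images counter if clear is set. |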
| | | network *load_network_custom(char *cfg, char *weights, int clear, int batch) |
| | | { |
| | | printf(" Try to load cfg: %s, weights: %s, clear = %d \n", cfg, weights, clear); |
| | | network *net = calloc(1, sizeof(network)); |
| | | *net = parse_network_cfg_custom(cfg, batch); |
| | | if (weights && weights[0] != 0) { |
| | | load_weights(net, weights); |
| | | } |
| | | if (clear) (*net->seen) = 0; |
| | | return net; |
| | | } |
| | | |
| | | network *load_network(char *cfg, char *weights, int clear) |
| | | { |
| | | return load_network_custom(cfg, weights, clear, 0); |
| | | } |
| | | |
| | | int get_current_batch(network net) |
| | | { |
| | | // number of weight updates so far: images seen / images per mini-batch |
| | | int batch_num = (*net.seen)/(net.batch*net.subdivisions); |
| | | return batch_num; |
| | | } |
| | | |
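| | | // Zeroes the recurrent state (state_gpu / h_gpu) of every layer for batch index b (GPU build only). |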
| | | void reset_network_state(network *net, int b) |
| | | { |
| | | int i; |
| | | for (i = 0; i < net->n; ++i) { |
| | | #ifdef GPU |
| | | layer l = net->layers[i]; |
| | | if (l.state_gpu) { |
| | | fill_ongpu(l.outputs, 0, l.state_gpu + l.outputs*b, 1); |
| | | } |
| | | if (l.h_gpu) { |
| | | fill_ongpu(l.outputs, 0, l.h_gpu + l.outputs*b, 1); |
| | | } |
| | | #endif |
| | | } |
| | | } |
| | | |
| | | void reset_rnn(network *net) |
| | | { |
| | | reset_network_state(net, 0); |
| | | } |
| | | |
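| | | // Returns the learning rate for the current batch according to the cfg policy. |
| | | // During burn-in the rate ramps up as learning_rate * (batch_num / burn_in)^power. |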
| | | float get_current_rate(network net) |
| | | { |
| | | int batch_num = get_current_batch(net); |
| | | int i; |
| | | float rate; |
| | | if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power); |
| | | switch (net.policy) { |
| | | case CONSTANT: |
| | | return net.learning_rate; |
| | |
| | | case EXP: |
| | | return net.learning_rate * pow(net.gamma, batch_num); |
| | | case POLY: |
| | | return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power); |
| | | case RANDOM: |
| | | return net.learning_rate * pow(rand_uniform(0,1), net.power); |
| | | case SIG: |
| | |
| | | net.n = n; |
| | | net.layers = calloc(net.n, sizeof(layer)); |
| | | net.seen = calloc(1, sizeof(int)); |
| | | #ifdef GPU |
| | | net.input_gpu = calloc(1, sizeof(float *)); |
| | | net.truth_gpu = calloc(1, sizeof(float *)); |
| | | |
| | | net.input16_gpu = calloc(1, sizeof(float *)); |
| | | net.output16_gpu = calloc(1, sizeof(float *)); |
| | | net.max_input16_size = calloc(1, sizeof(size_t)); |
| | | net.max_output16_size = calloc(1, sizeof(size_t)); |
| | | #endif |
| | | return net; |
| | | } |
| | | |
| | |
| | | net->layers[i].batch = b; |
| | | #ifdef CUDNN |
| | | if(net->layers[i].type == CONVOLUTIONAL){ |
| | | cudnn_convolutional_setup(net->layers + i, cudnn_fastest); |
| | | /* |
| | | layer *l = net->layers + i; |
| | | cudnn_convolutional_setup(l, cudnn_fastest); |
| | | // check for excessive memory consumption |
| | | size_t free_byte; |
| | | size_t total_byte; |
| | | check_error(cudaMemGetInfo(&free_byte, &total_byte)); |
| | | if (l->workspace_size > free_byte || l->workspace_size >= total_byte / 2) { |
| | | printf(" used slow CUDNN algo without Workspace! \n"); |
| | | cudnn_convolutional_setup(l, cudnn_smallest); |
| | | l->workspace_size = get_workspace_size(*l); |
| | | } |
| | | */ |
| | | } |
| | | #endif |
| | | } |
| | |
| | | cuda_set_device(net->gpu_index); |
| | | if(gpu_index >= 0){ |
| | | cuda_free(net->workspace); |
| | | if (net->input_gpu) { |
| | | cuda_free(*net->input_gpu); |
| | | *net->input_gpu = 0; |
| | | cuda_free(*net->truth_gpu); |
| | | *net->truth_gpu = 0; |
| | | } |
| | | } |
| | | #endif |
| | | int i; |
| | |
| | | //fflush(stderr); |
| | | for (i = 0; i < net->n; ++i){ |
| | | layer l = net->layers[i]; |
| | | //printf(" %d: layer = %d,", i, l.type); |
| | | if(l.type == CONVOLUTIONAL){ |
| | | resize_convolutional_layer(&l, w, h); |
| | | }else if(l.type == CROP){ |
| | | resize_crop_layer(&l, w, h); |
| | | }else if(l.type == MAXPOOL){ |
| | | resize_maxpool_layer(&l, w, h); |
| | | }else if(l.type == REGION){ |
| | | resize_region_layer(&l, w, h); |
| | | }else if (l.type == YOLO) { |
| | | resize_yolo_layer(&l, w, h); |
| | | }else if(l.type == ROUTE){ |
| | | resize_route_layer(&l, net); |
| | | }else if (l.type == SHORTCUT) { |
| | | resize_shortcut_layer(&l, w, h); |
| | | }else if (l.type == UPSAMPLE) { |
| | | resize_upsample_layer(&l, w, h); |
| | | }else if(l.type == REORG){ |
| | | resize_reorg_layer(&l, w, h); |
| | | }else if(l.type == AVGPOOL){ |
| | |
| | | } |
| | | #ifdef GPU |
| | | if(gpu_index >= 0){ |
| | | if(net->input_gpu) { |
| | | cuda_free(*net->input_gpu); |
| | | *net->input_gpu = 0; |
| | | cuda_free(*net->truth_gpu); |
| | | *net->truth_gpu = 0; |
| | | } |
| | | printf(" try to allocate workspace = %zu * sizeof(float), ", workspace_size / sizeof(float) + 1); |
| | | net->workspace = cuda_make_array(0, workspace_size/sizeof(float) + 1); |
| | | printf(" CUDA allocate done! \n"); |
| | | }else { |
| | | free(net->workspace); |
| | | net->workspace = calloc(1, workspace_size); |
| | |
| | | return out; |
| | | } |
| | | |
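| | | // Counts the boxes the network can emit: thresholded YOLO detections plus |
| | | // the full w*h*n grid of every DETECTION/REGION layer. |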
| | | int num_detections(network *net, float thresh) |
| | | { |
| | | int i; |
| | | int s = 0; |
| | | for (i = 0; i < net->n; ++i) { |
| | | layer l = net->layers[i]; |
| | | if (l.type == YOLO) { |
| | | s += yolo_num_detections(l, thresh); |
| | | } |
| | | if (l.type == DETECTION || l.type == REGION) { |
| | | s += l.w*l.h*l.n; |
| | | } |
| | | } |
| | | return s; |
| | | } |
| | | |
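| | | // Allocates the detection array (and per-detection prob/mask buffers) sized by num_detections(). |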
| | | detection *make_network_boxes(network *net, float thresh, int *num) |
| | | { |
| | | layer l = net->layers[net->n - 1]; |
| | | int i; |
| | | int nboxes = num_detections(net, thresh); |
| | | if (num) *num = nboxes; |
| | | detection *dets = calloc(nboxes, sizeof(detection)); |
| | | for (i = 0; i < nboxes; ++i) { |
| | | dets[i].prob = calloc(l.classes, sizeof(float)); |
| | | if (l.coords > 4) { |
| | | dets[i].mask = calloc(l.coords - 4, sizeof(float)); |
| | | } |
| | | } |
| | | return dets; |
| | | } |
| | | |
| | | |
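| | | // Converts REGION-layer output into detection structs and rescales the boxes |
| | | // back to the original image size via correct_yolo_boxes(). |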
| | | void custom_get_region_detections(layer l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, detection *dets, int letter) |
| | | { |
| | | box *boxes = calloc(l.w*l.h*l.n, sizeof(box)); |
| | | float **probs = calloc(l.w*l.h*l.n, sizeof(float *)); |
| | | int i, j; |
| | | for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = calloc(l.classes, sizeof(float)); |
| | | get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, map); |
| | | for (j = 0; j < l.w*l.h*l.n; ++j) { |
| | | dets[j].classes = l.classes; |
| | | dets[j].bbox = boxes[j]; |
| | | dets[j].objectness = 1; |
| | | for (i = 0; i < l.classes; ++i) { |
| | | dets[j].prob[i] = probs[j][i]; |
| | | } |
| | | } |
| | | |
| | | free(boxes); |
| | | free_ptrs((void **)probs, l.w*l.h*l.n); |
| | | |
| | | //correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative); |
| | | correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter); |
| | | } |
| | | |
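| | | // Walks every output layer and appends its detections to dets; warns when |
| | | // [yolo] layers disagree on the number of classes. |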
| | | void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter) |
| | | { |
| | | int prev_classes = -1; |
| | | int j; |
| | | for (j = 0; j < net->n; ++j) { |
| | | layer l = net->layers[j]; |
| | | if (l.type == YOLO) { |
| | | int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter); |
| | | dets += count; |
| | | if (prev_classes < 0) prev_classes = l.classes; |
| | | else if (prev_classes != l.classes) { |
| | | printf(" Error: Different [yolo] layers have different number of classes = %d and %d - check your cfg-file! \n", |
| | | prev_classes, l.classes); |
| | | } |
| | | } |
| | | if (l.type == REGION) { |
| | | custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter); |
| | | //get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets); |
| | | dets += l.w*l.h*l.n; |
| | | } |
| | | if (l.type == DETECTION) { |
| | | get_detection_detections(l, w, h, thresh, dets); |
| | | dets += l.w*l.h*l.n; |
| | | } |
| | | } |
| | | } |
| | | |
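| | | // Allocates and fills detections in one call. A minimal usage sketch (the |
| | | // cfg/weights paths and thresholds below are placeholders, and im is assumed |
| | | // to be an image already loaded, e.g. with load_image_color): |
| | | //   network *net = load_network_custom("yolov3.cfg", "yolov3.weights", 0, 1); |
| | | //   network_predict_image(net, im); |
| | | //   int nboxes = 0; |
| | | //   detection *dets = get_network_boxes(net, im.w, im.h, 0.5, 0.5, 0, 1, &nboxes, 0); |
| | | //   // ... apply NMS, draw boxes ... |
| | | //   free_detections(dets, nboxes); |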
| | | detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter) |
| | | { |
| | | detection *dets = make_network_boxes(net, thresh, num); |
| | | fill_network_boxes(net, w, h, thresh, hier, map, relative, dets, letter); |
| | | return dets; |
| | | } |
| | | |
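| | | // Frees the per-detection prob/mask buffers and then the detection array itself. |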
| | | void free_detections(detection *dets, int n) |
| | | { |
| | | int i; |
| | | for (i = 0; i < n; ++i) { |
| | | free(dets[i].prob); |
| | | if (dets[i].mask) free(dets[i].mask); |
| | | } |
| | | free(dets); |
| | | } |
| | | |
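| | | // Resizes the image (no letterboxing) to the network input size, runs a |
| | | // single-image forward pass, and returns a pointer to the network output. |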
| | | float *network_predict_image(network *net, image im) |
| | | { |
| | | //image imr = letterbox_image(im, net->w, net->h); |
| | | image imr = resize_image(im, net->w, net->h); |
| | | set_batch_network(net, 1); |
| | | float *p = network_predict(*net, imr.data); |
| | | free_image(imr); |
| | | return p; |
| | | } |
| | | |
| | | int network_width(network *net) { return net->w; } |
| | | int network_height(network *net) { return net->h; } |
| | | |
| | | matrix network_predict_data_multi(network net, data test, int n) |
| | | { |
| | | int i,j,b,m; |
| | |
| | | free_layer(net.layers[i]); |
| | | } |
| | | free(net.layers); |
| | | |
| | | free(net.scales); |
| | | free(net.steps); |
| | | free(net.seen); |
| | | |
| | | #ifdef GPU |
| | | if (gpu_index >= 0) cuda_free(net.workspace); |
| | | else free(net.workspace); |
| | | |
| | | if (*net.input_gpu) cuda_free(*net.input_gpu); |
| | | if (*net.truth_gpu) cuda_free(*net.truth_gpu); |
| | | if (net.input_gpu) free(net.input_gpu); |
| | | if (net.truth_gpu) free(net.truth_gpu); |
| | | |
| | | if (*net.input16_gpu) cuda_free(*net.input16_gpu); |
| | | if (*net.output16_gpu) cuda_free(*net.output16_gpu); |
| | | if (net.input16_gpu) free(net.input16_gpu); |
| | | if (net.output16_gpu) free(net.output16_gpu); |
| | | if (net.max_input16_size) free(net.max_input16_size); |
| | | if (net.max_output16_size) free(net.max_output16_size); |
| | | #else |
| | | free(net.workspace); |
| | | #endif |
| | | } |
| | | |
| | | |
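| | | // Folds batch normalization into the preceding convolution so a single conv |
| | | // reproduces conv+BN at inference time: |
| | | //   w' = w * scale / (sqrt(rolling_variance) + eps) |
| | | //   b' = b - scale * rolling_mean / (sqrt(rolling_variance) + eps) |
| | | // Afterwards batch_normalize is cleared and the fused weights are pushed to the GPU. |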
| | | void fuse_conv_batchnorm(network net) |
| | | { |
| | | int j; |
| | | for (j = 0; j < net.n; ++j) { |
| | | layer *l = &net.layers[j]; |
| | | |
| | | if (l->type == CONVOLUTIONAL) { |
| | | //printf(" Merges Convolutional-%d and batch_norm \n", j); |
| | | |
| | | if (l->batch_normalize) { |
| | | int f; |
| | | for (f = 0; f < l->n; ++f) |
| | | { |
| | | l->biases[f] = l->biases[f] - (double)l->scales[f] * l->rolling_mean[f] / (sqrt((double)l->rolling_variance[f]) + .000001f); |
| | | |
| | | const size_t filter_size = l->size*l->size*l->c; |
| | | int i; |
| | | for (i = 0; i < filter_size; ++i) { |
| | | int w_index = f*filter_size + i; |
| | | |
| | | l->weights[w_index] = (double)l->weights[w_index] * l->scales[f] / (sqrt((double)l->rolling_variance[f]) + .000001f); |
| | | } |
| | | } |
| | | |
| | | l->batch_normalize = 0; |
| | | #ifdef GPU |
| | | if (gpu_index >= 0) { |
| | | push_convolutional_layer(*l); |
| | | } |
| | | #endif |
| | | } |
| | | } |
| | | else { |
| | | //printf(" Fusion skip layer type: %d \n", l->type); |
| | | } |
| | | } |
| | | } |