shortcut_layer resize for random=1
// src/data.c -- fill_truth_region(): skip degenerate boxes before
// mapping each ground-truth box to its grid cell
h = boxes[i].h;
id = boxes[i].id;

//if (w < .01 || h < .01) continue;   // previous threshold (~4 px at 416x416)
if (w < .001 || h < .001) continue;

int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);

// src/data.c -- fill_truth_detection(): the same filter; the debug
// printf variant is kept commented out
id = boxes[i].id;

// not detect small objects
//if ((w < 0.001 || h < 0.001)) { printf("small w = %f, h = %f \n", w, h); continue; }
if ((w < 0.001 || h < 0.001)) continue;

truth[i*5+0] = x;
truth[i*5+1] = y;
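// --- Worked check (standalone, not part of the commit): what the two
// cutoffs mean in pixels, assuming darknet's common 416x416 input ---
#include <stdio.h>
int main(void)
{
    // old 0.01 cutoff drops boxes under ~4 px; new 0.001 cutoff only
    // drops sub-pixel boxes, so small objects stay in the truth data
    printf("old: %.2f px, new: %.2f px\n", 0.01 * 416, 0.001 * 416);
    return 0;
}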

// src/detector.c -- train_detector(): one network copy per GPU;
// checkpoints start once the iteration counter reaches iter_save
float avg_loss = -1;
network *nets = calloc(ngpus, sizeof(network));

int iter_save = 100;

srand(time(0));
int seed = rand();
int i;
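// --- Sketch (not in the excerpt): how nets[] is typically filled in
// train_detector(); cfgfile, weightfile, and gpus are assumed
// parameters from the usual function signature ---
for (i = 0; i < ngpus; ++i) {
    srand(seed);                       // identical init across replicas
#ifdef GPU
    cuda_set_device(gpus[i]);          // bind this copy to one GPU
#endif
    nets[i] = parse_network_cfg(cfgfile);
    if (weightfile) load_weights(&nets[i], weightfile);
    nets[i].learning_rate *= ngpus;    // scale LR with the replica count
}
network net = nets[0];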

// src/detector.c -- train_detector(): loader arguments; more loader
// threads keep the GPUs fed during multi-scale training
args.small_object = l.small_object;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.threads = 4; // 8;   // previous value
args.threads = 8;  // 64

args.angle = net.angle;
args.exposure = net.exposure;
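// --- Usage sketch (the surrounding loop is assumed, following the
// stock train_detector() pattern): args feeds darknet's threaded
// data loader ---
pthread_t load_thread = load_data(args);   // spawns args.threads workers
while (get_current_batch(net) < net.max_batches) {
    pthread_join(load_thread, 0);          // wait for the prepared batch
    train = buffer;                        // take ownership of it
    load_thread = load_data(args);         // immediately start the next one
    /* ... train on 'train' ... */
}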

// src/detector.c -- train_detector(): with random=1, pick a new input
// size (a multiple of 32) every 10 batches
if (l.random && count++ % 10 == 0) {
    printf("Resizing\n");
    int dim = (rand() % 12 + (init_w/32 - 5)) * 32;   // init_w-160 .. init_w+192, step 32
    //int dim = (rand() % 10 + 10) * 32;
    //if (get_current_batch(net)+100 > net.max_batches) dim = 544;
    //int dim = (rand() % 4 + 16) * 32;
    printf("%d\n", dim);

#endif // OPENCV
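// --- Worked check (standalone, not part of the commit): every size the
// expression above can produce for the assumed default init_w = 416.
// rand() % 12 spans 0..11, so dim = (k + 8) * 32 = 256 .. 608 -- that is
// -160 to +192 around 416, slightly wider than the old "+-160" note ---
#include <stdio.h>
int main(void)
{
    int init_w = 416;
    for (int k = 0; k < 12; ++k)
        printf("%d ", (k + init_w/32 - 5) * 32);   // 256 288 ... 608
    printf("\n");
    return 0;
}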

// src/detector.c -- train_detector(): save a checkpoint every 100
// iterations; with several GPUs, average the replicas' weights first
//if (i % 1000 == 0 || (i < 1000 && i % 100 == 0)) {
if (i % 100 == 0) {
    if (i >= iter_save) {
        iter_save += 100;
#ifdef GPU
        if (ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
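// --- Sketch (not in the excerpt): the save that usually follows the
// guard above; base and backup_directory are assumed locals from the
// usual train_detector() layout ---
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);   // numbered checkpoint every 100 iterations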

// src/network.c -- resize_network(): per-layer dispatch; the SHORTCUT
// case is the new one that lets random=1 work with shortcut layers
    resize_region_layer(&l, w, h);
}else if(l.type == ROUTE){
    resize_route_layer(&l, net);
}else if(l.type == SHORTCUT){
    resize_shortcut_layer(&l, w, h);
}else if(l.type == REORG){
    resize_reorg_layer(&l, w, h);
}else if(l.type == AVGPOOL){
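// --- Orientation sketch (simplified; workspace re-allocation and the
// remaining layer types are omitted): the loop this dispatch lives in ---
int resize_network(network *net, int w, int h)
{
    int i;
    net->w = w;
    net->h = h;
    for (i = 0; i < net->n; ++i) {
        layer l = net->layers[i];
        if (l.type == CONVOLUTIONAL) resize_convolutional_layer(&l, w, h);
        else if (l.type == MAXPOOL)  resize_maxpool_layer(&l, w, h);
        else if (l.type == SHORTCUT) resize_shortcut_layer(&l, w, h);
        // ... ROUTE, REORG, AVGPOOL, etc. as shown above ...
        net->layers[i] = l;   // store the resized layer
        w = l.out_w;          // the next layer sees this output size
        h = l.out_h;
    }
    return 0;
}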

// src/shortcut_layer.c -- tail of make_shortcut_layer(); the new resize
// function is added right after it
    return l;
}

// resize_shortcut_layer(): a shortcut layer's output keeps its input's
// spatial size, so w/out_w and h/out_h move together and the CPU/GPU
// buffers are re-allocated to the new size
void resize_shortcut_layer(layer *l, int w, int h)
{
    assert(l->w == l->out_w);
    assert(l->h == l->out_h);
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    l->delta = realloc(l->delta, l->outputs*l->batch * sizeof(float));
    l->output = realloc(l->output, l->outputs*l->batch * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}
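// --- Defensive variant (a sketch, not part of the commit): the two
// realloc calls above are unchecked; failing fast avoids training on a
// NULL buffer after a failed resize ---
#include <stdio.h>
#include <stdlib.h>
static float *xrealloc_float(float *p, size_t n)
{
    float *q = realloc(p, n * sizeof(float));
    if (!q) {
        fprintf(stderr, "resize_shortcut_layer: out of memory (%zu floats)\n", n);
        exit(EXIT_FAILURE);
    }
    return q;
}
// usage: l->delta = xrealloc_float(l->delta, (size_t)l->outputs * l->batch);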

// forward pass: copy the incoming activations, add the output of the
// layer referenced by l.index, then activate. The excerpt ends at the
// copy; the two calls after it follow the stock darknet body.
void forward_shortcut_layer(const layer l, network_state state)
{
    copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
    shortcut_cpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output,
                 l.out_w, l.out_h, l.out_c, l.output);
    activate_array(l.output, l.outputs*l.batch, l.activation);
}

// src/shortcut_layer.h -- public interface, now including the
// resize_shortcut_layer() declaration
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
void forward_shortcut_layer(const layer l, network_state state);
void backward_shortcut_layer(const layer l, network_state state);
void resize_shortcut_layer(layer *l, int w, int h);

#ifdef GPU
void forward_shortcut_layer_gpu(const layer l, network_state state);
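// --- Parser-side sketch (hedged; follows the usual parse_shortcut()
// shape in parser.c, with options/params/net as the parser's state): a
// [shortcut] section names an earlier layer with from=, and both
// layers' dimensions go into make_shortcut_layer() ---
char *from = option_find(options, "from");
int index = atoi(from);
if (index < 0) index = params.index + index;   // relative offsets like from=-3
layer prev = net.layers[index];
layer s = make_shortcut_layer(params.batch, index,
                              params.w, params.h, params.c,          // this layer's input
                              prev.out_w, prev.out_h, prev.out_c);   // skipped layer's output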