
    /* layer shape / option fields (reverse is used by the reorg layer) */
    int size;
    int side;
    int stride;
    int reverse;
    int pad;
    int sqrt;
    int flip;

    /* region-layer training options */
    int bias_match;
    int random;
    float thresh;
    int classfix;

    int dontload;
    int dontloadscales;

    l.rescore = option_find_int_quiet(options, "rescore",0);

    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);

    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
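/*
 * These options come from the network cfg file, presumably the [region]
 * section. As a hedged illustration (the values shown are simply the
 * defaults used above, not taken from any particular model file), the
 * matching cfg entries would look like:
 *
 *   [region]
 *   rescore=0
 *   thresh=.5
 *   classfix=0
 *   coord_scale=1
 *   object_scale=1
 */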

layer parse_reorg(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int reverse = option_find_int_quiet(options, "reverse",0);

    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch = params.batch;
    if(!(h && w && c)) error("Layer before reorg layer must output image.");

    layer layer = make_reorg_layer(batch,w,h,c,stride,reverse);
    return layer;
}

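/*
 * A hedged sketch of the matching cfg section; stride=2 with the default
 * reverse=0 gives the usual space-to-depth reorg (the dimension arithmetic
 * is in make_reorg_layer further down). Illustrative only, not copied from
 * a specific model file:
 *
 *   [reorg]
 *   stride=2
 */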

    return iou;
}

/*
 * Computes the class-prediction deltas for one predicted box: with a softmax
 * tree the targets are set along the path from the class to the root, and
 * without one a plain one-hot target over the flat class list is used.
 */
void delta_region_class(float *output, float *delta, int index, int class, int classes, tree *hier, float scale, float *avg_cat)
{
    int i, n;
    if(hier){
        float pred = 1;
        while(class >= 0){
            pred *= output[index + class];
            int g = hier->group[class];
            int offset = hier->group_offset[g];
            for(i = 0; i < hier->group_size[g]; ++i){
                delta[index + offset + i] = scale * (0 - output[index + offset + i]);
            }
            delta[index + class] = scale * (1 - output[index + class]);

            class = hier->parent[class];
        }
        *avg_cat += pred;
    } else {
        for(n = 0; n < classes; ++n){
            delta[index + n] = scale * (((n == class)?1 : 0) - output[index + n]);
            if(n == class) *avg_cat += output[index + n];
        }
    }
}
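/*
 * A minimal standalone sketch (not part of the library) of the flat,
 * non-hierarchical branch above: the target is 1 for the ground-truth class
 * and 0 elsewhere, and the delta is the scaled difference from the current
 * prediction. The array sizes and values are made up for illustration.
 */
static void flat_class_delta_example(void)
{
    float output[3] = {0.2f, 0.7f, 0.1f};   /* current class predictions */
    float delta[3];
    int truth = 1;                          /* ground-truth class index */
    float scale = 1.0f;                     /* plays the role of l.class_scale */
    int n;
    for(n = 0; n < 3; ++n){
        delta[n] = scale * (((n == truth) ? 1.f : 0.f) - output[n]);
    }
    /* delta is now {-0.2, +0.3, -0.1}: push the true class up, the others down */
    (void)delta;
}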

/* Inverse of the logistic sigmoid. */
float logit(float x)
{
    return log(x/(1.-x));
}

    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    int class_count = 0;
    *(l.cost) = 0;
    for (b = 0; b < l.batch; ++b) {
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                    box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                    float best_iou = 0;
                    int best_class = -1;
                    for(t = 0; t < 30; ++t){
                        box truth = float_to_box(state.truth + t*5 + b*l.truths);
                        if(!truth.x) break;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) {
                            best_class = state.truth[t*5 + b*l.truths + 4];
                            best_iou = iou;
                        }
                    }
                    avg_anyobj += l.output[index + 4];
                    l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    else{
                        if (best_iou > l.thresh) {
                            l.delta[index + 4] = 0;
                            if(l.classfix > 0){
                                delta_region_class(l.output, l.delta, index + 5, best_class, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat);
                                ++class_count;
                            }
                        }
                    }

                    if(*(state.net.seen) < 12800){
                        box truth = {0};


            int class = state.truth[t*5 + b*l.truths + 4];
            if (l.map) class = l.map[class];
            delta_region_class(l.output, l.delta, best_index + 5, class, l.classes, l.softmax_tree, l.class_scale, &avg_cat);
            ++count;
            ++class_count;
        }
    }
    //printf("\n");
    reorg(l.delta, l.w*l.h, size*l.n, l.batch, 0);
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}
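/*
 * For context, a hedged sketch of the YOLOv2-style decoding that
 * get_region_box (called above but not shown in this fragment) is expected
 * to perform: x/y are logistic-activated offsets within the grid cell and
 * w/h are exponential scalings of the anchor biases. The function name is
 * hypothetical and this assumes the file's math.h include; treat it as an
 * illustration, not the verbatim library function.
 */
static box decode_region_box_sketch(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + 1.f/(1.f + exp(-x[index + 0]))) / w;   /* cell offset + sigmoid(tx) */
    b.y = (j + 1.f/(1.f + exp(-x[index + 1]))) / h;
    b.w = exp(x[index + 2]) * biases[2*n]     / w;    /* anchor width  * exp(tw) */
    b.h = exp(x[index + 3]) * biases[2*n + 1] / h;    /* anchor height * exp(th) */
    return b;
}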

void backward_region_layer(const region_layer l, network_state state)
{
}

/* Box-extraction routine; its signature is reconstructed (assumed), since the fragment omits it. */
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i,j,n;
    float *predictions = l.output;
    //int per_cell = 5*num+classes;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];
            if(l.classfix == -1 && scale < .5) scale = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            boxes[index].x *= w;

#include "reorg_layer.h"
#include <stdio.h>


layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse)
{
    layer l = {0};
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.reverse = reverse;
    l.h = h;
    l.w = w;
    l.c = c;
    if(reverse){
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    fprintf(stderr, "Reorg Layer: %d x %d x %d image -> %d x %d x %d image\n", w,h,c,l.out_w, l.out_h, l.out_c);
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;

#ifdef GPU
/* On the GPU the forward pass just dispatches the rearrangement kernel,
 * flipping its direction flag when the layer is configured as reverse. */
void forward_reorg_layer_gpu(layer l, network_state state)
{
    if(l.reverse){
        reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu);
    }else {
        reorg_ongpu(state.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output_gpu);
    }
}

/* The backward pass applies the opposite rearrangement to the incoming
 * gradients so they line up with the layer's input again. */
void backward_reorg_layer_gpu(layer l, network_state state)
{
    if(l.reverse){
        reorg_ongpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, state.delta);
    }else{
        reorg_ongpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, state.delta);
    }
}
#endif
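/*
 * A standalone sketch (not the library kernel) of the rearrangement the reorg
 * layer performs when reverse == 0: each stride x stride block of spatial
 * positions is folded into the channel dimension, matching the shape change
 * w x h x c -> (w/stride) x (h/stride) x (c*stride*stride) computed in
 * make_reorg_layer. The function name is hypothetical and the exact element
 * ordering of the CUDA kernel may differ; this only illustrates the idea.
 */
#include <stdio.h>

static void space_to_depth_sketch(const float *in, float *out, int w, int h, int c, int stride)
{
    int out_w = w/stride, out_h = h/stride, out_c = c*stride*stride;
    for(int k = 0; k < out_c; ++k){
        for(int y = 0; y < out_h; ++y){
            for(int x = 0; x < out_w; ++x){
                int in_c  = k % c;                 /* source channel */
                int off   = k / c;                 /* which cell of the stride x stride block */
                int in_x  = x*stride + off % stride;
                int in_y  = y*stride + off / stride;
                int in_i  = in_x + w*(in_y + h*in_c);
                int out_i = x + out_w*(y + out_h*k);
                out[out_i] = in[in_i];
            }
        }
    }
}

int main(void)
{
    /* 4x4 single-channel input, stride 2 -> 2x2x4 output */
    float in[16], out[16];
    for(int i = 0; i < 16; ++i) in[i] = (float)i;
    space_to_depth_sketch(in, out, 4, 4, 1, 2);
    for(int i = 0; i < 16; ++i) printf("%g%c", out[i], (i%4==3) ? '\n' : ' ');
    return 0;
}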

#ifndef REORG_LAYER_H
#define REORG_LAYER_H

#include "layer.h"
#include "network.h"

layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse);
void resize_reorg_layer(layer *l, int w, int h);
void forward_reorg_layer(const layer l, network_state state);
void backward_reorg_layer(const layer l, network_state state);

#ifdef GPU
void forward_reorg_layer_gpu(layer l, network_state state);
void backward_reorg_layer_gpu(layer l, network_state state);
#endif

#endif
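/*
 * A hypothetical usage sketch: creating a non-reversed reorg layer with
 * stride 2 on a 26x26x64 input. Following the arithmetic in make_reorg_layer,
 * the output shape comes out as 13x13x256. The dimensions and the example
 * function name are made up for illustration.
 */
#include "reorg_layer.h"

void reorg_layer_example(void)
{
    layer l = make_reorg_layer(1, 26, 26, 64, 2, 0);
    /* l.out_w == 13, l.out_h == 13, l.out_c == 256, l.outputs == 13*13*256 */
    (void)l;
}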