 #define DOABS 1

-region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
+region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
 {
     region_layer l = {0};
     l.type = REGION;

     l.bias_updates = calloc(n*2, sizeof(float));
     l.outputs = h*w*n*(classes + coords + 1);
     l.inputs = l.outputs;
-    l.truths = 30*(5);
+    l.max_boxes = max_boxes;
+    l.truths = max_boxes*(5);
     l.delta = calloc(batch*l.outputs, sizeof(float));
     l.output = calloc(batch*l.outputs, sizeof(float));
     int i;

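Context for the new max_boxes parameter: l.truths becomes max_boxes*5 because each ground-truth entry in state.truth occupies five floats (x, y, w, h, class id), which is the layout the loops below index with state.truth + t*5 + b*l.truths. A minimal reading sketch under that layout (box_sketch and get_truth_box are illustrative names, not darknet API):

    typedef struct { float x, y, w, h; } box_sketch;   /* stand-in for darknet's box */

    /* Read truth box t of image b; truths_per_image corresponds to l.truths. */
    static box_sketch get_truth_box(const float *truth, int b, int t, int truths_per_image)
    {
        const float *p = truth + b*truths_per_image + t*5;
        box_sketch bx = { p[0], p[1], p[2], p[3] };     /* p[4] holds the class id */
        return bx;
    }
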
 void resize_region_layer(layer *l, int w, int h)
 {
+    int old_w = l->w;
+    int old_h = l->h;
     l->w = w;
     l->h = h;

     l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));

 #ifdef GPU
-    cuda_free(l->delta_gpu);
-    cuda_free(l->output_gpu);
-
-    l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
-    l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+    if (old_w < w || old_h < h) {
+        cuda_free(l->delta_gpu);
+        cuda_free(l->output_gpu);
+
+        l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
+        l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+    }
 #endif
 }
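
The old_w/old_h guard re-creates the GPU buffers only when the layer grows; resizing to something smaller reuses the existing, larger allocation. The buffer size in floats follows directly from the fields set in make_region_layer above; a small helper expressing that count (region_output_count is an illustrative name, not part of the patch):

    #include <stddef.h>

    /* Number of output floats a region layer needs for a given geometry
       (batch * l.outputs as set in make_region_layer above). */
    size_t region_output_count(int batch, int w, int h, int n, int classes, int coords)
    {
        return (size_t)batch * w * h * n * (classes + coords + 1);
    }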

     return iou;
 }

-void delta_region_class(float *output, float *delta, int index, int class, int classes, tree *hier, float scale, float *avg_cat)
+void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
 {
     int i, n;
     if(hier){
         float pred = 1;
-        while(class >= 0){
-            pred *= output[index + class];
-            int g = hier->group[class];
+        while(class_id >= 0){
+            pred *= output[index + class_id];
+            int g = hier->group[class_id];
             int offset = hier->group_offset[g];
             for(i = 0; i < hier->group_size[g]; ++i){
                 delta[index + offset + i] = scale * (0 - output[index + offset + i]);
             }
-            delta[index + class] = scale * (1 - output[index + class]);
+            delta[index + class_id] = scale * (1 - output[index + class_id]);

-            class = hier->parent[class];
+            class_id = hier->parent[class_id];
         }
         *avg_cat += pred;
     } else {
-        for(n = 0; n < classes; ++n){
-            delta[index + n] = scale * (((n == class)?1 : 0) - output[index + n]);
-            if(n == class) *avg_cat += output[index + n];
-        }
+        // Focal loss
+        if (focal_loss) {
+            // Focal Loss for Dense Object Detection: http://blog.csdn.net/linmingan/article/details/77885832
+            float alpha = 0.5;    // 0.25 or 0.5
+            //float gamma = 2;    // hardcoded in many places of the grad-formula
+
+            int ti = index + class_id;
+            float grad = -2 * (1 - output[ti])*logf(fmaxf(output[ti], 0.0000001))*output[ti] + (1 - output[ti])*(1 - output[ti]);
+
+            for (n = 0; n < classes; ++n) {
+                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
+
+                delta[index + n] *= alpha*grad;
+
+                if (n == class_id) *avg_cat += output[index + n];
+            }
+        }
+        else {
+            // default
+            for (n = 0; n < classes; ++n) {
+                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
+                if (n == class_id) *avg_cat += output[index + n];
+            }
+        }
     }
 }

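Sanity check on the grad factor introduced above: with gamma = 2 and p = sigmoid(x), the focal loss FL(p) = -(1-p)^2*log(p) has -dFL/dx = (1-p) * (-2*p*(1-p)*log(p) + (1-p)^2), and the second factor is exactly the grad computed in the patch. So for the target class the usual (1 - p) delta times alpha*grad reproduces the focal-loss gradient; the patch then reuses the same factor for the non-target classes as well. A standalone version of the factor (focal_grad is an illustrative name, not part of the patch):

    #include <math.h>

    /* Focal-loss correction factor for gamma = 2, same formula as the patch. */
    static float focal_grad(float p)
    {
        p = fmaxf(p, 0.0000001f);               /* same clamp as in the patch */
        return -2 * (1 - p) * logf(p) * p + (1 - p) * (1 - p);
    }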

         for (b = 0; b < l.batch; ++b){
             for(i = 0; i < l.h*l.w*l.n; ++i){
                 int index = size*i + b*l.outputs;
-                softmax(l.output + index + 5, l.classes, 1, l.output + index + 5);
+                softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
             }
         }
     }

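The softmax call gains a trailing argument, which appears to be a stride (passing 1 keeps the old contiguous behaviour). A sketch of a strided softmax over n entries spaced stride floats apart, under that assumption (softmax_stride_sketch is an illustrative name):

    #include <float.h>
    #include <math.h>

    /* Softmax over n entries spaced `stride` floats apart. */
    static void softmax_stride_sketch(const float *input, int n, float temp, float *output, int stride)
    {
        int i;
        float largest = -FLT_MAX;
        float sum = 0;
        for (i = 0; i < n; ++i)
            if (input[i*stride] > largest) largest = input[i*stride];
        for (i = 0; i < n; ++i) {
            float e = expf((input[i*stride] - largest)/temp);
            sum += e;
            output[i*stride] = e;
        }
        for (i = 0; i < n; ++i)
            output[i*stride] /= sum;
    }
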
     *(l.cost) = 0;
     for (b = 0; b < l.batch; ++b) {
         if(l.softmax_tree){
-            int onlyclass = 0;
-            for(t = 0; t < 30; ++t){
+            int onlyclass_id = 0;
+            for(t = 0; t < l.max_boxes; ++t){
                 box truth = float_to_box(state.truth + t*5 + b*l.truths);
                 if(!truth.x) break;
-                int class = state.truth[t*5 + b*l.truths + 4];
+                int class_id = state.truth[t*5 + b*l.truths + 4];
                 float maxp = 0;
                 int maxi = 0;
                 if(truth.x > 100000 && truth.y > 100000){
                     for(n = 0; n < l.n*l.w*l.h; ++n){
                         int index = size*n + b*l.outputs + 5;
                         float scale = l.output[index-1];
-                        float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class);
+                        float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
                         if(p > maxp){
                             maxp = p;
                             maxi = n;
                         }
                     }
                     int index = size*maxi + b*l.outputs + 5;
-                    delta_region_class(l.output, l.delta, index, class, l.classes, l.softmax_tree, l.class_scale, &avg_cat);
+                    delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
                     ++class_count;
-                    onlyclass = 1;
+                    onlyclass_id = 1;
                     break;
                 }
             }
-            if(onlyclass) continue;
+            if(onlyclass_id) continue;
         }
         for (j = 0; j < l.h; ++j) {
             for (i = 0; i < l.w; ++i) {

                     int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                     box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                     float best_iou = 0;
-                    int best_class = -1;
-                    for(t = 0; t < 30; ++t){
+                    int best_class_id = -1;
+                    for(t = 0; t < l.max_boxes; ++t){
                         box truth = float_to_box(state.truth + t*5 + b*l.truths);
                         if(!truth.x) break;
                         float iou = box_iou(pred, truth);
                         if (iou > best_iou) {
-                            best_class = state.truth[t*5 + b*l.truths + 4];
+                            best_class_id = state.truth[t*5 + b*l.truths + 4];
                             best_iou = iou;
                         }
                     }

                     if (best_iou > l.thresh) {
                         l.delta[index + 4] = 0;
                         if(l.classfix > 0){
-                            delta_region_class(l.output, l.delta, index + 5, best_class, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat);
+                            delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
                             ++class_count;
                         }
                     }

                 }
             }
         }
-        for(t = 0; t < 30; ++t){
+        for(t = 0; t < l.max_boxes; ++t){
             box truth = float_to_box(state.truth + t*5 + b*l.truths);

             if(!truth.x) break;

             }


-            int class = state.truth[t*5 + b*l.truths + 4];
-            if (l.map) class = l.map[class];
-            delta_region_class(l.output, l.delta, best_index + 5, class, l.classes, l.softmax_tree, l.class_scale, &avg_cat);
+            int class_id = state.truth[t*5 + b*l.truths + 4];
+            if (l.map) class_id = l.map[class_id];
+            delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
             ++count;
             ++class_count;
         }

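In the softmax_tree branch above, a truth entry with x and y pushed above 100000 acts as a class-only label: the code picks the prediction with the highest hierarchical class probability, applies only the class delta to it, and onlyclass_id then skips the box-matching loops for that image. Assuming get_hierarchy_probability multiplies the predicted conditional scores along the parent chain (the same walk the hier branch of delta_region_class performs), a sketch using darknet's tree type (hierarchy_prob_sketch is an illustrative name):

    /* Product of predicted conditional probabilities from class_id up to the
       root, using the same parent links as delta_region_class above. */
    static float hierarchy_prob_sketch(const float *pred, const tree *hier, int class_id)
    {
        float p = 1;
        while (class_id >= 0) {
            p *= pred[class_id];
            class_id = hier->parent[class_id];
        }
        return p;
    }
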
         cuda_pull_array(state.truth, truth_cpu, num_truth);
     }
     cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
     cudaStreamSynchronize(get_cuda_stream());
     network_state cpu_state = state;
     cpu_state.train = state.train;
     cpu_state.truth = truth_cpu;

     free(cpu_state.input);
     if(!state.train) return;
     cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
     cudaStreamSynchronize(get_cuda_stream());
     if(cpu_state.truth) free(cpu_state.truth);
 }
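
The GPU wrapper keeps a pull / CPU forward / push pattern: the device output and the truths are pulled into host buffers, a CPU network_state is built around them (the forward_region_layer call itself is elided in this hunk), and only in training mode is the computed l.delta pushed back to l.delta_gpu; the cudaStreamSynchronize calls presumably make sure the asynchronous copies on darknet's stream have completed before the host data is read. A compressed sketch of that pattern (only cuda_pull_array and cuda_push_array come from the hunk above; the wrapper name and body are illustrative):

    /* Pull the device output, run the CPU layer, push the resulting deltas back. */
    void region_gpu_pattern_sketch(region_layer l, network_state state, float *in_cpu)
    {
        cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);      /* device -> host */
        /* ... wrap in_cpu in a CPU network_state and run forward_region_layer ... */
        if (state.train) {
            cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs); /* host -> device */
        }
    }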