#include "region_layer.h"
#include "activations.h"
#include "softmax_layer.h"
#include "blas.h"
#include "box.h"
#include "cuda.h"
#include "utils.h"

#include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
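
/* Build a region layer: n anchor boxes are predicted at every cell of a
 * w x h grid, and each prediction carries coords box values, one objectness
 * score, and one score per class. */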
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    region_layer l = {0};
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.classes = classes;
    l.coords = coords;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(n*2, sizeof(float));
    l.bias_updates = calloc(n*2, sizeof(float));
    // every anchor at every cell predicts coords box values, 1 objectness score, and classes scores
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    // room for up to 30 ground-truth boxes of 5 values each per image
    l.truths = 30*(5);
    l.delta = calloc(batch*l.outputs, sizeof(float));
    l.output = calloc(batch*l.outputs, sizeof(float));
    int i;
    // default anchor priors; normally overwritten when the cfg specifies anchors
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;
    }

    l.forward = forward_region_layer;
    l.backward = backward_region_layer;
#ifdef GPU
    l.forward_gpu = forward_region_layer_gpu;
    l.backward_gpu = backward_region_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif

    fprintf(stderr, "Region Layer\n");

    return l;
}
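
/* LOG selects the box x/y parameterization: when set, x/y predictions are
 * squashed with a logistic and treated as offsets inside their grid cell;
 * when clear, they scale the anchor biases around the cell center. */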
#define LOG 1

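/* Decode the raw predictions at grid cell (i, j) for anchor n into a box:
 * with LOG, x/y become logistic offsets inside the cell divided by the grid
 * size, and w/h are the anchor biases scaled by exp() of the predictions. */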
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + .5)/w + x[index + 0] * biases[2*n];
    b.y = (j + .5)/h + x[index + 1] * biases[2*n + 1];
    if(LOG){
        b.x = (i + logistic_activate(x[index + 0])) / w;
        b.y = (j + logistic_activate(x[index + 1])) / h;
    }
    b.w = exp(x[index + 2]) * biases[2*n];
    b.h = exp(x[index + 3]) * biases[2*n+1];
    return b;
}

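/* Compute the regression targets for a matched truth box, write the scaled
 * x, y, w, h gradients into delta at the given index, and return the IOU
 * between the current prediction and the truth. */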
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
    box pred = get_region_box(x, biases, n, index, i, j, w, h);
    float iou = box_iou(pred, truth);

    float tx = (truth.x - (i + .5)/w) / biases[2*n];
    float ty = (truth.y - (j + .5)/h) / biases[2*n + 1];
    if(LOG){
        tx = (truth.x*w - i);
        ty = (truth.y*h - j);
    }
    float tw = log(truth.w / biases[2*n]);
    float th = log(truth.h / biases[2*n + 1]);

    delta[index + 0] = scale * (tx - x[index + 0]);
    delta[index + 1] = scale * (ty - x[index + 1]);
    if(LOG){
        delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
        delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
    }
    delta[index + 2] = scale * (tw - x[index + 2]);
    delta[index + 3] = scale * (th - x[index + 3]);
    return iou;
}

float logit(float x)
{
    return log(x/(1.-x));
}

float tisnan(float x)
{
    return (x != x);
}

void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);
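
/* Forward pass: reorganize the input so every (cell, anchor) prediction of
 * size coords + classes + 1 is contiguous, squash the objectness score with a
 * logistic, and normalize the class scores with a softmax (or softmax tree).
 * During training, fill l.delta with the region loss gradients and store the
 * squared magnitude of delta in l.cost. */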
void forward_region_layer(const region_layer l, network_state state)
{
    int i,j,b,t,n;
    int size = l.coords + l.classes + 1;
    memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
    reorg(l.output, l.w*l.h, size*l.n, l.batch, 1);
    for (b = 0; b < l.batch; ++b){
        for(i = 0; i < l.h*l.w*l.n; ++i){
            int index = size*i + b*l.outputs;
            l.output[index + 4] = logistic_activate(l.output[index + 4]);
            if(l.softmax_tree){
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            } else if(l.softmax){
                softmax(l.output + index + 5, l.classes, 1, l.output + index + 5);
            }
        }
    }
    if(!state.train) return;

    memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
    float avg_iou = 0;
    float recall = 0;
    float avg_cat = 0;
    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    *(l.cost) = 0;
    for (b = 0; b < l.batch; ++b) {
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                    box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                    // find the best overlap between this prediction and any truth box
                    float best_iou = 0;
                    for(t = 0; t < 30; ++t){
                        box truth = float_to_box(state.truth + t*5 + b*l.truths);
                        if(!truth.x) break;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) best_iou = iou;
                    }
                    // push objectness toward 0 unless the prediction already overlaps a truth well
                    avg_anyobj += l.output[index + 4];
                    l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    if(best_iou > .5) l.delta[index + 4] = 0;

                    // early in training, nudge every box toward its anchor prior
                    if(*(state.net.seen) < 12800){
                        box truth = {0};
                        truth.x = (i + .5)/l.w;
                        truth.y = (j + .5)/l.h;
                        truth.w = l.biases[2*n];
                        truth.h = l.biases[2*n+1];
                        delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
                    }
                }
            }
        }
        for(t = 0; t < 30; ++t){
            box truth = float_to_box(state.truth + t*5 + b*l.truths);
            if(!truth.x) break;

            // match the truth to the best anchor at its cell, ignoring position
            float best_iou = 0;
            int best_index = 0;
            int best_n = 0;
            i = (truth.x * l.w);
            j = (truth.y * l.h);
            box truth_shift = truth;
            truth_shift.x = 0;
            truth_shift.y = 0;
            for(n = 0; n < l.n; ++n){
                int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                if(l.bias_match){
                    pred.w = l.biases[2*n];
                    pred.h = l.biases[2*n+1];
                }
                pred.x = 0;
                pred.y = 0;
                float iou = box_iou(pred, truth_shift);
                if (iou > best_iou){
                    best_index = index;
                    best_iou = iou;
                    best_n = n;
                }
            }

            // coordinate loss for the responsible box
            float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
            if(iou > .5) recall += 1;
            avg_iou += iou;

            // objectness loss: target 1, or the IOU itself when rescoring
            avg_obj += l.output[best_index + 4];
            l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            if (l.rescore) {
                l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            }

            // class loss
            int class = state.truth[t*5 + b*l.truths + 4];
            if (l.map) class = l.map[class];
            if(l.softmax_tree){
                float pred = 1;
                while(class >= 0){
                    pred *= l.output[best_index + 5 + class];
                    int g = l.softmax_tree->group[class];
                    int offset = l.softmax_tree->group_offset[g];
                    int k;
                    for(k = 0; k < l.softmax_tree->group_size[g]; ++k){
                        int index = best_index + 5 + offset + k;
                        l.delta[index] = l.class_scale * (0 - l.output[index]);
                    }
                    l.delta[best_index + 5 + class] = l.class_scale * (1 - l.output[best_index + 5 + class]);

                    class = l.softmax_tree->parent[class];
                }
                avg_cat += pred;
            } else {
                for(n = 0; n < l.classes; ++n){
                    l.delta[best_index + 5 + n] = l.class_scale * (((n == class)?1 : 0) - l.output[best_index + 5 + n]);
                    if(n == class) avg_cat += l.output[best_index + 5 + n];
                }
            }
            ++count;
        }
    }
    reorg(l.delta, l.w*l.h, size*l.n, l.batch, 0);
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}

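/* Backward pass: the gradients were already written to l.delta during the
 * forward pass, so just accumulate them into the previous layer's delta. */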
void backward_region_layer(const region_layer l, network_state state)
{
    axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
    //copy_cpu(l.batch*l.inputs, l.delta, 1, state.delta, 1);
}

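/* Extract detections for inference. A caller typically allocates l.w*l.h*l.n
 * boxes and per-box probability arrays, runs the network, then thresholds and
 * applies non-max suppression. Minimal sketch (illustrative only; the real
 * driver code lives outside this file):
 *
 *     int total = l.w*l.h*l.n;
 *     box *boxes = calloc(total, sizeof(box));
 *     float **probs = calloc(total, sizeof(float *));
 *     for(j = 0; j < total; ++j) probs[j] = calloc(l.classes, sizeof(float));
 *     network_predict(net, X);
 *     get_region_boxes(l, 1, 1, thresh, probs, boxes, 0);
 */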
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness)
{
    int i,j,n;
    float *predictions = l.output;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;

            int class_index = index * (l.classes + 5) + 5;
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                for(j = l.classes - 1; j >= 0; --j){
                    if(!found && predictions[class_index + j] > .5){
                        found = 1;
                    } else {
                        predictions[class_index + j] = 0;
                    }
                    float prob = predictions[class_index+j];
                    probs[index][j] = (scale > thresh) ? prob : 0;
                }
            }else{
                for(j = 0; j < l.classes; ++j){
                    float prob = scale*predictions[class_index+j];
                    probs[index][j] = (prob > thresh) ? prob : 0;
                }
            }
            if(only_objectness){
                probs[index][0] = scale;
            }
        }
    }
}

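/* GPU path: this layer has no CUDA kernel of its own. The forward pass pulls
 * the input (and the truth, when training) to the host, runs the CPU
 * implementation, then pushes the output and delta back to the device. */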
#ifdef GPU

void forward_region_layer_gpu(const region_layer l, network_state state)
{
    float *in_cpu = calloc(l.batch*l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if(state.truth){
        int num_truth = l.batch*l.truths;
        truth_cpu = calloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    cuda_pull_array(state.input, in_cpu, l.batch*l.inputs);
    network_state cpu_state = state;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    forward_region_layer(l, cpu_state);
    cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    free(cpu_state.input);
    if(cpu_state.truth) free(cpu_state.truth);
}

void backward_region_layer_gpu(region_layer l, network_state state)
{
    axpy_ongpu(l.batch*l.inputs, 1, l.delta_gpu, 1, state.delta, 1);
    //copy_ongpu(l.batch*l.inputs, l.delta_gpu, 1, state.delta, 1);
}
#endif