Joseph Redmon
2015-05-20 ff7e03325a2f36bf4eb13e1f538b78e1549305cc
detection exp
5 files modified
2 files added
513 lines changed in the following files:
cfg/detection.cfg           197
cfg/rescore.cfg             198
src/connected_layer.c         3
src/convolutional_layer.c     3
src/data.c                    2
src/detection.c              12
src/detection_layer.c        98
cfg/detection.cfg
New file
@@ -0,0 +1,197 @@
[net]
batch=64
subdivisions=4
height=448
width=448
channels=3
learning_rate=0.01
momentum=0.9
decay=0.0005
seen = 0
[crop]
crop_width=448
crop_height=448
flip=0
angle=0
saturation = 2
exposure = 2
[convolutional]
filters=64
size=7
stride=2
pad=1
activation=ramp
[convolutional]
filters=192
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=128
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=128
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=128
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=1024
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=512
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=1024
size=3
stride=1
pad=1
activation=ramp
[convolutional]
size=3
stride=1
pad=1
filters=1024
activation=ramp
[convolutional]
size=3
stride=2
pad=1
filters=1024
activation=ramp
[convolutional]
size=3
stride=1
pad=1
filters=1024
activation=ramp
[connected]
output=4096
activation=ramp
[dropout]
probability=.5
[connected]
output=1225
activation=logistic
[detection]
classes=20
coords=4
rescore=0
nuisance = 1
background=1
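The final connected layer's output=1225 lines up with a 7x7 detection grid (the six stride-2 convolutions reduce the 448x448 input to 7x7) carrying 25 values per cell. A minimal sketch of that arithmetic, under the assumption that exactly one extra slot (background or nuisance) is added on top of the 20 classes and 4 coordinates; detection_output_size is a hypothetical helper, not darknet's own:

#include <stdio.h>

/* Hedged sketch: detection-layer output size implied by the [detection]
 * settings above on a 7x7 grid, assuming one extra slot per cell beyond
 * classes and coords. */
int detection_output_size(int side, int classes, int coords, int extra)
{
    return side * side * (classes + coords + extra);
}

int main()
{
    printf("%d\n", detection_output_size(7, 20, 4, 1)); /* 7*7*25 = 1225 */
    return 0;
}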
cfg/rescore.cfg
New file
@@ -0,0 +1,198 @@
[net]
batch=64
subdivisions=4
height=448
width=448
channels=3
learning_rate=0.01
momentum=0.9
decay=0.0005
seen = 0
[crop]
crop_width=448
crop_height=448
flip=0
angle=0
saturation = 2
exposure = 2
[convolutional]
filters=64
size=7
stride=2
pad=1
activation=ramp
[convolutional]
filters=192
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=128
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=128
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=128
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=512
size=3
stride=1
pad=1
activation=ramp
[convolutional]
filters=256
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=1024
size=3
stride=2
pad=1
activation=ramp
[convolutional]
filters=512
size=1
stride=1
pad=1
activation=ramp
[convolutional]
filters=1024
size=3
stride=1
pad=1
activation=ramp
[convolutional]
size=3
stride=1
pad=1
filters=1024
activation=ramp
[convolutional]
size=3
stride=2
pad=1
filters=1024
activation=ramp
[convolutional]
size=3
stride=1
pad=1
filters=1024
activation=ramp
[connected]
output=4096
activation=ramp
[dropout]
probability=.5
[connected]
output=1225
activation=logistic
[detection]
classes=20
coords=4
rescore=1
nuisance = 0
background=0
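Apart from the final [detection] flags, the network in rescore.cfg matches detection.cfg: same convolutional trunk, same 4096-wide connected layer, same 1225-output logistic layer. The experiment toggles rescore=1 while turning off the nuisance and background flags, so the two configs compare loss variants rather than architectures.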
src/connected_layer.c
@@ -29,7 +29,8 @@
    l.biases = calloc(outputs, sizeof(float));
    float scale = 1./sqrt(inputs);
    //float scale = 1./sqrt(inputs);
    float scale = sqrt(2./inputs);
    for(i = 0; i < inputs*outputs; ++i){
        l.weights[i] = 2*scale*rand_uniform() - scale;
    }
src/convolutional_layer.c
@@ -61,7 +61,8 @@
    l.biases = calloc(n, sizeof(float));
    l.bias_updates = calloc(n, sizeof(float));
    float scale = 1./sqrt(size*size*c);
    //float scale = 1./sqrt(size*size*c);
    float scale = sqrt(2./(size*size*c));
    for(i = 0; i < c*n*size*size; ++i) l.filters[i] = 2*scale*rand_uniform() - scale;
    for(i = 0; i < n; ++i){
        l.biases[i] = scale;
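Both initialization hunks replace the old 1./sqrt(fan_in) scale with sqrt(2./fan_in), where fan_in is inputs for the connected layer and size*size*c for the convolutional filters, in the spirit of He initialization for ReLU-family activations such as ramp, while keeping the uniform [-scale, scale] draw. A minimal sketch of the new scheme; rand_uniform_01 is a stand-in assumed to return a value in [0,1), mirroring darknet's rand_uniform:

#include <math.h>
#include <stdlib.h>

/* Stand-in for darknet's rand_uniform(): uniform float in [0,1). */
static float rand_uniform_01(void)
{
    return (float)rand() / ((float)RAND_MAX + 1.0f);
}

/* Hedged sketch of the initialization used in both hunks above:
 * uniform weights in [-scale, scale] with scale = sqrt(2/fan_in). */
void init_weights(float *w, int n, int fan_in)
{
    float scale = sqrt(2./fan_in);            /* was 1./sqrt(fan_in) */
    for(int i = 0; i < n; ++i){
        w[i] = 2*scale*rand_uniform_01() - scale;
    }
}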
src/data.c
@@ -174,7 +174,7 @@
        }
        int index = (i+j*num_boxes)*(4+classes+background);
        //if(truth[index+classes+background+2]) continue;
        if(truth[index+classes+background+2]) continue;
        if(background) truth[index++] = 0;
        truth[index+id] = 1;
        index += classes;
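Re-enabling the continue skips grid cells that already hold a box. Given the indexing above, each cell's slice of the truth vector appears to be laid out as background flag (when background=1), then the class one-hot, then the four box coordinates, so truth[index+classes+background+2] is the stored width and is nonzero exactly when the cell is occupied. A hedged sketch of that reading:

/* Hypothetical view of one cell's slice of the truth array, inferred from
 * index = (i + j*num_boxes)*(4 + classes + background) and the writes above. */
struct cell_truth {
    float background;        /* present only when background=1            */
    float class_onehot[20];  /* classes=20 in the cfgs above              */
    float x, y, w, h;        /* coords=4; w sits at classes+background+2, */
};                           /* the slot tested by the re-enabled check   */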
src/detection.c
@@ -47,6 +47,8 @@
                int top   = (y-h/2)*im.h;
                int bot   = (y+h/2)*im.h;
                draw_box(im, left, top, right, bot, red, green, blue);
                draw_box(im, left+1, top+1, right+1, bot+1, red, green, blue);
                draw_box(im, left-1, top-1, right-1, bot-1, red, green, blue);
            }
        }
    }
@@ -116,7 +118,11 @@
        float loss = train_network(net, train);
        //TODO
        #ifdef GPU
        float *out = get_network_output_gpu(net);
        #else
        float *out = get_network_output(net);
        #endif
        image im = float_to_image(net.w, net.h, 3, train.X.vals[127]);
        image copy = copy_image(im);
        draw_localization(copy, &(out[63*80]));
@@ -213,7 +219,7 @@
        avg_loss = avg_loss*.9 + loss*.1;
        printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), i*imgs);
        if(i == 100){
            net.learning_rate *= 10;
            //net.learning_rate *= 10;
        }
        if(i%100==0){
            char buff[256];
@@ -309,8 +315,8 @@
                float y = (pred.vals[j][ci + 1] + row)/num_boxes;
                float w = pred.vals[j][ci + 2]; //* distance_from_edge(row, num_boxes);
                float h = pred.vals[j][ci + 3]; //* distance_from_edge(col, num_boxes);
                w = pow(w, 1);
                h = pow(h, 1);
                w = pow(w, 2);
                h = pow(h, 2);
                float prob = scale*pred.vals[j][k+class+background+nuisance];
                if(prob < threshold) continue;
                printf("%d %d %f %f %f %f %f\n", offset +  j, class, prob, x, y, w, h);
src/detection_layer.c
@@ -330,8 +330,9 @@
            l.output[out_i++] = mask*state.input[in_i++];
        }
    }
    float avg_iou = 0;
    int count = 0;
    if(l.does_cost && state.train){
        int count = 0;
        *(l.cost) = 0;
        int size = get_detection_layer_output_size(l) * l.batch;
        memset(l.delta, 0, size * sizeof(float));
@@ -342,65 +343,54 @@
                *(l.cost) += pow(state.truth[j] - l.output[j], 2);
                l.delta[j] =  state.truth[j] - l.output[j];
            }
            box truth;
            truth.x = state.truth[j+0];
            truth.y = state.truth[j+1];
            truth.w = state.truth[j+2];
            truth.h = state.truth[j+3];
            truth.x = state.truth[j+0]/7;
            truth.y = state.truth[j+1]/7;
            truth.w = pow(state.truth[j+2], 2);
            truth.h = pow(state.truth[j+3], 2);
            box out;
            out.x = l.output[j+0];
            out.y = l.output[j+1];
            out.w = l.output[j+2];
            out.h = l.output[j+3];
            out.x = l.output[j+0]/7;
            out.y = l.output[j+1]/7;
            out.w = pow(l.output[j+2], 2);
            out.h = pow(l.output[j+3], 2);
            if(!(truth.w*truth.h)) continue;
            l.delta[j+0] = (truth.x - out.x);
            l.delta[j+1] = (truth.y - out.y);
            l.delta[j+2] = (truth.w - out.w);
            l.delta[j+3] = (truth.h - out.h);
            *(l.cost) += pow((out.x - truth.x), 2);
            *(l.cost) += pow((out.y - truth.y), 2);
            *(l.cost) += pow((out.w - truth.w), 2);
            *(l.cost) += pow((out.h - truth.h), 2);
/*
            l.delta[j+0] = .1 * (truth.x - out.x) / (49 * truth.w * truth.w);
            l.delta[j+1] = .1 * (truth.y - out.y) / (49 * truth.h * truth.h);
            l.delta[j+2] = .1 * (truth.w - out.w) / (     truth.w * truth.w);
            l.delta[j+3] = .1 * (truth.h - out.h) / (     truth.h * truth.h);
            *(l.cost) += pow((out.x - truth.x)/truth.w/7., 2);
            *(l.cost) += pow((out.y - truth.y)/truth.h/7., 2);
            *(l.cost) += pow((out.w - truth.w)/truth.w, 2);
            *(l.cost) += pow((out.h - truth.h)/truth.h, 2);
            */
            float iou = box_iou(out, truth);
            avg_iou += iou;
            ++count;
            dbox delta = diou(out, truth);
            l.delta[j+0] = 10 * delta.dx/7;
            l.delta[j+1] = 10 * delta.dy/7;
            l.delta[j+2] = 10 * delta.dw * 2 * sqrt(out.w);
            l.delta[j+3] = 10 * delta.dh * 2 * sqrt(out.h);
            *(l.cost) += pow((1-iou), 2);
            if(0){
                l.delta[j+0] = (state.truth[j+0] - l.output[j+0]);
                l.delta[j+1] = (state.truth[j+1] - l.output[j+1]);
                l.delta[j+2] = (state.truth[j+2] - l.output[j+2]);
                l.delta[j+3] = (state.truth[j+3] - l.output[j+3]);
            }else{
                l.delta[j+0] = 4 * (state.truth[j+0] - l.output[j+0]) / 7;
                l.delta[j+1] = 4 * (state.truth[j+1] - l.output[j+1]) / 7;
                l.delta[j+2] = 4 * (state.truth[j+2] - l.output[j+2]);
                l.delta[j+3] = 4 * (state.truth[j+3] - l.output[j+3]);
            }
            if(0){
                for (j = offset; j < offset+classes; ++j) {
                    if(state.truth[j]) state.truth[j] = iou;
                    l.delta[j] =  state.truth[j] - l.output[j];
                }
            }
            /*
             */
        }
        printf("Avg IOU: %f\n", avg_iou/count);
    }
    /*
       int count = 0;
       for(i = 0; i < l.batch*locations; ++i){
       for(j = 0; j < l.classes+l.background; ++j){
       printf("%f, ", l.output[count++]);
       }
       printf("\n");
       for(j = 0; j < l.coords; ++j){
       printf("%f, ", l.output[count++]);
       }
       printf("\n");
       }
     */
    /*
       if(l.background || 1){
       for(i = 0; i < l.batch*locations; ++i){
       int index = i*(l.classes+l.coords+l.background);
       for(j= 0; j < l.classes; ++j){
       if(state.truth[index+j+l.background]){
//dark_zone(l, j, index, state);
}
}
}
}
     */
}
void backward_detection_layer(const detection_layer l, network_state state)
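The new coordinate loss in the last hunk steps the decoded box toward the truth along the IOU gradient (diou), adds (1 - IOU)^2 to the cost, and prints the average IOU per batch. Because the raw layer output is treated as the square root of the width and height (out.w = pow(l.output[j+2], 2)), the gradient on the decoded width has to be pushed back through that square. A hedged sketch of the chain rule behind the "* 2 * sqrt(out.w)" factor, with 10 as a hand-tuned loss weight and the /7 on x/y undoing the 7x7-grid normalization:

#include <math.h>

/* If s is the raw output and the decoded width is w = s*s, then a gradient
 * d(iou)/dw on the decoded width becomes
 * d(iou)/ds = d(iou)/dw * 2*s = d(iou)/dw * 2*sqrt(w),
 * which is the factor applied to delta.dw (and delta.dh) above. */
float raw_width_delta(float diou_dw, float decoded_w)
{
    return 10 * diou_dw * 2 * sqrtf(decoded_w);
}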