Joseph Redmon
2015-07-11 ccde487525fc89a1d4bc3e1cf11a18971e8451c9
src/network.c
@@ -10,6 +10,7 @@
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "detection_layer.h"
#include "normalization_layer.h"
#include "maxpool_layer.h"
#include "cost_layer.h"
#include "softmax_layer.h"
@@ -39,6 +40,8 @@
            return "cost";
        case ROUTE:
            return "route";
        case NORMALIZATION:
            return "normalization";
        default:
            break;
    }
@@ -66,6 +69,8 @@
             forward_convolutional_layer(l, state);
         } else if(l.type == DECONVOLUTIONAL){
             forward_deconvolutional_layer(l, state);
+        } else if(l.type == NORMALIZATION){
+            forward_normalization_layer(l, state);
         } else if(l.type == DETECTION){
             forward_detection_layer(l, state);
         } else if(l.type == CONNECTED){
@@ -132,10 +137,11 @@
 {
     int i;
     float *original_input = state.input;
+    float *original_delta = state.delta;
     for(i = net.n-1; i >= 0; --i){
         if(i == 0){
             state.input = original_input;
-            state.delta = 0;
+            state.delta = original_delta;
         }else{
             layer prev = net.layers[i-1];
             state.input = prev.output;
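Previously the first layer's state.delta was hard-coded to 0, so the gradient of the cost with respect to the network input was always discarded; saving and restoring original_delta lets a caller pass its own buffer and read that gradient back. A hedged sketch of such a caller, where the helper name input_gradient is invented for illustration and the network and network_state types are assumed from this file:

#include <stdlib.h>

/* Hypothetical helper: recover d(cost)/d(input) by handing
 * backward_network a buffer through state.delta. */
float *input_gradient(network net, float *x, float *y, int inputs)
{
    float *dx = calloc(inputs, sizeof(float));  /* gradient buffer */
    network_state state;
    state.input = x;
    state.delta = dx;   /* restored at the first layer by the code above */
    state.truth = y;
    state.train = 1;
    forward_network(net, state);
    backward_network(net, state);
    return dx;          /* caller owns and frees */
}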
@@ -146,6 +152,8 @@
             backward_convolutional_layer(l, state);
         } else if(l.type == DECONVOLUTIONAL){
             backward_deconvolutional_layer(l, state);
+        } else if(l.type == NORMALIZATION){
+            backward_normalization_layer(l, state);
         } else if(l.type == MAXPOOL){
             if(i != 0) backward_maxpool_layer(l, state);
         } else if(l.type == DROPOUT){
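The i != 0 guard keeps backward_maxpool_layer from scattering gradients into a previous-layer delta buffer that the input layer may not provide. A minimal sketch of the scatter a maxpool backward pass performs, assuming the forward pass recorded each output's winning input index (names here are illustrative, not darknet's):

/* Route each output gradient back to the input position that won the
 * max in the forward pass. */
void maxpool_backward_sketch(const float *out_delta, float *in_delta,
                             const int *winner, int n_out)
{
    int i;
    if(!in_delta) return;   /* same role as the i != 0 guard above */
    for(i = 0; i < n_out; ++i){
        in_delta[winner[i]] += out_delta[i];
    }
}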
@@ -171,6 +179,7 @@
     #endif
     network_state state;
     state.input = x;
+    state.delta = 0;
     state.truth = y;
     state.train = 1;
     forward_network(net, state);
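Because backward_network now restores original_delta at the first layer, any code that builds a network_state on the stack must set delta explicitly: an unset pointer field is indeterminate in C and would be written through as a gradient buffer. A self-contained toy demonstration of the convention (type and function names are invented):

#include <stdio.h>

typedef struct {
    float *input;
    float *delta;   /* optional output buffer; 0 means "not requested" */
} toy_state;

/* Mirrors backward_network's contract: only write gradients if the
 * caller supplied a buffer. */
static void toy_backward(toy_state s)
{
    if(s.delta) s.delta[0] = 1.0f;
}

int main(void)
{
    toy_state s;
    s.input = 0;
    s.delta = 0;        /* the analogue of state.delta = 0 above */
    toy_backward(s);    /* safe: no write happens */
    printf("ok\n");
    return 0;
}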
@@ -224,6 +233,7 @@
     int i,j;
     network_state state;
     state.train = 1;
+    state.delta = 0;
     float sum = 0;
     int batch = 2;
     for(i = 0; i < n; ++i){
@@ -249,43 +259,32 @@
     }
 }
 /*
-int resize_network(network net, int h, int w, int c)
+int resize_network(network *net, int w, int h)
 {
-    fprintf(stderr, "Might be broken, careful!!");
     int i;
-    for (i = 0; i < net.n; ++i){
-        if(net.types[i] == CONVOLUTIONAL){
-            convolutional_layer *layer = (convolutional_layer *)net.layers[i];
-            resize_convolutional_layer(layer, h, w);
-            image output = get_convolutional_image(*layer);
-            h = output.h;
-            w = output.w;
-            c = output.c;
-        } else if(net.types[i] == DECONVOLUTIONAL){
-            deconvolutional_layer *layer = (deconvolutional_layer *)net.layers[i];
-            resize_deconvolutional_layer(layer, h, w);
-            image output = get_deconvolutional_image(*layer);
-            h = output.h;
-            w = output.w;
-            c = output.c;
-        }else if(net.types[i] == MAXPOOL){
-            maxpool_layer *layer = (maxpool_layer *)net.layers[i];
-            resize_maxpool_layer(layer, h, w);
-            image output = get_maxpool_image(*layer);
-            h = output.h;
-            w = output.w;
-            c = output.c;
-        }else if(net.types[i] == DROPOUT){
-            dropout_layer *layer = (dropout_layer *)net.layers[i];
-            resize_dropout_layer(layer, h*w*c);
+    //if(w == net->w && h == net->h) return 0;
+    net->w = w;
+    net->h = h;
+    //fprintf(stderr, "Resizing to %d x %d...", w, h);
+    //fflush(stderr);
+    for (i = 0; i < net->n; ++i){
+        layer l = net->layers[i];
+        if(l.type == CONVOLUTIONAL){
+            resize_convolutional_layer(&l, w, h);
+        }else if(l.type == MAXPOOL){
+            resize_maxpool_layer(&l, w, h);
+        }else if(l.type == NORMALIZATION){
+            resize_normalization_layer(&l, w, h);
+        }else{
+            error("Cannot resize this type of layer");
+        }
+        net->layers[i] = l;
+        w = l.out_w;
+        h = l.out_h;
+    }
+    //fprintf(stderr, " Done!\n");
+    return 0;
+}
 */
 int get_network_output_size(network net)
 {
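The rewritten resize_network (still commented out here) threads the new input size through the layers: each layer is resized in place and publishes its output dimensions through l.out_w and l.out_h, which become the next layer's input size. A standalone sketch of that dimension-threading pattern, with illustrative stride arithmetic rather than darknet's actual formulas:

#include <stdio.h>

typedef struct { int stride; int out_w, out_h; } toy_layer;

static void toy_resize(toy_layer *l, int w, int h)
{
    l->out_w = w / l->stride;   /* e.g. a maxpool halving resolution */
    l->out_h = h / l->stride;
}

int main(void)
{
    toy_layer net[3] = {{1,0,0}, {2,0,0}, {2,0,0}};
    int w = 448, h = 448, i;
    for(i = 0; i < 3; ++i){
        toy_resize(&net[i], w, h);
        w = net[i].out_w;       /* thread dims to the next layer */
        h = net[i].out_h;
    }
    printf("final feature map: %d x %d\n", w, h);  /* 112 x 112 */
    return 0;
}

With strides 1, 2, 2 a 448x448 input ends at 112x112, which is the same chaining the real loop achieves for arbitrary layer types via w = l.out_w; h = l.out_h;.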