Joseph Redmon
2015-09-01 8bcdee86585f496afe1a8a38d608ea0504a11243
src/network.c
@@ -4,13 +4,17 @@
#include "image.h"
#include "data.h"
#include "utils.h"
#include "blas.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "normalization_layer.h"
#include "maxpool_layer.h"
#include "avgpool_layer.h"
#include "cost_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
@@ -27,10 +31,14 @@
            return "connected";
        case MAXPOOL:
            return "maxpool";
        case AVGPOOL:
            return "avgpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case REGION:
            return "region";
        case DROPOUT:
            return "dropout";
        case CROP:
@@ -39,6 +47,8 @@
            return "cost";
        case ROUTE:
            return "route";
        case NORMALIZATION:
            return "normalization";
        default:
            break;
    }
@@ -62,12 +72,19 @@
    int i;
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
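        /* zero this layer's delta buffer so the backward pass accumulates gradients into clean memory */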
        if(l.delta){
            scal_cpu(l.outputs * l.batch, 0, l.delta, 1);
        }
        if(l.type == CONVOLUTIONAL){
            forward_convolutional_layer(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            forward_deconvolutional_layer(l, state);
        } else if(l.type == NORMALIZATION){
            forward_normalization_layer(l, state);
        } else if(l.type == DETECTION){
            forward_detection_layer(l, state);
        } else if(l.type == REGION){
            forward_region_layer(l, state);
        } else if(l.type == CONNECTED){
            forward_connected_layer(l, state);
        } else if(l.type == CROP){
@@ -78,6 +95,8 @@
            forward_softmax_layer(l, state);
        } else if(l.type == MAXPOOL){
            forward_maxpool_layer(l, state);
        } else if(l.type == AVGPOOL){
            forward_avgpool_layer(l, state);
        } else if(l.type == DROPOUT){
            forward_dropout_layer(l, state);
        } else if(l.type == ROUTE){
@@ -112,13 +131,24 @@
float get_network_cost(network net)
{
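    /* average the scalar cost reported by every COST, DETECTION, and REGION layer in the network */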
    int i;
    float sum = 0;
    int count = 0;
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].type == COST){
            sum += net.layers[i].output[0];
            ++count;
        }
        if(net.layers[i].type == DETECTION){
            sum += net.layers[i].cost[0];
            ++count;
        }
        if(net.layers[i].type == REGION){
            sum += net.layers[i].cost[0];
            ++count;
        }
    }
    if(count == 0) return 0;   /* no cost-producing layers */
    return sum/count;
}
int get_predicted_class_network(network net)
@@ -132,10 +162,11 @@
{
    int i;
    float *original_input = state.input;
    float *original_delta = state.delta;
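    /* walk the layers in reverse, pointing each layer at the previous layer's output and delta buffers */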
    for(i = net.n-1; i >= 0; --i){
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        }else{
            layer prev = net.layers[i-1];
            state.input = prev.output;
@@ -146,12 +177,18 @@
            backward_convolutional_layer(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            backward_deconvolutional_layer(l, state);
        } else if(l.type == NORMALIZATION){
            backward_normalization_layer(l, state);
        } else if(l.type == MAXPOOL){
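            /* layer 0 has no preceding layer to receive deltas, so its maxpool backward pass is skipped */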
            if(i != 0) backward_maxpool_layer(l, state);
        } else if(l.type == AVGPOOL){
            backward_avgpool_layer(l, state);
        } else if(l.type == DROPOUT){
            backward_dropout_layer(l, state);
        } else if(l.type == DETECTION){
            backward_detection_layer(l, state);
        } else if(l.type == REGION){
            backward_region_layer(l, state);
        } else if(l.type == SOFTMAX){
            if(i != 0) backward_softmax_layer(l, state);
        } else if(l.type == CONNECTED){
@@ -166,11 +203,12 @@
float train_network_datum(network net, float *x, float *y)
{
#ifdef GPU
    if(gpu_index >= 0) return train_network_datum_gpu(net, x, y);
#endif
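    /* CPU path: point the state at this example and its truth labels and mark it as a training pass */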
    network_state state;
    state.input = x;
    state.delta = 0;
    state.truth = y;
    state.train = 1;
    forward_network(net, state);
@@ -224,6 +262,7 @@
    int i,j;
    network_state state;
    state.train = 1;
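    /* no delta buffer is attached to the raw input, so gradients are never propagated past the first layer */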
    state.delta = 0;
    float sum = 0;
    int batch = 2;
    for(i = 0; i < n; ++i){
@@ -249,43 +288,35 @@
    }
}
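/* resize_network changes the network's spatial input size in place; each resized layer's
 * output dimensions become the input dimensions of the layer that follows it. */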
int resize_network(network *net, int w, int h)
{
    int i;
    //if(w == net->w && h == net->h) return 0;
    net->w = w;
    net->h = h;
    //fprintf(stderr, "Resizing to %d x %d...", w, h);
    //fflush(stderr);
    for (i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
            break;
        }else if(l.type == NORMALIZATION){
            resize_normalization_layer(&l, w, h);
        }else{
            error("Cannot resize this type of layer");
        }
        net->layers[i] = l;
        w = l.out_w;
        h = l.out_h;
    }
    //fprintf(stderr, " Done!\n");
    return 0;
}
int get_network_output_size(network net)
{
@@ -488,4 +519,17 @@
    return acc;
}
void free_network(network net)
{
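    /* free each layer's buffers, then the layer array; GPU builds also release the staged input/truth device memory */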
    int i;
    for(i = 0; i < net.n; ++i){
        free_layer(net.layers[i]);
    }
    free(net.layers);
    #ifdef GPU
    if(net.input_gpu){
        if(*net.input_gpu) cuda_free(*net.input_gpu);
        free(net.input_gpu);
    }
    if(net.truth_gpu){
        if(*net.truth_gpu) cuda_free(*net.truth_gpu);
        free(net.truth_gpu);
    }
    #endif
}