Joseph Redmon
2015-03-08 f047cfff99e00e28c02eb59b6d32386c122f9af6
src/network.c
@@ -9,6 +9,7 @@
#include "connected_layer.h"
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "detection_layer.h"
#include "maxpool_layer.h"
#include "cost_layer.h"
#include "normalization_layer.h"
@@ -29,6 +30,8 @@
            return "maxpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case NORMALIZATION:
            return "normalization";
        case DROPOUT:
@@ -76,6 +79,11 @@
             forward_deconvolutional_layer(layer, input);
             input = layer.output;
         }
+        else if(net.types[i] == DETECTION){
+            detection_layer layer = *(detection_layer *)net.layers[i];
+            forward_detection_layer(layer, input, truth);
+            input = layer.output;
+        }
         else if(net.types[i] == CONNECTED){
             connected_layer layer = *(connected_layer *)net.layers[i];
             forward_connected_layer(layer, input);
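The forward pass dispatches on net.types[i] and casts the matching entry of the untyped net.layers array back to its concrete struct; the new DETECTION case follows the same pattern, but it is the only branch that also receives the ground-truth pointer, presumably because the detection layer needs the labels to compute its cost and deltas during the forward pass. A minimal, self-contained sketch of that type-tag plus void** dispatch (stand-in structs and fields, not darknet code):

/* --- illustrative sketch, not part of the diff --- */
#include <stdio.h>

typedef enum { CONVOLUTIONAL, DETECTION } LAYER_TYPE;

typedef struct { int n;      float *output; } convolutional_layer; /* stand-in fields */
typedef struct { int coords; float *output; } detection_layer;     /* stand-in fields */

typedef struct {
    int n;              /* number of layers */
    LAYER_TYPE *types;  /* types[i] tags what layers[i] really points to */
    void **layers;      /* type-erased layer pointers */
} network;

void forward(network net, float *input, float *truth)
{
    int i;
    for(i = 0; i < net.n; ++i){
        if(net.types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *)net.layers[i];
            printf("conv: %d filters, input[0]=%.1f\n", layer.n, input[0]);
            input = layer.output;   /* the next layer reads this layer's output */
        } else if(net.types[i] == DETECTION){
            detection_layer layer = *(detection_layer *)net.layers[i];
            printf("detection: %d coords, truth[0]=%.1f\n", layer.coords, truth[0]);
            input = layer.output;
        }
    }
}

int main(void)
{
    float buf[1] = {0.0f}, x[1] = {0.5f}, truth[1] = {1.0f};
    convolutional_layer c = {16, buf};
    detection_layer d = {4, buf};
    LAYER_TYPE types[] = {CONVOLUTIONAL, DETECTION};
    void *layers[] = {&c, &d};
    network net = {2, types, layers};
    forward(net, x, truth);
    return 0;
}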
@@ -152,6 +160,9 @@
     } else if(net.types[i] == MAXPOOL){
         maxpool_layer layer = *(maxpool_layer *)net.layers[i];
         return layer.output;
+    } else if(net.types[i] == DETECTION){
+        detection_layer layer = *(detection_layer *)net.layers[i];
+        return layer.output;
     } else if(net.types[i] == SOFTMAX){
         softmax_layer layer = *(softmax_layer *)net.layers[i];
         return layer.output;
@@ -193,6 +204,9 @@
     } else if(net.types[i] == SOFTMAX){
         softmax_layer layer = *(softmax_layer *)net.layers[i];
         return layer.delta;
+    } else if(net.types[i] == DETECTION){
+        detection_layer layer = *(detection_layer *)net.layers[i];
+        return layer.delta;
     } else if(net.types[i] == DROPOUT){
         if(i == 0) return 0;
         return get_network_delta_layer(net, i-1);
@@ -243,7 +257,7 @@
     return max_index(out, k);
 }
 
-void backward_network(network net, float *input)
+void backward_network(network net, float *input, float *truth)
 {
     int i;
     float *prev_input;
@@ -272,6 +286,10 @@
             dropout_layer layer = *(dropout_layer *)net.layers[i];
             backward_dropout_layer(layer, prev_delta);
         }
+        else if(net.types[i] == DETECTION){
+            detection_layer layer = *(detection_layer *)net.layers[i];
+            backward_detection_layer(layer, prev_input, prev_delta);
+        }
         else if(net.types[i] == NORMALIZATION){
             normalization_layer layer = *(normalization_layer *)net.layers[i];
             if(i != 0) backward_normalization_layer(layer, prev_input, prev_delta);
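As the prev_input/prev_delta names suggest, the backward pass walks the layers in reverse and hands each one a pointer to its input (the previous layer's output, or the network input when i == 0) and to the previous layer's delta buffer, where it writes the gradient with respect to that input; there is no delta buffer before the first layer, which is what the i == 0 checks guard. A small self-contained sketch of that reverse walk (stand-in buffers, not darknet code):

/* --- illustrative sketch, not part of the diff --- */
#include <stdio.h>

#define N 2

int main(void)
{
    float outputs[N][1] = { {0.5f}, {0.8f} };  /* each layer's forward output */
    float deltas[N][1]  = { {0.0f}, {0.0f} };  /* gradient w.r.t. each layer's output */
    float net_input[1]  = { 0.3f };            /* the network's own input */
    int i;

    for(i = N - 1; i >= 0; --i){
        /* layer i's input is layer i-1's output; its input-gradient is written
           into layer i-1's delta buffer (there is none before the first layer) */
        float *prev_input = (i == 0) ? net_input : outputs[i-1];
        float *prev_delta = (i == 0) ? 0         : deltas[i-1];
        printf("layer %d: input=%.1f, upstream delta %s\n",
               i, prev_input[0], prev_delta ? "writable" : "none");
    }
    return 0;
}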
@@ -297,7 +315,7 @@
     if(gpu_index >= 0) return train_network_datum_gpu(net, x, y);
     #endif
     forward_network(net, x, y, 1);
-    backward_network(net, x);
+    backward_network(net, x, y);
     float error = get_network_cost(net);
     update_network(net);
     return error;
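Both training paths now hand the labels to backward_network as well, matching its new three-argument signature. The overall shape of one training step after this change, as a self-contained sketch with stand-in functions (not darknet's real signatures):

/* --- illustrative sketch, not part of the diff --- */
#include <stdio.h>

static float current_cost;  /* stand-in for the cost recorded by a cost/detection layer */

static void forward_pass(float *x, float *y, int train)
{
    /* the truth pointer is consumed in the forward pass (e.g. by a
       detection layer when it computes its cost) */
    current_cost = (x[0] - y[0]) * (x[0] - y[0]);
    (void)train;
}

static void backward_pass(float *x, float *y)
{
    (void)x; (void)y;  /* after this commit the labels flow through backward too */
}

static void apply_update(void) { /* apply accumulated gradients */ }

static float train_datum(float *x, float *y)
{
    forward_pass(x, y, 1);
    backward_pass(x, y);
    float error = current_cost;
    apply_update();
    return error;
}

int main(void)
{
    float x[1] = {0.5f}, y[1] = {1.0f};
    printf("error = %f\n", train_datum(x, y));
    return 0;
}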
@@ -351,7 +369,7 @@
             float *x = d.X.vals[index];
             float *y = d.y.vals[index];
             forward_network(net, x, y, 1);
-            backward_network(net, x);
+            backward_network(net, x, y);
             sum += get_network_cost(net);
         }
         update_network(net);
@@ -381,7 +399,6 @@
     }
 }
 
 void set_batch_network(network *net, int b)
 {
     net->batch = b;
@@ -404,6 +421,9 @@
         } else if(net->types[i] == DROPOUT){
             dropout_layer *layer = (dropout_layer *) net->layers[i];
             layer->batch = b;
+        } else if(net->types[i] == DETECTION){
+            detection_layer *layer = (detection_layer *) net->layers[i];
+            layer->batch = b;
         }
         else if(net->types[i] == FREEWEIGHT){
             freeweight_layer *layer = (freeweight_layer *) net->layers[i];
@@ -445,6 +465,9 @@
     } else if(net.types[i] == DROPOUT){
         dropout_layer layer = *(dropout_layer *) net.layers[i];
         return layer.inputs;
+    } else if(net.types[i] == DETECTION){
+        detection_layer layer = *(detection_layer *) net.layers[i];
+        return layer.inputs;
     } else if(net.types[i] == CROP){
         crop_layer layer = *(crop_layer *) net.layers[i];
         return layer.c*layer.h*layer.w;
@@ -473,6 +496,10 @@
         image output = get_deconvolutional_image(layer);
         return output.h*output.w*output.c;
     }
+    else if(net.types[i] == DETECTION){
+        detection_layer layer = *(detection_layer *)net.layers[i];
+        return get_detection_layer_output_size(layer);
+    }
     else if(net.types[i] == MAXPOOL){
         maxpool_layer layer = *(maxpool_layer *)net.layers[i];
         image output = get_maxpool_image(layer);
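The other branches of this size helper derive the count from an output image as output.h*output.w*output.c, while the detection layer reports its size through a dedicated get_detection_layer_output_size call, since it does not expose an output image. A self-contained contrast of the two conventions (stand-in types and an arbitrary example size, not darknet code):

/* --- illustrative sketch, not part of the diff --- */
#include <stdio.h>

typedef struct { int h, w, c; } image;
typedef struct { image out; } spatial_layer;   /* stand-in for conv/maxpool/... */
typedef struct { int outputs; } flat_layer;    /* stand-in for the detection layer */

/* image-producing layers: size comes from the output volume */
int spatial_output_size(spatial_layer l)
{
    return l.out.h * l.out.w * l.out.c;
}

/* detection-style layers: size is reported by the layer itself */
int flat_output_size(flat_layer l)
{
    return l.outputs;
}

int main(void)
{
    spatial_layer s = { { 7, 7, 256 } };
    flat_layer    f = { 845 };                 /* arbitrary example size */
    printf("spatial layer size: %d\n", spatial_output_size(s));  /* 12544 */
    printf("flat layer size:    %d\n", flat_output_size(f));     /* 845 */
    return 0;
}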