Joseph Redmon
2015-01-31 0f1a31648c5292fa49b35eac90a2ee676d6c13e6
src/network.c
@@ -15,6 +15,33 @@
#include "softmax_layer.h"
#include "dropout_layer.h"
char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case CONNECTED:
            return "connected";
        case MAXPOOL:
            return "maxpool";
        case SOFTMAX:
            return "softmax";
        case NORMALIZATION:
            return "normalization";
        case DROPOUT:
            return "dropout";
        case FREEWEIGHT:
            return "freeweight";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        default:
            break;
    }
    return "none";
}
network make_network(int n, int batch)
{
    network net;
@@ -24,14 +51,14 @@
    net.types = calloc(net.n, sizeof(LAYER_TYPE));
    net.outputs = 0;
    net.output = 0;
    net.seen = 0;
    #ifdef GPU
-    net.input_cl = calloc(1, sizeof(cl_mem));
-    net.truth_cl = calloc(1, sizeof(cl_mem));
+    net.input_gpu = calloc(1, sizeof(float *));
+    net.truth_gpu = calloc(1, sizeof(float *));
    #endif
    return net;
}
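/* Forward pass: run each layer in order, feeding each layer's output into the next. */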
void forward_network(network net, float *input, float *truth, int train)
{
    int i;
@@ -48,7 +75,7 @@
        }
        else if(net.types[i] == CROP){
            crop_layer layer = *(crop_layer *)net.layers[i];
-            forward_crop_layer(layer, input);
+            forward_crop_layer(layer, train, input);
            input = layer.output;
        }
        else if(net.types[i] == COST){
@@ -78,9 +105,12 @@
        }
        else if(net.types[i] == FREEWEIGHT){
            if(!train) continue;
-            freeweight_layer layer = *(freeweight_layer *)net.layers[i];
-            forward_freeweight_layer(layer, input);
+            //freeweight_layer layer = *(freeweight_layer *)net.layers[i];
+            //forward_freeweight_layer(layer, input);
        }
        //char buff[256];
        //sprintf(buff, "layer %d", i);
        //cuda_compare(get_network_output_gpu_layer(net, i), input, get_network_output_size_layer(net, i)*net.batch, buff);
    }
}
@@ -103,8 +133,8 @@
        }
        else if(net.types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *)net.layers[i];
-            secret_update_connected_layer((connected_layer *)net.layers[i]);
-            //update_connected_layer(layer);
+            //secret_update_connected_layer((connected_layer *)net.layers[i]);
+            update_connected_layer(layer);
        }
    }
}
@@ -372,6 +402,10 @@
            cost_layer *layer = (cost_layer *)net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == CROP){
            crop_layer *layer = (crop_layer *)net->layers[i];
            layer->batch = b;
        }
    }
}
@@ -502,6 +536,9 @@
        normalization_layer layer = *(normalization_layer *)net.layers[i];
        return get_normalization_image(layer);
    }
    else if(net.types[i] == DROPOUT){
        return get_network_image_layer(net, i-1);
    }
    else if(net.types[i] == CROP){
        crop_layer layer = *(crop_layer *)net.layers[i];
        return get_crop_image(layer);
@@ -549,7 +586,7 @@
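/* Run a single forward pass on one input and return the network's output. */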
float *network_predict(network net, float *input)
{
    #ifdef GPU
-        if(gpu_index >= 0) return network_predict_gpu(net, input);
+    if(gpu_index >= 0)  return network_predict_gpu(net, input);
    #endif
    forward_network(net, input, 0, 0);