Joseph Redmon
2015-02-24 5f4a5f59b072d4029107422d30b04941424c48b1
src/network_kernels.cu
@@ -10,6 +10,7 @@
#include "crop_layer.h"
#include "connected_layer.h"
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "maxpool_layer.h"
#include "cost_layer.h"
#include "normalization_layer.h"
@@ -20,6 +21,7 @@
extern "C" float * get_network_output_gpu_layer(network net, int i);
extern "C" float * get_network_delta_gpu_layer(network net, int i);
float *get_network_output_gpu(network net);
void forward_network_gpu(network net, float * input, float * truth, int train)
{
@@ -31,6 +33,11 @@
            forward_convolutional_layer_gpu(layer, input);
            input = layer.output_gpu;
        }
        else if(net.types[i] == DECONVOLUTIONAL){
            deconvolutional_layer layer = *(deconvolutional_layer *)net.layers[i];
            forward_deconvolutional_layer_gpu(layer, input);
            input = layer.output_gpu;
        }
        else if(net.types[i] == COST){
            cost_layer layer = *(cost_layer *)net.layers[i];
            forward_cost_layer_gpu(layer, input, truth);
@@ -61,6 +68,7 @@
            forward_crop_layer_gpu(layer, train, input);
            input = layer.output_gpu;
        }
        //cudaDeviceSynchronize();
        //printf("Forward %d %s %f\n", i, get_layer_string(net.types[i]), sec(clock() - time));
    }
}
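/* A minimal sketch (hypothetical names, not part of this commit) of the
 * dispatch pattern forward_network_gpu uses above: the network keeps a
 * parallel tag array and a void-pointer array, and the driver loop reads
 * the tag, casts out the concrete layer struct, runs its forward pass,
 * and chains the layer's output into the next iteration's input. Plain
 * C with no GPU calls, so the shape of the pattern stands alone. */
#include <stdio.h>

typedef enum { CONV, DECONV } layer_type;

/* Stand-ins for the real layer structs; each owns its output buffer. */
typedef struct { float output[1]; } conv_layer;
typedef struct { float output[1]; } deconv_layer;

typedef struct {
    int n;
    layer_type *types;  /* per-layer tag, like net.types[i]    */
    void **layers;      /* per-layer state, like net.layers[i] */
} net_t;

static float *forward_all(net_t net, float *input)
{
    int i;
    for (i = 0; i < net.n; ++i) {
        if (net.types[i] == CONV) {
            conv_layer *l = (conv_layer *)net.layers[i];
            l->output[0] = input[0] * 2;  /* placeholder forward kernel      */
            input = l->output;            /* next layer consumes this output */
        } else if (net.types[i] == DECONV) {
            deconv_layer *l = (deconv_layer *)net.layers[i];
            l->output[0] = input[0] + 1;
            input = l->output;
        }
    }
    return input;
}

int main(void)
{
    conv_layer c; deconv_layer d;
    layer_type types[] = { CONV, DECONV };
    void *layers[] = { &c, &d };
    net_t net = { 2, types, layers };
    float x = 3;
    printf("%f\n", forward_all(net, &x)[0]);  /* (3*2)+1 = 7 */
    return 0;
}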
@@ -83,6 +91,10 @@
            convolutional_layer layer = *(convolutional_layer *)net.layers[i];
            backward_convolutional_layer_gpu(layer, prev_input, prev_delta);
        }
        else if(net.types[i] == DECONVOLUTIONAL){
            deconvolutional_layer layer = *(deconvolutional_layer *)net.layers[i];
            backward_deconvolutional_layer_gpu(layer, prev_input, prev_delta);
        }
        else if(net.types[i] == COST){
            cost_layer layer = *(cost_layer *)net.layers[i];
            backward_cost_layer_gpu(layer, prev_input, prev_delta);
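/* The backward driver runs the same dispatch in reverse: layer i reads
 * prev_input (layer i-1's forward output) to form its gradients and
 * accumulates the gradient w.r.t. its input into prev_delta (layer
 * i-1's delta buffer). A compact CPU-only sketch of that ordering,
 * with hypothetical buffers and a placeholder backward kernel: */
#include <stdio.h>

#define N 3

static float output[N][1];  /* stands in for layer.output_gpu */
static float delta[N][1];   /* stands in for layer.delta_gpu  */

/* Placeholder backward kernel: scales the predecessor's activation
 * and accumulates it into the predecessor's delta buffer. */
static void backward_layer(int i, float *prev_input, float *prev_delta)
{
    if (prev_delta) prev_delta[0] += prev_input[0] * 0.1f;
}

static void backward_all(float *net_input)
{
    int i;
    for (i = N - 1; i >= 0; --i) {
        /* Layer 0 has no predecessor: its input is the raw network
         * input and there is no earlier delta buffer to fill. */
        float *prev_input = (i == 0) ? net_input : output[i - 1];
        float *prev_delta = (i == 0) ? 0 : delta[i - 1];
        backward_layer(i, prev_input, prev_delta);
    }
}

int main(void)
{
    float x = 1.0f;
    output[0][0] = 2.0f; output[1][0] = 4.0f;
    backward_all(&x);
    printf("delta[0]=%f delta[1]=%f\n", delta[0][0], delta[1][0]);
    return 0;
}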
@@ -115,6 +127,10 @@
            convolutional_layer layer = *(convolutional_layer *)net.layers[i];
            update_convolutional_layer_gpu(layer);
        }
        else if(net.types[i] == DECONVOLUTIONAL){
            deconvolutional_layer layer = *(deconvolutional_layer *)net.layers[i];
            update_deconvolutional_layer_gpu(layer);
        }
        else if(net.types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *)net.layers[i];
            update_connected_layer_gpu(layer);
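/* update_network_gpu dispatches the same way so each trainable layer
 * can apply its accumulated weight updates. A sketch of the classic
 * momentum-SGD step such an update typically performs (hypothetical;
 * the per-layer update kernels are not shown in this hunk): */
#include <stdio.h>

static void sgd_update(float *w, float *w_update, int n,
                       float lr, float momentum, float decay)
{
    int i;
    for (i = 0; i < n; ++i) {
        w_update[i] -= decay * w[i];  /* fold weight decay into the gradient */
        w[i] += lr * w_update[i];     /* apply the accumulated update        */
        w_update[i] *= momentum;      /* keep a fraction for the next step   */
    }
}

int main(void)
{
    float w[2] = { 1.0f, -1.0f };
    float g[2] = { 0.5f, 0.5f };
    sgd_update(w, g, 2, 0.1f, 0.9f, 0.0005f);
    printf("w = %f %f\n", w[0], w[1]);
    return 0;
}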
@@ -128,6 +144,10 @@
        convolutional_layer layer = *(convolutional_layer *)net.layers[i];
        return layer.output_gpu;
    }
    else if(net.types[i] == DECONVOLUTIONAL){
        deconvolutional_layer layer = *(deconvolutional_layer *)net.layers[i];
        return layer.output_gpu;
    }
    else if(net.types[i] == CONNECTED){
        connected_layer layer = *(connected_layer *)net.layers[i];
        return layer.output_gpu;
@@ -156,6 +176,10 @@
        convolutional_layer layer = *(convolutional_layer *)net.layers[i];
        return layer.delta_gpu;
    }
    else if(net.types[i] == DECONVOLUTIONAL){
        deconvolutional_layer layer = *(deconvolutional_layer *)net.layers[i];
        return layer.delta_gpu;
    }
    else if(net.types[i] == CONNECTED){
        connected_layer layer = *(connected_layer *)net.layers[i];
        return layer.delta_gpu;
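/* get_network_output_gpu_layer and get_network_delta_gpu_layer repeat
 * one branch per layer type because every layer hides behind a void
 * pointer. A common C alternative (a sketch of the idiom, not what
 * this file does) is to give every layer struct a shared prefix so a
 * single cast reaches the common fields without any dispatch: */
#include <stdio.h>

/* Shared prefix: placed as the FIRST member of every layer struct, a
 * pointer to any layer can be cast to layer_common to read it. */
typedef struct {
    float *output_gpu;
    float *delta_gpu;
} layer_common;

typedef struct {
    layer_common c;   /* must stay the first member  */
    int stride;       /* type-specific fields follow */
} deconv_layer;

static float *get_output_gpu(void *layer)
{
    return ((layer_common *)layer)->output_gpu;  /* no per-type branch */
}

int main(void)
{
    float buf[1] = { 42.0f };
    deconv_layer d = { { buf, 0 }, 2 };
    printf("%f\n", get_output_gpu(&d)[0]);
    return 0;
}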
@@ -196,6 +220,10 @@
    //time = clock();
    update_network_gpu(net);
    float error = get_network_cost(net);
    //print_letters(y, 50);
    //float *out = get_network_output_gpu(net);
    //print_letters(out, 50);
  //printf("updt %f\n", sec(clock() - time));
  //time = clock();
    return error;
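/* The enclosing function (shown in part) is the per-batch training
 * step: run forward with the truth so the cost layer can score the
 * prediction, run backward, apply the updates, and return the scalar
 * loss. An outline with stub drivers (hypothetical bodies, standing
 * in for the real functions in this file): */
#include <stdio.h>

static float cost;

static void forward (float *x, float *y) { cost = (x[0] - y[0]) * (x[0] - y[0]); }
static void backward(void) { /* fill each layer's delta buffer   */ }
static void update  (void) { /* apply accumulated weight updates */ }

static float train_step(float *x, float *y)
{
    forward(x, y);
    backward();
    update();
    return cost;  /* like get_network_cost(net) above */
}

int main(void)
{
    float x = 0.5f, y = 1.0f;
    printf("loss = %f\n", train_step(&x, &y));
    return 0;
}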
@@ -207,6 +235,10 @@
        convolutional_layer layer = *(convolutional_layer *)net.layers[i];
        return layer.output;
    }
    else if(net.types[i] == DECONVOLUTIONAL){
        deconvolutional_layer layer = *(deconvolutional_layer *)net.layers[i];
        return layer.output;
    }
    else if(net.types[i] == CONNECTED){
        connected_layer layer = *(connected_layer *)net.layers[i];
        cuda_pull_array(layer.output_gpu, layer.output, layer.outputs*layer.batch);
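/* Unlike the GPU accessors above, this CPU-side accessor has to copy
 * device memory back to the host before returning, which is what
 * cuda_pull_array does for the CONNECTED branch. A sketch of what such
 * a pull boils down to in raw CUDA (assumed behavior; the wrapper's
 * exact error handling is not shown here): */
#include <cuda_runtime.h>
#include <stdio.h>

static void pull_array(float *x_gpu, float *x, size_t n)
{
    /* Synchronous device-to-host copy of n floats. */
    cudaError_t status = cudaMemcpy(x, x_gpu, n * sizeof(float),
                                    cudaMemcpyDeviceToHost);
    if (status != cudaSuccess) {
        fprintf(stderr, "pull failed: %s\n", cudaGetErrorString(status));
    }
}

int main(void)
{
    float host[4] = { 0 }, *dev = 0;
    cudaMalloc((void **)&dev, 4 * sizeof(float));
    cudaMemset(dev, 0, 4 * sizeof(float));
    pull_array(dev, host, 4);
    printf("%f\n", host[0]);
    cudaFree(dev);
    return 0;
}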