From 481b57a96a9ef29b112caec1bb3e17ffb043ceae Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sun, 25 Sep 2016 06:12:54 +0000
Subject: [PATCH] So I have this new programming paradigm.......

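The paradigm: instead of the network code deciding how to run each layer,
every layer now carries its own forward/backward function pointers (plus
GPU variants), set at construction time, and every implementation takes a
network_state so it can reach sibling layers through state.net. Roughly
the dispatch loop this enables (a sketch only, not part of this patch;
forward_network and the state.index / state.input fields are assumptions
based on darknet's usual network_state layout):

    /* sketch: assumes darknet's network / layer / network_state types */
    void forward_network(network net, network_state state)
    {
        int i;
        for(i = 0; i < net.n; ++i){
            state.index = i;          /* assumed: tell the layer where it sits */
            layer l = net.layers[i];
            l.forward(l, state);      /* dispatch through the stored pointer */
            state.input = l.output;   /* assumed: feed this output forward */
        }
    }

Adding a new layer type then means filling in two pointers (four with GPU)
at make-time instead of growing a switch statement in the network code.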
---
 src/route_layer.c |   22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/route_layer.c b/src/route_layer.c
index df50b64..47e3d70 100644
--- a/src/route_layer.c
+++ b/src/route_layer.c
@@ -23,20 +23,26 @@
     l.inputs = outputs;
     l.delta =  calloc(outputs*batch, sizeof(float));
     l.output = calloc(outputs*batch, sizeof(float));
+
+    l.forward = forward_route_layer;
+    l.backward = backward_route_layer;
     #ifdef GPU
+    l.forward_gpu = forward_route_layer_gpu;
+    l.backward_gpu = backward_route_layer_gpu;
+
     l.delta_gpu =  cuda_make_array(l.delta, outputs*batch);
     l.output_gpu = cuda_make_array(l.output, outputs*batch);
     #endif
     return l;
 }
 
-void forward_route_layer(const route_layer l, network net)
+void forward_route_layer(const route_layer l, network_state state)
 {
     int i, j;
     int offset = 0;
     for(i = 0; i < l.n; ++i){
         int index = l.input_layers[i];
-        float *input = net.layers[index].output;
+        float *input = state.net.layers[index].output;
         int input_size = l.input_sizes[i];
         for(j = 0; j < l.batch; ++j){
             copy_cpu(input_size, input + j*input_size, 1, l.output + offset + j*l.outputs, 1);
@@ -45,13 +51,13 @@
     }
 }
 
-void backward_route_layer(const route_layer l, network net)
+void backward_route_layer(const route_layer l, network_state state)
 {
     int i, j;
     int offset = 0;
     for(i = 0; i < l.n; ++i){
         int index = l.input_layers[i];
-        float *delta = net.layers[index].delta;
+        float *delta = state.net.layers[index].delta;
         int input_size = l.input_sizes[i];
         for(j = 0; j < l.batch; ++j){
             axpy_cpu(input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size, 1);
@@ -61,13 +67,13 @@
 }
 
 #ifdef GPU
-void forward_route_layer_gpu(const route_layer l, network net)
+void forward_route_layer_gpu(const route_layer l, network_state state)
 {
     int i, j;
     int offset = 0;
     for(i = 0; i < l.n; ++i){
         int index = l.input_layers[i];
-        float *input = net.layers[index].output_gpu;
+        float *input = state.net.layers[index].output_gpu;
         int input_size = l.input_sizes[i];
         for(j = 0; j < l.batch; ++j){
             copy_ongpu(input_size, input + j*input_size, 1, l.output_gpu + offset + j*l.outputs, 1);
@@ -76,13 +82,13 @@
     }
 }
 
-void backward_route_layer_gpu(const route_layer l, network net)
+void backward_route_layer_gpu(const route_layer l, network_state state)
 {
     int i, j;
     int offset = 0;
     for(i = 0; i < l.n; ++i){
         int index = l.input_layers[i];
-        float *delta = net.layers[index].delta_gpu;
+        float *delta = state.net.layers[index].delta_gpu;
         int input_size = l.input_sizes[i];
         for(j = 0; j < l.batch; ++j){
             axpy_ongpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1);

--
Gitblit v1.10.0