From d6fbe86e7a8c1bc389902c90c57ee7e80f5475b9 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Tue, 16 Dec 2014 19:40:05 +0000
Subject: [PATCH] Add dropout and crop layers to the GPU path, plus top-k accuracy

Dispatch DROPOUT and CROP layers in the GPU forward pass (dropout only
while training) and back-propagate through dropout. The cl_mem output and
delta lookups now fall through DROPOUT layers to the layer below, since
dropout modifies its input in place. Accuracy uses matrix_topk_accuracy,
a new network_accuracies_gpu reports top-1 and top-5 together, no-op
stubs keep non-GPU builds linking, and some commented-out timing code is
dropped.
---
 src/network_gpu.c |   57 ++++++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 46 insertions(+), 11 deletions(-)
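
Note on the accuracy change: matrix_topk_accuracy(d.y, guess, k) is taken
here to mean the fraction of rows whose true class (the argmax of the
truth row) lands among the k highest-scoring entries of the prediction
row, so k=1 matches the old matrix_accuracy and k=5 gives the usual top-5
number. The real implementation is defined elsewhere (matrix.c) and is
not part of this patch; the sketch below is only illustrative and assumes
darknet's matrix layout (int rows, int cols, float **vals).

    /* Illustrative only: top-k accuracy over one-hot truth rows. */
    float topk_accuracy_sketch(matrix truth, matrix guess, int k)
    {
        int i, j, correct = 0;
        for(i = 0; i < truth.rows; ++i){
            /* index of the true class = argmax of the truth row */
            int label = 0;
            for(j = 1; j < truth.cols; ++j)
                if(truth.vals[i][j] > truth.vals[i][label]) label = j;
            /* count predictions scored strictly higher than the true class */
            int better = 0;
            for(j = 0; j < guess.cols; ++j)
                if(guess.vals[i][j] > guess.vals[i][label]) ++better;
            if(better < k) ++correct;
        }
        return (float)correct / truth.rows;
    }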

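network_accuracies_gpu returns a pointer to a static two-element buffer
(top-1 in acc[0], top-5 in acc[1]) that is overwritten on every call, so
callers should read or copy the values before calling it again. A minimal
usage sketch, assuming the declaration ends up in network.h and that the
caller already has a loaded network and a validation data set:

    #include <stdio.h>
    #include "network.h"
    #include "data.h"

    void report_validation_accuracy(network net, data valid)
    {
        float *acc = network_accuracies_gpu(net, valid);  /* static buffer */
        printf("top-1: %.4f  top-5: %.4f\n", acc[0], acc[1]);
    }
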
diff --git a/src/network_gpu.c b/src/network_gpu.c
index 7302664..c3f22d3 100644
--- a/src/network_gpu.c
+++ b/src/network_gpu.c
@@ -22,7 +22,9 @@
 {
     //printf("start\n");
     int i;
+    //printf("Truth: %f\n", cl_checksum(truth, 1000*net.batch));
     for(i = 0; i < net.n; ++i){
+        //printf("Truth %i: %f\n", i, cl_checksum(truth, 1000*net.batch));
         //clock_t time = clock();
         if(net.types[i] == CONVOLUTIONAL){
             convolutional_layer layer = *(convolutional_layer *)net.layers[i];
@@ -48,6 +50,16 @@
             forward_softmax_layer_gpu(layer, input);
             input = layer.output_cl;
         }
+        else if(net.types[i] == DROPOUT){
+            if(!train) continue;
+            dropout_layer layer = *(dropout_layer *)net.layers[i];
+            forward_dropout_layer_gpu(layer, input);
+        }
+        else if(net.types[i] == CROP){
+            crop_layer layer = *(crop_layer *)net.layers[i];
+            forward_crop_layer_gpu(layer, input);
+            input = layer.output_cl;
+        }
         //printf("%d %f\n", i, sec(clock()-time));
         /*
            else if(net.types[i] == CROP){
@@ -80,7 +92,7 @@
         }
         if(net.types[i] == CONVOLUTIONAL){
             convolutional_layer layer = *(convolutional_layer *)net.layers[i];
-            backward_convolutional_layer_gpu(layer, prev_delta);
+            backward_convolutional_layer_gpu(layer, prev_input, prev_delta);
         }
         else if(net.types[i] == COST){
             cost_layer layer = *(cost_layer *)net.layers[i];
@@ -94,6 +106,10 @@
             maxpool_layer layer = *(maxpool_layer *)net.layers[i];
             backward_maxpool_layer_gpu(layer, prev_delta);
         }
+        else if(net.types[i] == DROPOUT){
+            dropout_layer layer = *(dropout_layer *)net.layers[i];
+            backward_dropout_layer_gpu(layer, prev_delta);
+        }
         else if(net.types[i] == SOFTMAX){
             softmax_layer layer = *(softmax_layer *)net.layers[i];
             backward_softmax_layer_gpu(layer, prev_delta);
@@ -131,9 +147,15 @@
         maxpool_layer layer = *(maxpool_layer *)net.layers[i];
         return layer.output_cl;
     }
+    else if(net.types[i] == CROP){
+        crop_layer layer = *(crop_layer *)net.layers[i];
+        return layer.output_cl;
+    }
     else if(net.types[i] == SOFTMAX){
         softmax_layer layer = *(softmax_layer *)net.layers[i];
         return layer.output_cl;
+    } else if(net.types[i] == DROPOUT){
+        return get_network_output_cl_layer(net, i-1);
     }
     return 0;
 }
@@ -155,6 +177,8 @@
     else if(net.types[i] == SOFTMAX){
         softmax_layer layer = *(softmax_layer *)net.layers[i];
         return layer.delta_cl;
+    } else if(net.types[i] == DROPOUT){
+        return get_network_delta_cl_layer(net, i-1);
     }
     return 0;
 }
@@ -171,18 +195,10 @@
         cl_write_array(*net.input_cl, x, x_size);
         cl_write_array(*net.truth_cl, y, y_size);
     }
-    //printf("trans %f\n", sec(clock()-time));
-    //time = clock();
     forward_network_gpu(net, *net.input_cl, *net.truth_cl, 1);
-    //printf("forw %f\n", sec(clock()-time));
-    //time = clock();
     backward_network_gpu(net, *net.input_cl);
-    //printf("back %f\n", sec(clock()-time));
-    //time = clock();
     update_network_gpu(net);
     float error = get_network_cost(net);
-    //printf("updt %f\n", sec(clock()-time));
-    //time = clock();
     return error;
 }
 
@@ -253,7 +269,7 @@
 
 float *network_predict_gpu(network net, float *input)
 {
-    
+
     int size = get_network_input_size(net) * net.batch;
     cl_mem input_cl = cl_make_array(input, size);
     forward_network_gpu(net, input_cl, 0, 0);
@@ -287,11 +303,30 @@
 float network_accuracy_gpu(network net, data d)
 {
     matrix guess = network_predict_data_gpu(net, d);
-    float acc = matrix_accuracy(d.y, guess);
+    float acc = matrix_topk_accuracy(d.y, guess, 1);
+    free_matrix(guess);
+    return acc;
+}
+
+float *network_accuracies_gpu(network net, data d)
+{
+    static float acc[2];
+    matrix guess = network_predict_data_gpu(net, d);
+    acc[0] = matrix_topk_accuracy(d.y, guess, 1);
+    acc[1] = matrix_topk_accuracy(d.y, guess, 5);
     free_matrix(guess);
     return acc;
 }
 
 
+#else
+void forward_network_gpu(network net, cl_mem input, cl_mem truth, int train){}
+void backward_network_gpu(network net, cl_mem input){}
+void update_network_gpu(network net){}
+float train_network_sgd_gpu(network net, data d, int n){return 0;}
+float train_network_data_gpu(network net, data d, int n){return 0;}
+float *network_predict_gpu(network net, float *input){return 0;}
+float network_accuracy_gpu(network net, data d){return 0;}
+float *network_accuracies_gpu(network net, data d){return 0;}
 
 #endif

--
Gitblit v1.10.0