From 84d6533cb8112f23a34d3de76435a10f4620f4b8 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Mon, 23 Oct 2017 13:43:03 +0000
Subject: [PATCH] Fixed OpenCV usage in the yolo_console_dll.cpp

---
 src/convolutional_kernels.cu |   37 +++++++++++++++++++++++++++++++++----
 1 file changed, 33 insertions(+), 4 deletions(-)
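
Note (kept above the first hunk so it is ignored by git apply): besides linking cudnn.lib
via #pragma comment when CUDNN is defined (an MSVC-only pragma), the hunks below pull/push
the Adam moment buffers m_gpu/v_gpu alongside the other layer parameters and add an Adam
branch to update_convolutional_layer_gpu(). The following CPU-side sketch only restates
what that branch computes, call for call; the function and its argument names are made up
for readability and are not part of the applied diff, and the final weight step done by
adam_gpu() is not shown in this patch.

/* Mirrors the new Adam branch: scal/axpy/mul on the weight_updates buffer ("wu"),
 * after which adam_gpu() (body not in this patch) is expected to apply the actual
 * Adam step to the weights and fill_ongpu() zeroes wu. */
static void adam_moments_cpu_sketch(float *w, float *wu, float *m, float *v,
                                    int size, float B1, float B2,
                                    float decay, int batch)
{
    for (int i = 0; i < size; ++i) {
        m[i] *= B1;                       /* scal_ongpu(size, B1, m_gpu, 1)           */
        v[i] *= B2;                       /* scal_ongpu(size, B2, v_gpu, 1)           */
        wu[i] += -decay * batch * w[i];   /* L2 weight decay folded into the buffer   */
        m[i]  += -(1 - B1) * wu[i];       /* first-moment running sum                 */
        wu[i] *= wu[i];                   /* mul_ongpu squares the buffer in place    */
        v[i]  += (1 - B2) * wu[i];        /* second-moment running sum                */
    }
    /* adam_gpu(size, weights, m, v, B1, B2, learning_rate/batch, eps, t+1) then
     * updates the weights, and fill_ongpu(size, 0, weight_updates, 1) clears wu. */
}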

diff --git a/src/convolutional_kernels.cu b/src/convolutional_kernels.cu
index b8d6478..03c9ab7 100644
--- a/src/convolutional_kernels.cu
+++ b/src/convolutional_kernels.cu
@@ -2,6 +2,10 @@
 #include "curand.h"
 #include "cublas_v2.h"
 
+#ifdef CUDNN
+#pragma comment(lib, "cudnn.lib")  
+#endif
+
 extern "C" {
 #include "convolutional_layer.h"
 #include "batchnorm_layer.h"
@@ -123,6 +127,7 @@
     activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
     //if(l.dot > 0) dot_error_gpu(l);
     if(l.binary || l.xnor) swap_binary(&l);
+	//cudaDeviceSynchronize();	// for correct profiling of performance
 }
 
 void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
@@ -133,6 +138,9 @@
 
     if(l.batch_normalize){
         backward_batchnorm_layer_gpu(l, state);
+        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
+    } else {
+        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
     }
     float *original_input = state.input;
 
@@ -215,6 +223,10 @@
         cuda_pull_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
         cuda_pull_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
     }
+    if (layer.adam){
+        cuda_pull_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
+        cuda_pull_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
+    }
 }
 
 void push_convolutional_layer(convolutional_layer layer)
@@ -228,12 +240,15 @@
         cuda_push_array(layer.rolling_mean_gpu, layer.rolling_mean, layer.n);
         cuda_push_array(layer.rolling_variance_gpu, layer.rolling_variance, layer.n);
     }
+    if (layer.adam){
+        cuda_push_array(layer.m_gpu, layer.m, layer.c*layer.n*layer.size*layer.size);
+        cuda_push_array(layer.v_gpu, layer.v, layer.c*layer.n*layer.size*layer.size);
+    }
 }
 
 void update_convolutional_layer_gpu(convolutional_layer layer, int batch, float learning_rate, float momentum, float decay)
 {
     int size = layer.size*layer.size*layer.c*layer.n;
-
     axpy_ongpu(layer.n, learning_rate/batch, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
     scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
 
@@ -242,9 +257,23 @@
         scal_ongpu(layer.n, momentum, layer.scale_updates_gpu, 1);
     }
 
-    axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
-    axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
-    scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
+    if(layer.adam){
+        scal_ongpu(size, layer.B1, layer.m_gpu, 1);
+        scal_ongpu(size, layer.B2, layer.v_gpu, 1);
+
+        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
+
+        axpy_ongpu(size, -(1-layer.B1), layer.weight_updates_gpu, 1, layer.m_gpu, 1);
+        mul_ongpu(size, layer.weight_updates_gpu, 1, layer.weight_updates_gpu, 1);
+        axpy_ongpu(size, (1-layer.B2), layer.weight_updates_gpu, 1, layer.v_gpu, 1);
+
+        adam_gpu(size, layer.weights_gpu, layer.m_gpu, layer.v_gpu, layer.B1, layer.B2, learning_rate/batch, layer.eps, layer.t+1);
+        fill_ongpu(size, 0, layer.weight_updates_gpu, 1);
+    }else{
+        axpy_ongpu(size, -decay*batch, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
+        axpy_ongpu(size, learning_rate/batch, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
+        scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
+    }
 }
 
 

--
Gitblit v1.10.0