From 76e258520edb50e8bb897ba15aa9467579e70a6a Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Wed, 20 Jun 2018 10:28:25 +0000
Subject: [PATCH] Connected layer: add cuDNN batchnorm descriptors and fix double bias add

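With batch normalization enabled the bias is already added inside
forward_batchnorm_layer_gpu (it serves as the batchnorm beta), so the
per-sample axpy loop after it applied the bias a second time. Add the
bias only on the non-batchnorm path, with a single add_bias_gpu call
over the whole batch instead of one axpy per sample. Also create the
cuDNN tensor descriptors the batchnorm path needs, and align the layer
printout with the other layer types.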
---
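Note (this section is dropped by `git am`): the two descriptors map the
connected layer's output onto cuDNN's NCHW layout, one "pixel" per
sample with the outputs on the channel axis (the layer sets
out_c == outputs and out_h == out_w == 1), which is the shape the cuDNN
batchnorm entry points expect for the data and the per-channel
scale/bias tensors. A minimal standalone sketch of the same descriptor
calls, with hypothetical sizes batch=16 and outputs=256 (build with
-lcudnn):

    #include <stdio.h>
    #include <cudnn.h>

    int main(void)
    {
        cudnnTensorDescriptor_t dstTensorDesc, normTensorDesc;
        cudnnCreateTensorDescriptor(&normTensorDesc);
        cudnnCreateTensorDescriptor(&dstTensorDesc);

        /* Activations: N=batch, C=outputs, H=W=1. */
        cudnnSetTensor4dDescriptor(dstTensorDesc, CUDNN_TENSOR_NCHW,
                                   CUDNN_DATA_FLOAT, 16, 256, 1, 1);
        /* Per-channel batchnorm parameters (scale/bias/mean/variance). */
        cudnnSetTensor4dDescriptor(normTensorDesc, CUDNN_TENSOR_NCHW,
                                   CUDNN_DATA_FLOAT, 1, 256, 1, 1);

        /* Read the shape back to show what cuDNN stored. */
        cudnnDataType_t t;
        int n, c, h, w, ns, cs, hs, ws;
        cudnnGetTensor4dDescriptor(dstTensorDesc, &t, &n, &c, &h, &w,
                                   &ns, &cs, &hs, &ws);
        printf("dst tensor: %d x %d x %d x %d\n", n, c, h, w);

        cudnnDestroyTensorDescriptor(dstTensorDesc);
        cudnnDestroyTensorDescriptor(normTensorDesc);
        return 0;
    }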
 src/connected_layer.c |   21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)
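
Note: add_bias_gpu keeps the semantics of the removed axpy loop but
covers the whole batch in one kernel launch instead of l.batch
launches. A CPU sketch of what it computes, following the signature the
patch calls it with (size == 1 for a connected layer; the real version
is a CUDA kernel elsewhere in the repo):

    #include <stdio.h>

    /* Reference semantics of add_bias_gpu(output, biases, batch, n, size). */
    static void add_bias_ref(float *output, float *biases,
                             int batch, int n, int size)
    {
        for (int b = 0; b < batch; ++b)
            for (int i = 0; i < n; ++i)
                for (int j = 0; j < size; ++j)
                    output[(b*n + i)*size + j] += biases[i];
    }

    int main(void)
    {
        float out[2*3] = {0};             /* batch=2, outputs=3, size=1 */
        float bias[3] = {.1f, .2f, .3f};
        add_bias_ref(out, bias, 2, 3, 1);
        for (int k = 0; k < 6; ++k) printf("%.1f ", out[k]);
        printf("\n");                     /* prints: 0.1 0.2 0.3 0.1 0.2 0.3 */
        return 0;
    }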

diff --git a/src/connected_layer.c b/src/connected_layer.c
index 2694229..e6dc759 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -97,10 +97,16 @@
 
         l.x_gpu = cuda_make_array(l.output, l.batch*outputs);
         l.x_norm_gpu = cuda_make_array(l.output, l.batch*outputs);
+#ifdef CUDNN
+        cudnnCreateTensorDescriptor(&l.normTensorDesc);
+        cudnnCreateTensorDescriptor(&l.dstTensorDesc);
+        cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
+        cudnnSetTensor4dDescriptor(l.normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l.out_c, 1, 1);
+#endif
     }
 #endif
     l.activation = activation;
-    fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
+    fprintf(stderr, "connected                            %4d  ->  %4d\n", inputs, outputs);
     return l;
 }
 
@@ -280,12 +286,13 @@
     float * b = l.weights_gpu;
     float * c = l.output_gpu;
     gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
-    if(l.batch_normalize){
-        forward_batchnorm_layer_gpu(l, state);
-    }
-    for(i = 0; i < l.batch; ++i){
-        axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
-    }
+    // with batchnorm the bias (beta) is added inside forward_batchnorm_layer_gpu
+    if(l.batch_normalize){
+        forward_batchnorm_layer_gpu(l, state);
+    } else {
+        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.outputs, 1);
+    }
+    //for(i = 0; i < l.batch; ++i) axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
     activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
 }
 

--
Gitblit v1.10.0