From 160eddddc4e265d5ee59a38797c30720bf46cd7c Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Sun, 27 May 2018 13:53:42 +0000
Subject: [PATCH] Minor fix

---
 src/convolutional_kernels.cu |  164 ++++++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 121 insertions(+), 43 deletions(-)
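
Notes (commentary only, not applied by git am): the hunks below replace the
function-local static FP16 buffers in forward/backward_convolutional_layer_gpu()
with buffers shared through state.net (input16_gpu, output16_gpu and the matching
max_*16_size counters), and add a CUDNN_HALF batch-normalization path that keeps
the statistics in FP32. Below is a minimal sketch of the grow-on-demand buffer
pattern the hunks apply; ensure_f16_buffer() is a hypothetical helper (not a
darknet function), and the darknet prototypes shown are assumed from how the
diff uses them.

#include <cuda_fp16.h>      /* half */
#include <stddef.h>

/* Existing darknet helpers as used in the hunks below (signatures assumed). */
void  cuda_free(float *x_gpu);
half *cuda_make_f16_from_f32_array(float *src_gpu, size_t n);

/* Reallocate the shared FP16 buffer only when a layer needs more elements
 * than any layer seen so far; otherwise every layer reuses one allocation. */
static float *ensure_f16_buffer(float **buf_gpu, size_t *max_size, size_t needed)
{
    if (*max_size < needed) {
        *max_size = needed;
        if (*buf_gpu) cuda_free(*buf_gpu);
        *buf_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *max_size);
    }
    return *buf_gpu;
}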

diff --git a/src/convolutional_kernels.cu b/src/convolutional_kernels.cu
index 9d88a88..324fc50 100644
--- a/src/convolutional_kernels.cu
+++ b/src/convolutional_kernels.cu
@@ -37,7 +37,7 @@
     int i = 0;
     float mean = 0;
     for(i = 0; i < n; ++i){
-        mean += abs(input[i*size + s]);
+        mean += fabs(input[i*size + s]);
     }
     mean = mean / n;
     for(i = 0; i < n; ++i){
@@ -59,7 +59,7 @@
     int i = 0;
     float mean = 0;
     for(i = 0; i < size; ++i){
-        mean += abs(weights[f*size + i]);
+        mean += fabs(weights[f*size + i]);
     }
     mean = mean / size;
     for(i = 0; i < size; ++i){
@@ -135,26 +135,24 @@
 	// More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
 
 	const size_t input16_size = l.batch*l.c*l.w*l.h;
-	static size_t max_input16_size = input16_size;
-	static half* input16 = cuda_make_f16_from_f32_array(NULL, max_input16_size);
-
 	const size_t output16_size = l.batch*l.out_c*l.out_h*l.out_w;
-	static size_t max_output16_size = output16_size;
-	static half* output16 = cuda_make_f16_from_f32_array(NULL, max_output16_size);
 
-	if (max_input16_size < input16_size) {
-		max_input16_size = input16_size;
-		cuda_free((float *)input16);
-		input16 = cuda_make_f16_from_f32_array(state.input, max_input16_size);
+	if (*state.net.max_input16_size < input16_size) {
+		//printf("\n input16_size: cur = %zu \t max = %zu \n", input16_size, *state.net.max_input16_size);
+		*state.net.max_input16_size = input16_size;
+		if (*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
+		*state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
 	}
+	float *input16 = *state.net.input16_gpu;
 
-	if (max_output16_size < output16_size) {
-		max_output16_size = output16_size;
-		cuda_free((float *)output16);
-		output16 = cuda_make_f16_from_f32_array(NULL, max_output16_size);
+	if (*state.net.max_output16_size < output16_size) {
+		*state.net.max_output16_size = output16_size;
+		if (*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
+		*state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
 	}
+	float *output16 = *state.net.output16_gpu;
 
-	cuda_convert_f32_to_f16(state.input, input16_size, (float *)input16);
+	cuda_convert_f32_to_f16(state.input, input16_size, input16);
 
 	//fill_ongpu(output16_size / 2, 0, (float *)output16, 1);
 	cudnnConvolutionForward(cudnn_handle(),
@@ -171,7 +169,51 @@
 		l.dstTensorDesc,
 		output16);
 	
-	cuda_convert_f16_to_f32((float *)output16, output16_size, l.output_gpu);
+
+	if (l.batch_normalize) 
+	{		
+		if (state.train) // Training
+		{
+			copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1);
+			//cudaMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), cudaMemcpyDefault, get_cuda_stream());
+			float one = 1;
+			float zero = 0;
+			// Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth
+			// compared to FP32; it's just that the statistics and value adjustment should be done in FP32.
+			cudnnBatchNormalizationForwardTraining(cudnn_handle(),
+				CUDNN_BATCHNORM_SPATIAL,
+				&one,
+				&zero,
+				l.normDstTensorDescF16,
+				l.x_gpu,			// input
+				l.normDstTensorDescF16,
+				output16,			// output
+				l.normTensorDesc,
+				l.scales_gpu,
+				l.biases_gpu,
+				.01,
+				l.rolling_mean_gpu,		// output (should be FP32)
+				l.rolling_variance_gpu,	// output (should be FP32)
+				.00001,
+				l.mean_gpu,			// output (should be FP32)
+				l.variance_gpu);	// output (should be FP32)
+
+			cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+			//forward_batchnorm_layer_gpu(l, state);
+		}
+		else // Detection
+		{
+			cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+			normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+			scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+			add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
+		}
+	}
+	else // BIAS only
+	{
+		cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+		add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
+	}	
 
 #else
 
@@ -188,7 +230,7 @@
                 &one,
                 l.dstTensorDesc,
                 l.output_gpu);
-#endif
+#endif	// CUDNN_HALF
 
 
 #else
@@ -205,10 +247,14 @@
     }
 #endif
 
+#ifndef CUDNN_HALF
     if (l.batch_normalize) {
         forward_batchnorm_layer_gpu(l, state);
-    }
-    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
+	}
+	else {
+		add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
+	}
+#endif // no CUDNN_HALF
 
     activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
     //if(l.dot > 0) dot_error_gpu(l);
@@ -222,12 +268,13 @@
 
     backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
 
+#ifndef CUDNN_HALF
     if(l.batch_normalize){
         backward_batchnorm_layer_gpu(l, state);
-        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
     } else {
-        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
+		//backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
     }
+#endif // no CUDNN_HALF
     float *original_input = state.input;
 
     if(l.xnor) state.input = l.binary_input_gpu;
@@ -238,28 +285,59 @@
 #ifdef CUDNN_HALF
 		
 	const size_t input16_size = l.batch*l.c*l.w*l.h;
-	static size_t max_input16_size = input16_size;
-	static half* input16 = cuda_make_f16_from_f32_array(NULL, max_input16_size);
-
 	const size_t delta16_size = l.batch*l.n*l.out_w*l.out_h;
-	static size_t max_delta16_size = delta16_size;
-	static half* delta16 = cuda_make_f16_from_f32_array(NULL, max_delta16_size);
-
-	if (max_input16_size < input16_size) {
-		max_input16_size = input16_size;
-		cuda_free((float *)input16);
-		input16 = cuda_make_f16_from_f32_array(state.input, max_input16_size);
-	}
-
-	if (max_delta16_size < delta16_size) {
-		max_delta16_size = delta16_size;
-		cuda_free((float *)delta16);
-		delta16 = cuda_make_f16_from_f32_array(NULL, max_delta16_size);
-	}
-
-	cuda_convert_f32_to_f16(state.input, input16_size, (float *)input16);
-	cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, (float *)delta16);
 	
+	if (*state.net.max_input16_size < input16_size) {		
+		*state.net.max_input16_size = input16_size;
+		if(*state.net.input16_gpu) cuda_free(*state.net.input16_gpu);
+		*state.net.input16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_input16_size);
+	}
+	float *input16 = *state.net.input16_gpu;
+
+	if (*state.net.max_output16_size < delta16_size) {
+		*state.net.max_output16_size = delta16_size;
+		if(*state.net.output16_gpu) cuda_free(*state.net.output16_gpu);
+		*state.net.output16_gpu = (float *)cuda_make_f16_from_f32_array(NULL, *state.net.max_output16_size);
+	}
+	float *delta16 = *state.net.output16_gpu;
+
+	cuda_convert_f32_to_f16(state.input, input16_size, input16);
+	cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16);
+
+	if (l.batch_normalize) {
+		//if (!state.train) {
+		//	l.mean_gpu = l.rolling_mean_gpu;
+		//	l.variance_gpu = l.rolling_variance_gpu;
+		//}
+		float one = 1;
+		float zero = 0;
+		cudnnBatchNormalizationBackward(cudnn_handle(),
+			CUDNN_BATCHNORM_SPATIAL,
+			&one,
+			&zero,
+			&one,
+			&one,
+			l.normDstTensorDescF16,
+			l.x_gpu,				// input
+			l.normDstTensorDescF16,
+			delta16,				// input
+			l.normDstTensorDescF16,
+			l.x_norm_gpu,			// output
+			l.normTensorDesc,
+			l.scales_gpu,			// output (should be FP32)
+			l.scale_updates_gpu,	// output (should be FP32)
+			l.bias_updates_gpu,		// output (should be FP32)
+			.00001,
+			l.mean_gpu,				// input (should be FP32)
+			l.variance_gpu);		// input (should be FP32)
+		copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1);
+		//cudaMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), cudaMemcpyDefault, get_cuda_stream());
+	}
+	else
+	{
+		//backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
+	}
+
 	// convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
 	// get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
 
@@ -305,7 +383,7 @@
 			l.dsrcTensorDesc,
 			input16);	// state.delta);
 
-		cuda_convert_f16_to_f32((float *)input16, input16_size, state.delta);		
+		cuda_convert_f16_to_f32(input16, input16_size, state.delta);
 
 		if (l.binary || l.xnor) swap_binary(&l);
 		if (l.xnor) gradient_array_ongpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, state.delta);

--
Gitblit v1.10.0
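
Note (illustrative, not part of the patch): the forward hunk saves the FP16
convolution output into l.x_gpu with copy_ongpu(l.outputs*l.batch / 2, ...).
The /2 works because copy_ongpu() moves FP32 elements while the buffer holds
FP16 values, so half as many elements cover the same bytes (assuming an even
element count). A minimal check of that arithmetic, with hypothetical layer
dimensions chosen only for illustration:

#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t outputs = 13 * 13 * 255;        /* hypothetical l.outputs          */
    size_t batch   = 64;                   /* hypothetical l.batch            */

    size_t n_half  = outputs * batch;      /* FP16 elements in output16       */
    size_t n_bytes = n_half * 2;           /* sizeof(half) == 2               */

    /* Copying n_half / 2 FP32 elements (4 bytes each) covers the same span. */
    assert((n_half / 2) * 4 == n_bytes);
    return 0;
}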