From 9bae70b22549b68f5cdeece8b6c3b3de00c22714 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Mon, 16 Apr 2018 23:51:11 +0000
Subject: [PATCH] Accelerated by another 5% by using FP16/FP32 batch-norm for Tensor Cores.

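When CUDNN_HALF is defined, batch normalization now runs directly on the
FP16 convolution output instead of converting it to FP32 first:
cudnnBatchNormalizationForwardTraining() and cudnnBatchNormalizationBackward()
take FP16 tensors (l.normDstTensorDescF16) for x, y, dy and dx, while scale,
bias, the rolling statistics and the saved mean / inverse variance remain
FP32, as cuDNN requires for half-precision data. At detection time the
rolling statistics are applied in FP32 after the FP16->FP32 conversion, and
the old FP32 batch-norm path is compiled out under CUDNN_HALF so the output
is not normalized twice.

FP16 buffers are moved with copy_ongpu() by halving the element count, since
that kernel copies 32-bit elements: N half values (2*N bytes) occupy the same
memory as N/2 floats. A minimal sketch of the idea (src16/dst16 are
placeholder half* device buffers):

    int n = l.outputs * l.batch;                       // number of FP16 elements
    copy_ongpu(n / 2, (float *)src16, 1, (float *)dst16, 1);  // same bytes as copying n halfs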
---
 src/convolutional_kernels.cu |   97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 92 insertions(+), 5 deletions(-)

diff --git a/src/convolutional_kernels.cu b/src/convolutional_kernels.cu
index 603d531..324fc50 100644
--- a/src/convolutional_kernels.cu
+++ b/src/convolutional_kernels.cu
@@ -169,7 +169,55 @@
 		l.dstTensorDesc,
 		output16);
 	
-	cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+
+	if (l.batch_normalize) 
+	{		
+		if (state.train) // Training
+		{
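+			// stash the FP16 conv output in l.x_gpu for the backward pass;
+			// copy_ongpu() moves 32-bit elements, so N FP16 values are copied as N/2 floats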
+			copy_ongpu(l.outputs*l.batch / 2, output16, 1, l.x_gpu, 1);
+			//cudaMemcpyAsync(l.x_gpu, output16, l.outputs*l.batch*sizeof(half), cudaMemcpyDefault, get_cuda_stream());
+			float one = 1;
+			float zero = 0;
+			// Batch-normalization can still take FP16 inputs and outputs, saving half the bandwidth
+			// compared to FP32; it's just that the statistics and value adjustment should be done in FP32.
+			cudnnBatchNormalizationForwardTraining(cudnn_handle(),
+				CUDNN_BATCHNORM_SPATIAL,
+				&one,
+				&zero,
+				l.normDstTensorDescF16,
+				l.x_gpu,			// input
+				l.normDstTensorDescF16,
+				output16,			// output
+				l.normTensorDesc,
+				l.scales_gpu,
+				l.biases_gpu,
+				.01,					// exponentialAverageFactor for the rolling statistics
+				l.rolling_mean_gpu,		// in/out: running mean (should be FP32)
+				l.rolling_variance_gpu,	// in/out: running variance (should be FP32)
+				.00001,					// epsilon
+				l.mean_gpu,				// output: saved mean (should be FP32)
+				l.variance_gpu);		// output: saved inverse variance (should be FP32)
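+			// the saved mean/inverse variance are reused by cudnnBatchNormalizationBackward() in the backward pass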
+
+			cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+			//forward_batchnorm_layer_gpu(l, state);
+		}
+		else // Detection (inference)
+		{
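+			// inference: normalize in FP32 with the rolling statistics, then scale and add bias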
+			cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+			normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+			scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+			add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
+		}
+	}
+	else // BIAS only
+	{
+		cuda_convert_f16_to_f32(output16, output16_size, l.output_gpu);
+		add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
+	}	
 
 #else
 
@@ -186,7 +234,7 @@
                 &one,
                 l.dstTensorDesc,
                 l.output_gpu);
-#endif
+#endif	// CUDNN_HALF
 
 
 #else
@@ -203,12 +251,14 @@
     }
 #endif
 
+#ifndef CUDNN_HALF
     if (l.batch_normalize) {
         forward_batchnorm_layer_gpu(l, state);
 	}
 	else {
 		add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
 	}
+#endif // no CUDNN_HALF
 
     activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
     //if(l.dot > 0) dot_error_gpu(l);
@@ -222,12 +272,13 @@
 
     backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
 
+#ifndef CUDNN_HALF
     if(l.batch_normalize){
         backward_batchnorm_layer_gpu(l, state);
-        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.x_gpu, 1, l.delta_gpu, 1);
     } else {
-        //axpy_ongpu(l.outputs*l.batch, -state.net.decay, l.output_gpu, 1, l.delta_gpu, 1);
+		//backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
     }
+#endif // no CUDNN_HALF
     float *original_input = state.input;
 
     if(l.xnor) state.input = l.binary_input_gpu;
@@ -256,7 +307,43 @@
 
 	cuda_convert_f32_to_f16(state.input, input16_size, input16);
 	cuda_convert_f32_to_f16(l.delta_gpu, delta16_size, delta16);
-	
+
+	if (l.batch_normalize) {
+		//if (!state.train) {
+		//	l.mean_gpu = l.rolling_mean_gpu;
+		//	l.variance_gpu = l.rolling_variance_gpu;
+		//}
+		float one = 1;
+		float zero = 0;
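+		// x, dy and dx tensors stay FP16; dScale/dBias gradients are accumulated in FP32 (beta_param = &one)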
+		cudnnBatchNormalizationBackward(cudnn_handle(),
+			CUDNN_BATCHNORM_SPATIAL,
+			&one,
+			&zero,
+			&one,
+			&one,
+			l.normDstTensorDescF16,
+			l.x_gpu,				// input
+			l.normDstTensorDescF16,
+			delta16,				// input
+			l.normDstTensorDescF16,
+			l.x_norm_gpu,			// output
+			l.normTensorDesc,
+			l.scales_gpu,			// input (should be FP32)
+			l.scale_updates_gpu,	// output (should be FP32)
+			l.bias_updates_gpu,		// output (should be FP32)
+			.00001,					// epsilon (same as in the forward pass)
+			l.mean_gpu,				// input: saved mean from the forward pass (should be FP32)
+			l.variance_gpu);		// input: saved inverse variance from the forward pass (should be FP32)
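+		// copy dx (FP16, produced in l.x_norm_gpu) back into delta16; N FP16 values count as N/2 floats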
+		copy_ongpu(l.outputs*l.batch / 2, l.x_norm_gpu, 1, delta16, 1);
+		//cudaMemcpyAsync(delta16, l.x_norm_gpu, l.outputs*l.batch * sizeof(half), cudaMemcpyDefault, get_cuda_stream());
+	}
+	else
+	{
+		//backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
+	}
+
 	// convert input: state.input (x), l.delta_gpu (y) from fp32 to fp16
 	// get output: l.weight_updates_gpu (dw) and convert it to fp32 (ONLY if it is fp16)
 

--
Gitblit v1.10.0