From e6c97a53a7b5ac4014d30d236ea2bf5adb4bb521 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Tue, 07 Aug 2018 20:19:50 +0000
Subject: [PATCH] Convolutional layer cuDNN fixes

---
 src/convolutional_layer.c |  206 +++++++++++++++++++++++++++------------------------
 1 file changed, 110 insertions(+), 96 deletions(-)

diff --git a/src/convolutional_layer.c b/src/convolutional_layer.c
index 91c5b3c..554bd42 100644
--- a/src/convolutional_layer.c
+++ b/src/convolutional_layer.c
@@ -141,65 +141,67 @@
 {
 
 #ifdef CUDNN_HALF
-	// TRUE_HALF_CONFIG is only supported on architectures with true fp16 support (compute capability 5.3 and 6.0): 
-	//   Tegra X1, Jetson TX1, DRIVE CX, DRIVE PX, Quadro GP100, Tesla P100
-	// PSEUDO_HALF_CONFIG is required for Tensor Cores - our case!
-	const cudnnDataType_t data_type = CUDNN_DATA_HALF;
+    // TRUE_HALF_CONFIG is only supported on architectures with true fp16 support (compute capability 5.3 and 6.0): 
+    //   Tegra X1, Jetson TX1, DRIVE CX, DRIVE PX, Quadro GP100, Tesla P100
+    // PSEUDO_HALF_CONFIG is required for Tensor Cores - which is what we use here
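+    // (PSEUDO_HALF_CONFIG = FP16 input/filter/output tensors with FP32 compute: the tensor
+    //  descriptors below get CUDNN_DATA_HALF while cudnnSetConvolution2dDescriptor() keeps CUDNN_DATA_FLOAT)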
+    const cudnnDataType_t data_type = CUDNN_DATA_HALF;
 #else
-	cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
+    cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
 #endif
 
 #if(CUDNN_MAJOR >= 7)
-	// Tensor Core uses CUDNN_TENSOR_OP_MATH instead of CUDNN_DEFAULT_MATH
-	// For *_ALGO_WINOGRAD_NONFUSED can be used CUDNN_DATA_FLOAT
-	// otherwise Input, Filter and Output descriptors (xDesc, yDesc, wDesc, dxDesc, dyDesc and dwDesc as applicable) have dataType = CUDNN_DATA_HALF
-	// Three techniques for training using Mixed-precision: https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/
-	// 1. Accumulation into FP32
-	// 2. Loss Scaling - required only for: activation gradients. We do not use.
-	// 3. FP32 Master Copy of Weights
-	// More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
-	cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH);
+    // Tensor Core uses CUDNN_TENSOR_OP_MATH instead of CUDNN_DEFAULT_MATH
+    // CUDNN_DATA_FLOAT can be used with the *_ALGO_WINOGRAD_NONFUSED algorithms;
+    // otherwise the input, filter and output descriptors (xDesc, yDesc, wDesc, dxDesc, dyDesc and dwDesc as applicable) must have dataType = CUDNN_DATA_HALF
+    // Three techniques for training using Mixed-precision: https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/
+    // 1. Accumulation into FP32
+    // 2. Loss Scaling - required only for activation gradients; not used here
+    // 3. FP32 Master Copy of Weights
+    // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
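+    // In this code: (1) comes from the FP32 compute type passed to cudnnSetConvolution2dDescriptor() below,
+    // and (3) appears to correspond to keeping l->weights_gpu in FP32 next to the FP16 copy l->weights_gpu16 (see the allocation below)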
+    cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH);
 #endif
 
-	// INT8_CONFIG, INT8_EXT_CONFIG, INT8x4_CONFIG and INT8x4_EXT_CONFIG are only supported 
-	//   on architectures with DP4A support (compute capability 6.1 and later).
-	//cudnnDataType_t data_type = CUDNN_DATA_INT8;
+    // INT8_CONFIG, INT8_EXT_CONFIG, INT8x4_CONFIG and INT8x4_EXT_CONFIG are only supported 
+    //   on architectures with DP4A support (compute capability 6.1 and later).
+    //cudnnDataType_t data_type = CUDNN_DATA_INT8;
 
-	// backward delta
+    // backward delta
     cudnnSetTensor4dDescriptor(l->dsrcTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->c, l->h, l->w);
     cudnnSetTensor4dDescriptor(l->ddstTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w);
     cudnnSetFilter4dDescriptor(l->dweightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size);
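+    // (dsrcTensorDesc / ddstTensorDesc / dweightDesc describe the gradients w.r.t. the layer input, output and weights)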
 
-	// forward
+    // forward
     cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->c, l->h, l->w);
     cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w);
     cudnnSetFilter4dDescriptor(l->weightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size);
 
-	// batch norm
-	cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1);
-	cudnnSetTensor4dDescriptor(l->normDstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
-#if(CUDNN_MAJOR >= 6)
-	cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);	// cudnn >= 6.0
-#else
-	cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION);	// cudnn 5.1
-#endif
-	int forward_algo = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST;
-	int backward_algo = CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST;
-	int backward_filter = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST;
-	if (cudnn_preference == cudnn_smallest) 
-	{
-		forward_algo = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
-		backward_algo = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
-		backward_filter = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE;
-		printf(" CUDNN-slow ");
-	}
+    // batch norm
+    cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1);
+    cudnnSetTensor4dDescriptor(l->normDstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
 
-	cudnnGetConvolutionForwardAlgorithm(cudnn_handle(),
+    cudnnSetTensor4dDescriptor(l->normDstTensorDescF16, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w);
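+    // normDstTensorDescF16 mirrors normDstTensorDesc but in data_type (CUDNN_DATA_HALF under CUDNN_HALF),
+    // presumably so the convolution output can be handled in FP16 while batch norm itself runs in FP32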
+#if(CUDNN_MAJOR >= 6)
+    cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);    // cudnn >= 6.0
+#else
+    cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION);    // cudnn 5.1
+#endif
+    int forward_algo = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST;
+    int backward_algo = CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST;
+    int backward_filter = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST;
+    if (cudnn_preference == cudnn_smallest) 
+    {
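+        // the *_NO_WORKSPACE preferences make cudnnGet*Algorithm() return the fastest algorithm that needs no workspace memory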
+        forward_algo = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
+        backward_algo = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
+        backward_filter = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE;
+        printf(" CUDNN-slow ");
+    }
+
+    cudnnGetConvolutionForwardAlgorithm(cudnn_handle(),
             l->srcTensorDesc,
             l->weightDesc,
             l->convDesc,
             l->dstTensorDesc,
-			forward_algo,
+            forward_algo,
             0,
             &l->fw_algo);
     cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle(),
@@ -207,7 +209,7 @@
             l->ddstTensorDesc,
             l->convDesc,
             l->dsrcTensorDesc,
-			backward_algo,
+            backward_algo,
             0,
             &l->bd_algo);
     cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle(),
@@ -215,41 +217,41 @@
             l->ddstTensorDesc,
             l->convDesc,
             l->dweightDesc,
-			backward_filter,
+            backward_filter,
             0,
             &l->bf_algo);
 
-	if (data_type == CUDNN_DATA_HALF) 
-	{
-		// HALF-16 if(data_type == CUDNN_DATA_HALF)
-		l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
-		l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
-		l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
+    if (data_type == CUDNN_DATA_HALF) 
+    {
+        // HALF-16 if(data_type == CUDNN_DATA_HALF)
+        l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
+        l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
+        l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;
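+        // in cuDNN 7 these three algorithms (and the WINOGRAD_NONFUSED ones below) can run on Tensor Cores when CUDNN_TENSOR_OP_MATH is set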
 
-		// FLOAT-32 if(data_type == CUDNN_DATA_FLOAT)
-		//l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED;
-		//l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED;
-		//l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED;
+        // FLOAT-32 if(data_type == CUDNN_DATA_FLOAT)
+        //l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED;
+        //l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED;
+        //l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED;
 
-		int fw = 0, bd = 0, bf = 0;
-		if (l->fw_algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) fw = 1;
-			//printf("Tensor Cores - Forward enabled: l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM \n");
-		if (l->fw_algo == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED) fw = 2;
-			//printf("Tensor Cores - Forward enabled: l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED \n");
+        int fw = 0, bd = 0, bf = 0;
+        if (l->fw_algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) fw = 1;
+            //printf("Tensor Cores - Forward enabled: l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM \n");
+        if (l->fw_algo == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED) fw = 2;
+            //printf("Tensor Cores - Forward enabled: l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED \n");
 
-		if (l->bd_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1) bd = 1;
-			//printf("Tensor Cores - Backward-data enabled: l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1  \n");
-		if (l->bd_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED) bd = 2;
-			//printf("Tensor Cores - Backward-data enabled: l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED \n");
+        if (l->bd_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1) bd = 1;
+            //printf("Tensor Cores - Backward-data enabled: l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1  \n");
+        if (l->bd_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED) bd = 2;
+            //printf("Tensor Cores - Backward-data enabled: l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED \n");
 
-		if (l->bf_algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1) bf = 1;
-			//printf("Tensor Cores - Backward-filter enabled: l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1   \n");
-		if (l->bf_algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED) bf = 2;
-			//printf("Tensor Cores - Backward-filter enabled: l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED \n");
+        if (l->bf_algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1) bf = 1;
+            //printf("Tensor Cores - Backward-filter enabled: l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1   \n");
+        if (l->bf_algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED) bf = 2;
+            //printf("Tensor Cores - Backward-filter enabled: l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED \n");
 
-		if (fw == 2 && bd == 2 && bf == 2) printf("TF ");
-		else if (fw == 1 && bd == 1 && bf == 1) printf("TH ");
-	}
+        if (fw == 2 && bd == 2 && bf == 2) printf("TF ");        // all three stages use WINOGRAD_NONFUSED - the FLOAT-32 Tensor Core path
+        else if (fw == 1 && bd == 1 && bf == 1) printf("TH ");   // all three stages use the forced HALF-16 Tensor Core algorithms
+    }
 }
 #endif
 #endif
@@ -342,8 +344,8 @@
 
         l.weights_gpu = cuda_make_array(l.weights, c*n*size*size);
 #ifdef CUDNN_HALF
-		l.weights_gpu16 = cuda_make_array(NULL, c*n*size*size / 2); //cuda_make_array(l.weights, c*n*size*size / 2);
-		l.weight_updates_gpu16 = cuda_make_array(NULL, c*n*size*size / 2); //cuda_make_array(l.weight_updates, c*n*size*size / 2);
+        l.weights_gpu16 = cuda_make_array(NULL, c*n*size*size / 2); //cuda_make_array(l.weights, c*n*size*size / 2);
+        l.weight_updates_gpu16 = cuda_make_array(NULL, c*n*size*size / 2); //cuda_make_array(l.weight_updates, c*n*size*size / 2);
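+        // assumption: cuda_make_array(NULL, n) allocates n*sizeof(float) bytes, so n/2 floats give exactly n 2-byte half-precision slots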
 #endif
         l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size);
 
@@ -377,9 +379,10 @@
             l.x_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
             l.x_norm_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
         }
-#ifdef CUDNN		
-		cudnnCreateTensorDescriptor(&l.normDstTensorDesc);
-		cudnnCreateTensorDescriptor(&l.normTensorDesc);
+#ifdef CUDNN
+        cudnnCreateTensorDescriptor(&l.normDstTensorDesc);
+        cudnnCreateTensorDescriptor(&l.normDstTensorDescF16);
+        cudnnCreateTensorDescriptor(&l.normTensorDesc);
         cudnnCreateTensorDescriptor(&l.srcTensorDesc);
         cudnnCreateTensorDescriptor(&l.dstTensorDesc);
         cudnnCreateFilterDescriptor(&l.weightDesc);
@@ -394,7 +397,9 @@
     l.workspace_size = get_workspace_size(l);
     l.activation = activation;
 
-    fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
+    //fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
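+    // each of the out_h*out_w output pixels needs n * size*size * c multiply-adds, i.e. 2*n*size*size*c FLOPs; /1e9 converts to billions (BFLOPs)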
+    l.bflops = (2.0 * l.n * l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
+    fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d %5.3f BF\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
 
     return l;
 }
@@ -440,8 +445,8 @@
 
 void resize_convolutional_layer(convolutional_layer *l, int w, int h)
 {
-	int old_w = l->w;
-	int old_h = l->h;
+    int old_w = l->w;
+    int old_h = l->h;
     l->w = w;
     l->h = h;
     int out_w = convolutional_out_width(*l);
@@ -460,22 +465,31 @@
         l->x_norm  = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));
     }
 
+    if (l->xnor) {
+        //l->binary_input = realloc(l->binary_input, l->batch*l->inputs*sizeof(float));
+    }
+
 #ifdef GPU
-	if (old_w < w || old_h < h) {
-		cuda_free(l->delta_gpu);
-		cuda_free(l->output_gpu);
+    if (old_w < w || old_h < h) {
+        cuda_free(l->delta_gpu);
+        cuda_free(l->output_gpu);
 
-		l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
-		l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+        l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
+        l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
 
-		if (l->batch_normalize) {
-			cuda_free(l->x_gpu);
-			cuda_free(l->x_norm_gpu);
+        if (l->batch_normalize) {
+            cuda_free(l->x_gpu);
+            cuda_free(l->x_norm_gpu);
 
-			l->x_gpu = cuda_make_array(l->output, l->batch*l->outputs);
-			l->x_norm_gpu = cuda_make_array(l->output, l->batch*l->outputs);
-		}
-	}
+            l->x_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+            l->x_norm_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+        }
+
+        if (l->xnor) {
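+            // the binarized-input scratch buffer must match the new input size; its contents need not survive the resize, hence free + re-alloc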
+            cuda_free(l->binary_input_gpu);
+            l->binary_input_gpu = cuda_make_array(0, l->inputs*l->batch);
+        }
+    }
 #ifdef CUDNN
     cudnn_convolutional_setup(l, cudnn_fastest);
 #endif
@@ -483,15 +497,15 @@
     l->workspace_size = get_workspace_size(*l);
 
 #ifdef CUDNN
-	// check for excessive memory consumption 
-	size_t free_byte;
-	size_t total_byte;
-	check_error(cudaMemGetInfo(&free_byte, &total_byte));
-	if (l->workspace_size > free_byte || l->workspace_size >= total_byte / 2) {
-		printf(" used slow CUDNN algo without Workspace! \n");
-		cudnn_convolutional_setup(l, cudnn_smallest);
-		l->workspace_size = get_workspace_size(*l);
-	}
+    // check for excessive memory consumption 
+    size_t free_byte;
+    size_t total_byte;
+    check_error(cudaMemGetInfo(&free_byte, &total_byte));
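+    // if the chosen algorithms need more workspace than is free (or more than half of total VRAM), redo the setup with zero-workspace algorithms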
+    if (l->workspace_size > free_byte || l->workspace_size >= total_byte / 2) {
+        printf(" using slow CUDNN algo without Workspace! Need memory: %zu, available: %zu\n", l->workspace_size, (free_byte < total_byte/2) ? free_byte : total_byte/2);
+        cudnn_convolutional_setup(l, cudnn_smallest);
+        l->workspace_size = get_workspace_size(*l);
+    }
 #endif
 }
 

--
Gitblit v1.10.0