From a723e1c62a27aeb39aaf7fcdeb3beb4e89fba32d Mon Sep 17 00:00:00 2001
From: Alexey <AlexeyAB@users.noreply.github.com>
Date: Wed, 15 Aug 2018 20:52:09 +0000
Subject: [PATCH] Merge pull request #766 from HotChick91/AlexeyAB-mask

---
 src/cuda.c |   54 +++++++++++++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/src/cuda.c b/src/cuda.c
index 5300906..2284dad 100644
--- a/src/cuda.c
+++ b/src/cuda.c
@@ -29,23 +29,23 @@
     //cudaDeviceSynchronize();
     cudaError_t status2 = cudaGetLastError();
     if (status != cudaSuccess)
-    {   
+    {
         const char *s = cudaGetErrorString(status);
         char buffer[256];
         printf("CUDA Error: %s\n", s);
         assert(0);
         snprintf(buffer, 256, "CUDA Error: %s", s);
         error(buffer);
-    } 
+    }
     if (status2 != cudaSuccess)
-    {   
+    {
         const char *s = cudaGetErrorString(status2);
         char buffer[256];
         printf("CUDA Error Prev: %s\n", s);
         assert(0);
         snprintf(buffer, 256, "CUDA Error Prev: %s", s);
         error(buffer);
-    } 
+    }
 }
 
 dim3 cuda_gridsize(size_t n){
@@ -61,25 +61,25 @@
     return d;
 }
 
-static cudaStream_t streamsArray[16];	// cudaStreamSynchronize( get_cuda_stream() );
+static cudaStream_t streamsArray[16];    // cudaStreamSynchronize( get_cuda_stream() );
 static int streamInit[16] = { 0 };
 
 cudaStream_t get_cuda_stream() {
-	int i = cuda_get_device();
-	if (!streamInit[i]) {
-		cudaError_t status = cudaStreamCreate(&streamsArray[i]);
-		//cudaError_t status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamNonBlocking);
-		if (status != cudaSuccess) {
-			printf(" cudaStreamCreate error: %d \n", status);
-			const char *s = cudaGetErrorString(status);
-			char buffer[256];
-			printf("CUDA Error: %s\n", s);
-			status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamDefault);
-			check_error(status);
-		}
-		streamInit[i] = 1;
-	}
-	return streamsArray[i];
+    int i = cuda_get_device();
+    if (!streamInit[i]) {
+        cudaError_t status = cudaStreamCreate(&streamsArray[i]);
+        //cudaError_t status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamNonBlocking);
+        if (status != cudaSuccess) {
+            printf(" cudaStreamCreate error: %d \n", status);
+            const char *s = cudaGetErrorString(status);
+            char buffer[256];
+            printf("CUDA Error: %s\n", s);
+            status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamDefault);
+            check_error(status);
+        }
+        streamInit[i] = 1;
+    }
+    return streamsArray[i];
 }
 
 
@@ -92,7 +92,7 @@
     if(!init[i]) {
         cudnnCreate(&handle[i]);
         init[i] = 1;
-		cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream());
+        cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream());
     }
     return handle[i];
 }
@@ -105,7 +105,7 @@
     int i = cuda_get_device();
     if(!init[i]) {
         cublasCreate(&handle[i]);
-		cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream());
+        cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream());
         init[i] = 1;
     }
     return handle[i];
@@ -119,7 +119,7 @@
     check_error(status);
     if(x){
         //status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
-		status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
+        status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
         check_error(status);
     }
     if(!x_gpu) error("Cuda malloc failed\n");
@@ -164,7 +164,7 @@
 
 void cuda_free(float *x_gpu)
 {
-	//cudaStreamSynchronize(get_cuda_stream());
+    //cudaStreamSynchronize(get_cuda_stream());
     cudaError_t status = cudaFree(x_gpu);
     check_error(status);
 }
@@ -173,7 +173,7 @@
 {
     size_t size = sizeof(float)*n;
     //cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
-	cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
+    cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
     check_error(status);
 }
 
@@ -181,9 +181,9 @@
 {
     size_t size = sizeof(float)*n;
     //cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
-	cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
+    cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
     check_error(status);
-	cudaStreamSynchronize(get_cuda_stream());
+    cudaStreamSynchronize(get_cuda_stream());
 }
 
 #else // GPU

--
Gitblit v1.10.0
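
Note (illustration only, not part of the patch): the helpers touched above -- get_cuda_stream(), cuda_push_array() and cuda_pull_array() -- queue cudaMemcpyAsync() transfers on a per-device stream and call cudaStreamSynchronize() before the host reads the result. Below is a minimal, self-contained sketch of that pattern; the locally created stream, the CHECK macro and the file name are stand-ins for darknet's get_cuda_stream()/check_error(), not identifiers from the repository.

/* stream_demo.cu -- illustrative sketch only.
 * Build e.g.: nvcc stream_demo.cu -o stream_demo */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        fprintf(stderr, "CUDA Error: %s\n", cudaGetErrorString(err_)); \
        exit(1); \
    } \
} while (0)

int main(void)
{
    const size_t n = 1 << 20;
    const size_t size = n * sizeof(float);

    /* Pageable host memory; truly overlapped copies would need pinned
     * memory allocated with cudaMallocHost(). */
    float *host = (float*)malloc(size);
    float *dev = NULL;
    for (size_t i = 0; i < n; ++i) host[i] = (float)i;

    cudaStream_t stream;                  /* stands in for get_cuda_stream() */
    CHECK(cudaStreamCreate(&stream));
    CHECK(cudaMalloc((void**)&dev, size));

    /* Host-to-device copy queued on the stream (the cuda_push_array pattern). */
    CHECK(cudaMemcpyAsync(dev, host, size, cudaMemcpyHostToDevice, stream));

    /* Device-to-host copy on the same stream (the cuda_pull_array pattern)... */
    CHECK(cudaMemcpyAsync(host, dev, size, cudaMemcpyDeviceToHost, stream));
    /* ...which must finish before the host touches the buffer. */
    CHECK(cudaStreamSynchronize(stream));

    printf("host[42] = %f\n", host[42]);

    CHECK(cudaFree(dev));
    CHECK(cudaStreamDestroy(stream));
    free(host);
    return 0;
}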