From 23cb35e6c8eae8b59fab161036ae3f417a55c8db Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Fri, 30 Mar 2018 11:46:51 +0000
Subject: [PATCH] Changed small_object
---
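Note (not part of the commit message): the hunks below add a lazily created, per-device
CUDA stream (get_cuda_stream) and route the cuDNN/cuBLAS handles and the host/device
copies in cuda.c onto it; only cuda_pull_array synchronizes the stream before returning.
A minimal caller sketch, assuming darknet's cuda.h keeps its existing declarations of
cuda_make_array, cuda_push_array, cuda_pull_array and cuda_free (the roundtrip_example
helper itself is hypothetical):

    /* Sketch only: push a buffer to the GPU and read it back, relying on the
     * cudaStreamSynchronize that cuda_pull_array now performs before returning. */
    #include <stdlib.h>
    #include "cuda.h"   /* assumed to declare the darknet helpers used below */

    void roundtrip_example(size_t n)
    {
        float *host = calloc(n, sizeof(float));
        float *dev = cuda_make_array(host, n);  /* async H2D copy queued on get_cuda_stream() */

        cuda_push_array(dev, host, n);          /* async H2D copy on the same stream */
        cuda_pull_array(dev, host, n);          /* async D2H copy + stream sync: host[] is valid here */

        cuda_free(dev);
        free(host);
    }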
src/cuda.c | 26 +++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
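Because cuda_push_array becomes asynchronous, a page-locked source buffer must stay valid
until the queued copy has run; the comment next to streamsArray hints at the required
synchronization call. A hedged sketch, assuming get_cuda_stream() is also exposed through
cuda.h (the patch only shows its definition in cuda.c); the push_and_release helper is
hypothetical:

    /* Sketch only: with a pinned (page-locked) source buffer the copy queued by
     * cuda_push_array is truly asynchronous, so wait on the shared stream before
     * releasing the buffer. */
    #include <stddef.h>
    #include <cuda_runtime.h>
    #include "cuda.h"   /* assumed to declare cuda_push_array and get_cuda_stream */

    void push_and_release(float *x_gpu, size_t n)
    {
        float *pinned = NULL;
        cudaMallocHost((void **)&pinned, n * sizeof(float));
        /* ... fill pinned[0..n-1] ... */

        cuda_push_array(x_gpu, pinned, n);         /* queued on get_cuda_stream(), returns immediately */
        cudaStreamSynchronize(get_cuda_stream());  /* the copy has consumed pinned[] after this point */
        cudaFreeHost(pinned);                      /* now safe to release the source buffer */
    }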
diff --git a/src/cuda.c b/src/cuda.c
index 1b51271..d8db851 100644
--- a/src/cuda.c
+++ b/src/cuda.c
@@ -61,6 +61,19 @@
return d;
}
+static cudaStream_t streamsArray[16];    // one stream per GPU, created on first use; wait on it with cudaStreamSynchronize( get_cuda_stream() )
+static int streamInit[16] = { 0 };
+
+cudaStream_t get_cuda_stream() {
+ int i = cuda_get_device();
+ if (!streamInit[i]) {
+ cudaStreamCreate(&streamsArray[i]);
+ streamInit[i] = 1;
+ }
+ return streamsArray[i];
+}
+
+
#ifdef CUDNN
cudnnHandle_t cudnn_handle()
{
@@ -70,6 +83,7 @@
if(!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
+ cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream()); // run cuDNN work on the shared per-device stream (status is not checked)
}
return handle[i];
}
@@ -82,6 +96,7 @@
int i = cuda_get_device();
if(!init[i]) {
cublasCreate(&handle[i]);
+ cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream()); // run cuBLAS work on the shared per-device stream (status is not checked)
init[i] = 1;
}
return handle[i];
@@ -94,7 +109,8 @@
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
if(x){
- status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ //status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
check_error(status);
}
if(!x_gpu) error("Cuda malloc failed\n");
@@ -139,6 +155,7 @@
void cuda_free(float *x_gpu)
{
+ //cudaStreamSynchronize(get_cuda_stream());
cudaError_t status = cudaFree(x_gpu);
check_error(status);
}
@@ -146,15 +163,18 @@
void cuda_push_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
- cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ //cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
check_error(status);
}
void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
- cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
+ //cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
+ cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
check_error(status);
+ cudaStreamSynchronize(get_cuda_stream()); // block until the D2H copy has finished so the caller can read x[]
}
#endif
--
Gitblit v1.10.0