From 5b6be00d4b1ffd671c20c4c72d2239c924eaa3d4 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Thu, 23 Aug 2018 12:28:34 +0000
Subject: [PATCH] Added yolov3-tiny_xnor.cfg
---
src/cuda.c | 45 +++++++++++++++++++++++++++------------------
 1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/src/cuda.c b/src/cuda.c
index f19c92d..2284dad 100644
--- a/src/cuda.c
+++ b/src/cuda.c
@@ -29,23 +29,23 @@
//cudaDeviceSynchronize();
cudaError_t status2 = cudaGetLastError();
if (status != cudaSuccess)
- {
+ {
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error: %s", s);
error(buffer);
- }
+ }
if (status2 != cudaSuccess)
- {
+ {
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error Prev: %s\n", s);
assert(0);
snprintf(buffer, 256, "CUDA Error Prev: %s", s);
error(buffer);
- }
+ }
}
dim3 cuda_gridsize(size_t n){
@@ -61,16 +61,25 @@
return d;
}
-static cudaStream_t streamsArray[16]; // cudaStreamSynchronize( get_cuda_stream() );
+static cudaStream_t streamsArray[16]; // cudaStreamSynchronize( get_cuda_stream() );
static int streamInit[16] = { 0 };
cudaStream_t get_cuda_stream() {
- int i = cuda_get_device();
- if (!streamInit[i]) {
- cudaStreamCreate(&streamsArray[i]);
- streamInit[i] = 1;
- }
- return streamsArray[i];
+ int i = cuda_get_device();
+ if (!streamInit[i]) {
+ cudaError_t status = cudaStreamCreate(&streamsArray[i]);
+ //cudaError_t status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamNonBlocking);
+ if (status != cudaSuccess) {
+ printf(" cudaStreamCreate error: %d \n", status);
+ const char *s = cudaGetErrorString(status);
+ char buffer[256];
+ printf("CUDA Error: %s\n", s);
+ status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamDefault);
+ check_error(status);
+ }
+ streamInit[i] = 1;
+ }
+ return streamsArray[i];
}
@@ -83,7 +92,7 @@
if(!init[i]) {
cudnnCreate(&handle[i]);
init[i] = 1;
- cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream());
+ cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream());
}
return handle[i];
}
@@ -96,7 +105,7 @@
int i = cuda_get_device();
if(!init[i]) {
cublasCreate(&handle[i]);
- cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream());
+ cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream());
init[i] = 1;
}
return handle[i];
@@ -110,7 +119,7 @@
check_error(status);
if(x){
//status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
- status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
+ status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
check_error(status);
}
if(!x_gpu) error("Cuda malloc failed\n");
@@ -155,7 +164,7 @@
void cuda_free(float *x_gpu)
{
- //cudaStreamSynchronize(get_cuda_stream());
+ //cudaStreamSynchronize(get_cuda_stream());
cudaError_t status = cudaFree(x_gpu);
check_error(status);
}
@@ -164,7 +173,7 @@
{
size_t size = sizeof(float)*n;
//cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
- cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
+ cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
check_error(status);
}
@@ -172,9 +181,9 @@
{
size_t size = sizeof(float)*n;
//cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
- cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
+ cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
check_error(status);
- cudaStreamSynchronize(get_cuda_stream());
+ cudaStreamSynchronize(get_cuda_stream());
}
#else // GPU
--
Gitblit v1.10.0
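
The code below is not part of the patch itself; it is a minimal, standalone sketch of the technique the hunks above introduce: caching one CUDA stream per device in get_cuda_stream(), falling back to cudaStreamCreateWithFlags(..., cudaStreamDefault) when the initial cudaStreamCreate() fails, and issuing the host/device copies asynchronously on that cached stream. Identifiers such as MAX_DEVICES and get_stream_for_current_device() are invented for this illustration and do not appear in darknet; build with nvcc.

/*
 * Illustration only (not from the patch): per-device CUDA stream cache with
 * a fallback creation path, plus an asynchronous copy on the cached stream.
 * MAX_DEVICES and the function names below are assumptions for this sketch.
 */
#include <assert.h>
#include <stdio.h>
#include <cuda_runtime.h>

#define MAX_DEVICES 16

static cudaStream_t streams[MAX_DEVICES];
static int stream_init[MAX_DEVICES] = { 0 };

/* Return the cached stream for the current device, creating it on first use. */
static cudaStream_t get_stream_for_current_device(void)
{
    int dev = 0;
    cudaGetDevice(&dev);
    assert(dev >= 0 && dev < MAX_DEVICES);

    if (!stream_init[dev]) {
        cudaError_t status = cudaStreamCreate(&streams[dev]);
        if (status != cudaSuccess) {
            /* Same idea as the patched get_cuda_stream(): report the error,
               then retry with explicit default flags before giving up. */
            printf("cudaStreamCreate failed: %s\n", cudaGetErrorString(status));
            status = cudaStreamCreateWithFlags(&streams[dev], cudaStreamDefault);
            assert(status == cudaSuccess);
        }
        stream_init[dev] = 1;
    }
    return streams[dev];
}

int main(void)
{
    /* Asynchronous host-to-device copy on the cached stream, as cuda_push_array()
       does in the patch; the explicit synchronize (which the patch uses after the
       device-to-host copy in cuda_pull_array()) ensures the transfer has finished
       before the buffer is freed. */
    float host[4] = { 1.f, 2.f, 3.f, 4.f };
    float *dev_buf = NULL;
    cudaMalloc((void **)&dev_buf, sizeof(host));
    cudaMemcpyAsync(dev_buf, host, sizeof(host), cudaMemcpyHostToDevice,
                    get_stream_for_current_device());
    cudaStreamSynchronize(get_stream_for_current_device());
    cudaFree(dev_buf);
    return 0;
}

The same cached stream is what the cuDNN and cuBLAS hunks bind their handles to via cudnnSetStream() and cublasSetStream(), so kernels launched through those libraries and the async memcpys above are ordered on a single per-device stream.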