From 564877ad6a3f53d3d866b0015237d07f4af2eaa2 Mon Sep 17 00:00:00 2001
From: vinjn <vinjn.z@gmail.com>
Date: Sat, 07 Jul 2018 04:30:45 +0000
Subject: [PATCH] cuda.c - convert tabs to spaces and add CUDA stream/device helpers
---
src/cuda.c | 130 ++++++++++++++++++++++++++++++++++++------
1 file changed, 110 insertions(+), 20 deletions(-)
diff --git a/src/cuda.c b/src/cuda.c
index 8849fb1..5300906 100644
--- a/src/cuda.c
+++ b/src/cuda.c
@@ -5,19 +5,47 @@
#include "cuda.h"
#include "utils.h"
#include "blas.h"
+#include "assert.h"
#include <stdlib.h>
+#include <time.h>
+void cuda_set_device(int n)
+{
+ gpu_index = n;
+ cudaError_t status = cudaSetDevice(n);
+ check_error(status);
+}
+
+int cuda_get_device()
+{
+ int n = 0;
+ cudaError_t status = cudaGetDevice(&n);
+ check_error(status);
+ return n;
+}
void check_error(cudaError_t status)
{
+ //cudaDeviceSynchronize();
+ cudaError_t status2 = cudaGetLastError();
if (status != cudaSuccess)
{
const char *s = cudaGetErrorString(status);
char buffer[256];
printf("CUDA Error: %s\n", s);
+ assert(0);
snprintf(buffer, 256, "CUDA Error: %s", s);
error(buffer);
}
+ if (status2 != cudaSuccess)
+ {
+ const char *s = cudaGetErrorString(status2);
+ char buffer[256];
+ printf("CUDA Error Prev: %s\n", s);
+ assert(0);
+ snprintf(buffer, 256, "CUDA Error Prev: %s", s);
+ error(buffer);
+ }
}
dim3 cuda_gridsize(size_t n){
@@ -25,39 +53,94 @@
size_t x = k;
size_t y = 1;
if(x > 65535){
- x = ceil(sqrt(k));
- y = (n-1)/(x*BLOCK) + 1;
+ x = ceil(sqrt(k));
+ y = (n-1)/(x*BLOCK) + 1;
}
dim3 d = {x, y, 1};
//printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
return d;
}
-cublasHandle_t blas_handle()
-{
- static int init = 0;
- static cublasHandle_t handle;
- if(!init) {
- cublasCreate(&handle);
- init = 1;
- }
- return handle;
+static cudaStream_t streamsArray[16]; // cudaStreamSynchronize( get_cuda_stream() );
+static int streamInit[16] = { 0 };
+
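+// Lazily creates one CUDA stream per device index (up to 16) and caches it;
+// if cudaStreamCreate fails, retries with cudaStreamCreateWithFlags(cudaStreamDefault).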
+cudaStream_t get_cuda_stream() {
+ int i = cuda_get_device();
+ if (!streamInit[i]) {
+ cudaError_t status = cudaStreamCreate(&streamsArray[i]);
+ //cudaError_t status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamNonBlocking);
+ if (status != cudaSuccess) {
+ printf(" cudaStreamCreate error: %d \n", status);
+ const char *s = cudaGetErrorString(status);
+ char buffer[256];
+ printf("CUDA Error: %s\n", s);
+ status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamDefault);
+ check_error(status);
+ }
+ streamInit[i] = 1;
+ }
+ return streamsArray[i];
}
-float *cuda_make_array(float *x, int n)
+
+#ifdef CUDNN
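+// One cuDNN handle per device, created on first use and bound to the shared stream.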
+cudnnHandle_t cudnn_handle()
+{
+ static int init[16] = {0};
+ static cudnnHandle_t handle[16];
+ int i = cuda_get_device();
+ if(!init[i]) {
+ cudnnCreate(&handle[i]);
+ init[i] = 1;
+ cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream());
+ }
+ return handle[i];
+}
+#endif
+
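+// One cuBLAS handle per device, bound to the shared CUDA stream on first use.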
+cublasHandle_t blas_handle()
+{
+ static int init[16] = {0};
+ static cublasHandle_t handle[16];
+ int i = cuda_get_device();
+ if(!init[i]) {
+ cublasCreate(&handle[i]);
+ cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream());
+ init[i] = 1;
+ }
+ return handle[i];
+}
+
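+// Allocates n floats on the current device; if x is non-NULL, the host data is
+// copied asynchronously on the shared stream.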
+float *cuda_make_array(float *x, size_t n)
{
float *x_gpu;
size_t size = sizeof(float)*n;
cudaError_t status = cudaMalloc((void **)&x_gpu, size);
check_error(status);
if(x){
- status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ //status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
check_error(status);
}
+ if(!x_gpu) error("Cuda malloc failed\n");
return x_gpu;
}
-float cuda_compare(float *x_gpu, float *x, int n, char *s)
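+// Fills x_gpu with n uniform random floats from a per-device cuRAND generator
+// seeded once with the current time.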
+void cuda_random(float *x_gpu, size_t n)
+{
+ static curandGenerator_t gen[16];
+ static int init[16] = {0};
+ int i = cuda_get_device();
+ if(!init[i]){
+ curandCreateGenerator(&gen[i], CURAND_RNG_PSEUDO_DEFAULT);
+ curandSetPseudoRandomGeneratorSeed(gen[i], time(0));
+ init[i] = 1;
+ }
+ curandGenerateUniform(gen[i], x_gpu, n);
+ check_error(cudaPeekAtLastError());
+}
+
+float cuda_compare(float *x_gpu, float *x, size_t n, char *s)
{
float *tmp = calloc(n, sizeof(float));
cuda_pull_array(x_gpu, tmp, n);
@@ -70,7 +153,7 @@
return err;
}
-int *cuda_make_int_array(int n)
+int *cuda_make_int_array(size_t n)
{
int *x_gpu;
size_t size = sizeof(int)*n;
@@ -81,22 +164,29 @@
void cuda_free(float *x_gpu)
{
+ //cudaStreamSynchronize(get_cuda_stream());
cudaError_t status = cudaFree(x_gpu);
check_error(status);
}
-void cuda_push_array(float *x_gpu, float *x, int n)
+void cuda_push_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
- cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ //cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
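+ // asynchronous host-to-device copy on the shared stream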
+ cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
check_error(status);
}
-void cuda_pull_array(float *x_gpu, float *x, int n)
+void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
size_t size = sizeof(float)*n;
- cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
+ //cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
+ cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
check_error(status);
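+ // block until the asynchronous device-to-host copy completes before the caller reads x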
+ cudaStreamSynchronize(get_cuda_stream());
}
-#endif
+#else // GPU
+#include "cuda.h"
+void cuda_set_device(int n) {}
+#endif // GPU
--
Gitblit v1.10.0