| | |
| | | //cudaDeviceSynchronize(); |
| | | cudaError_t status2 = cudaGetLastError(); |
| | | if (status != cudaSuccess) |
| | | { |
| | | { |
| | | const char *s = cudaGetErrorString(status); |
| | | char buffer[256]; |
| | | printf("CUDA Error: %s\n", s); |
| | | assert(0); |
| | | snprintf(buffer, 256, "CUDA Error: %s", s); |
| | | error(buffer); |
| | | } |
| | | } |
| | | if (status2 != cudaSuccess) |
| | | { |
| | | { |
| | | const char *s = cudaGetErrorString(status); |
| | | char buffer[256]; |
| | | printf("CUDA Error Prev: %s\n", s); |
| | | assert(0); |
| | | snprintf(buffer, 256, "CUDA Error Prev: %s", s); |
| | | error(buffer); |
| | | } |
| | | } |
| | | } |
| | | |
| | | dim3 cuda_gridsize(size_t n){ |
| | |
| | | return d; |
| | | } |
| | | |
// One lazily-created CUDA stream per device index (see get_cuda_stream()).
// Capacity is fixed at 16 devices.  To block on the current stream use:
// cudaStreamSynchronize( get_cuda_stream() );
static cudaStream_t streamsArray[16];
// streamInit[i] != 0 once streamsArray[i] has been created.
static int streamInit[16] = { 0 };
| | | |
| | | cudaStream_t get_cuda_stream() { |
| | | int i = cuda_get_device(); |
| | | if (!streamInit[i]) { |
| | | cudaError_t status = cudaStreamCreate(&streamsArray[i]); |
| | | //cudaError_t status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamNonBlocking); |
| | | if (status != cudaSuccess) { |
| | | printf(" cudaStreamCreate error: %d \n", status); |
| | | const char *s = cudaGetErrorString(status); |
| | | char buffer[256]; |
| | | printf("CUDA Error: %s\n", s); |
| | | status = cudaStreamCreateWithFlags(&streamsArray[i], cudaStreamDefault); |
| | | check_error(status); |
| | | } |
| | | streamInit[i] = 1; |
| | | } |
| | | return streamsArray[i]; |
| | | } |
| | | |
| | | |
| | | #ifdef CUDNN |
| | | cudnnHandle_t cudnn_handle() |
| | | { |
| | |
| | | if(!init[i]) { |
| | | cudnnCreate(&handle[i]); |
| | | init[i] = 1; |
| | | cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream()); |
| | | } |
| | | return handle[i]; |
| | | } |
| | |
| | | int i = cuda_get_device(); |
| | | if(!init[i]) { |
| | | cublasCreate(&handle[i]); |
| | | cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream()); |
| | | init[i] = 1; |
| | | } |
| | | return handle[i]; |
| | |
| | | cudaError_t status = cudaMalloc((void **)&x_gpu, size); |
| | | check_error(status); |
| | | if(x){ |
| | | status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); |
| | | //status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); |
| | | status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream()); |
| | | check_error(status); |
| | | } |
| | | if(!x_gpu) error("Cuda malloc failed\n"); |
| | |
| | | |
/*
** Releases device memory previously obtained from cuda_make_array /
** cudaMalloc.  Aborts via check_error() if the free fails.
*/
void cuda_free(float *x_gpu)
{
    //cudaStreamSynchronize(get_cuda_stream());
    check_error(cudaFree(x_gpu));
}
| | |
| | | void cuda_push_array(float *x_gpu, float *x, size_t n) |
| | | { |
| | | size_t size = sizeof(float)*n; |
| | | cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); |
| | | //cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice); |
| | | cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream()); |
| | | check_error(status); |
| | | } |
| | | |
| | | void cuda_pull_array(float *x_gpu, float *x, size_t n) |
| | | { |
| | | size_t size = sizeof(float)*n; |
| | | cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost); |
| | | //cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost); |
| | | cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream()); |
| | | check_error(status); |
| | | cudaStreamSynchronize(get_cuda_stream()); |
| | | } |
| | | |
| | | #endif |
| | | #else // GPU |
| | | #include "cuda.h" |
void cuda_set_device(int n) {}  // no-op stub for builds without GPU support
| | | #endif // GPU |