void check_error(cudaError_t status)
{
    //cudaDeviceSynchronize();
    cudaError_t status2 = cudaGetLastError();
    if (status != cudaSuccess)
    {
        const char *s = cudaGetErrorString(status);
        printf("CUDA Error: %s\n", s);
        error("CUDA Error");
    }
    if (status2 != cudaSuccess)
    {
        const char *s = cudaGetErrorString(status2);
        printf("CUDA Error (prev): %s\n", s);
        error("CUDA Error");
    }
}

static cudaStream_t streamsArray[16];    // cudaStreamSynchronize( get_cuda_stream() );
static int streamInit[16] = { 0 };

// Lazily create one CUDA stream per device and reuse it for all async work.
cudaStream_t get_cuda_stream() {
    int i = cuda_get_device();
    if (!streamInit[i]) {
        cudaStreamCreate(&streamsArray[i]);
        streamInit[i] = 1;
    }
    return streamsArray[i];
}

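// Illustrative sketch, not part of the original file: in a .cu translation
// unit, a kernel launch can target this same per-device stream so it is
// ordered with the async copies below. The kernel name here is hypothetical.
//
// __global__ void scale_kernel(float *x, float s, size_t n)
// {
//     size_t i = blockIdx.x*blockDim.x + threadIdx.x;
//     if (i < n) x[i] *= s;
// }
//
// scale_kernel<<<(n + 255)/256, 256, 0, get_cuda_stream()>>>(x_gpu, 2.0f, n);
// check_error(cudaPeekAtLastError());
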
#ifdef CUDNN
cudnnHandle_t cudnn_handle()
{
    static int init[16] = { 0 };
    static cudnnHandle_t handle[16];
    int i = cuda_get_device();
    if(!init[i]) {
        cudnnCreate(&handle[i]);
        init[i] = 1;
        cudnnStatus_t status = cudnnSetStream(handle[i], get_cuda_stream());
    }
    return handle[i];
}
#endif  // CUDNN

cublasHandle_t blas_handle()
{
    static int init[16] = { 0 };
    static cublasHandle_t handle[16];
    int i = cuda_get_device();
    if(!init[i]) {
        cublasCreate(&handle[i]);
        cublasStatus_t status = cublasSetStream(handle[i], get_cuda_stream());
        init[i] = 1;
    }
    return handle[i];
}
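
// Sketch of intended usage (an assumption, not from the original file): because
// cublasSetStream() binds the handle to get_cuda_stream(), any cuBLAS call made
// through blas_handle() runs on that same stream. x_gpu, y_gpu, and n below are
// placeholders, e.g.:
//
//   const float alpha = 1.f;
//   cublasSaxpy(blas_handle(), n, &alpha, x_gpu, 1, y_gpu, 1);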

float *cuda_make_array(float *x, size_t n)
{
    float *x_gpu;
    size_t size = sizeof(float)*n;
    cudaError_t status = cudaMalloc((void **)&x_gpu, size);
    check_error(status);
    if(x){
        //status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
        status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
        check_error(status);
    }
    if(!x_gpu) error("Cuda malloc failed\n");
    return x_gpu;
}

void cuda_free(float *x_gpu)
{
    //cudaStreamSynchronize(get_cuda_stream());
    cudaError_t status = cudaFree(x_gpu);
    check_error(status);
}

void cuda_push_array(float *x_gpu, float *x, size_t n)
{
    size_t size = sizeof(float)*n;
    //cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
    cudaError_t status = cudaMemcpyAsync(x_gpu, x, size, cudaMemcpyHostToDevice, get_cuda_stream());
    check_error(status);
}

void cuda_pull_array(float *x_gpu, float *x, size_t n)
{
    size_t size = sizeof(float)*n;
    //cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
    cudaError_t status = cudaMemcpyAsync(x, x_gpu, size, cudaMemcpyDeviceToHost, get_cuda_stream());
    check_error(status);
    cudaStreamSynchronize(get_cuda_stream());
}

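// Illustrative round trip added for clarity (not part of the original file); the
// function name is hypothetical and <stdlib.h> is assumed for calloc/free.
void cuda_roundtrip_example(size_t n)
{
    float *x = (float*)calloc(n, sizeof(float));
    float *x_gpu = cuda_make_array(x, n);  // cudaMalloc + async H2D copy on get_cuda_stream()
    cuda_push_array(x_gpu, x, n);          // async host-to-device copy
    cuda_pull_array(x_gpu, x, n);          // async device-to-host copy, then stream sync
    cuda_free(x_gpu);
    free(x);
}
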
#endif