
    /* Dimensions of the pooled output for this stride. */
    int h = (layer.h-1)/layer.stride + 1;
    int w = (layer.w-1)/layer.stride + 1;
    int c = layer.c;
    cl_setup();
    cl_kernel kernel = get_forward_kernel();
    cl_command_queue queue = cl.queue;
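
    /*
     * Argument binding is missing from this listing, but the kernel's
     * arguments must be set before it is enqueued or the enqueue below
     * fails. A minimal sketch, assuming the forward kernel takes the
     * input dimensions, the stride, this function's cl_mem input
     * parameter (the name "input" is assumed; the signature is not
     * shown here), and an output buffer assumed to be layer.output_cl.
     */
    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.h), (void*) &layer.h);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.w), (void*) &layer.w);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.c), (void*) &layer.c);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.stride), (void*) &layer.stride);
    cl.error = clSetKernelArg(kernel, i++, sizeof(input), (void*) &input);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.output_cl), (void*) &layer.output_cl);
    check_error(cl);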

    /* One work-item per pooled output element across the batch. */
    const size_t global_size[] = {h*w*c*layer.batch};

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
    check_error(cl);
}

void backward_maxpool_layer_gpu(maxpool_layer layer, cl_mem delta)
{
    cl_setup();
    cl_kernel kernel = get_backward_kernel();
    cl_command_queue queue = cl.queue;
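
    /*
     * Argument binding is missing here as well. A sketch under assumed
     * names: the backward kernel is taken to need the layer dimensions,
     * the stride, the incoming delta buffer, and the buffer the gradient
     * is routed back into (layer.prev_delta_cl is an assumed name, not
     * part of this listing).
     */
    cl_uint i = 0;
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.h), (void*) &layer.h);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.w), (void*) &layer.w);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.c), (void*) &layer.c);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.stride), (void*) &layer.stride);
    cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*) &delta);
    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.prev_delta_cl), (void*) &layer.prev_delta_cl);
    check_error(cl);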

    /* One work-item per input element of the layer across the batch. */
    const size_t global_size[] = {layer.h*layer.w*layer.c*layer.batch};

    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
    check_error(cl);
}