
#ifdef GPU
    net.input_gpu = calloc(1, sizeof(float *));
    net.truth_gpu = calloc(1, sizeof(float *));

    net.input16_gpu = calloc(1, sizeof(float *));
    net.output16_gpu = calloc(1, sizeof(float *));
    net.max_input16_size = calloc(1, sizeof(size_t));
    net.max_output16_size = calloc(1, sizeof(size_t));
#endif
    return net;
}
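/*
 * Sketch (not from the original source): the single-element slots allocated
 * above exist so that later GPU code can lazily create and grow device
 * buffers that are shared across layers. A minimal example of that grow-only
 * reuse pattern, using the cuda_make_array()/cuda_free() wrappers already
 * used elsewhere in this file; ensure_input16() itself is a hypothetical
 * helper, not part of the repo.
 */
#ifdef GPU
static void ensure_input16(network *net, size_t needed)
{
    /* (re)allocate the shared buffer only when the requested element
       count exceeds the largest size seen so far */
    if (needed > *net->max_input16_size) {
        if (*net->input16_gpu) cuda_free(*net->input16_gpu);
        *net->input16_gpu = cuda_make_array(0, needed);
        *net->max_input16_size = needed;
    }
}
#endif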

        net->layers[i].batch = b;
#ifdef CUDNN
        if(net->layers[i].type == CONVOLUTIONAL){
            cudnn_convolutional_setup(net->layers + i, cudnn_fastest);
            /*
            layer *l = net->layers + i;
            cudnn_convolutional_setup(l, cudnn_fastest);
            // check for excessive memory consumption
            size_t free_byte;
            size_t total_byte;
            check_error(cudaMemGetInfo(&free_byte, &total_byte));
            if (l->workspace_size > free_byte || l->workspace_size >= total_byte / 2) {
                printf(" used slow CUDNN algo without Workspace! \n");
                cudnn_convolutional_setup(l, cudnn_smallest);
                l->workspace_size = get_workspace_size(*l);
            }
            */
        }
#endif
    }
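    /* The loop above re-runs cudnn_convolutional_setup() for every
       convolutional layer because the cuDNN tensor and convolution
       descriptors depend on the batch dimension. The commented-out block
       sketches a fallback: if the workspace requested by the fastest
       algorithm does not fit in free GPU memory, switch to the
       smallest-workspace algorithm instead. */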

    //fflush(stderr);
    for (i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        //printf(" %d: layer = %d,", i, l.type);
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }else if(l.type == CROP){
            resize_crop_layer(&l, w, h);
        }else if(l.type == REGION){
            resize_region_layer(&l, w, h);
        }else if(l.type == ROUTE){
            resize_route_layer(&l, net);
        }else if (l.type == SHORTCUT) {
            resize_shortcut_layer(&l, w, h);
        }else if(l.type == REORG){
            resize_reorg_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
        }
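        /* Each branch above delegates to the layer-specific resize routine;
           for convolutional layers this also recomputes the per-layer cuDNN
           workspace requirement, and the GPU block below re-allocates a
           shared workspace large enough for the largest layer. */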
#ifdef GPU
    if(gpu_index >= 0){
        printf(" try to allocate workspace = %zu * sizeof(float), ", (workspace_size - 1) / sizeof(float) + 1);
        net->workspace = cuda_make_array(0, (workspace_size - 1) / sizeof(float) + 1);
        printf(" CUDA allocate done! \n");
    }else {

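    /* Note: the lines below operate on a by-value `net` and release the
       single-element GPU buffer slots; they belong to the network
       tear-down path rather than to the resize/workspace code above. */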
    if (*net.truth_gpu) cuda_free(*net.truth_gpu);
    if (net.input_gpu) free(net.input_gpu);
    if (net.truth_gpu) free(net.truth_gpu);

    if (*net.input16_gpu) cuda_free(*net.input16_gpu);
    if (*net.output16_gpu) cuda_free(*net.output16_gpu);
    if (net.input16_gpu) free(net.input16_gpu);
    if (net.output16_gpu) free(net.output16_gpu);
    if (net.max_input16_size) free(net.max_input16_size);
    if (net.max_output16_size) free(net.max_output16_size);
#else
    free(net.workspace);
#endif
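    /* In the GPU branch above, the device buffers reached through the
       single-element slots are released with cuda_free() first, and the
       host-side slot pointers themselves are then released with free();
       without GPU support only the host workspace is freed. */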