#include <stdio.h>
#include <time.h>

#ifdef CUDNN
#pragma comment(lib, "cudnn.lib")
#endif

#ifdef AI2
#include "xnor_layer.h"
#endif

#ifdef GPU
#ifdef CUDNN
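/*
 * cudnn_convolutional_setup() (re)configures the cuDNN tensor/filter/convolution
 * descriptors for this layer and queries cuDNN for the forward and backward
 * algorithms. cudnn_preference selects between the fastest algorithms
 * (cudnn_fastest) and zero-workspace algorithms (cudnn_smallest) for when GPU
 * memory is tight.
 */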
void cudnn_convolutional_setup(layer *l, int cudnn_preference)
{
#ifdef CUDNN_HALF
    // TRUE_HALF_CONFIG is only supported on architectures with true FP16 support
    // (compute capability 5.3 and 6.0): Tegra X1, Jetson TX1, DRIVE CX, DRIVE PX,
    // Quadro GP100, Tesla P100.
    // PSEUDO_HALF_CONFIG is required for Tensor Cores - our case!
    const cudnnDataType_t data_type = CUDNN_DATA_HALF;
#else
    cudnnDataType_t data_type = CUDNN_DATA_FLOAT;
#endif

#if(CUDNN_MAJOR >= 7)
    // Tensor Cores use CUDNN_TENSOR_OP_MATH instead of CUDNN_DEFAULT_MATH.
    // CUDNN_DATA_FLOAT can be used with the *_ALGO_WINOGRAD_NONFUSED algorithms;
    // otherwise the input, filter and output descriptors (xDesc, yDesc, wDesc,
    // dxDesc, dyDesc and dwDesc as applicable) must have dataType = CUDNN_DATA_HALF.
    // Three techniques for training with mixed precision:
    // https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/
    // 1. Accumulation into FP32
    // 2. Loss scaling - required only for activation gradients (we do not use it)
    // 3. FP32 master copy of weights
    // More: http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#tensor_ops
    cudnnSetConvolutionMathType(l->convDesc, CUDNN_TENSOR_OP_MATH);
#endif

    // INT8_CONFIG, INT8_EXT_CONFIG, INT8x4_CONFIG and INT8x4_EXT_CONFIG are only supported
    // on architectures with DP4A support (compute capability 6.1 and later).
    //cudnnDataType_t data_type = CUDNN_DATA_INT8;

    // backward delta
    cudnnSetTensor4dDescriptor(l->dsrcTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->c, l->h, l->w);
    cudnnSetTensor4dDescriptor(l->ddstTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w);
    cudnnSetFilter4dDescriptor(l->dweightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size);

    // forward
    cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->c, l->h, l->w);
    cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w);
    cudnnSetFilter4dDescriptor(l->weightDesc, data_type, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size);

    // batch norm
    cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1);
    cudnnSetTensor4dDescriptor(l->normDstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);

    cudnnSetTensor4dDescriptor(l->normDstTensorDescF16, CUDNN_TENSOR_NCHW, data_type, l->batch, l->out_c, l->out_h, l->out_w);
#if(CUDNN_MAJOR >= 6)
    cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);    // cudnn >= 6.0
#else
    cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION);    // cudnn 5.1
#endif
    int forward_algo = CUDNN_CONVOLUTION_FWD_PREFER_FASTEST;
    int backward_algo = CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST;
    int backward_filter = CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST;
    if (cudnn_preference == cudnn_smallest)
    {
        forward_algo = CUDNN_CONVOLUTION_FWD_NO_WORKSPACE;
        backward_algo = CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE;
        backward_filter = CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE;
        printf(" CUDNN-slow ");
    }

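    // Ask cuDNN to pick concrete algorithms under the chosen preference
    // (fastest vs. no-workspace), with no explicit memory limit (0).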
    cudnnGetConvolutionForwardAlgorithm(cudnn_handle(),
            l->srcTensorDesc,
            l->weightDesc,
            l->convDesc,
            l->dstTensorDesc,
            forward_algo,
            0,
            &l->fw_algo);
    cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle(),
            l->weightDesc,
            l->ddstTensorDesc,
            l->convDesc,
            l->dsrcTensorDesc,
            backward_algo,
            0,
            &l->bd_algo);
    cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle(),
            l->srcTensorDesc,
            l->ddstTensorDesc,
            l->convDesc,
            l->dweightDesc,
            backward_filter,
            0,
            &l->bf_algo);

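    // With FP16 data, override the queried choices with algorithms known to be
    // Tensor-Core eligible.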
    if (data_type == CUDNN_DATA_HALF)
    {
        // HALF-16 if(data_type == CUDNN_DATA_HALF)
        l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
        l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1;
        l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1;

        // FLOAT-32 if(data_type == CUDNN_DATA_FLOAT)
        //l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED;
        //l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED;
        //l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED;

        int fw = 0, bd = 0, bf = 0;
        if (l->fw_algo == CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) fw = 1;
            //printf("Tensor Cores - Forward enabled: l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM \n");
        if (l->fw_algo == CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED) fw = 2;
            //printf("Tensor Cores - Forward enabled: l->fw_algo = CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED \n");

        if (l->bd_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_1) bd = 1;
            //printf("Tensor Cores - Backward-data enabled: l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 \n");
        if (l->bd_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED) bd = 2;
            //printf("Tensor Cores - Backward-data enabled: l->bd_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED \n");

        if (l->bf_algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1) bf = 1;
            //printf("Tensor Cores - Backward-filter enabled: l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 \n");
        if (l->bf_algo == CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED) bf = 2;
            //printf("Tensor Cores - Backward-filter enabled: l->bf_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED \n");

        if (fw == 2 && bd == 2 && bf == 2) printf("TF ");
        else if (fw == 1 && bd == 1 && bf == 1) printf("TH ");
    }
}
#endif
#endif

    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = l.w * l.h * l.c;

    l.output = calloc(l.batch*l.outputs, sizeof(float));
    l.delta = calloc(l.batch*l.outputs, sizeof(float));

    l.forward = forward_convolutional_layer;
    l.backward = backward_convolutional_layer;

    if(l.batch_normalize){
        l.mean = calloc(n, sizeof(float));
        l.variance = calloc(n, sizeof(float));

        l.mean_delta = calloc(n, sizeof(float));
        l.variance_delta = calloc(n, sizeof(float));

        l.rolling_mean = calloc(n, sizeof(float));
        l.rolling_variance = calloc(n, sizeof(float));
        l.x = calloc(l.batch*l.outputs, sizeof(float));
        l.x_norm = calloc(l.batch*l.outputs, sizeof(float));
    }
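    // Adam optimizer state: per-weight first (m) and second (v) moment accumulators.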
    if(adam){
        l.adam = 1;
        l.m = calloc(c*n*size*size, sizeof(float));
        l.v = calloc(c*n*size*size, sizeof(float));
    }

#ifdef GPU

    if(gpu_index >= 0){
        if (adam) {
            l.adam = 1;
            l.m_gpu = cuda_make_array(l.m, c*n*size*size);
            l.v_gpu = cuda_make_array(l.v, c*n*size*size);
        }

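        // Mirror the weights and their gradients on the GPU. Under CUDNN_HALF,
        // extra buffers hold FP16 copies (allocated as half-length float arrays).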
        l.weights_gpu = cuda_make_array(l.weights, c*n*size*size);
#ifdef CUDNN_HALF
        l.weights_gpu16 = cuda_make_array(NULL, c*n*size*size / 2); //cuda_make_array(l.weights, c*n*size*size / 2);
        l.weight_updates_gpu16 = cuda_make_array(NULL, c*n*size*size / 2); //cuda_make_array(l.weight_updates, c*n*size*size / 2);
#endif
        l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size);

        l.biases_gpu = cuda_make_array(l.biases, n);

        if(l.batch_normalize){
            l.x_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
            l.x_norm_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
        }
#ifdef CUDNN
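        // Create the cuDNN descriptors once; cudnn_convolutional_setup() fills in
        // their shapes and picks the algorithms.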
        cudnnCreateTensorDescriptor(&l.normDstTensorDesc);
        cudnnCreateTensorDescriptor(&l.normDstTensorDescF16);
        cudnnCreateTensorDescriptor(&l.normTensorDesc);
        cudnnCreateTensorDescriptor(&l.srcTensorDesc);
        cudnnCreateTensorDescriptor(&l.dstTensorDesc);
        cudnnCreateFilterDescriptor(&l.weightDesc);
        cudnnCreateTensorDescriptor(&l.dsrcTensorDesc);
        cudnnCreateTensorDescriptor(&l.ddstTensorDesc);
        cudnnCreateFilterDescriptor(&l.dweightDesc);
        cudnnCreateConvolutionDescriptor(&l.convDesc);
        cudnn_convolutional_setup(&l, cudnn_fastest);
#endif
    }
#endif
    l.workspace_size = get_workspace_size(l);
    l.activation = activation;

    //fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    l.bflops = (2.0 * l.n * l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
    fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d %5.3f BF\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);

    return l;
}
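// Usage sketch (illustrative only: make_convolutional_layer()'s full parameter
// list is not shown in this excerpt, so treat the argument order as hypothetical):
//   convolutional_layer l = make_convolutional_layer(batch, h, w, c, n, size,
//       stride, pad, LEAKY, /*batch_normalize*/1, /*binary*/0, /*xnor*/0, /*adam*/0);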
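// Re-derives the output geometry after an input-size change and re-allocates the
// per-batch CPU/GPU buffers; GPU buffers are only re-created when the layer grows.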
void resize_convolutional_layer(convolutional_layer *l, int w, int h)
{
    int old_w = l->w;
    int old_h = l->h;
    l->w = w;
    l->h = h;
    int out_w = convolutional_out_width(*l);
    int out_h = convolutional_out_height(*l);

    l->out_w = out_w;
    l->out_h = out_h;

    l->outputs = l->out_h * l->out_w * l->out_c;
    l->inputs = l->w * l->h * l->c;

    l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
    l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
    if(l->batch_normalize){
        l->x = realloc(l->x, l->batch*l->outputs*sizeof(float));
        l->x_norm = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));
    }

    if (l->xnor) {
        //l->binary_input = realloc(l->inputs*l->batch, sizeof(float));
    }

#ifdef GPU
    if (old_w < w || old_h < h) {
        cuda_free(l->delta_gpu);
        cuda_free(l->output_gpu);

        l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
        l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);

        if (l->batch_normalize) {
            cuda_free(l->x_gpu);
            cuda_free(l->x_norm_gpu);

            l->x_gpu = cuda_make_array(l->output, l->batch*l->outputs);
            l->x_norm_gpu = cuda_make_array(l->output, l->batch*l->outputs);
        }

        if (l->xnor) {
            cuda_free(l->binary_input_gpu);
            l->binary_input_gpu = cuda_make_array(0, l->inputs*l->batch);
        }
    }
#ifdef CUDNN
    cudnn_convolutional_setup(l, cudnn_fastest);
#endif
#endif
    l->workspace_size = get_workspace_size(*l);

#ifdef CUDNN
    // check for excessive memory consumption
    size_t free_byte;
    size_t total_byte;
    check_error(cudaMemGetInfo(&free_byte, &total_byte));
    if (l->workspace_size > free_byte || l->workspace_size >= total_byte / 2) {
        printf(" used slow CUDNN algo without Workspace! Need memory: %zu, available: %zu\n",
            l->workspace_size, (free_byte < total_byte/2) ? free_byte : total_byte/2);
        cudnn_convolutional_setup(l, cudnn_smallest);
        l->workspace_size = get_workspace_size(*l);
    }
#endif
}

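// add_bias() adds each filter's scalar bias to every spatial position of that
// filter's output map, for every image in the batch.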
void add_bias(float *output, float *biases, int batch, int n, int size)
{
    int i,j,b;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < n; ++i){
            for(j = 0; j < size; ++j) output[(b*n + i)*size + j] += biases[i];
        }
    }
}

void forward_convolutional_layer(convolutional_layer l, network_state state)
{
    int out_h = convolutional_out_height(l);
    int out_w = convolutional_out_width(l);
    int i;

    fill_cpu(l.outputs*l.batch, 0, l.output, 1);

    /*
    if(l.binary){
        binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.binary_weights);
        binarize_weights2(l.weights, l.n, l.c*l.size*l.size, l.cweights, l.scales);
        swap_binary(&l);
    }
    */

    /*
    if(l.binary){
        int m = l.n;
        int k = l.size*l.size*l.c;
        int n = out_h*out_w;

        char *a = l.cweights;
        float *b = state.workspace;
        float *c = l.output;

        for(i = 0; i < l.batch; ++i){
            im2col_cpu(state.input, l.c, l.h, l.w,
                    l.size, l.stride, l.pad, b);
            gemm_bin(m,n,k,1,a,k,b,n,c,n);
            c += n*m;
            state.input += l.c*l.h*l.w;
        }
        scale_bias(l.output, l.scales, l.batch, l.n, out_h*out_w);
        add_bias(l.output, l.biases, l.batch, l.n, out_h*out_w);
        activate_array(l.output, m*n*l.batch, l.activation);
        return;
    }
    */

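    // XNOR-net path: replace the weights (and inputs) with their binarized
    // versions before the convolution.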
    if(l.xnor){
        binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.binary_weights);
        swap_binary(&l);
        binarize_cpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input);
        state.input = l.binary_input;
    }

    int m = l.n;
    int k = l.size*l.size*l.c;
    int n = out_h*out_w;

    if (l.xnor && l.c%32 == 0 && AI2) {
        forward_xnor_layer(l, state);
        printf("xnor\n");
    } else {

        float *a = l.weights;
        float *b = state.workspace;
        float *c = l.output;

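        // im2col_cpu unrolls each receptive field of the input into a column of b
        // (k x n); gemm then computes c (m x n) = a (m x k) * b, i.e. filters
        // times image columns, one batch image at a time.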
        for(i = 0; i < l.batch; ++i){
            im2col_cpu(state.input, l.c, l.h, l.w,
                    l.size, l.stride, l.pad, b);
            gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
            c += n*m;
            state.input += l.c*l.h*l.w;
        }
    }

    if(l.batch_normalize){
        forward_batchnorm_layer(l, state);
    }
    add_bias(l.output, l.biases, l.batch, l.n, out_h*out_w);

    activate_array(l.output, m*n*l.batch, l.activation);
    if(l.binary || l.xnor) swap_binary(&l);
}

void backward_convolutional_layer(convolutional_layer l, network_state state)
{
    int i;
    int m = l.n;
    int n = l.size*l.size*l.c;
    int k = l.out_w*l.out_h;

    gradient_array(l.output, m*k*l.batch, l.activation, l.delta);
    backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);

    if(l.batch_normalize){
        backward_batchnorm_layer(l, state);
    }

    for(i = 0; i < l.batch; ++i){
        float *a = l.delta + i*m*k;
        float *b = state.workspace;