#ifndef AI2
#define AI2 0
void forward_xnor_layer(layer l, network_state state);
#endif

void swap_binary(convolutional_layer *l)
{
    /* Exchange the real-valued and binarized filter pointers. */
    float *swap = l->filters;
    l->filters = l->binary_filters;
    l->binary_filters = swap;
#ifdef GPU
    swap = l->filters_gpu;
    l->filters_gpu = l->binary_filters_gpu;
    l->binary_filters_gpu = swap;
#endif
}

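/* Usage note (sketch, inferred from the xnor path at the bottom of this file):
 * swap_binary is typically called in pairs, once to substitute the binarized
 * filters before the convolution and once afterwards to restore the
 * real-valued ones. */
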
/* Map each of the n inputs to its sign: +1 for positive, -1 otherwise. */
void binarize_cpu(float *input, int n, float *binary)
{
    int i;
    for(i = 0; i < n; ++i){
        binary[i] = (input[i] > 0) ? 1 : -1;
    }
}

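/* Illustrative self-check (hypothetical, not part of the original source):
 * exercises binarize_cpu above. Note that an input of exactly 0 maps to -1. */
static void binarize_cpu_example()
{
    float in[4] = {0.5f, -0.2f, 0.0f, 3.1f};
    float bin[4];
    binarize_cpu(in, 4, bin);
    /* bin now holds {1, -1, -1, 1}. */
}
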
/* Binarize per spatial position: each of the size positions keeps the mean
 * absolute value of its n channel entries as a scale, as in XNOR-Net. */
void binarize_input(float *input, int n, int size, float *binary)
{
    int i, s;
    for(s = 0; s < size; ++s){
        float mean = 0;
        for(i = 0; i < n; ++i) mean += fabs(input[i*size + s]);
        mean = mean / n;
        for(i = 0; i < n; ++i) binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
    }
}

int convolutional_out_height(convolutional_layer l)
{
    return (l.h + 2*l.pad - l.size)/l.stride + 1;
}

int convolutional_out_width(convolutional_layer l)
{
    return (l.w + 2*l.pad - l.size)/l.stride + 1;
}

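/* Worked example (illustrative): with the formula above, a 416x416 input and
 * size=3, stride=1, pad=1 keeps its extent, (416 + 2*1 - 3)/1 + 1 = 416,
 * while stride=2 halves it to (416 + 2*1 - 3)/2 + 1 = 208. */
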
image get_convolutional_image(convolutional_layer l)
{
    int h = convolutional_out_height(l);
    int w = convolutional_out_width(l);
    return float_to_image(w, h, l.n, l.output);
}

size_t get_workspace_size(layer l){
#ifdef CUDNN
    if(gpu_index >= 0){
        size_t most = 0;
        size_t s = 0;
        cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),
                l.srcTensorDesc,
                l.filterDesc,
                l.convDesc,
                l.dstTensorDesc,
                l.fw_algo,
                &s);
        if (s > most) most = s;
        cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle(),
                l.srcTensorDesc,
                l.ddstTensorDesc,
                l.convDesc,
                l.dfilterDesc,
                l.bf_algo,
                &s);
        if (s > most) most = s;
        cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle(),
                l.filterDesc,
                l.ddstTensorDesc,
                l.convDesc,
                l.dsrcTensorDesc,
                l.bd_algo,
                &s);
        if (s > most) most = s;
        return most;
    }
#endif
    return (size_t)l.out_h*l.out_w*l.size*l.size*l.c*sizeof(float);
}

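/* Note (sketch): the non-cuDNN fallback above is exactly the im2col scratch
 * buffer, one size*size*c column of floats per output pixel. For example, a
 * 3x3 convolution over 256 channels at 52x52 output needs
 * 52*52*3*3*256*4 bytes, about 24 MB of workspace. */
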
#ifdef GPU
#ifdef CUDNN
/* (Re)bind all cuDNN descriptors to the layer's current shape and let cuDNN
 * pick the fastest algorithms for the forward and both backward passes. */
void cudnn_convolutional_setup(layer *l)
{
    cudnnSetTensor4dDescriptor(l->dsrcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w);
    cudnnSetTensor4dDescriptor(l->ddstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
    cudnnSetFilter4dDescriptor(l->dfilterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size);

    cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w);
    cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
    cudnnSetFilter4dDescriptor(l->filterDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c, l->size, l->size);
    cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION);
    cudnnGetConvolutionForwardAlgorithm(cudnn_handle(),
            l->srcTensorDesc,
            l->filterDesc,
            l->convDesc,
            l->dstTensorDesc,
            CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
            0,
            &l->fw_algo);
    cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle(),
            l->filterDesc,
            l->ddstTensorDesc,
            l->convDesc,
            l->dsrcTensorDesc,
            CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST,
            0,
            &l->bd_algo);
    cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle(),
            l->srcTensorDesc,
            l->ddstTensorDesc,
            l->convDesc,
            l->dfilterDesc,
            CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
            0,
            &l->bf_algo);
}
#endif
#endif

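/* cudnn_convolutional_setup above is shared by make_convolutional_layer and
 * resize_convolutional_layer below, so the descriptors always track the
 * current batch size and input dimensions. */
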
convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor)
{
    int i;
    convolutional_layer l = {0};
    l.type = CONVOLUTIONAL;

    l.h = h;
    l.w = w;
    l.c = c;
    l.n = n;
    l.binary = binary;
    l.xnor = xnor;
    l.batch = batch;
    l.stride = stride;
    l.size = size;
    l.pad = padding;
    l.batch_normalize = batch_normalize;

    l.filters = calloc(c*n*size*size, sizeof(float));
    l.filter_updates = calloc(c*n*size*size, sizeof(float));

    int out_h = convolutional_out_height(l);
    int out_w = convolutional_out_width(l);
    l.out_h = out_h;
    l.out_w = out_w;
    l.out_c = n;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = l.w * l.h * l.c;

    l.output = calloc(l.batch*l.outputs, sizeof(float));
    l.delta  = calloc(l.batch*l.outputs, sizeof(float));

#ifdef GPU
    if(gpu_index >= 0){
        l.filters_gpu = cuda_make_array(l.filters, c*n*size*size);
        l.filter_updates_gpu = cuda_make_array(l.filter_updates, c*n*size*size);

        l.biases_gpu = cuda_make_array(l.biases, n);
        l.bias_updates_gpu = cuda_make_array(l.bias_updates, n);

        l.scales_gpu = cuda_make_array(l.scales, n);
        l.scale_updates_gpu = cuda_make_array(l.scale_updates, n);

        l.delta_gpu = cuda_make_array(l.delta, l.batch*out_h*out_w*n);
        l.output_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);

        if(binary){
            l.binary_filters_gpu = cuda_make_array(l.filters, c*n*size*size);
        }
        if(xnor){
            l.binary_filters_gpu = cuda_make_array(l.filters, c*n*size*size);
            l.binary_input_gpu = cuda_make_array(0, l.inputs*l.batch);
        }

        if(batch_normalize){
            l.mean_gpu = cuda_make_array(l.mean, n);
            l.variance_gpu = cuda_make_array(l.variance, n);

            l.rolling_mean_gpu = cuda_make_array(l.mean, n);
            l.rolling_variance_gpu = cuda_make_array(l.variance, n);

            l.mean_delta_gpu = cuda_make_array(l.mean, n);
            l.variance_delta_gpu = cuda_make_array(l.variance, n);

            l.x_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
            l.x_norm_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
        }
#ifdef CUDNN
        cudnnCreateTensorDescriptor(&l.srcTensorDesc);
        cudnnCreateTensorDescriptor(&l.dstTensorDesc);
        cudnnCreateFilterDescriptor(&l.filterDesc);
        cudnnCreateTensorDescriptor(&l.dsrcTensorDesc);
        cudnnCreateTensorDescriptor(&l.ddstTensorDesc);
        cudnnCreateFilterDescriptor(&l.dfilterDesc);
        cudnnCreateConvolutionDescriptor(&l.convDesc);
        cudnn_convolutional_setup(&l);
#endif
    }
#endif
    l.workspace_size = get_workspace_size(l);
    l.activation = activation;

    return l;
}

/* Fold the batch-norm statistics into the filters and biases, then reset
 * them so the layer behaves like a plain convolution. */
void denormalize_convolutional_layer(convolutional_layer l)
{
    int i, j;
    for(i = 0; i < l.n; ++i){
        float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);
        for(j = 0; j < l.c*l.size*l.size; ++j){
            l.filters[i*l.c*l.size*l.size + j] *= scale;
        }
        l.biases[i] -= l.rolling_mean[i] * scale;
        l.scales[i] = 1;
        l.rolling_mean[i] = 0;
        l.rolling_variance[i] = 1;
    }
}

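/* Why the folding above works (sketch): batch norm computes
 *   y = scales * (conv - rolling_mean)/sqrt(rolling_variance + .00001) + biases,
 * so absorbing scale = scales/sqrt(rolling_variance + .00001) into the filters
 * and subtracting rolling_mean*scale from the biases yields the same output
 * from a plain convolution, after which the statistics can be reset to the
 * identity (scale 1, mean 0, variance 1). */
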
void resize_convolutional_layer(convolutional_layer *l, int w, int h)
{
    l->w = w;
    l->h = h;
    int out_w = convolutional_out_width(*l);
    int out_h = convolutional_out_height(*l);

    l->out_w = out_w;
    l->out_h = out_h;
    l->outputs = l->out_h * l->out_w * l->out_c;
    l->inputs = l->w * l->h * l->c;

    l->output = realloc(l->output, l->batch*out_h*out_w*l->n*sizeof(float));
    l->delta  = realloc(l->delta,  l->batch*out_h*out_w*l->n*sizeof(float));

#ifdef GPU
    cuda_free(l->delta_gpu);
    cuda_free(l->output_gpu);

    l->delta_gpu = cuda_make_array(l->delta, l->batch*out_h*out_w*l->n);
    l->output_gpu = cuda_make_array(l->output, l->batch*out_h*out_w*l->n);
#ifdef CUDNN
    cudnn_convolutional_setup(l);
#endif
#endif
    l->workspace_size = get_workspace_size(*l);
}

void forward_convolutional_layer(convolutional_layer l, network_state state)
{
    fill_cpu(l.outputs*l.batch, 0, l.output, 1);

    if(l.xnor && (l.c%32 != 0 || !AI2)){
        binarize_filters(l.filters, l.n, l.c*l.size*l.size, l.binary_filters);
        swap_binary(&l);
        binarize_cpu(state.input, l.c*l.h*l.w*l.batch, l.binary_input);
        state.input = l.binary_input;
    }

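/* Background (sketch): with filters and inputs constrained to +/-1, each dot
 * product can be computed as popcount(xnor(a, b)) on bit-packed words; the
 * AI2 guard above gates such an optimized path when l.c is a multiple of 32.
 * The branch in this file is the fallback that runs the usual float GEMM on
 * the binarized values. */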