#endif
#endif

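/* This change threads a new `adam` flag through make_convolutional_layer
   so the GPU path can allocate Adam's moment buffers up front. */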
-convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor)
+convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
    int i;
    convolutional_layer l = {0};
    ...
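    /* Elided above: the remaining field setup (h, w, c, n, size, stride,
       padding, ...) and the out_h/out_w output-size computation that the
       buffers below rely on. */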
    l.output = calloc(l.batch * out_h * out_w * n, sizeof(float));
    l.delta = calloc(l.batch * out_h * out_w * n, sizeof(float));

    l.forward = forward_convolutional_layer;
    l.backward = backward_convolutional_layer;
    l.update = update_convolutional_layer;
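    /* Buffers for the binary/XNOR path: binary_weights holds a binarized
       copy of the weights, and cweights a per-weight char encoding. */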
    if(binary){
        l.binary_weights = calloc(c*n*size*size, sizeof(float));
        l.cweights = calloc(c*n*size*size, sizeof(char));
        ...
    }

#ifdef GPU
    l.forward_gpu = forward_convolutional_layer_gpu;
    l.backward_gpu = backward_convolutional_layer_gpu;
    l.update_gpu = update_convolutional_layer_gpu;

    if(gpu_index >= 0){
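        /* Adam keeps per-weight first- and second-moment estimates m and v.
           Seeding them from l.weight_updates (zero-filled by calloc earlier
           in the constructor) starts both at zero. The update itself,
           m = B1*m + (1-B1)*g, v = B2*v + (1-B2)*g*g,
           w -= lr * mhat / (sqrt(vhat) + eps), runs later in the GPU
           update step. */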
        if (adam) {
            l.adam = 1;
            l.m_gpu = cuda_make_array(l.weight_updates, c*n*size*size);
            l.v_gpu = cuda_make_array(l.weight_updates, c*n*size*size);
        }
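
        /* Device-side mirrors of the host weight and gradient buffers. */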
        l.weights_gpu = cuda_make_array(l.weights, c*n*size*size);
        l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size);
        ...

void test_convolutional_layer()
{
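    /* The call site gains the new trailing adam argument (0 = Adam off). */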
-    convolutional_layer l = make_convolutional_layer(1, 5, 5, 3, 2, 5, 2, 1, LEAKY, 1, 0, 0);
+    convolutional_layer l = make_convolutional_layer(1, 5, 5, 3, 2, 5, 2, 1, LEAKY, 1, 0, 0, 0);
    l.batch_normalize = 1; /* redundant: the constructor call above already passes batch_normalize = 1 */
    float data[] = {1,1,1,1,1,
                    1,1,1,1,1,