#include "avgpool_layer.h"
#include "cuda.h"

#include <stdio.h>
#include <stdlib.h>
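
/*
 * Global average-pooling layer: each w x h x c input image is reduced to a
 * 1 x 1 x c output by averaging every channel over all h*w spatial positions,
 * i.e. output[b][k] = (1/(h*w)) * sum_i input[b][k][i].
 */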
avgpool_layer make_avgpool_layer(int batch, int w, int h, int c)
{
    fprintf(stderr, "Avgpool Layer: %d x %d x %d image\n", w, h, c);
    fprintf(stderr, "avg          %4d x%4d x%4d   ->  %4d\n", w, h, c, c);
    avgpool_layer l = {0};
    l.type = AVGPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.out_w = 1;
    l.out_h = 1;
    l.out_c = c;
    l.outputs = l.out_c;
    l.inputs = h*w*c;

    int output_size = l.outputs * batch;
    l.output = calloc(output_size, sizeof(float));
    l.delta  = calloc(output_size, sizeof(float));
    l.forward = forward_avgpool_layer;
    l.backward = backward_avgpool_layer;
#ifdef GPU
    l.forward_gpu = forward_avgpool_layer_gpu;
    l.backward_gpu = backward_avgpool_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, output_size);
    l.delta_gpu = cuda_make_array(l.delta, output_size);
#endif
    return l;
}
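
/* Resizing only changes the spatial extent of the input; the 1 x 1 x c output
 * shape (out_w, out_h, out_c, outputs) is unaffected, so only w, h and inputs
 * need updating. */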
void resize_avgpool_layer(avgpool_layer *l, int w, int h)
{
    l->w = w;
    l->h = h;
    l->inputs = h*w*l->c;
}
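
/* Forward pass: average each channel of each image over all h*w positions.
 * Backward pass: the gradient of a mean is uniform, so every input position
 * receives delta[out_index] / (h*w); it is accumulated with += so gradients
 * already stored in state.delta are preserved. */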

void forward_avgpool_layer(const avgpool_layer l, network_state state)
{
    int b, i, k;
    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < l.c; ++k){
            int out_index = k + b*l.c;
            l.output[out_index] = 0;
            for(i = 0; i < l.h*l.w; ++i){
                int in_index = i + l.h*l.w*(k + b*l.c);
                l.output[out_index] += state.input[in_index];
            }
            l.output[out_index] /= l.h*l.w;
        }
    }
}

void backward_avgpool_layer(const avgpool_layer l, network_state state)
{
    int b, i, k;
    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < l.c; ++k){
            int out_index = k + b*l.c;
            for(i = 0; i < l.h*l.w; ++i){
                int in_index = i + l.h*l.w*(k + b*l.c);
                state.delta[in_index] += l.delta[out_index] / (l.h*l.w);
            }
        }
    }
}
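
/* A minimal usage sketch, for illustration only (guarded so it is not built
 * into the library). It assumes the surrounding darknet types are available
 * and that network_state exposes the .input / .delta pointers used above. */
#ifdef AVGPOOL_LAYER_DEMO
int main()
{
    int batch = 1, w = 4, h = 4, c = 2;
    avgpool_layer l = make_avgpool_layer(batch, w, h, c);

    float *in = calloc(batch*w*h*c, sizeof(float));
    int i;
    for(i = 0; i < w*h; ++i) in[i] = 1;   /* channel 0 all ones, channel 1 all zeros */

    network_state state = {0};
    state.input = in;
    forward_avgpool_layer(l, state);      /* expect l.output[0] == 1, l.output[1] == 0 */

    free(in);
    free(l.output);
    free(l.delta);
    return 0;
}
#endif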