    int output_size = l.outputs * batch;     /* one value per channel, per image */
    l.output = calloc(output_size, sizeof(float));
    l.delta = calloc(output_size, sizeof(float));
    l.forward = forward_avgpool_layer;
    l.backward = backward_avgpool_layer;
    #ifdef GPU
    l.forward_gpu = forward_avgpool_layer_gpu;
    l.backward_gpu = backward_avgpool_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, output_size);   /* mirror the CPU buffers on the GPU */
    l.delta_gpu = cuda_make_array(l.delta, output_size);
    #endif
    return l;
}
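/* Usage sketch (not part of avgpool_layer.c): assuming the usual darknet
 * constructor signature make_avgpool_layer(batch, w, h, c), which is not
 * shown in this excerpt, the buffers allocated above size out like this for
 * a batch of 4 feature maps of 13 x 13 x 256. */
#include <assert.h>

void avgpool_size_check(void)
{
    avgpool_layer l = make_avgpool_layer(4, 13, 13, 256);
    assert(l.outputs == 256);         /* out_w == out_h == 1 and out_c == c */
    assert(l.inputs == 13*13*256);    /* one full feature map per image     */
    /* l.output and l.delta each hold l.outputs * batch = 1024 floats */
}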
void resize_avgpool_layer(avgpool_layer *l, int w, int h)
{
    l->w = w;
    l->h = h;
    l->inputs = h*w*l->c;   /* output stays 1 x 1 x c, so l->outputs is unchanged */
}
void forward_avgpool_layer(const avgpool_layer l, network_state state)
{
    int b,i,k;

    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < l.c; ++k){
            int out_index = k + b*l.c;
            l.output[out_index] = 0;
            for(i = 0; i < l.h*l.w; ++i){
                int in_index = i + l.h*l.w*(k + b*l.c);
                l.output[out_index] += state.input[in_index];
            }
            l.output[out_index] /= l.h*l.w;   /* mean over the h*w positions of channel k */
        }
    }
}

void backward_avgpool_layer(const avgpool_layer l, network_state state)
{
    int b,i,k;

    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < l.c; ++k){
            int out_index = k + b*l.c;
            for(i = 0; i < l.h*l.w; ++i){
                int in_index = i + l.h*l.w*(k + b*l.c);
                state.delta[in_index] += l.delta[out_index] / (l.h*l.w);   /* equal share of the gradient */
            }
        }
    }
}
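/* Standalone sketch (not darknet code; the numbers are invented for
 * illustration): the same flattened indexing, in_index = i + h*w*(k + b*c),
 * applied to a 2x2 input with two channels and a batch of one. The forward
 * pass reduces each channel to its mean; the backward pass hands every input
 * position an equal 1/(h*w) share of that channel's output gradient. */
#include <stdio.h>

int main(void)
{
    int b = 0, c = 2, h = 2, w = 2;              /* single image (batch index 0) */
    float input[8] = {1, 2, 3, 4,                /* channel 0 */
                      10, 20, 30, 40};           /* channel 1 */
    float output[2] = {0};                       /* one value per channel */
    float out_delta[2] = {8, 4};                 /* made-up output gradients */
    float in_delta[8] = {0};

    for(int k = 0; k < c; ++k){
        int out_index = k + b*c;
        for(int i = 0; i < h*w; ++i){
            int in_index = i + h*w*(k + b*c);
            output[out_index] += input[in_index];                 /* forward: accumulate */
            in_delta[in_index] += out_delta[out_index] / (h*w);   /* backward: equal share */
        }
        output[out_index] /= h*w;                                 /* forward: mean */
    }
    printf("channel means: %.2f %.2f\n", output[0], output[1]);      /* 2.50 25.00 */
    printf("input grads:   %.2f %.2f\n", in_delta[0], in_delta[4]);  /* 2.00 1.00  */
    return 0;
}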