
    l.delta = calloc(l.outputs*batch, sizeof(float));
    l.output = calloc(l.outputs*batch, sizeof(float));

    l.forward = forward_shortcut_layer;
    l.backward = backward_shortcut_layer;
    #ifdef GPU
    l.forward_gpu = forward_shortcut_layer_gpu;
    l.backward_gpu = backward_shortcut_layer_gpu;

    /* Mirror the host buffers on the device: cuda_make_array allocates
     * l.outputs*batch floats on the GPU and copies the host contents in. */
    l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
    #endif
    return l;
}
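
/*
 * Construction sketch (hypothetical caller, for illustration only): the
 * network parser typically builds a shortcut layer by pairing the current
 * layer with an earlier layer referenced by index, along the lines of:
 *
 *     layer from = net.layers[index];
 *     layer s = make_shortcut_layer(batch, index, w, h, c,
 *                                   from.out_w, from.out_h, from.out_c);
 *
 * The exact parameter list here is an assumption; the point is that the
 * layer records both its own shape and the shape of the layer it adds in.
 */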

void resize_shortcut_layer(layer *l, int w, int h)
{
    //assert(l->w == l->out_w);
    //assert(l->h == l->out_h);
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    /* Grow or shrink the host buffers to match the new spatial size.
     * Note the darknet-style realloc pattern does not check for failure. */
    l->delta = realloc(l->delta, l->outputs*l->batch * sizeof(float));
    l->output = realloc(l->output, l->outputs*l->batch * sizeof(float));

#ifdef GPU
    /* Device buffers cannot be resized in place: free and reallocate,
     * re-uploading the freshly reallocated host contents. */
    cuda_free(l->output_gpu);
    cuda_free(l->delta_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
    l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
#endif
}
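
/*
 * Resize sketch (illustrative; the dispatch shown here is an assumption
 * about the caller, not code from this file): when the network input size
 * changes, the network-level resize walks the layers and invokes the
 * matching per-layer resize, e.g.:
 *
 *     if (net->layers[i].type == SHORTCUT) {
 *         resize_shortcut_layer(&net->layers[i], w, h);
 *     }
 */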

void forward_shortcut_layer(const layer l, network_state state)
{
    /* Start from a copy of this layer's own input; the skip connection
     * (the output of the layer referenced by l.index) is then added into
     * this buffer. */
    copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
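    /* For reference, copy_cpu is darknet's BLAS-style vector copy; with the
     * unit strides used above it behaves like a plain element-wise copy,
     * roughly (a sketch, assuming float buffers of length n):
     *
     *     for (int i = 0; i < n; ++i) y[i] = x[i];
     */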