| | |
| | | |
// NOTE(review): TRUNCATED FRAGMENT. This is only the opening of
// forward_network — the per-layer forward call, the loop's closing brace,
// and the function's closing brace are missing from this chunk; the text
// that follows belongs to a different function. This file appears to be a
// corrupted extraction; restore it from the original source before building.
// What the visible lines show: the shared scratch workspace is attached to
// the per-call state, then layers are iterated front-to-back with
// state.index tracking the current layer.
| | | void forward_network(network net, network_state state)
| | | {
| | | state.workspace = net.workspace;
| | | int i;
| | | for(i = 0; i < net.n; ++i){
| | | state.index = i;
| | |
// NOTE(review): ORPHANED FRAGMENT. These lines are the interior of a
// backward-pass routine (presumably backward_network — TODO confirm against
// the original file): the enclosing function signature, the loop body, and
// the closing braces are all missing from this chunk. Do not compile as-is.
// Visible behavior: the caller's input/delta pointers are saved before the
// loop (presumably so they can be restored for layer 0 — the i == 0 branch
// starts below but is cut off), the workspace is attached to the state, and
// layers are iterated back-to-front with state.index tracking the layer.
| | | int i;
| | | float *original_input = state.input;
| | | float *original_delta = state.delta;
| | | state.workspace = net.workspace;
| | | for(i = net.n-1; i >= 0; --i){
| | | state.index = i;
| | | if(i == 0){
| | |
// NOTE(review): PARTIAL FUNCTION. This is the tail of a network-resize
// routine (presumably resize_network(network *net, int w, int h) — TODO
// confirm): the signature and the per-layer-type resize dispatch inside the
// for loop are missing from this chunk; only the trailing }else{ of that
// dispatch chain survives. Restore from the original source before building.
// Visible behavior: records the new dimensions, walks every layer threading
// w/h/inputs from one layer's outputs to the next, tracks the maximum
// per-layer workspace requirement, errors on unresizable layer types, stops
// early after a global AVGPOOL, then reallocates the shared workspace.
| | | net->w = w;
| | | net->h = h;
| | | int inputs = 0;
| | | size_t workspace_size = 0;
| | | //fprintf(stderr, "Resizing to %d x %d...", w, h);
| | | //fflush(stderr);
| | | for (i = 0; i < net->n; ++i){
| | |
| | | }else{
| | | error("Cannot resize this type of layer");
| | | }
// workspace_size is kept as the max over all layers so one shared scratch
// buffer serves every layer.
| | | if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
| | | inputs = l.outputs;
| | | net->layers[i] = l;
| | | w = l.out_w;
| | | h = l.out_h;
| | | if(l.type == AVGPOOL) break;
| | | }
| | | #ifdef GPU
// GPU path: cuda_make_array takes an ELEMENT count, so bytes are rounded
// up to a whole number of floats.
| | | cuda_free(net->workspace);
| | | net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
| | | #else
// NOTE(review): suspicious — calloc's size here reuses the float ELEMENT
// count as a BYTE count, allocating ~1/4 of workspace_size bytes. Likely
// should be calloc((workspace_size-1)/sizeof(float)+1, sizeof(float)) or
// calloc(1, workspace_size). Verify against the upstream source; also note
// the calloc result is not checked for NULL.
| | | free(net->workspace);
| | | net->workspace = calloc(1, (workspace_size-1)/sizeof(float)+1);
| | | #endif
| | | //fprintf(stderr, " Done!\n");
| | | return 0;
| | | }