#include "network.h"
#include "cuda.h"
#include "blas.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "deconvolutional_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
/* The dispatch chains below also need the headers for the remaining layer
 * types: local, detection, connected, rnn, gru, crnn, crop, cost, softmax,
 * normalization, batchnorm, dropout, route, shortcut. */
void forward_network_gpu(network net, network_state state)
{
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        /* Zero this layer's gradient buffer before the forward pass. */
        if(l.delta_gpu){
            fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
        }
        if(l.type == CONVOLUTIONAL){
            forward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            forward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            forward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            forward_local_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            forward_detection_layer_gpu(l, state);
        } else if(l.type == REGION){
            forward_region_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            forward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            forward_rnn_layer_gpu(l, state);
        } else if(l.type == GRU){
            forward_gru_layer_gpu(l, state);
        } else if(l.type == CRNN){
            forward_crnn_layer_gpu(l, state);
        } else if(l.type == CROP){
            forward_crop_layer_gpu(l, state);
        } else if(l.type == COST){
            forward_cost_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            forward_softmax_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            forward_normalization_layer_gpu(l, state);
        } else if(l.type == BATCHNORM){
            forward_batchnorm_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            forward_maxpool_layer_gpu(l, state);
        } else if(l.type == REORG){
            forward_reorg_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            forward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            forward_dropout_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            forward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            forward_shortcut_layer_gpu(l, state);
        }
        /* Later revisions collapse the dispatch above into a single
         * per-layer function pointer call: l.forward_gpu(l, state); */
        state.input = l.output_gpu;
    }
}
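/* The function-pointer call noted above assumes each layer constructor
 * registers its own GPU kernels. A minimal sketch of that wiring, modeled
 * on darknet's make_convolutional_layer (an excerpt under assumptions,
 * not the full constructor): */
#ifdef GPU
    l.forward_gpu  = forward_convolutional_layer_gpu;
    l.backward_gpu = backward_convolutional_layer_gpu;
    l.update_gpu   = update_convolutional_layer_gpu;
#endif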
void backward_network_gpu(network net, network_state state)
{
    int i;
    float *original_input = state.input;
    float *original_delta = state.delta;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        layer l = net.layers[i];
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        } else {
            layer prev = net.layers[i-1];
            state.input = prev.output_gpu;
            state.delta = prev.delta_gpu;
        }
        if(l.type == CONVOLUTIONAL){
            backward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            backward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            backward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            backward_local_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            if(i != 0) backward_maxpool_layer_gpu(l, state);
        } else if(l.type == REORG){
            backward_reorg_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            if(i != 0) backward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            backward_dropout_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            backward_detection_layer_gpu(l, state);
        } else if(l.type == REGION){
            backward_region_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            backward_normalization_layer_gpu(l, state);
        } else if(l.type == BATCHNORM){
            backward_batchnorm_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            if(i != 0) backward_softmax_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            backward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            backward_rnn_layer_gpu(l, state);
        } else if(l.type == GRU){
            backward_gru_layer_gpu(l, state);
        } else if(l.type == CRNN){
            backward_crnn_layer_gpu(l, state);
        } else if(l.type == COST){
            backward_cost_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            backward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            backward_shortcut_layer_gpu(l, state);
        }
        /* Later revisions: l.backward_gpu(l, state); */
    }
}
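/* Gradient plumbing in the loop above: before layer i runs its backward
 * kernel, state.delta is pointed at layer i-1's delta_gpu buffer, so each
 * backward_*_layer_gpu call accumulates dL/d(input) directly into the
 * previous layer's gradient storage. The if(i != 0) guards skip writing
 * input gradients for the first layer, which has no delta buffer. */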
void update_network_gpu(network net)
{
    cuda_set_device(net.gpu_index);
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        if(l.type == CONVOLUTIONAL){
            update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == DECONVOLUTIONAL){
            /* Note: the deconvolutional update takes no batch argument in
             * this revision. */
            update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
        } else if(l.type == CONNECTED){
            update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == GRU){
            update_gru_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == RNN){
            update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == CRNN){
            update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == LOCAL){
            update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        }
        /* Later revisions replace this dispatch with:
         *   l.t = get_current_batch(net);
         *   if(l.update_gpu) l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
         */
    }
}
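/* How the three passes compose into one SGD step. A minimal sketch under
 * assumed names: train_step_gpu_sketch is hypothetical (darknet's real
 * driver is train_network_datum_gpu), and the caller must set state.truth
 * and state.train so the cost layer produces gradients. */
float train_step_gpu_sketch(network net, network_state state)
{
    forward_network_gpu(net, state);   /* activations and cost */
    backward_network_gpu(net, state);  /* gradients into each delta_gpu */
    update_network_gpu(net);           /* SGD with momentum and weight decay */
    return get_network_cost(net);      /* loss recorded by the cost layer */
}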
float train_networks(network *nets, int n, data d, int interval)
{
    int i;
    float sum = 0;
    pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
    float *errors = (float *) calloc(n, sizeof(float));
    /* One worker per network replica: each thread trains its copy on a
     * slice of the batch and writes its loss into errors[i]. */
    for(i = 0; i < n; ++i){
        data p = get_data_part(d, i, n);
        threads[i] = train_network_in_thread(nets[i], p, errors + i);
    }
    for(i = 0; i < n; ++i){
        pthread_join(threads[i], 0);
        //printf("%f\n", errors[i]);
        sum += errors[i];
    }
    //cudaDeviceSynchronize();
    if (get_current_batch(nets[0]) % interval == 0) {
        printf("Syncing... ");
        fflush(stdout);
        sync_nets(nets, n, interval);
        printf("Done!\n");
    }
    //cudaDeviceSynchronize();
    free(threads);
    free(errors);
    return (float)sum/(n);
}
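/* On the sync above: between syncs each replica trains independently on
 * its share of the data, so the weights drift apart; sync_nets reconciles
 * the n replicas (averaging their weights, as I understand darknet's
 * implementation), which is why it only needs to run every `interval`
 * batches rather than every step. */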
float *get_network_output_layer_gpu(network net, int i)
{
    layer l = net.layers[i];
    /* REGION layers already pull their output to the host during the
     * forward pass, so copy only for the other layer types. */
    if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
    return l.output;
}
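/* A sketch of the companion helper callers typically want: the output of
 * the last non-COST layer. Modeled on darknet's get_network_output_gpu;
 * treat the exact body as an assumption. */
float *get_network_output_gpu(network net)
{
    int i;
    for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
    return get_network_output_layer_gpu(net, i);
}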
float *network_predict_gpu(network net, float *input)
{
    cuda_set_device(net.gpu_index);
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;