#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "deconvolutional_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"

#include "blas.h"
}   // end of the extern "C" block wrapping the C headers

#ifdef OPENCV
#include "opencv2/highgui/highgui_c.h"
#endif

float *get_network_output_gpu_layer(network net, int i);
float *get_network_delta_gpu_layer(network net, int i);
float *get_network_output_gpu(network net);
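
/* Forward pass: runs every layer in order on the GPU. Each layer's delta
 * buffer (when allocated) is zeroed before its forward kernel runs, and its
 * output becomes the input of the next layer. */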
void forward_network_gpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        if(l.delta_gpu){
            fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
        }
        if(l.type == CONVOLUTIONAL){
            forward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            forward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            forward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            forward_local_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            forward_detection_layer_gpu(l, state);
        } else if(l.type == REGION){
            forward_region_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            forward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            forward_rnn_layer_gpu(l, state);
        } else if(l.type == GRU){
            forward_gru_layer_gpu(l, state);
        } else if(l.type == CRNN){
            forward_crnn_layer_gpu(l, state);
        } else if(l.type == CROP){
            forward_crop_layer_gpu(l, state);
        } else if(l.type == COST){
            forward_cost_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            forward_softmax_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            forward_normalization_layer_gpu(l, state);
        } else if(l.type == BATCHNORM){
            forward_batchnorm_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            forward_maxpool_layer_gpu(l, state);
        } else if(l.type == REORG){
            forward_reorg_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            forward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            forward_dropout_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            forward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            forward_shortcut_layer_gpu(l, state);
        }
        if(net.wait_stream)
            cudaStreamSynchronize(get_cuda_stream());
        state.input = l.output_gpu;
        /*
        cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
        if (l.out_w >= 1 && l.out_h >= 1 && l.c >= 3) {
            int j;
            for (j = 0; j < l.out_c; ++j) {
                image img = make_image(l.out_w, l.out_h, 3);
                memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
                char buff[256];
                sprintf(buff, "layer-%d slice-%d", i, j);
                show_image(img, buff);
            }
            cvWaitKey(0); // wait for a key press in the console
            cvDestroyAllWindows();
        }
        */
    }
}

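/* Backward pass: walks the layers in reverse. For layer i > 0, state.input
 * and state.delta are pointed at layer i-1's output and delta buffers, so
 * each layer accumulates its input gradient where the layer below expects it. */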
void backward_network_gpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    float *original_input = state.input;
    float *original_delta = state.delta;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        layer l = net.layers[i];
        if (l.stopbackward) break;
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        } else {
            layer prev = net.layers[i-1];
            state.input = prev.output_gpu;
            state.delta = prev.delta_gpu;
        }
        if(l.type == CONVOLUTIONAL){
            backward_convolutional_layer_gpu(l, state);
        } else if(l.type == DECONVOLUTIONAL){
            backward_deconvolutional_layer_gpu(l, state);
        } else if(l.type == ACTIVE){
            backward_activation_layer_gpu(l, state);
        } else if(l.type == LOCAL){
            backward_local_layer_gpu(l, state);
        } else if(l.type == MAXPOOL){
            if(i != 0) backward_maxpool_layer_gpu(l, state);
        } else if(l.type == REORG){
            backward_reorg_layer_gpu(l, state);
        } else if(l.type == AVGPOOL){
            if(i != 0) backward_avgpool_layer_gpu(l, state);
        } else if(l.type == DROPOUT){
            backward_dropout_layer_gpu(l, state);
        } else if(l.type == DETECTION){
            backward_detection_layer_gpu(l, state);
        } else if(l.type == REGION){
            backward_region_layer_gpu(l, state);
        } else if(l.type == NORMALIZATION){
            backward_normalization_layer_gpu(l, state);
        } else if(l.type == BATCHNORM){
            backward_batchnorm_layer_gpu(l, state);
        } else if(l.type == SOFTMAX){
            if(i != 0) backward_softmax_layer_gpu(l, state);
        } else if(l.type == CONNECTED){
            backward_connected_layer_gpu(l, state);
        } else if(l.type == RNN){
            backward_rnn_layer_gpu(l, state);
        } else if(l.type == GRU){
            backward_gru_layer_gpu(l, state);
        } else if(l.type == CRNN){
            backward_crnn_layer_gpu(l, state);
        } else if(l.type == COST){
            backward_cost_layer_gpu(l, state);
        } else if(l.type == ROUTE){
            backward_route_layer_gpu(l, net);
        } else if(l.type == SHORTCUT){
            backward_shortcut_layer_gpu(l, state);
        }
    }
}

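/* Weight update: applied once per optimizer step. The effective batch size is
 * batch*subdivisions images, and the learning rate comes from the schedule in
 * get_current_rate(). */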
void update_network_gpu(network net)
{
    cuda_set_device(net.gpu_index);
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        l.t = get_current_batch(net); // timestep for Adam-style updates
        if(l.type == CONVOLUTIONAL){
            update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == DECONVOLUTIONAL){
            update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
        } else if(l.type == CONNECTED){
            update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == GRU){
            update_gru_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == RNN){
            update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == CRNN){
            update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        } else if(l.type == LOCAL){
            update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
        }
    }
}

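/* One forward/backward step on a batch already resident on the host: the
 * input and truth arrays are pushed to the GPU, then the network runs forward
 * and backward. CUDNN_HALF builds refresh the fp16 weight copies first. */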
void forward_backward_network_gpu(network net, float *x, float *y)
{
    network_state state;
    state.index = 0;
    state.net = net;
    // push the host input and truth into the network's persistent GPU buffers
    cuda_push_array(*net.input_gpu, x, get_network_input_size(net)*net.batch);
    cuda_push_array(*net.truth_gpu, y, get_network_output_size(net)*net.batch);
    state.input = *net.input_gpu;
    state.delta = 0;
    state.truth = *net.truth_gpu;
    state.train = 1;
#ifdef CUDNN_HALF
    int i;
    for (i = 0; i < net.n; ++i) {
        layer l = net.layers[i];
        // only convolutional layers keep an fp16 copy of their weights
        if (l.type == CONVOLUTIONAL && l.weights_gpu && l.weights_gpu16) {
            cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
        }
    }
#endif
    forward_network_gpu(net, state);
    //cudaStreamSynchronize(get_cuda_stream());
    backward_network_gpu(net, state);
}

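/* Updates a single layer in place, using the same rule as update_network_gpu. */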
void update_layer(layer l, network net)
{
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    l.t = get_current_batch(net); // timestep for Adam-style updates
    if(l.type == CONVOLUTIONAL){
        update_convolutional_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
    } else if(l.type == DECONVOLUTIONAL){
        update_deconvolutional_layer_gpu(l, rate, net.momentum, net.decay);
    } else if(l.type == CONNECTED){
        update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
    } else if(l.type == RNN){
        update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
    } else if(l.type == GRU){
        update_gru_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
    } else if(l.type == CRNN){
        update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
    } else if(l.type == LOCAL){
        update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
    }
}

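/* Multi-GPU training: each network trains its share of the batch in its own
 * thread, and every `interval` batches the weights are averaged across
 * networks by sync_nets(). Returns the mean loss over all threads. */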
float train_networks(network *nets, int n, data d, int interval)
{
    int i;
    pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
    float *errors = (float *) calloc(n, sizeof(float));
    float sum = 0;
    // give each network its slice of the batch and train it in its own thread
    for(i = 0; i < n; ++i){
        data p = get_data_part(d, i, n);
        threads[i] = train_network_in_thread(nets[i], p, errors + i);
    }
    for(i = 0; i < n; ++i){
        pthread_join(threads[i], 0);
        printf("%f\n", errors[i]);
        sum += errors[i];
    }
    //cudaDeviceSynchronize();
    if (get_current_batch(nets[0]) % interval == 0) {
        printf("Syncing... ");
        fflush(stdout);
        sync_nets(nets, n, interval);
        printf("Done!\n");
    }
    //cudaDeviceSynchronize();
    free(threads);
    free(errors);
    return (float)sum/(n);
}
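
/* Pulls a layer's output from GPU to host memory. REGION layers already copy
 * their output to the host inside their forward pass, so they are skipped. */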
float *get_network_output_layer_gpu(network net, int i)
{
    layer l = net.layers[i];
    if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
    return l.output;
}
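/* Inference entry point: switches to the network's GPU if needed and runs a
 * forward pass over one batch of input. */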
float *network_predict_gpu(network net, float *input)
{
    if (net.gpu_index != cuda_get_device())
        cuda_set_device(net.gpu_index);
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;