/* Forward pass: walk the layers in order, dispatch each one to its GPU
   kernel, and point state.input at the layer's output so the next layer
   reads it. */
void forward_network_gpu(network net, network_state state)
{
    int i;
    for(i = 0; i < net.n; ++i){
        //clock_t time = clock();
        if(net.types[i] == CONVOLUTIONAL){
            forward_convolutional_layer_gpu(*(convolutional_layer *)net.layers[i], state);
        }
        else if(net.types[i] == CROP){
            forward_crop_layer_gpu(*(crop_layer *)net.layers[i], state);
        }
        /* ...branches for the remaining layer types elided... */
        state.input = get_network_output_gpu_layer(net, i);
        //cudaDeviceSynchronize();
        //printf("forw %d: %s %f\n", i, get_layer_string(net.types[i]), sec(clock() - time));
        //time = clock();
    }
}
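
/* Backward pass: walk the layers in reverse and dispatch each one to its
   gradient kernel, mirroring the forward dispatch above. */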
void backward_network_gpu(network net, network_state state)
{
    int i;
    for(i = net.n-1; i >= 0; --i){
        //clock_t time = clock();
        /* ...setup of state.input and state.delta for layer i elided... */
        if(net.types[i] == CONVOLUTIONAL){
            backward_convolutional_layer_gpu(*(convolutional_layer *)net.layers[i], state);
        }
        else if(net.types[i] == SOFTMAX){
            backward_softmax_layer_gpu(*(softmax_layer *)net.layers[i], state);
        }
        //cudaDeviceSynchronize();
        //printf("back %d: %s %f\n", i, get_layer_string(net.types[i]), sec(clock() - time));
        //time = clock();
    }
}
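
/* One training step on a single batch: stage x and y on the GPU, run the
   forward and backward passes, apply the weight update, and return the cost. */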
float train_network_datum_gpu(network net, float *x, float *y)
{
    network_state state;
    /* ...copying x and y into *net.input_gpu and *net.truth_gpu elided... */
    state.input = *net.input_gpu;
    state.truth = *net.truth_gpu;
    state.train = 1;
    //cudaDeviceSynchronize();
    //printf("trans %f\n", sec(clock() - time));
    //time = clock();
    forward_network_gpu(net, state);
    //cudaDeviceSynchronize();
    //printf("forw %f\n", sec(clock() - time));
    //time = clock();
    backward_network_gpu(net, state);
    //cudaDeviceSynchronize();
    //printf("back %f\n", sec(clock() - time));
    //time = clock();
    update_network_gpu(net);

    //print_letters(y, 50);
    //float *out = get_network_output_gpu(net);
    //print_letters(out, 50);
    //cudaDeviceSynchronize();
    //printf("updt %f\n", sec(clock() - time));
    //time = clock();
    float error = get_network_cost(net); /* assumed source of error, which this excerpt never sets */
    return error;
}

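/* Inference entry point: stage the input on the GPU, run a forward pass
   with training disabled, and return a host pointer to the output. */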
float *network_predict_gpu(network net, float *input)
{
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.input = cuda_make_array(input, size);
    state.truth = 0;
    state.train = 0; /* inference: no truth, no gradient bookkeeping */
    forward_network_gpu(net, state);
    float *out = get_network_output_gpu(net); /* pulls the output back to the host */
    cuda_free(state.input);
    return out;
}
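
/* A minimal usage sketch, not from the original file: one training step
   followed by a prediction on the same batch. example_step and the X/y
   pointers are hypothetical; net is assumed to be a fully constructed
   darknet network. */
void example_step(network net, float *X, float *y)
{
    float error = train_network_datum_gpu(net, X, y); /* forward + backward + update */
    printf("training cost: %f\n", error);

    float *out = network_predict_gpu(net, X); /* forward pass only, train == 0 */
    printf("first output: %f\n", out[0]);
}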