/* Excerpt; assumes darknet's network/layer headers plus <stdio.h>,
 * <stdlib.h>, and <time.h>. Forward pass over the network on the GPU. The
 * function header is missing from this excerpt; the signature below is
 * assumed from how net and input are used. */
void forward_network_gpu(network net, cl_mem input)
{
    int i;
    for(i = 0; i < net.n; ++i){
        clock_t time = clock();
        if(net.types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *)net.layers[i];
            forward_convolutional_layer_gpu(layer, input);
            input = layer.output_cl;
        }
        else if(net.types[i] == SOFTMAX){
            softmax_layer layer = *(softmax_layer *)net.layers[i];
            forward_softmax_layer_gpu(layer, input);
            input = layer.output_cl;
        }
        /* A CROP branch was commented out here in the original and is
         * truncated in this excerpt:
         *   else if(net.types[i] == CROP){
         *       crop_layer layer = *(crop_layer *)net.layers[i];
         *       ...
         */
        printf("%d %f\n", i, sec(clock()-time));  /* per-layer timing */
    }
}

/* Backward pass on the GPU. As with the forward pass, the function header is
 * missing from this excerpt and the signature is assumed. */
void backward_network_gpu(network net, cl_mem input)
{
    int i;
    cl_mem prev_input;
    cl_mem prev_delta;
    for(i = net.n-1; i >= 0; --i){
        clock_t time = clock();
        if(i == 0){
            /* The first layer propagates into the raw input; there is no
             * earlier delta buffer to write into. */
            prev_input = input;
            prev_delta = 0;
        }
        /* The branch that fetches prev_input and prev_delta from layer i-1,
         * and the CONVOLUTIONAL case, are missing from this excerpt. */
        if(net.types[i] == SOFTMAX){
            softmax_layer layer = *(softmax_layer *)net.layers[i];
            backward_softmax_layer_gpu(layer, prev_delta);
        }
        printf("back: %d %f\n", i, sec(clock()-time));
    }
}
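
/* Note: train_network_datum_gpu, called by both training loops below, is not
 * part of this excerpt. From its call sites it presumably uploads one batch
 * of examples, runs the forward and backward passes above, applies the
 * weight updates, and returns the batch loss; that description is an
 * inference from the call sites, not the missing body. */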

/* Train on n randomly sampled batches. The header and buffer allocation were
 * missing from this excerpt; they are reconstructed to mirror
 * train_network_data_gpu below, and the name is assumed from the CPU
 * train_network_sgd. Of the two batch-fetch calls in the original fragment,
 * the random one is kept. */
float train_network_sgd_gpu(network net, data d, int n)
{
    int batch = net.batch;
    float *X = calloc(batch*d.X.cols, sizeof(float));
    float *y = calloc(batch*d.y.cols, sizeof(float));

    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_random_batch(d, batch, X, y);
        float err = train_network_datum_gpu(net, X, y);
        sum += err;
    }
    free(X);
    free(y);
    return (float)sum/(n*batch);
}

float train_network_data_gpu(network net, data d, int n)
{
    int batch = net.batch;
    float *X = calloc(batch*d.X.cols, sizeof(float));
    float *y = calloc(batch*d.y.cols, sizeof(float));

    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        /* walk the dataset sequentially, one batch at a time */
        get_next_batch(d, batch, i*batch, X, y);
        float err = train_network_datum_gpu(net, X, y);
        sum += err;
    }
    /* The tail was truncated in this excerpt; completed to match
     * train_network_sgd_gpu above. */
    free(X);
    free(y);
    return (float)sum/(n*batch);
}
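
/* Hypothetical usage sketch, not from the original source: run one
 * sequential epoch over a dataset already in memory and report the average
 * per-example loss. The helper name train_one_epoch_gpu is invented here,
 * and it assumes d.X.rows divides into whole net.batch-sized batches. */
void train_one_epoch_gpu(network net, data d)
{
    int n = d.X.rows / net.batch;    /* number of full batches */
    float avg_loss = train_network_data_gpu(net, d, n);
    printf("epoch average loss: %f\n", avg_loss);
}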