| | | #include "cuda_runtime.h" |
| | | #include "curand.h" |
| | | #include "cublas_v2.h" |
| | | |
| | | extern "C" { |
| | | #include <stdio.h> |
| | | #include <time.h> |
| | | #include <assert.h> |
| | | |
| | | #include "network.h" |
| | | #include "image.h" |
| | | #include "data.h" |
| | | #include "utils.h" |
| | | #include "parser.h" |
| | | |
| | | #include "crop_layer.h" |
| | | #include "connected_layer.h" |
| | | #include "rnn_layer.h" |
| | | #include "gru_layer.h" |
| | | #include "crnn_layer.h" |
| | | #include "detection_layer.h" |
| | | #include "region_layer.h" |
| | | #include "convolutional_layer.h" |
| | | #include "activation_layer.h" |
| | | #include "maxpool_layer.h" |
| | | #include "cost_layer.h" |
| | | #include "reorg_layer.h" |
| | | #include "avgpool_layer.h" |
| | | #include "normalization_layer.h" |
| | | #include "freeweight_layer.h" |
| | | #include "batchnorm_layer.h" |
| | | #include "cost_layer.h" |
| | | #include "local_layer.h" |
| | | #include "softmax_layer.h" |
| | | #include "dropout_layer.h" |
| | | #include "route_layer.h" |
| | | #include "shortcut_layer.h" |
| | | #include "blas.h" |
| | | } |
| | | |
| | | extern "C" float * get_network_output_gpu_layer(network net, int i); |
| | | extern "C" float * get_network_delta_gpu_layer(network net, int i); |
| | | float * get_network_output_gpu_layer(network net, int i); |
| | | float * get_network_delta_gpu_layer(network net, int i); |
| | | float * get_network_output_gpu(network net); |
| | | |
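// Forward pass on the GPU: each layer reads state.input, writes its activations to
// l.output_gpu, and that buffer becomes the next layer's input.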
void forward_network_gpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        // Clear this layer's delta so gradients from the previous batch do not accumulate.
        if(l.delta_gpu){
            fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
        }
        l.forward_gpu(l, state);
        if(net.wait_stream)
            cudaStreamSynchronize(get_cuda_stream());
        state.input = l.output_gpu;
    }
}

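// Backward pass on the GPU: walk the layers in reverse, pointing state.input and
// state.delta at the previous layer's buffers so each layer can propagate its gradient.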
void backward_network_gpu(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    float * original_input = state.input;
    float * original_delta = state.delta;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        layer l = net.layers[i];
        if (l.stopbackward) break;
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        }else{
            layer prev = net.layers[i-1];
            state.input = prev.output_gpu;
            state.delta = prev.delta_gpu;
        }
        l.backward_gpu(l, state);
    }
}

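// Apply accumulated weight updates for every layer that defines update_gpu, using the
// effective batch size (batch * subdivisions) and the current learning rate schedule.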
void update_network_gpu(network net)
{
    cuda_set_device(net.gpu_index);
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        l.t = get_current_batch(net);
        if(l.update_gpu){
            l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
        }
    }
}

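// Convenience accessors for a layer's GPU-resident output and delta buffers.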
float * get_network_output_gpu_layer(network net, int i)
{
    return net.layers[i].output_gpu;
}

float * get_network_delta_gpu_layer(network net, int i)
{
    return net.layers[i].delta_gpu;
}

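// Upload one batch of inputs and ground truth to the GPU (allocating the buffers on the
// first call), then run the forward and backward passes in training mode.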
void forward_backward_network_gpu(network net, float *x, float *y)
{
    network_state state;
    state.index = 0;
    state.net = net;
    int x_size = get_network_input_size(net)*net.batch;
    int y_size = get_network_output_size(net)*net.batch;
    if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
    if(!*net.input_gpu){
        *net.input_gpu = cuda_make_array(x, x_size);
        *net.truth_gpu = cuda_make_array(y, y_size);
    }else{
        cuda_push_array(*net.input_gpu, x, x_size);
        cuda_push_array(*net.truth_gpu, y, y_size);
    }
    state.input = *net.input_gpu;
    state.delta = 0;
    state.truth = *net.truth_gpu;
    state.train = 1;
#ifdef CUDNN_HALF
    int i;
    for (i = 0; i < net.n; ++i) {
        layer l = net.layers[i];
        // Only convolutional layers keep a half-precision copy of their weights.
        if (l.type == CONVOLUTIONAL && l.weights_gpu16) {
            cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
        }
    }
#endif
    forward_network_gpu(net, state);
    //cudaStreamSynchronize(get_cuda_stream());
    backward_network_gpu(net, state);
}

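// Train on a single batch: accumulate gradients, and apply the weight update only after
// net.subdivisions batches have been seen.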
float train_network_datum_gpu(network net, float *x, float *y)
{
    *net.seen += net.batch;
    forward_backward_network_gpu(net, x, y);
    float error = get_network_cost(net);
    if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);

    return error;
}

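// Per-GPU training job: each worker thread binds to its network's device and trains on
// its own slice of the data.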
typedef struct {
    network net;
    data d;
    float *err;
} train_args;

void *train_thread(void *ptr)
{
    train_args args = *(train_args*)ptr;
    free(ptr);
    cuda_set_device(args.net.gpu_index);
    *args.err = train_network(args.net, args.d);
    return 0;
}

pthread_t train_network_in_thread(network net, data d, float *err)
{
    pthread_t thread;
    train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
    ptr->net = net;
    ptr->d = d;
    ptr->err = err;
    if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
    return thread;
}

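// pull_updates/push_updates copy a layer's accumulated gradients between device and host
// so they can be merged on the CPU (see merge_updates/distribute_updates below).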
void pull_updates(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
        cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
        if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
        cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
    }
}

void push_updates(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
        cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
        if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
        cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
    }
}

void update_layer(layer l, network net)
{
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    l.t = get_current_batch(net);
    if(l.update_gpu){
        l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
    }
}

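// Host-side helpers used when synchronizing replicas: sum weights from several copies of
// a layer into a base copy, rescale them, and move them between host and device.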
void merge_weights(layer l, layer base)
{
    if (l.type == CONVOLUTIONAL) {
        axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
        axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
        if (l.scales) {
            axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
        }
    } else if(l.type == CONNECTED) {
        axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
        axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
    }
}

void scale_weights(layer l, float s)
{
    if (l.type == CONVOLUTIONAL) {
        scal_cpu(l.n, s, l.biases, 1);
        scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
        if (l.scales) {
            scal_cpu(l.n, s, l.scales, 1);
        }
    } else if(l.type == CONNECTED) {
        scal_cpu(l.outputs, s, l.biases, 1);
        scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
    }
}

void pull_weights(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_pull_array(l.biases_gpu, l.biases, l.n);
        cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
        if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
    } else if(l.type == CONNECTED){
        cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
        cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
    }
}

void push_weights(layer l)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.biases_gpu, l.biases, l.n);
        cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
        if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.biases_gpu, l.biases, l.outputs);
        cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
    }
}

void distribute_weights(layer l, layer base)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.biases_gpu, base.biases, l.n);
        cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
        if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.biases_gpu, base.biases, l.outputs);
        cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
    }
}

void merge_updates(layer l, layer base)
{
    if (l.type == CONVOLUTIONAL) {
        axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
        axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
        if (l.scale_updates) {
            axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
        }
    } else if(l.type == CONNECTED) {
        axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
        axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
    }
}

void distribute_updates(layer l, layer base)
{
    if(l.type == CONVOLUTIONAL){
        cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
        cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
        if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
    } else if(l.type == CONNECTED){
        cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
        cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
    }
}

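// Average the weights of layer j across all n replicas and push the result back to every GPU.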
void sync_layer(network *nets, int n, int j)
{
    //printf("Syncing layer %d\n", j);
    int i;
    network net = nets[0];
    layer base = net.layers[j];
    cuda_set_device(net.gpu_index);
    pull_weights(base);
    for (i = 1; i < n; ++i) {
        cuda_set_device(nets[i].gpu_index);
        layer l = nets[i].layers[j];
        pull_weights(l);
        merge_weights(l, base);
    }
    scale_weights(base, 1./n);
    for (i = 0; i < n; ++i) {
        cuda_set_device(nets[i].gpu_index);
        layer l = nets[i].layers[j];
        distribute_weights(l, base);
    }
    //printf("Done syncing layer %d\n", j);
}

typedef struct{
    network *nets;
    int n;
    int j;
} sync_args;

void *sync_layer_thread(void *ptr)
{
    sync_args args = *(sync_args*)ptr;
    sync_layer(args.nets, args.n, args.j);
    free(ptr);
    return 0;
}

pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
    pthread_t thread;
    sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
    ptr->nets = nets;
    ptr->n = n;
    ptr->j = j;
    if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
    return thread;
}

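// Synchronize all replicas: average each layer's weights in a separate thread and advance
// every network's "seen" counter to account for the batches processed on the other GPUs.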
void sync_nets(network *nets, int n, int interval)
{
    int j;
    int layers = nets[0].n;
    pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));

    *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
    for (j = 0; j < n; ++j){
        *nets[j].seen = *nets[0].seen;
    }
    for (j = 0; j < layers; ++j) {
        threads[j] = sync_layer_in_thread(nets, n, j);
    }
    for (j = 0; j < layers; ++j) {
        pthread_join(threads[j], 0);
    }
    free(threads);
}

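// Multi-GPU training step: split the batch across replicas, train them in parallel threads,
// and periodically average their weights with sync_nets. Returns the mean loss.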
float train_networks(network *nets, int n, data d, int interval)
{
    int i;
    int batch = nets[0].batch;
    int subdivisions = nets[0].subdivisions;
    assert(batch * subdivisions * n == d.X.rows);
    pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
    float *errors = (float *) calloc(n, sizeof(float));

    float sum = 0;
    for(i = 0; i < n; ++i){
        data p = get_data_part(d, i, n);
        threads[i] = train_network_in_thread(nets[i], p, errors + i);
    }
    for(i = 0; i < n; ++i){
        pthread_join(threads[i], 0);
        //printf("%f\n", errors[i]);
        sum += errors[i];
    }
    //cudaDeviceSynchronize();
    if (get_current_batch(nets[0]) % interval == 0) {
        printf("Syncing... ");
        fflush(stdout);
        sync_nets(nets, n, interval);
        printf("Done!\n");
    }
    //cudaDeviceSynchronize();
    free(threads);
    free(errors);
    return (float)sum/(n);
}

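// Pull a layer's output from the GPU to its host buffer and return it; REGION layers are
// not pulled here, presumably because their forward pass already fills the host buffer.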
float *get_network_output_layer_gpu(network net, int i)
{
    layer l = net.layers[i];
    if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
    return l.output;
}

float *get_network_output_gpu(network net)
{
    int i;
    for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
    return get_network_output_layer_gpu(net, i);
}

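// Inference entry point: upload the input, run a forward pass with train = 0, and return
// a host pointer to the final (non-COST) layer's output.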
float *network_predict_gpu(network net, float *input)
{
    if (net.gpu_index != cuda_get_device())
        cuda_set_device(net.gpu_index);
    int size = get_network_input_size(net) * net.batch;
    network_state state;
    state.index = 0;
    state.net = net;
    state.input = cuda_make_array(input, size);
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    forward_network_gpu(net, state);
    float *out = get_network_output_gpu(net);
    cuda_free(state.input);
    return out;
}