#include "softmax_layer.h"
#include "dropout_layer.h"

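// Map a LAYER_TYPE value to its human-readable name; returns "none" for
// unrecognized types.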
char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case CONNECTED:
            return "connected";
        case MAXPOOL:
            return "maxpool";
        case SOFTMAX:
            return "softmax";
        case NORMALIZATION:
            return "normalization";
        case DROPOUT:
            return "dropout";
        case FREEWEIGHT:
            return "freeweight";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        default:
            break;
    }
    return "none";
}

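// Allocate an empty network with room for n layers at the given batch size.
// The caller is responsible for filling in net.types[i] and net.layers[i].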
network make_network(int n, int batch)
{
    network net;
    net.n = n;
    net.batch = batch;
    net.layers = calloc(net.n, sizeof(void *));
    net.types = calloc(net.n, sizeof(LAYER_TYPE));
    net.outputs = 0;
    net.output = 0;
    net.seen = 0;
#ifdef GPU
    net.input_cl = calloc(1, sizeof(cl_mem));
    net.truth_cl = calloc(1, sizeof(cl_mem));
    net.input_gpu = calloc(1, sizeof(float *));
    net.truth_gpu = calloc(1, sizeof(float *));
#endif
    return net;
}

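// Usage sketch for make_network(): callers fill the slots allocated above
// themselves. The layer pointers shown here are hypothetical placeholders;
// in practice they come from the individual layer constructors.
//
//     network net = make_network(2, 128);
//     net.types[0]  = CONVOLUTIONAL;
//     net.layers[0] = conv_layer_ptr;
//     net.types[1]  = SOFTMAX;
//     net.layers[1] = softmax_layer_ptr;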
        }
        else if(net.types[i] == FREEWEIGHT){
            // Freeweight layers only act during training; at test time they
            // are skipped and the previous layer's output passes through.
            if(!train) continue;
            freeweight_layer layer = *(freeweight_layer *)net.layers[i];
            forward_freeweight_layer(layer, input);
        }
        //char buff[256];
        //sprintf(buff, "layer %d", i);
        //cuda_compare(get_network_output_gpu_layer(net, i), input, get_network_output_size_layer(net, i)*net.batch, buff);
    }
}

        }
        else if(net.types[i] == CONNECTED){
            connected_layer layer = *(connected_layer *)net.layers[i];
            update_connected_layer(layer);
            //secret_update_connected_layer((connected_layer *)net.layers[i]);
        }
    }
}

            cost_layer *layer = (cost_layer *)net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == CROP){
            crop_layer *layer = (crop_layer *)net->layers[i];
            layer->batch = b;
        }
    }
}

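// Run a forward pass over `input` with training-time behavior disabled and
// return the network's output; dispatches to the GPU implementation when a
// GPU has been selected.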
float *network_predict(network net, float *input)
{
#ifdef GPU
    if(gpu_index >= 0) return network_predict_gpu(net, input);
#endif

    forward_network(net, input, 0, 0);