| | |
| | | COST_TYPE get_cost_type(char *s) |
| | | { |
| | | if (strcmp(s, "sse")==0) return SSE; |
| | | if (strcmp(s, "detection")==0) return DETECTION; |
| | | fprintf(stderr, "Couldn't find activation function %s, going with SSE\n", s); |
| | | return SSE; |
| | | } |
| | |
| | | switch(a){ |
| | | case SSE: |
| | | return "sse"; |
| | | case DETECTION: |
| | | return "detection"; |
| | | } |
| | | return "sse"; |
| | | } |
| | |
    // NOTE(review): orphaned tail of make_cost_layer() — the function's
    // signature and body above were lost in a bad merge; restore the full
    // constructor from upstream before this file can compile.
    return layer;
}
| | | |
| | | void forward_cost_layer(cost_layer layer, float *input, float *truth) |
| | | void forward_cost_layer(cost_layer layer, network_state state) |
| | | { |
| | | if (!truth) return; |
| | | copy_cpu(layer.batch*layer.inputs, truth, 1, layer.delta, 1); |
| | | axpy_cpu(layer.batch*layer.inputs, -1, input, 1, layer.delta, 1); |
| | | if(layer.type == DETECTION){ |
| | | int i; |
| | | for(i = 0; i < layer.batch*layer.inputs; ++i){ |
| | | if((i%25) && !truth[(i/25)*25]) layer.delta[i] = 0; |
| | | } |
| | | } |
| | | if (!state.truth) return; |
| | | copy_cpu(layer.batch*layer.inputs, state.truth, 1, layer.delta, 1); |
| | | axpy_cpu(layer.batch*layer.inputs, -1, state.input, 1, layer.delta, 1); |
| | | *(layer.output) = dot_cpu(layer.batch*layer.inputs, layer.delta, 1, layer.delta, 1); |
| | | //printf("cost: %f\n", *layer.output); |
| | | } |
| | | |
| | | void backward_cost_layer(const cost_layer layer, float *input, float *delta) |
| | | void backward_cost_layer(const cost_layer layer, network_state state) |
| | | { |
| | | copy_cpu(layer.batch*layer.inputs, layer.delta, 1, delta, 1); |
| | | copy_cpu(layer.batch*layer.inputs, layer.delta, 1, state.delta, 1); |
| | | } |
| | | |
| | | #ifdef GPU |
| | | |
| | | void forward_cost_layer_gpu(cost_layer layer, float * input, float * truth) |
| | | void pull_cost_layer(cost_layer layer) |
| | | { |
| | | if (!truth) return; |
| | | cuda_pull_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs); |
| | | } |
| | | |
| | | copy_ongpu(layer.batch*layer.inputs, truth, 1, layer.delta_gpu, 1); |
| | | axpy_ongpu(layer.batch*layer.inputs, -1, input, 1, layer.delta_gpu, 1); |
| | | void push_cost_layer(cost_layer layer) |
| | | { |
| | | cuda_push_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs); |
| | | } |
| | | |
| | | if(layer.type==DETECTION){ |
| | | mask_ongpu(layer.inputs*layer.batch, layer.delta_gpu, truth, 25); |
| | | } |
| | | void forward_cost_layer_gpu(cost_layer layer, network_state state) |
| | | { |
| | | if (!state.truth) return; |
| | | |
| | | copy_ongpu(layer.batch*layer.inputs, state.truth, 1, layer.delta_gpu, 1); |
| | | axpy_ongpu(layer.batch*layer.inputs, -1, state.input, 1, layer.delta_gpu, 1); |
| | | |
| | | cuda_pull_array(layer.delta_gpu, layer.delta, layer.batch*layer.inputs); |
| | | *(layer.output) = dot_cpu(layer.batch*layer.inputs, layer.delta, 1, layer.delta, 1); |
| | | //printf("cost: %f\n", *layer.output); |
| | | } |
| | | |
| | | void backward_cost_layer_gpu(const cost_layer layer, float * input, float * delta) |
| | | void backward_cost_layer_gpu(const cost_layer layer, network_state state) |
| | | { |
| | | copy_ongpu(layer.batch*layer.inputs, layer.delta_gpu, 1, delta, 1); |
| | | copy_ongpu(layer.batch*layer.inputs, layer.delta_gpu, 1, state.delta, 1); |
| | | } |
| | | #endif |
| | | |