#include "connected_layer.h"   /* assumed project header: connected_layer struct, ACTIVATION, activate_array, gradient */
#include "mini_blas.h"         /* assumed project header: gemm */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

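/*
 * Fully connected (dense) layer.
 *   forward:  output = activation(input * weights + biases), with optional dropout while training
 *   backward: accumulates bias and weight gradients and propagates delta to the previous layer
 * make_connected_layer allocates the layer and its buffers for a given batch size.
 */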
connected_layer *make_connected_layer(int batch, int inputs, int outputs, float dropout, ACTIVATION activation)
{
    fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
    int i;
    connected_layer *layer = calloc(1, sizeof(connected_layer));

    layer->inputs = inputs;
    layer->outputs = outputs;
    layer->batch = batch;
    layer->dropout = dropout;

    layer->output = calloc(batch*outputs, sizeof(float));
    layer->delta = calloc(batch*outputs, sizeof(float));

    layer->weights = calloc(inputs*outputs, sizeof(float));
    layer->weight_updates = calloc(inputs*outputs, sizeof(float));
    layer->biases = calloc(outputs, sizeof(float));
    layer->bias_updates = calloc(outputs, sizeof(float));

    /* small random weight initialization; the exact scheme here is an assumption,
       the original initialization is not shown */
    float scale = 1./inputs;
    for(i = 0; i < inputs*outputs; ++i){
        layer->weights[i] = scale*(2.*rand()/RAND_MAX - 1.);
    }

    layer->activation = activation;
    return layer;
}

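/*
 * Forward pass: copy the biases into each row of the batch, do one GEMM for
 * the whole batch, then apply the activation (and dropout when training).
 */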
void forward_connected_layer(connected_layer layer, float *input, int train)
{
    int i;
    /* dropout is only applied while training */
    if(!train) layer.dropout = 0;
    /* start each row of the batch from the biases */
    for(i = 0; i < layer.batch; ++i){
        memcpy(layer.output+i*layer.outputs, layer.biases, layer.outputs*sizeof(float));
    }
    int m = layer.batch;
    int k = layer.inputs;
    int n = layer.outputs;

    float *a = input;
    float *b = layer.weights;
    float *c = layer.output;
    /* output(batch x outputs) += input(batch x inputs) * weights(inputs x outputs) */
    gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    activate_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.dropout);
}

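/*
 * Backward pass: scale delta by the activation gradient, accumulate bias and
 * weight gradients, then (if the previous layer has a delta buffer) propagate
 * the error back through the weights.
 */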
void backward_connected_layer(connected_layer layer, float *input, float *delta)
{
    int i;
    for(i = 0; i < layer.outputs*layer.batch; ++i){
        layer.delta[i] *= gradient(layer.output[i], layer.activation);
        /* output is laid out [batch][outputs], so i%outputs picks the bias index */
        layer.bias_updates[i%layer.outputs] += layer.delta[i];
    }
    int m = layer.inputs;
    int k = layer.batch;
    int n = layer.outputs;

    float *a = input;
    float *b = layer.delta;
    float *c = layer.weight_updates;
    /* weight_updates(inputs x outputs) += input^T(inputs x batch) * delta(batch x outputs) */
    gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);

    m = layer.batch;
    k = layer.outputs;
    n = layer.inputs;

    a = layer.delta;
    b = layer.weights;
    c = delta;

    /* delta_prev(batch x inputs) = delta(batch x outputs) * weights^T(outputs x inputs) */
    if(c) gemm(0,1,m,n,k,1,a,k,b,k,0,c,n);
}
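
/*
 * Usage sketch (not part of the original file): one hypothetical training step
 * for a single connected layer. The buffer names, the RELU activation value,
 * and the squared-error delta below are assumptions for illustration only; the
 * real training loop lives elsewhere in the project.
 */
#ifdef CONNECTED_LAYER_EXAMPLE
void connected_layer_example()
{
    int batch = 4, inputs = 10, outputs = 2;
    connected_layer *layer = make_connected_layer(batch, inputs, outputs, 0, RELU);

    float *net_input = calloc(batch*inputs, sizeof(float));
    float *truth = calloc(batch*outputs, sizeof(float));
    float *prev_delta = calloc(batch*inputs, sizeof(float));

    /* forward in training mode so dropout (if any) is applied */
    forward_connected_layer(*layer, net_input, 1);

    /* squared-error gradient written into the layer's delta buffer */
    int i;
    for(i = 0; i < batch*outputs; ++i){
        layer->delta[i] = truth[i] - layer->output[i];
    }

    /* accumulates weight/bias gradients and fills prev_delta */
    backward_connected_layer(*layer, net_input, prev_delta);

    free(net_input);
    free(truth);
    free(prev_delta);
}
#endif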