probably how maxpool layers should be
29 files modified
3 files added
| | |
| | | CC=gcc |
| | | GPU=0 |
| | | COMMON=-Wall -Werror -Wfatal-errors `pkg-config --cflags opencv` -I/usr/local/cuda/include/ |
| | | COMMON=-Wall -Wfatal-errors `pkg-config --cflags opencv` -I/usr/local/cuda/include/ |
| | | ifeq ($(GPU), 1) |
| | | COMMON+=-DGPU |
| | | else |
| | |
| | | EXEC=cnn |
| | | OBJDIR=./obj/ |
| | | |
| | | OBJ=network.o image.o cnn.o connected_layer.o maxpool_layer.o activations.o list.o option_list.o parser.o utils.o data.o matrix.o softmax_layer.o mini_blas.o convolutional_layer.o gemm.o normalization_layer.o opencl.o im2col.o col2im.o axpy.o |
| | | OBJ=network.o image.o cnn.o connected_layer.o maxpool_layer.o activations.o list.o option_list.o parser.o utils.o data.o matrix.o softmax_layer.o mini_blas.o convolutional_layer.o gemm.o normalization_layer.o opencl.o im2col.o col2im.o axpy.o dropout_layer.o |
| | | OBJS = $(addprefix $(OBJDIR), $(OBJ)) |
| | | |
| | | all: $(EXEC) |
| | |
| | | float ramp_activate(float x){return x*(x>0)+.1*x;} |
| | | float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);} |
| | | |
| | | float activate(float x, ACTIVATION a, float dropout) |
| | | float activate(float x, ACTIVATION a) |
| | | { |
| | | if(dropout && (float)rand()/RAND_MAX < dropout) return 0; |
| | | switch(a){ |
| | | case LINEAR: |
| | | return linear_activate(x)/(1-dropout); |
| | | return linear_activate(x); |
| | | case SIGMOID: |
| | | return sigmoid_activate(x)/(1-dropout); |
| | | return sigmoid_activate(x); |
| | | case RELU: |
| | | return relu_activate(x)/(1-dropout); |
| | | return relu_activate(x); |
| | | case RAMP: |
| | | return ramp_activate(x)/(1-dropout); |
| | | return ramp_activate(x); |
| | | case TANH: |
| | | return tanh_activate(x)/(1-dropout); |
| | | return tanh_activate(x); |
| | | } |
| | | return 0; |
| | | } |
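| | |
The /(1-dropout) scaling removed above is inverted dropout folded into the activation: a unit survives with probability 1-p, so dividing survivors by (1-p) keeps the expected activation unchanged, E[y] = (1-p)*a/(1-p) + p*0 = a. This commit moves that logic out of activate() and into the new dedicated dropout layer (dropout_layer.c, below).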
| | | |
| | | void activate_array(float *x, const int n, const ACTIVATION a, float dropout) |
| | | void activate_array(float *x, const int n, const ACTIVATION a) |
| | | { |
| | | int i; |
| | | for(i = 0; i < n; ++i){ |
| | | x[i] = activate(x[i], a, dropout); |
| | | x[i] = activate(x[i], a); |
| | | } |
| | | } |
| | | |
| | |
| | | } |
| | | |
| | | |
| | | void activate_array_ongpu(cl_mem x, int n, ACTIVATION a, float dropout) |
| | | void activate_array_ongpu(cl_mem x, int n, ACTIVATION a) |
| | | { |
| | | cl_setup(); |
| | | cl_kernel kernel = get_activation_kernel(); |
| | |
| | | cl.error = clSetKernelArg(kernel, i++, sizeof(x), (void*) &x); |
| | | cl.error = clSetKernelArg(kernel, i++, sizeof(n), (void*) &n); |
| | | cl.error = clSetKernelArg(kernel, i++, sizeof(a), (void*) &a); |
| | | cl.error = clSetKernelArg(kernel, i++, sizeof(dropout), |
| | | (void*) &dropout); |
| | | check_error(cl); |
| | | |
| | | size_t gsize = n; |
| | |
| | | float ramp_activate(float x){return x*(x>0)+.1*x;} |
| | | float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);} |
| | | |
| | | float activate(float x, ACTIVATION a, float dropout) |
| | | float activate(float x, ACTIVATION a) |
| | | { |
| | | //if((float)rand()/RAND_MAX < dropout) return 0; |
| | | switch(a){ |
| | | case LINEAR: |
| | | return linear_activate(x)/(1-dropout); |
| | | return linear_activate(x); |
| | | case SIGMOID: |
| | | return sigmoid_activate(x)/(1-dropout); |
| | | return sigmoid_activate(x); |
| | | case RELU: |
| | | return relu_activate(x)/(1-dropout); |
| | | return relu_activate(x); |
| | | case RAMP: |
| | | return ramp_activate(x)/(1-dropout); |
| | | return ramp_activate(x); |
| | | case TANH: |
| | | return tanh_activate(x)/(1-dropout); |
| | | return tanh_activate(x); |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | __kernel void activate_array(__global float *x, |
| | | const int n, const ACTIVATION a, const float dropout) |
| | | const int n, const ACTIVATION a) |
| | | { |
| | | int i = get_global_id(0); |
| | | x[i] = activate(x[i], a, dropout); |
| | | x[i] = activate(x[i], a); |
| | | } |
| | |
| | | ACTIVATION get_activation(char *s); |
| | | |
| | | char *get_activation_string(ACTIVATION a); |
| | | float activate(float x, ACTIVATION a, float dropout); |
| | | float activate(float x, ACTIVATION a); |
| | | float gradient(float x, ACTIVATION a); |
| | | void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta); |
| | | void activate_array(float *x, const int n, const ACTIVATION a, float dropout); |
| | | void activate_array(float *x, const int n, const ACTIVATION a); |
| | | #ifdef GPU |
| | | void activate_array_ongpu(cl_mem x, int n, ACTIVATION a, float dropout); |
| | | void activate_array_ongpu(cl_mem x, int n, ACTIVATION a); |
| | | #endif |
| | | |
| | | #endif |
| | |
| | | int i; |
| | | clock_t start = clock(), end; |
| | | for(i = 0; i < 1000; ++i){ |
| | | im2col_cpu(dog.data, dog.c, dog.h, dog.w, size, stride, 0, matrix); |
| | | im2col_cpu(dog.data,1, dog.c, dog.h, dog.w, size, stride, 0, matrix); |
| | | gemm(0,0,n,mw,mh,1,filters,mh,matrix,mw,1,edge.data,mw); |
| | | } |
| | | end = clock(); |
| | |
| | | int size = 3; |
| | | float eps = .00000001; |
| | | image test = make_random_image(5,5, 1); |
| | | convolutional_layer layer = *make_convolutional_layer(1,test.h,test.w,test.c, n, size, stride, 0, RELU); |
| | | convolutional_layer layer = *make_convolutional_layer(1,test.h,test.w,test.c, n, size, stride, 0, RELU,0,0,0); |
| | | image out = get_convolutional_image(layer); |
| | | float **jacobian = calloc(test.h*test.w*test.c, sizeof(float *));
| | | |
| | |
| | | |
| | | void test_parser() |
| | | { |
| | | network net = parse_network_cfg("test_parser.cfg"); |
| | | float input[1]; |
| | | int count = 0; |
| | | |
| | | float avgerr = 0; |
| | | while(++count < 100000000){ |
| | | float v = ((float)rand()/RAND_MAX); |
| | | float truth = v*v; |
| | | input[0] = v; |
| | | forward_network(net, input, 1); |
| | | float *out = get_network_output(net); |
| | | float *delta = get_network_delta(net); |
| | | float err = pow((out[0]-truth),2.); |
| | | avgerr = .99 * avgerr + .01 * err; |
| | | if(count % 1000000 == 0) printf("%f %f :%f AVG %f \n", truth, out[0], err, avgerr); |
| | | delta[0] = truth - out[0]; |
| | | backward_network(net, input, &truth); |
| | | update_network(net, .001,0,0); |
| | | } |
| | | network net = parse_network_cfg("cfg/test_parser.cfg"); |
| | | save_network(net, "cfg/test_parser_1.cfg"); |
| | | network net2 = parse_network_cfg("cfg/test_parser_1.cfg"); |
| | | save_network(net2, "cfg/test_parser_2.cfg"); |
| | | } |
| | | |
| | | void test_data() |
| | |
| | | //scale_data_rows(train, 1./255.); |
| | | normalize_data_rows(train); |
| | | clock_t start = clock(), end; |
| | | float loss = train_network_sgd(net, train, 1000, lr, momentum, decay); |
| | | float loss = train_network_sgd(net, train, 1000); |
| | | end = clock(); |
| | | printf("%d: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", i, loss, (float)(end-start)/CLOCKS_PER_SEC, lr, momentum, decay); |
| | | free_data(train); |
| | |
| | | |
| | | void test_cifar10() |
| | | { |
| | | data test = load_cifar10_data("images/cifar10/test_batch.bin"); |
| | | scale_data_rows(test, 1./255); |
| | | srand(222222); |
| | | network net = parse_network_cfg("cfg/cifar10.cfg"); |
| | | //data test = load_cifar10_data("data/cifar10/test_batch.bin"); |
| | | int count = 0; |
| | | float lr = .000005; |
| | | float momentum = .99; |
| | | float decay = 0.001; |
| | | decay = 0; |
| | | int batch = 10000; |
| | | int iters = 10000/net.batch; |
| | | data train = load_all_cifar10(); |
| | | while(++count <= 10000){ |
| | | char buff[256]; |
| | | sprintf(buff, "images/cifar10/data_batch_%d.bin", rand()%5+1); |
| | | data train = load_cifar10_data(buff); |
| | | scale_data_rows(train, 1./255); |
| | | train_network_sgd(net, train, batch, lr, momentum, decay); |
| | | //printf("%5f %5f\n",(double)count*batch/train.X.rows, loss); |
| | | clock_t start = clock(), end; |
| | | float loss = train_network_sgd(net, train, iters); |
| | | end = clock(); |
| | | //visualize_network(net); |
| | | //cvWaitKey(1000); |
| | | |
| | | float test_acc = network_accuracy(net, test); |
| | | printf("%5f %5f\n",(double)count*batch/train.X.rows/5, 1-test_acc); |
| | | free_data(train); |
| | | //float test_acc = network_accuracy(net, test); |
| | | //printf("%d: Loss: %f, Test Acc: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", count, loss, test_acc,(float)(end-start)/CLOCKS_PER_SEC, net.learning_rate, net.momentum, net.decay); |
| | | printf("%d: Loss: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", count, loss, (float)(end-start)/CLOCKS_PER_SEC, net.learning_rate, net.momentum, net.decay); |
| | | } |
| | | |
| | | free_data(train); |
| | | } |
| | | |
| | | void test_vince() |
| | |
| | | normalize_data_rows(train); |
| | | |
| | | int count = 0; |
| | | float lr = .00005; |
| | | float momentum = .9; |
| | | float decay = 0.0001; |
| | | decay = 0; |
| | | //float lr = .00005; |
| | | //float momentum = .9; |
| | | //float decay = 0.0001; |
| | | //decay = 0; |
| | | int batch = 10000; |
| | | while(++count <= 10000){ |
| | | float loss = train_network_sgd(net, train, batch, lr, momentum, decay); |
| | | float loss = train_network_sgd(net, train, batch); |
| | | printf("%5f %5f\n",(double)count*batch/train.X.rows, loss); |
| | | } |
| | | } |
| | | |
| | | void test_nist_single() |
| | | { |
| | | srand(222222); |
| | | network net = parse_network_cfg("cfg/nist.cfg"); |
| | | data train = load_categorical_data_csv("data/mnist/mnist_tiny.csv", 0, 10); |
| | | normalize_data_rows(train); |
| | | float loss = train_network_sgd(net, train, 5); |
| | | printf("Loss: %f, LR: %f, Momentum: %f, Decay: %f\n", loss, net.learning_rate, net.momentum, net.decay); |
| | | |
| | | } |
| | | |
| | | void test_nist() |
| | | { |
| | | srand(222222); |
| | | network net = parse_network_cfg("cfg/nist.cfg"); |
| | | data train = load_categorical_data_csv("data/mnist/mnist_train.csv", 0, 10); |
| | | data test = load_categorical_data_csv("data/mnist/mnist_test.csv",0,10); |
| | | normalize_data_rows(train); |
| | | normalize_data_rows(test); |
| | | translate_data_rows(train, -144); |
| | | scale_data_rows(train, 1./128); |
| | | translate_data_rows(test, -144); |
| | | scale_data_rows(test, 1./128); |
| | | //randomize_data(train); |
| | | int count = 0; |
| | | float lr = .0001; |
| | | float momentum = .9; |
| | | float decay = 0.0001; |
| | | //clock_t start = clock(), end; |
| | | int iters = 1000; |
| | | while(++count <= 10){ |
| | | int iters = 10000/net.batch; |
| | | while(++count <= 100){ |
| | | clock_t start = clock(), end; |
| | | float loss = train_network_sgd(net, train, iters, lr, momentum, decay); |
| | | float loss = train_network_sgd(net, train, iters); |
| | | end = clock(); |
| | | float test_acc = network_accuracy(net, test); |
| | | //float test_acc = 0; |
| | | printf("%d: Loss: %f, Test Acc: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", count, loss, test_acc,(float)(end-start)/CLOCKS_PER_SEC, lr, momentum, decay); |
| | | printf("%d: Loss: %f, Test Acc: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", count, loss, test_acc,(float)(end-start)/CLOCKS_PER_SEC, net.learning_rate, net.momentum, net.decay); |
| | | //save_network(net, "cfg/nist_basic_trained.cfg"); |
| | | |
| | | //printf("%5d Training Loss: %lf, Params: %f %f %f, ",count*1000, loss, lr, momentum, decay); |
| | | //end = clock(); |
| | |
| | | float decay = .01; |
| | | network net = parse_network_cfg("nist.cfg"); |
| | | while(++count <= 15){ |
| | | float acc = train_network_sgd(net, train, train.X.rows, lr, momentum, decay); |
| | | float acc = train_network_sgd(net, train, train.X.rows); |
| | | printf("Training Accuracy: %lf Learning Rate: %f Momentum: %f Decay: %f\n", acc, lr, momentum, decay ); |
| | | lr /= 2; |
| | | } |
| | |
| | | // printf("%f\n", delta[0]); |
| | | //printf("%f %f\n", truth[index], out[0]); |
| | | //backward_network(net, m.vals[index], ); |
| | | update_network(net, .00001, 0,0); |
| | | update_network(net); |
| | | } |
| | | //float test_acc = error_network(net, m, truth); |
| | | //float valid_acc = error_network(net, ho, ho_truth); |
| | |
| | | float *matrix = calloc(msize, sizeof(float)); |
| | | int i; |
| | | for(i = 0; i < 1000; ++i){ |
| | | im2col_cpu(test.data, c, h, w, size, stride, 0, matrix); |
| | | im2col_cpu(test.data,1, c, h, w, size, stride, 0, matrix); |
| | | //image render = float_to_image(mh, mw, mc, matrix); |
| | | } |
| | | } |
| | |
| | | |
| | | translate_data_rows(train, -144); |
| | | clock_t start = clock(), end; |
| | | float loss = train_network_sgd(net, train, 10, lr, momentum, decay); |
| | | float loss = train_network_sgd(net, train, 10); |
| | | end = clock(); |
| | | printf("%d: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", i, loss, (float)(end-start)/CLOCKS_PER_SEC, lr, momentum, decay); |
| | | free_data(train); |
| | |
| | | // test_im2row(); |
| | | //test_split(); |
| | | //test_ensemble(); |
| | | //test_nist_single(); |
| | | test_nist(); |
| | | //test_cifar10(); |
| | | //test_vince(); |
| | |
| | | //visualize_cat(); |
| | | //flip_network(); |
| | | //test_visualize(); |
| | | //test_parser(); |
| | | fprintf(stderr, "Success!\n"); |
| | | //test_random_preprocess(); |
| | | //test_random_classify(); |
| | |
| | | inline void col2im_set_pixel(float *im, int height, int width, int channels, |
| | | #include <stdio.h> |
| | | #include <math.h> |
| | | inline void col2im_add_pixel(float *im, int height, int width, int channels, |
| | | int row, int col, int channel, int pad, float val) |
| | | { |
| | | row -= pad; |
| | |
| | | |
| | | if (row < 0 || col < 0 || |
| | | row >= height || col >= width) return; |
| | | im[col + width*(row + channel*height)] = val; |
| | | im[col + width*(row + channel*height)] += val; |
| | | } |
| | | //This one might be from Caffe too, can't remember.
| | | void col2im_cpu(float* data_col, |
| | |
| | | int im_row = h_offset + h * stride; |
| | | int im_col = w_offset + w * stride; |
| | | double val = data_col[(c * height_col + h) * width_col + w]; |
| | | col2im_set_pixel(data_im, height, width, channels, |
| | | col2im_add_pixel(data_im, height, width, channels, |
| | | im_row, im_col, c_im, pad, val); |
| | | } |
| | | } |
| | |
| | | #include <stdlib.h> |
| | | #include <string.h> |
| | | |
| | | connected_layer *make_connected_layer(int batch, int inputs, int outputs, float dropout, ACTIVATION activation) |
| | | connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, float learning_rate, float momentum, float decay) |
| | | { |
| | | fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs); |
| | | int i; |
| | | connected_layer *layer = calloc(1, sizeof(connected_layer)); |
| | | |
| | | layer->learning_rate = learning_rate; |
| | | layer->momentum = momentum; |
| | | layer->decay = decay; |
| | | |
| | | layer->inputs = inputs; |
| | | layer->outputs = outputs; |
| | | layer->batch=batch; |
| | | layer->dropout = dropout; |
| | | |
| | | layer->output = calloc(batch*outputs, sizeof(float));
| | | layer->delta = calloc(batch*outputs, sizeof(float));
| | |
| | | layer->weight_momentum = calloc(inputs*outputs, sizeof(float)); |
| | | layer->weights = calloc(inputs*outputs, sizeof(float)); |
| | | float scale = 1./inputs; |
| | | //scale = .01; |
| | | for(i = 0; i < inputs*outputs; ++i) |
| | | layer->weights[i] = scale*(rand_uniform()); |
| | | layer->weights[i] = scale*(rand_uniform()-.5); |
| | | |
| | | layer->bias_updates = calloc(outputs, sizeof(float)); |
| | | layer->bias_adapt = calloc(outputs, sizeof(float)); |
| | |
| | | return layer; |
| | | } |
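| | |
On the initialization change above: assuming rand_uniform() draws from [0, 1), the old weights were strictly positive in [0, scale); subtracting .5 recenters them to zero-mean values in [-scale/2, scale/2) with scale = 1/inputs.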
| | | |
| | | void update_connected_layer(connected_layer layer, float step, float momentum, float decay) |
| | | void update_connected_layer(connected_layer layer) |
| | | { |
| | | int i; |
| | | for(i = 0; i < layer.outputs; ++i){ |
| | | layer.bias_momentum[i] = step*(layer.bias_updates[i]) + momentum*layer.bias_momentum[i]; |
| | | layer.bias_momentum[i] = layer.learning_rate*(layer.bias_updates[i]) + layer.momentum*layer.bias_momentum[i]; |
| | | layer.biases[i] += layer.bias_momentum[i]; |
| | | } |
| | | for(i = 0; i < layer.outputs*layer.inputs; ++i){ |
| | | layer.weight_momentum[i] = step*(layer.weight_updates[i] - decay*layer.weights[i]) + momentum*layer.weight_momentum[i]; |
| | | layer.weight_momentum[i] = layer.learning_rate*(layer.weight_updates[i] - layer.decay*layer.weights[i]) + layer.momentum*layer.weight_momentum[i]; |
| | | layer.weights[i] += layer.weight_momentum[i]; |
| | | } |
| | | memset(layer.bias_updates, 0, layer.outputs*sizeof(float)); |
| | | memset(layer.weight_updates, 0, layer.outputs*layer.inputs*sizeof(float)); |
| | | } |
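| | |
Written out, with learning rate eta, momentum mu, and decay lambda now read from the layer itself, this is the usual momentum-SGD update: v <- eta*(g - lambda*w) + mu*v, then w <- w + v, with the accumulated gradients zeroed at the end of each call; biases use the same rule with lambda = 0.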
| | | |
| | | void forward_connected_layer(connected_layer layer, float *input, int train) |
| | | void forward_connected_layer(connected_layer layer, float *input) |
| | | { |
| | | int i; |
| | | if(!train) layer.dropout = 0; |
| | | for(i = 0; i < layer.batch; ++i){ |
| | | memcpy(layer.output+i*layer.outputs, layer.biases, layer.outputs*sizeof(float)); |
| | | } |
| | |
| | | float *b = layer.weights; |
| | | float *c = layer.output; |
| | | gemm(0,0,m,n,k,1,a,k,b,n,1,c,n); |
| | | activate_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.dropout); |
| | | activate_array(layer.output, layer.outputs*layer.batch, layer.activation); |
| | | } |
| | | |
| | | void backward_connected_layer(connected_layer layer, float *input, float *delta) |
| | |
| | | #include "activations.h" |
| | | |
| | | typedef struct{ |
| | | float learning_rate; |
| | | float momentum; |
| | | float decay; |
| | | |
| | | int batch; |
| | | int inputs; |
| | | int outputs; |
| | |
| | | float *output; |
| | | float *delta; |
| | | |
| | | float dropout; |
| | | |
| | | ACTIVATION activation; |
| | | |
| | | } connected_layer; |
| | | |
| | | connected_layer *make_connected_layer(int batch, int inputs, int outputs, float dropout, ACTIVATION activation); |
| | | connected_layer *make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, float learning_rate, float momentum, float decay); |
| | | |
| | | void forward_connected_layer(connected_layer layer, float *input, int train); |
| | | void forward_connected_layer(connected_layer layer, float *input); |
| | | void backward_connected_layer(connected_layer layer, float *input, float *delta); |
| | | void update_connected_layer(connected_layer layer, float step, float momentum, float decay); |
| | | void update_connected_layer(connected_layer layer); |
| | | |
| | | |
| | | #endif |
| | |
| | | return float_to_image(h,w,c,layer.delta); |
| | | } |
| | | |
| | | convolutional_layer *make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation) |
| | | convolutional_layer *make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation, float learning_rate, float momentum, float decay) |
| | | { |
| | | int i; |
| | | size = 2*(size/2)+1; //HA! And you thought you'd use an even sized filter... |
| | | convolutional_layer *layer = calloc(1, sizeof(convolutional_layer)); |
| | | |
| | | layer->learning_rate = learning_rate; |
| | | layer->momentum = momentum; |
| | | layer->decay = decay; |
| | | |
| | | layer->h = h; |
| | | layer->w = w; |
| | | layer->c = c; |
| | |
| | | layer->bias_updates = calloc(n, sizeof(float)); |
| | | layer->bias_momentum = calloc(n, sizeof(float)); |
| | | float scale = 1./(size*size*c); |
| | | for(i = 0; i < c*n*size*size; ++i) layer->filters[i] = scale*(rand_uniform()); |
| | | //scale = .0001; |
| | | for(i = 0; i < c*n*size*size; ++i) layer->filters[i] = scale*(rand_uniform()-.5); |
| | | for(i = 0; i < n; ++i){ |
| | | //layer->biases[i] = rand_normal()*scale + scale; |
| | | layer->biases[i] = .5; |
| | |
| | | layer->bias_updates_cl = cl_make_array(layer->bias_updates, n); |
| | | layer->bias_momentum_cl = cl_make_array(layer->bias_momentum, n); |
| | | |
| | | layer->col_image_cl = cl_make_array(layer->col_image, layer.batch*out_h*out_w*size*size*c); |
| | | layer->col_image_cl = cl_make_array(layer->col_image, layer->batch*out_h*out_w*size*size*c); |
| | | layer->delta_cl = cl_make_array(layer->delta, layer->batch*out_h*out_w*n); |
| | | layer->output_cl = cl_make_array(layer->output, layer->batch*out_h*out_w*n); |
| | | #endif |
| | |
| | | float *b = layer.col_image; |
| | | float *c = layer.output; |
| | | |
| | | for(i = 0; i < layer.batch; ++i){ |
| | | im2col_cpu(in, layer.c, layer.h, layer.w, |
| | | im2col_cpu(in, layer.batch, layer.c, layer.h, layer.w, |
| | | layer.size, layer.stride, layer.pad, b); |
| | | |
| | | for(i = 0; i < layer.batch; ++i){ |
| | | gemm(0,0,m,n,k,1,a,k,b,n,1,c,n); |
| | | c += n*m; |
| | | in += layer.h*layer.w*layer.c; |
| | |
| | | for(i = 0; i < m*n; ++i) printf("%f, ", layer.output[i]); |
| | | printf("\n"); |
| | | */ |
| | | activate_array(layer.output, m*n*layer.batch, layer.activation, 0.); |
| | | activate_array(layer.output, m*n*layer.batch, layer.activation); |
| | | } |
| | | |
| | | #ifdef GPU |
| | | void forward_convolutional_layer_gpu(convolutional_layer layer, cl_mem in) |
| | | { |
| | | int m = layer.n; |
| | | int k = layer.size*layer.size*layer.c; |
| | | int n = convolutional_out_height(layer)* |
| | | convolutional_out_width(layer)* |
| | | layer.batch; |
| | | |
| | | cl_write_array(layer.filters_cl, layer.filters, m*k); |
| | | cl_mem a = layer.filters_cl; |
| | | cl_mem b = layer.col_image_cl; |
| | | cl_mem c = layer.output_cl; |
| | | im2col_ongpu(in, layer.batch, layer.c, layer.h, layer.w, layer.size, layer.stride, b); |
| | | gemm_ongpu(0,0,m,n,k,1,a,k,b,n,0,c,n); |
| | | activate_array_ongpu(layer.output_cl, m*n, layer.activation, 0.); |
| | | cl_read_array(layer.output_cl, layer.output, m*n); |
| | | } |
| | | #endif |
| | | |
| | | void learn_bias_convolutional_layer(convolutional_layer layer) |
| | | { |
| | | int i,b; |
| | |
| | | } |
| | | } |
| | | |
| | | void update_convolutional_layer(convolutional_layer layer, float step, float momentum, float decay) |
| | | void update_convolutional_layer(convolutional_layer layer) |
| | | { |
| | | int size = layer.size*layer.size*layer.c*layer.n; |
| | | axpy_cpu(layer.n, step, layer.bias_updates, 1, layer.biases, 1); |
| | | scal_cpu(layer.n, momentum, layer.bias_updates, 1); |
| | | axpy_cpu(layer.n, layer.learning_rate, layer.bias_updates, 1, layer.biases, 1); |
| | | scal_cpu(layer.n,layer.momentum, layer.bias_updates, 1); |
| | | |
| | | scal_cpu(size, 1.-step*decay, layer.filters, 1); |
| | | axpy_cpu(size, step, layer.filter_updates, 1, layer.filters, 1); |
| | | scal_cpu(size, momentum, layer.filter_updates, 1); |
| | | scal_cpu(size, 1.-layer.learning_rate*layer.decay, layer.filters, 1); |
| | | axpy_cpu(size, layer.learning_rate, layer.filter_updates, 1, layer.filters, 1); |
| | | scal_cpu(size, layer.momentum, layer.filter_updates, 1); |
| | | } |
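| | |
Reading these BLAS calls with axpy(n, alpha, x, y) as y <- y + alpha*x and scal(n, alpha, x) as x <- alpha*x: biases do b <- b + eta*db then db <- mu*db, and filters do w <- (1 - eta*lambda)*w, w <- w + eta*dw, dw <- mu*dw. Momentum is kept by damping the accumulated update buffers in place rather than in a separate velocity array.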
| | | |
| | | |
| | |
| | | image dc = collapse_image_layers(delta, 1); |
| | | char buff[256]; |
| | | sprintf(buff, "%s: Output", window); |
| | | show_image(dc, buff); |
| | | save_image(dc, buff); |
| | | //show_image(dc, buff); |
| | | //save_image(dc, buff); |
| | | free_image(dc); |
| | | return single_filters; |
| | | } |
| | | |
| | | #ifdef GPU |
| | | void forward_convolutional_layer_gpu(convolutional_layer layer, cl_mem in) |
| | | { |
| | | int m = layer.n; |
| | | int k = layer.size*layer.size*layer.c; |
| | | int n = convolutional_out_height(layer)* |
| | | convolutional_out_width(layer)* |
| | | layer.batch; |
| | | |
| | | cl_write_array(layer.filters_cl, layer.filters, m*k); |
| | | cl_mem a = layer.filters_cl; |
| | | cl_mem b = layer.col_image_cl; |
| | | cl_mem c = layer.output_cl; |
| | | im2col_ongpu(in, layer.batch, layer.c, layer.h, layer.w, layer.size, layer.stride, b); |
| | | gemm_ongpu(0,0,m,n,k,1,a,k,b,n,0,c,n); |
| | | activate_array_ongpu(layer.output_cl, m*n, layer.activation); |
| | | cl_read_array(layer.output_cl, layer.output, m*n); |
| | | } |
| | | #endif |
| | | |
| | |
| | | #include "activations.h" |
| | | |
| | | typedef struct { |
| | | float learning_rate; |
| | | float momentum; |
| | | float decay; |
| | | |
| | | int batch; |
| | | int h,w,c; |
| | | int n; |
| | |
| | | void forward_convolutional_layer_gpu(convolutional_layer layer, cl_mem in); |
| | | #endif |
| | | |
| | | convolutional_layer *make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation); |
| | | convolutional_layer *make_convolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation, float learning_rate, float momentum, float decay); |
| | | void resize_convolutional_layer(convolutional_layer *layer, int h, int w, int c); |
| | | void forward_convolutional_layer(const convolutional_layer layer, float *in); |
| | | void update_convolutional_layer(convolutional_layer layer, float step, float momentum, float decay); |
| | | void update_convolutional_layer(convolutional_layer layer); |
| | | image *visualize_convolutional_layer(convolutional_layer layer, char *window, image *prev_filters); |
| | | |
| | | void backward_convolutional_layer(convolutional_layer layer, float *delta); |
| | |
| | | d.y = y; |
| | | |
| | | FILE *fp = fopen(filename, "rb"); |
| | | if(!fp) file_error(filename); |
| | | for(i = 0; i < 10000; ++i){ |
| | | unsigned char bytes[3073]; |
| | | fread(bytes, 1, 3073, fp); |
| | |
| | | X.vals[i][j] = (double)bytes[j+1]; |
| | | } |
| | | } |
| | | translate_data_rows(d, -144); |
| | | scale_data_rows(d, 1./128); |
| | | //normalize_data_rows(d); |
| | | fclose(fp); |
| | | return d; |
| | | } |
| | | |
| | | data load_all_cifar10() |
| | | { |
| | | data d; |
| | | d.shallow = 0; |
| | | int i,j,b; |
| | | matrix X = make_matrix(50000, 3072); |
| | | matrix y = make_matrix(50000, 10); |
| | | d.X = X; |
| | | d.y = y; |
| | | |
| | | |
| | | for(b = 0; b < 5; ++b){ |
| | | char buff[256]; |
| | | sprintf(buff, "data/cifar10/data_batch_%d.bin", b+1); |
| | | FILE *fp = fopen(buff, "rb"); |
| | | if(!fp) file_error(buff); |
| | | for(i = 0; i < 10000; ++i){ |
| | | unsigned char bytes[3073]; |
| | | fread(bytes, 1, 3073, fp); |
| | | int class = bytes[0]; |
| | | y.vals[i+b*10000][class] = 1; |
| | | for(j = 0; j < X.cols; ++j){ |
| | | X.vals[i+b*10000][j] = (double)bytes[j+1]; |
| | | } |
| | | } |
| | | fclose(fp); |
| | | } |
| | | //normalize_data_rows(d); |
| | | translate_data_rows(d, -144); |
| | | scale_data_rows(d, 1./128); |
| | | return d; |
| | | } |
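| | |
This fixed affine preprocessing replaces per-row normalization: a byte v in [0, 255] maps to (v - 144)/128, i.e. roughly [-1.13, 0.87], so the data is approximately centered without computing per-image statistics.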
| | | |
| | | void randomize_data(data d) |
| | | { |
| | | int i; |
| | |
| | | data load_data_image_pathfile_random(char *filename, int n, char **labels, |
| | | int k, int h, int w); |
| | | data load_cifar10_data(char *filename); |
| | | data load_all_cifar10(); |
| | | list *get_paths(char *filename); |
| | | data load_categorical_data_csv(char *filename, int target, int k); |
| | | void normalize_data_rows(data d); |
| New file |
| | |
| | | #include "dropout_layer.h" |
| | | #include "stdlib.h" |
| | | #include "stdio.h" |
| | | |
| | | dropout_layer *make_dropout_layer(int batch, int inputs, float probability) |
| | | { |
| | | fprintf(stderr, "Dropout Layer: %d inputs, %f probability\n", inputs, probability); |
| | | dropout_layer *layer = calloc(1, sizeof(dropout_layer)); |
| | | layer->probability = probability; |
| | | layer->inputs = inputs; |
| | | layer->batch = batch; |
| | | return layer; |
| | | } |
| | | |
| | | void forward_dropout_layer(dropout_layer layer, float *input) |
| | | { |
| | | int i; |
| | | for(i = 0; i < layer.batch * layer.inputs; ++i){ |
| | | if((float)rand()/RAND_MAX < layer.probability) input[i] = 0; |
| | | else input[i] /= (1-layer.probability); |
| | | } |
| | | } |
| | | void backward_dropout_layer(dropout_layer layer, float *input, float *delta) |
| | | { |
| | | // Intentionally empty: dropout has no weights to update, and this version does not mask the gradients.
| | | } |
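| | |
A minimal usage sketch of the new layer, assuming only the API above (buffer sizes are illustrative):

    #include <stdlib.h>
    #include "dropout_layer.h"

    void dropout_example()
    {
        float activations[4*256] = {0};                       /* batch * inputs */
        dropout_layer *drop = make_dropout_layer(4, 256, .5); /* batch, inputs, p */
        forward_dropout_layer(*drop, activations);            /* train time only */
        free(drop);
    }

At test time no rescaling pass is needed: forward_network() skips DROPOUT layers entirely when train is 0 (see the check in network.c below).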
| New file |
| | |
| | | #ifndef DROPOUT_LAYER_H |
| | | #define DROPOUT_LAYER_H |
| | | |
| | | typedef struct{ |
| | | int batch; |
| | | int inputs; |
| | | float probability; |
| | | } dropout_layer; |
| | | |
| | | dropout_layer *make_dropout_layer(int batch, int inputs, float probability); |
| | | |
| | | void forward_dropout_layer(dropout_layer layer, float *input); |
| | | void backward_dropout_layer(dropout_layer layer, float *input, float *delta); |
| | | |
| | | #endif |
| | |
| | | |
| | | //From Berkeley Vision's Caffe! |
| | | //https://github.com/BVLC/caffe/blob/master/LICENSE |
| | | void im2col_cpu(float* data_im, |
| | | void im2col_cpu(float* data_im, const int batch, |
| | | const int channels, const int height, const int width, |
| | | const int ksize, const int stride, int pad, float* data_col) |
| | | { |
| | | int c,h,w; |
| | | int c,h,w,b; |
| | | int height_col = (height - ksize) / stride + 1; |
| | | int width_col = (width - ksize) / stride + 1; |
| | | if (pad){ |
| | |
| | | pad = ksize/2; |
| | | } |
| | | int channels_col = channels * ksize * ksize; |
| | | int im_size = height*width*channels; |
| | | int col_size = height_col*width_col*channels_col; |
| | | for (b = 0; b < batch; ++b) { |
| | | for (c = 0; c < channels_col; ++c) { |
| | | int w_offset = c % ksize; |
| | | int h_offset = (c / ksize) % ksize; |
| | |
| | | } |
| | | } |
| | | } |
| | | data_im += im_size; |
| | | data_col += col_size; |
| | | } |
| | | } |
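| | |
With the added batch argument each image expands independently: for a 5x5 single-channel image with ksize = 3, stride = 1, no pad, height_col = width_col = (5 - 3)/1 + 1 = 3 and channels_col = 1*3*3 = 9, so image b writes a 9*9-float block at data_col + b*col_size.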
| | | |
| | | |
| | |
| | | |
| | | __kernel void im2col(__global float *data_im, |
| | | const int batch, const int channels, const int height, const int width, |
| | | const int ksize, const int stride, __global float *data_col) |
| | | __kernel void im2col(__global float *data_im, const int im_offset, |
| | | const int channels, const int height, const int width, |
| | | const int ksize, const int stride, __global float *data_col, const int col_offset) |
| | | { |
| | | int b = get_global_id(0); |
| | | int c = get_global_id(1); |
| | |
| | | } |
| | | free_image(copy); |
| | | if(disp->height < 500 || disp->width < 500 || disp->height > 1000){ |
| | | int w = 1500; |
| | | int w = 500; |
| | | int h = w*p.h/p.w; |
| | | if(h > 1000){ |
| | | h = 1000; |
| | |
| | | void show_images(image *ims, int n, char *window) |
| | | { |
| | | image m = collapse_images_vert(ims, n); |
| | | save_image(m, window); |
| | | //save_image(m, window); |
| | | show_image(m, window); |
| | | free_image(m); |
| | | } |
| | |
| | | return float_to_image(h,w,c,layer.delta); |
| | | } |
| | | |
| | | maxpool_layer *make_maxpool_layer(int batch, int h, int w, int c, int stride) |
| | | maxpool_layer *make_maxpool_layer(int batch, int h, int w, int c, int size, int stride) |
| | | { |
| | | fprintf(stderr, "Maxpool Layer: %d x %d x %d image, %d stride\n", h,w,c,stride); |
| | | fprintf(stderr, "Maxpool Layer: %d x %d x %d image, %d size, %d stride\n", h,w,c,size,stride); |
| | | maxpool_layer *layer = calloc(1, sizeof(maxpool_layer)); |
| | | layer->batch = batch; |
| | | layer->h = h; |
| | | layer->w = w; |
| | | layer->c = c; |
| | | layer->size = size; |
| | | layer->stride = stride; |
| | | layer->output = calloc(((h-1)/stride+1) * ((w-1)/stride+1) * c*batch, sizeof(float)); |
| | | layer->delta = calloc(((h-1)/stride+1) * ((w-1)/stride+1) * c*batch, sizeof(float)); |
| | |
| | | layer->delta = realloc(layer->delta, ((h-1)/layer->stride+1) * ((w-1)/layer->stride+1) * c * layer->batch*sizeof(float)); |
| | | } |
| | | |
| | | float get_max_region(image im, int h, int w, int c, int size) |
| | | { |
| | | int i,j; |
| | | int lower = (size-1)/2; /* (-size-1)/2 + 1 flipped the sign of the upper-left margin for size >= 3 */
| | | int upper = size/2 + 1; |
| | | |
| | | int lh = (h-lower < 0) ? 0 : h-lower; |
| | | int uh = (h+upper > im.h) ? im.h : h+upper; |
| | | |
| | | int lw = (w-lower < 0) ? 0 : w-lower; |
| | | int uw = (w+upper > im.w) ? im.w : w+upper; |
| | | |
| | | //printf("%d\n", -3/2); |
| | | //printf("%d %d\n", lower, upper); |
| | | //printf("%d %d %d %d\n", lh, uh, lw, uw); |
| | | |
| | | float max = -FLT_MAX; |
| | | for(i = lh; i < uh; ++i){ |
| | | for(j = lw; j < uw; ++j){ |
| | | float val = get_pixel(im, i, j, c); |
| | | if (val > max) max = val; |
| | | } |
| | | } |
| | | return max; |
| | | } |
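| | |
For example, with size = 3 the region around (h, w) is rows [h-1, h+2) and columns [w-1, w+2), clipped to the image; with size = 2 it is [h, h+2) x [w, w+2), anchored at the top-left, which is what the parser's size = stride default produces.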
| | | |
| | | void forward_maxpool_layer(const maxpool_layer layer, float *in) |
| | | { |
| | | int b; |
| | |
| | | image output = float_to_image(h,w,c,layer.output+b*h*w*c); |
| | | |
| | | int i,j,k; |
| | | for(i = 0; i < output.h*output.w*output.c; ++i) output.data[i] = -FLT_MAX;
| | | for(k = 0; k < input.c; ++k){ |
| | | for(i = 0; i < input.h; ++i){ |
| | | for(j = 0; j < input.w; ++j){ |
| | | float val = get_pixel(input, i, j, k); |
| | | float cur = get_pixel(output, i/layer.stride, j/layer.stride, k); |
| | | if(val > cur) set_pixel(output, i/layer.stride, j/layer.stride, k, val); |
| | | for(i = 0; i < input.h; i += layer.stride){ |
| | | for(j = 0; j < input.w; j += layer.stride){ |
| | | float max = get_max_region(input, i, j, k, layer.size); |
| | | set_pixel(output, i/layer.stride, j/layer.stride, k, max); |
| | | } |
| | | } |
| | | } |
| | | } |
| | | } |
| | | |
| | | float set_max_region_delta(image im, image delta, int h, int w, int c, int size, float max, float error) |
| | | { |
| | | int i,j; |
| | | int lower = (size-1)/2; /* same window bounds as get_max_region */
| | | int upper = size/2 + 1; |
| | | |
| | | int lh = (h-lower < 0) ? 0 : h-lower; |
| | | int uh = (h+upper > im.h) ? im.h : h+upper; |
| | | |
| | | int lw = (w-lower < 0) ? 0 : w-lower; |
| | | int uw = (w+upper > im.w) ? im.w : w+upper; |
| | | |
| | | for(i = lh; i < uh; ++i){ |
| | | for(j = lw; j < uw; ++j){ |
| | | float val = get_pixel(im, i, j, c); |
| | | if (val == max){ |
| | | add_pixel(delta, i, j, c, error); |
| | | } |
| | | } |
| | | } |
| | | return max; |
| | | } |
| | | |
| | | void backward_maxpool_layer(const maxpool_layer layer, float *in, float *delta) |
| | | { |
| | | int b; |
| | |
| | | int c = layer.c; |
| | | image output = float_to_image(h,w,c,layer.output+b*h*w*c); |
| | | image output_delta = float_to_image(h,w,c,layer.delta+b*h*w*c); |
| | | zero_image(input_delta); |
| | | |
| | | int i,j,k; |
| | | for(k = 0; k < input.c; ++k){ |
| | | for(i = 0; i < input.h; ++i){ |
| | | for(j = 0; j < input.w; ++j){ |
| | | float val = get_pixel(input, i, j, k); |
| | | float cur = get_pixel(output, i/layer.stride, j/layer.stride, k); |
| | | float d = get_pixel(output_delta, i/layer.stride, j/layer.stride, k); |
| | | if(val == cur) { |
| | | set_pixel(input_delta, i, j, k, d); |
| | | } |
| | | else set_pixel(input_delta, i, j, k, 0); |
| | | for(i = 0; i < input.h; i += layer.stride){ |
| | | for(j = 0; j < input.w; j += layer.stride){ |
| | | float max = get_pixel(output, i/layer.stride, j/layer.stride, k); |
| | | float error = get_pixel(output_delta, i/layer.stride, j/layer.stride, k); |
| | | set_max_region_delta(input, input_delta, i, j, k, layer.size, max, error); |
| | | } |
| | | } |
| | | } |
| | |
| | | int batch; |
| | | int h,w,c; |
| | | int stride; |
| | | int size; |
| | | float *delta; |
| | | float *output; |
| | | } maxpool_layer; |
| | | |
| | | image get_maxpool_image(maxpool_layer layer); |
| | | maxpool_layer *make_maxpool_layer(int batch, int h, int w, int c, int stride); |
| | | maxpool_layer *make_maxpool_layer(int batch, int h, int w, int c, int size, int stride); |
| | | void resize_maxpool_layer(maxpool_layer *layer, int h, int w, int c); |
| | | void forward_maxpool_layer(const maxpool_layer layer, float *in); |
| | | void backward_maxpool_layer(const maxpool_layer layer, float *in, float *delta); |
| | |
| | | cl_mem C_gpu, int ldc); |
| | | #endif |
| | | |
| | | void im2col_cpu(float* data_im, |
| | | void im2col_cpu(float* data_im, const int batch, |
| | | const int channels, const int height, const int width, |
| | | const int ksize, const int stride, int pad, float* data_col); |
| | | |
| | |
| | | #include "maxpool_layer.h" |
| | | #include "normalization_layer.h" |
| | | #include "softmax_layer.h" |
| | | #include "dropout_layer.h" |
| | | |
| | | network make_network(int n, int batch) |
| | | { |
| | |
| | | return net; |
| | | } |
| | | |
| | | void print_convolutional_cfg(FILE *fp, convolutional_layer *l, int first) |
| | | { |
| | | int i; |
| | | fprintf(fp, "[convolutional]\n"); |
| | | if(first) fprintf(fp, "batch=%d\n" |
| | | "height=%d\n" |
| | | "width=%d\n" |
| | | "channels=%d\n", |
| | | l->batch,l->h, l->w, l->c); |
| | | fprintf(fp, "filters=%d\n" |
| | | "size=%d\n" |
| | | "stride=%d\n" |
| | | "activation=%s\n", |
| | | l->n, l->size, l->stride, |
| | | get_activation_string(l->activation)); |
| | | fprintf(fp, "data="); |
| | | for(i = 0; i < l->n; ++i) fprintf(fp, "%g,", l->biases[i]); |
| | | for(i = 0; i < l->n*l->c*l->size*l->size; ++i) fprintf(fp, "%g,", l->filters[i]); |
| | | fprintf(fp, "\n\n"); |
| | | } |
| | | void print_connected_cfg(FILE *fp, connected_layer *l, int first) |
| | | { |
| | | int i; |
| | | fprintf(fp, "[connected]\n"); |
| | | if(first) fprintf(fp, "batch=%d\ninput=%d\n", l->batch, l->inputs); |
| | | fprintf(fp, "output=%d\n" |
| | | "activation=%s\n", |
| | | l->outputs, |
| | | get_activation_string(l->activation)); |
| | | fprintf(fp, "data="); |
| | | for(i = 0; i < l->outputs; ++i) fprintf(fp, "%g,", l->biases[i]); |
| | | for(i = 0; i < l->inputs*l->outputs; ++i) fprintf(fp, "%g,", l->weights[i]); |
| | | fprintf(fp, "\n\n"); |
| | | } |
| | | |
| | | void print_maxpool_cfg(FILE *fp, maxpool_layer *l, int first) |
| | | { |
| | | fprintf(fp, "[maxpool]\n"); |
| | | if(first) fprintf(fp, "batch=%d\n" |
| | | "height=%d\n" |
| | | "width=%d\n" |
| | | "channels=%d\n", |
| | | l->batch,l->h, l->w, l->c); |
| | | fprintf(fp, "stride=%d\n\n", l->stride); |
| | | } |
| | | |
| | | void print_normalization_cfg(FILE *fp, normalization_layer *l, int first) |
| | | { |
| | | fprintf(fp, "[localresponsenormalization]\n"); |
| | | if(first) fprintf(fp, "batch=%d\n" |
| | | "height=%d\n" |
| | | "width=%d\n" |
| | | "channels=%d\n", |
| | | l->batch,l->h, l->w, l->c); |
| | | fprintf(fp, "size=%d\n" |
| | | "alpha=%g\n" |
| | | "beta=%g\n" |
| | | "kappa=%g\n\n", l->size, l->alpha, l->beta, l->kappa); |
| | | } |
| | | |
| | | void print_softmax_cfg(FILE *fp, softmax_layer *l, int first) |
| | | { |
| | | fprintf(fp, "[softmax]\n"); |
| | | if(first) fprintf(fp, "batch=%d\ninput=%d\n", l->batch, l->inputs); |
| | | fprintf(fp, "\n"); |
| | | } |
| | | |
| | | void save_network(network net, char *filename) |
| | | { |
| | | FILE *fp = fopen(filename, "w"); |
| | | if(!fp) file_error(filename); |
| | | int i; |
| | | for(i = 0; i < net.n; ++i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL) |
| | | print_convolutional_cfg(fp, (convolutional_layer *)net.layers[i], i==0); |
| | | else if(net.types[i] == CONNECTED) |
| | | print_connected_cfg(fp, (connected_layer *)net.layers[i], i==0); |
| | | else if(net.types[i] == MAXPOOL) |
| | | print_maxpool_cfg(fp, (maxpool_layer *)net.layers[i], i==0); |
| | | else if(net.types[i] == NORMALIZATION) |
| | | print_normalization_cfg(fp, (normalization_layer *)net.layers[i], i==0); |
| | | else if(net.types[i] == SOFTMAX) |
| | | print_softmax_cfg(fp, (softmax_layer *)net.layers[i], i==0); |
| | | } |
| | | fclose(fp); |
| | | } |
| | | |
| | | #ifdef GPU |
| | | void forward_network(network net, float *input, int train) |
| | | { |
| | |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | forward_connected_layer(layer, input, train); |
| | | forward_connected_layer(layer, input); |
| | | input = layer.output; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | |
| | | forward_normalization_layer(layer, input); |
| | | input = layer.output; |
| | | } |
| | | else if(net.types[i] == DROPOUT){ |
| | | if(!train) continue; |
| | | dropout_layer layer = *(dropout_layer *)net.layers[i]; |
| | | forward_dropout_layer(layer, input); |
| | | } |
| | | } |
| | | } |
| | | #endif |
| | | |
| | | void update_network(network net, float step, float momentum, float decay) |
| | | void update_network(network net) |
| | | { |
| | | int i; |
| | | for(i = 0; i < net.n; ++i){ |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | update_convolutional_layer(layer, step, momentum, decay); |
| | | update_convolutional_layer(layer); |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | //maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | update_connected_layer(layer, step, momentum, decay); |
| | | update_connected_layer(layer); |
| | | } |
| | | } |
| | | } |
| | |
| | | } else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | return layer.output; |
| | | } else if(net.types[i] == DROPOUT){ |
| | | return get_network_output_layer(net, i-1); |
| | | } else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.output; |
| | |
| | | } else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | return layer.delta; |
| | | } else if(net.types[i] == DROPOUT){ |
| | | return get_network_delta_layer(net, i-1); |
| | | } else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.delta; |
| | |
| | | return error; |
| | | } |
| | | |
| | | float train_network_datum(network net, float *x, float *y, float step, float momentum, float decay) |
| | | float train_network_datum(network net, float *x, float *y) |
| | | { |
| | | forward_network(net, x, 1); |
| | | //int class = get_predicted_class_network(net); |
| | | float error = backward_network(net, x, y); |
| | | update_network(net, step, momentum, decay); |
| | | update_network(net); |
| | | //return (y[class]?1:0); |
| | | return error; |
| | | } |
| | | |
| | | float train_network_sgd(network net, data d, int n, float step, float momentum,float decay) |
| | | float train_network_sgd(network net, data d, int n) |
| | | { |
| | | int batch = net.batch; |
| | | float *X = calloc(batch*d.X.cols, sizeof(float)); |
| | |
| | | memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); |
| | | memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); |
| | | } |
| | | float err = train_network_datum(net, X, y, step, momentum, decay); |
| | | float err = train_network_datum(net, X, y); |
| | | sum += err; |
| | | //train_network_datum(net, X, y, step, momentum, decay); |
| | | //train_network_datum(net, X, y); |
| | | /* |
| | | float *y = d.y.vals[index]; |
| | | int class = get_predicted_class_network(net); |
| | |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | float train_network_batch(network net, data d, int n, float step, float momentum,float decay) |
| | | float train_network_batch(network net, data d, int n) |
| | | { |
| | | int i,j; |
| | | float sum = 0; |
| | |
| | | forward_network(net, x, 1); |
| | | sum += backward_network(net, x, y); |
| | | } |
| | | update_network(net, step, momentum, decay); |
| | | update_network(net); |
| | | } |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | |
| | | void train_network(network net, data d, float step, float momentum, float decay) |
| | | void train_network(network net, data d) |
| | | { |
| | | int i; |
| | | int correct = 0; |
| | | for(i = 0; i < d.X.rows; ++i){ |
| | | correct += train_network_datum(net, d.X.vals[i], d.y.vals[i], step, momentum, decay); |
| | | correct += train_network_datum(net, d.X.vals[i], d.y.vals[i]); |
| | | if(i%100 == 0){ |
| | | visualize_network(net); |
| | | cvWaitKey(10); |
| | |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.inputs; |
| | | } else if(net.types[i] == DROPOUT){ |
| | | dropout_layer layer = *(dropout_layer *) net.layers[i]; |
| | | return layer.inputs; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.outputs; |
| | | } else if(net.types[i] == DROPOUT){ |
| | | dropout_layer layer = *(dropout_layer *) net.layers[i]; |
| | | return layer.inputs; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | |
| | | CONNECTED, |
| | | MAXPOOL, |
| | | SOFTMAX, |
| | | NORMALIZATION |
| | | NORMALIZATION, |
| | | DROPOUT |
| | | } LAYER_TYPE; |
| | | |
| | | typedef struct { |
| | | int n; |
| | | int batch; |
| | | float learning_rate; |
| | | float momentum; |
| | | float decay; |
| | | void **layers; |
| | | LAYER_TYPE *types; |
| | | int outputs; |
| | |
| | | network make_network(int n, int batch); |
| | | void forward_network(network net, float *input, int train); |
| | | float backward_network(network net, float *input, float *truth); |
| | | void update_network(network net, float step, float momentum, float decay); |
| | | float train_network_sgd(network net, data d, int n, float step, float momentum,float decay); |
| | | float train_network_batch(network net, data d, int n, float step, float momentum,float decay); |
| | | void train_network(network net, data d, float step, float momentum, float decay); |
| | | void update_network(network net); |
| | | float train_network_sgd(network net, data d, int n); |
| | | float train_network_batch(network net, data d, int n); |
| | | void train_network(network net, data d); |
| | | matrix network_predict_data(network net, data test); |
| | | float network_accuracy(network net, data d); |
| | | float *get_network_output(network net); |
| | |
| | | int get_predicted_class_network(network net); |
| | | void print_network(network net); |
| | | void visualize_network(network net); |
| | | void save_network(network net, char *filename); |
| | | int resize_network(network net, int h, int w, int c); |
| | | int get_network_input_size(network net); |
| | | |
| | |
| | | check_error(cl); |
| | | } |
| | | |
| | | cl_mem cl_sub_array(cl_mem src, int offset, int size) |
| | | { |
| | | cl_buffer_region r; |
| | | r.origin = offset*sizeof(float); |
| | | r.size = size*sizeof(float); |
| | | cl_mem sub = clCreateSubBuffer(src, CL_MEM_USE_HOST_PTR, CL_BUFFER_CREATE_TYPE_REGION, &r, 0); |
| | | return sub; |
| | | } |
| | | |
| | | cl_mem cl_make_array(float *x, int n) |
| | | { |
| | | cl_setup(); |
| | |
| | | void cl_write_array(cl_mem mem, float *x, int n); |
| | | cl_mem cl_make_array(float *x, int n); |
| | | void cl_copy_array(cl_mem src, cl_mem dst, int n); |
| | | cl_mem cl_sub_array(cl_mem src, int offset, int size); |
| | | #endif |
| | | #endif |
| | |
| | | return def; |
| | | } |
| | | |
| | | float option_find_float_quiet(list *l, char *key, float def) |
| | | { |
| | | char *v = option_find(l, key); |
| | | if(v) return atof(v); |
| | | return def; |
| | | } |
| | | |
| | | float option_find_float(list *l, char *key, float def) |
| | | { |
| | | char *v = option_find(l, key); |
| | |
| | | char *option_find_str(list *l, char *key, char *def); |
| | | int option_find_int(list *l, char *key, int def); |
| | | float option_find_float(list *l, char *key, float def); |
| | | float option_find_float_quiet(list *l, char *key, float def); |
| | | void option_unused(list *l); |
| | | |
| | | #endif |
| | |
| | | #include "maxpool_layer.h" |
| | | #include "normalization_layer.h" |
| | | #include "softmax_layer.h" |
| | | #include "dropout_layer.h" |
| | | #include "list.h" |
| | | #include "option_list.h" |
| | | #include "utils.h" |
| | |
| | | int is_convolutional(section *s); |
| | | int is_connected(section *s); |
| | | int is_maxpool(section *s); |
| | | int is_dropout(section *s); |
| | | int is_softmax(section *s); |
| | | int is_normalization(section *s); |
| | | list *read_cfg(char *filename); |
| | |
| | | free(s); |
| | | } |
| | | |
| | | convolutional_layer *parse_convolutional(list *options, network net, int count) |
| | | convolutional_layer *parse_convolutional(list *options, network *net, int count) |
| | | { |
| | | int i; |
| | | int h,w,c; |
| | | float learning_rate, momentum, decay; |
| | | int n = option_find_int(options, "filters",1); |
| | | int size = option_find_int(options, "size",1); |
| | | int stride = option_find_int(options, "stride",1); |
| | |
| | | char *activation_s = option_find_str(options, "activation", "sigmoid"); |
| | | ACTIVATION activation = get_activation(activation_s); |
| | | if(count == 0){ |
| | | learning_rate = option_find_float(options, "learning_rate", .001); |
| | | momentum = option_find_float(options, "momentum", .9); |
| | | decay = option_find_float(options, "decay", .0001); |
| | | h = option_find_int(options, "height",1); |
| | | w = option_find_int(options, "width",1); |
| | | c = option_find_int(options, "channels",1); |
| | | net.batch = option_find_int(options, "batch",1); |
| | | net->batch = option_find_int(options, "batch",1); |
| | | net->learning_rate = learning_rate; |
| | | net->momentum = momentum; |
| | | net->decay = decay; |
| | | }else{ |
| | | image m = get_network_image_layer(net, count-1); |
| | | learning_rate = option_find_float_quiet(options, "learning_rate", net->learning_rate); |
| | | momentum = option_find_float_quiet(options, "momentum", net->momentum); |
| | | decay = option_find_float_quiet(options, "decay", net->decay); |
| | | image m = get_network_image_layer(*net, count-1); |
| | | h = m.h; |
| | | w = m.w; |
| | | c = m.c; |
| | | if(h == 0) error("Layer before convolutional layer must output image."); |
| | | } |
| | | convolutional_layer *layer = make_convolutional_layer(net.batch,h,w,c,n,size,stride,pad,activation); |
| | | convolutional_layer *layer = make_convolutional_layer(net->batch,h,w,c,n,size,stride,pad,activation,learning_rate,momentum,decay); |
| | | char *data = option_find_str(options, "data", 0); |
| | | if(data){ |
| | | char *curr = data; |
| | |
| | | curr = next+1; |
| | | } |
| | | } |
| | | char *weights = option_find_str(options, "weights", 0); |
| | | char *biases = option_find_str(options, "biases", 0); |
| | | if(biases){ |
| | | char *curr = biases; |
| | | char *next = biases; |
| | | int done = 0; |
| | | for(i = 0; i < n && !done; ++i){ |
| | | while(*++next !='\0' && *next != ','); |
| | | if(*next == '\0') done = 1; |
| | | *next = '\0'; |
| | | sscanf(curr, "%g", &layer->biases[i]); |
| | | curr = next+1; |
| | | } |
| | | } |
| | | if(weights){ |
| | | char *curr = weights; |
| | | char *next = weights; |
| | | int done = 0; |
| | | for(i = 0; i < c*n*size*size && !done; ++i){ |
| | | while(*++next !='\0' && *next != ','); |
| | | if(*next == '\0') done = 1; |
| | | *next = '\0'; |
| | | sscanf(curr, "%g", &layer->filters[i]); |
| | | curr = next+1; |
| | | } |
| | | } |
| | | option_unused(options); |
| | | return layer; |
| | | } |
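| | |
For reference, a first [convolutional] section in the format this parser and the new print_convolutional_cfg (below) agree on might look like this, with illustrative values, followed by biases= and weights= lines carrying the comma-separated parameter dumps:

    [convolutional]
    batch=128
    height=28
    width=28
    channels=1
    learning_rate=.001
    momentum=.9
    decay=.0001
    filters=16
    size=5
    stride=1
    pad=0
    activation=ramp

Later layers may omit learning_rate/momentum/decay; option_find_float_quiet falls back to the network-wide values.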
| | | |
| | | connected_layer *parse_connected(list *options, network net, int count) |
| | | connected_layer *parse_connected(list *options, network *net, int count) |
| | | { |
| | | int i; |
| | | int input; |
| | | float learning_rate, momentum, decay; |
| | | int output = option_find_int(options, "output",1); |
| | | float dropout = option_find_float(options, "dropout", 0.); |
| | | char *activation_s = option_find_str(options, "activation", "sigmoid"); |
| | | ACTIVATION activation = get_activation(activation_s); |
| | | if(count == 0){ |
| | | input = option_find_int(options, "input",1); |
| | | net.batch = option_find_int(options, "batch",1); |
| | | net->batch = option_find_int(options, "batch",1); |
| | | learning_rate = option_find_float(options, "learning_rate", .001); |
| | | momentum = option_find_float(options, "momentum", .9); |
| | | decay = option_find_float(options, "decay", .0001); |
| | | net->learning_rate = learning_rate; |
| | | net->momentum = momentum; |
| | | net->decay = decay; |
| | | }else{ |
| | | input = get_network_output_size_layer(net, count-1); |
| | | learning_rate = option_find_float_quiet(options, "learning_rate", net->learning_rate); |
| | | momentum = option_find_float_quiet(options, "momentum", net->momentum); |
| | | decay = option_find_float_quiet(options, "decay", net->decay); |
| | | input = get_network_output_size_layer(*net, count-1); |
| | | } |
| | | connected_layer *layer = make_connected_layer(net.batch, input, output, dropout, activation); |
| | | connected_layer *layer = make_connected_layer(net->batch, input, output, activation,learning_rate,momentum,decay); |
| | | char *data = option_find_str(options, "data", 0); |
| | | if(data){ |
| | | char *curr = data; |
| | |
| | | return layer; |
| | | } |
| | | |
| | | softmax_layer *parse_softmax(list *options, network net, int count) |
| | | softmax_layer *parse_softmax(list *options, network *net, int count) |
| | | { |
| | | int input; |
| | | if(count == 0){ |
| | | input = option_find_int(options, "input",1); |
| | | net.batch = option_find_int(options, "batch",1); |
| | | net->batch = option_find_int(options, "batch",1); |
| | | }else{ |
| | | input = get_network_output_size_layer(net, count-1); |
| | | input = get_network_output_size_layer(*net, count-1); |
| | | } |
| | | softmax_layer *layer = make_softmax_layer(net.batch, input); |
| | | softmax_layer *layer = make_softmax_layer(net->batch, input); |
| | | option_unused(options); |
| | | return layer; |
| | | } |
| | | |
| | | maxpool_layer *parse_maxpool(list *options, network net, int count) |
| | | maxpool_layer *parse_maxpool(list *options, network *net, int count) |
| | | { |
| | | int h,w,c; |
| | | int stride = option_find_int(options, "stride",1); |
| | | int size = option_find_int(options, "size",stride); |
| | | if(count == 0){ |
| | | h = option_find_int(options, "height",1); |
| | | w = option_find_int(options, "width",1); |
| | | c = option_find_int(options, "channels",1); |
| | | net.batch = option_find_int(options, "batch",1); |
| | | net->batch = option_find_int(options, "batch",1); |
| | | }else{ |
| | | image m = get_network_image_layer(net, count-1); |
| | | image m = get_network_image_layer(*net, count-1); |
| | | h = m.h; |
| | | w = m.w; |
| | | c = m.c; |
| | | if(h == 0) error("Layer before convolutional layer must output image."); |
| | | } |
| | | maxpool_layer *layer = make_maxpool_layer(net.batch,h,w,c,stride); |
| | | maxpool_layer *layer = make_maxpool_layer(net->batch,h,w,c,size,stride); |
| | | option_unused(options); |
| | | return layer; |
| | | } |
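| | |
Because size defaults to stride, existing cfgs without a size key keep their old non-overlapping pooling; an overlapping 3x3, stride-2 pool would be written as (illustrative):

    [maxpool]
    size=3
    stride=2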
| | | |
| | | normalization_layer *parse_normalization(list *options, network net, int count) |
| | | dropout_layer *parse_dropout(list *options, network *net, int count) |
| | | { |
| | | int input; |
| | | float probability = option_find_float(options, "probability", .5); |
| | | if(count == 0){ |
| | | net->batch = option_find_int(options, "batch",1); |
| | | input = option_find_int(options, "input",1); |
| | | }else{ |
| | | input = get_network_output_size_layer(*net, count-1); |
| | | } |
| | | dropout_layer *layer = make_dropout_layer(net->batch,input,probability); |
| | | option_unused(options); |
| | | return layer; |
| | | } |
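| | |
An illustrative section for the new layer type; probability falls back to .5 when omitted, and input/batch are only needed when it is the first layer:

    [dropout]
    probability=.5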
| | | |
| | | normalization_layer *parse_normalization(list *options, network *net, int count) |
| | | { |
| | | int h,w,c; |
| | | int size = option_find_int(options, "size",1); |
| | |
| | | h = option_find_int(options, "height",1); |
| | | w = option_find_int(options, "width",1); |
| | | c = option_find_int(options, "channels",1); |
| | | net.batch = option_find_int(options, "batch",1); |
| | | net->batch = option_find_int(options, "batch",1); |
| | | }else{ |
| | | image m = get_network_image_layer(net, count-1); |
| | | image m = get_network_image_layer(*net, count-1); |
| | | h = m.h; |
| | | w = m.w; |
| | | c = m.c; |
| | | if(h == 0) error("Layer before convolutional layer must output image."); |
| | | } |
| | | normalization_layer *layer = make_normalization_layer(net.batch,h,w,c,size, alpha, beta, kappa); |
| | | normalization_layer *layer = make_normalization_layer(net->batch,h,w,c,size, alpha, beta, kappa); |
| | | option_unused(options); |
| | | return layer; |
| | | } |
| | |
| | | section *s = (section *)n->val; |
| | | list *options = s->options; |
| | | if(is_convolutional(s)){ |
| | | convolutional_layer *layer = parse_convolutional(options, net, count); |
| | | convolutional_layer *layer = parse_convolutional(options, &net, count); |
| | | net.types[count] = CONVOLUTIONAL; |
| | | net.layers[count] = layer; |
| | | net.batch = layer->batch; |
| | | }else if(is_connected(s)){ |
| | | connected_layer *layer = parse_connected(options, net, count); |
| | | connected_layer *layer = parse_connected(options, &net, count); |
| | | net.types[count] = CONNECTED; |
| | | net.layers[count] = layer; |
| | | net.batch = layer->batch; |
| | | }else if(is_softmax(s)){ |
| | | softmax_layer *layer = parse_softmax(options, net, count); |
| | | softmax_layer *layer = parse_softmax(options, &net, count); |
| | | net.types[count] = SOFTMAX; |
| | | net.layers[count] = layer; |
| | | net.batch = layer->batch; |
| | | }else if(is_maxpool(s)){ |
| | | maxpool_layer *layer = parse_maxpool(options, net, count); |
| | | maxpool_layer *layer = parse_maxpool(options, &net, count); |
| | | net.types[count] = MAXPOOL; |
| | | net.layers[count] = layer; |
| | | net.batch = layer->batch; |
| | | }else if(is_normalization(s)){ |
| | | normalization_layer *layer = parse_normalization(options, net, count); |
| | | normalization_layer *layer = parse_normalization(options, &net, count); |
| | | net.types[count] = NORMALIZATION; |
| | | net.layers[count] = layer; |
| | | net.batch = layer->batch; |
| | | }else if(is_dropout(s)){ |
| | | dropout_layer *layer = parse_dropout(options, &net, count); |
| | | net.types[count] = DROPOUT; |
| | | net.layers[count] = layer; |
| | | }else{ |
| | | fprintf(stderr, "Type not recognized: %s\n", s->type); |
| | | } |
| | |
| | | return (strcmp(s->type, "[max]")==0 |
| | | || strcmp(s->type, "[maxpool]")==0); |
| | | } |
| | | int is_dropout(section *s) |
| | | { |
| | | return (strcmp(s->type, "[dropout]")==0); |
| | | } |
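A cfg section exercising the new type check and parse_dropout might look like this (a made-up example; `probability` falls back to 0.5 when omitted, and `batch`/`input` are read only when dropout is the first layer):

    [dropout]
    probability=0.5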
| | | |
| | | int is_softmax(section *s) |
| | | { |
| | |
| | | return sections; |
| | | } |
| | | |
| | | void print_convolutional_cfg(FILE *fp, convolutional_layer *l, network net, int count) |
| | | { |
| | | int i; |
| | | fprintf(fp, "[convolutional]\n"); |
| | | if(count == 0) { |
| | | fprintf(fp, "batch=%d\n" |
| | | "height=%d\n" |
| | | "width=%d\n" |
| | | "channels=%d\n" |
| | | "learning_rate=%g\n" |
| | | "momentum=%g\n" |
| | | "decay=%g\n", |
| | | l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay); |
| | | } else { |
| | | if(l->learning_rate != net.learning_rate) |
| | | fprintf(fp, "learning_rate=%g\n", l->learning_rate); |
| | | if(l->momentum != net.momentum) |
| | | fprintf(fp, "momentum=%g\n", l->momentum); |
| | | if(l->decay != net.decay) |
| | | fprintf(fp, "decay=%g\n", l->decay); |
| | | } |
| | | fprintf(fp, "filters=%d\n" |
| | | "size=%d\n" |
| | | "stride=%d\n" |
| | | "pad=%d\n" |
| | | "activation=%s\n", |
| | | l->n, l->size, l->stride, l->pad, |
| | | get_activation_string(l->activation)); |
| | | fprintf(fp, "biases="); |
| | | for(i = 0; i < l->n; ++i) fprintf(fp, "%g,", l->biases[i]); |
| | | fprintf(fp, "\n"); |
| | | fprintf(fp, "weights="); |
| | | for(i = 0; i < l->n*l->c*l->size*l->size; ++i) fprintf(fp, "%g,", l->filters[i]); |
| | | fprintf(fp, "\n\n"); |
| | | } |
| | | void print_connected_cfg(FILE *fp, connected_layer *l, network net, int count) |
| | | { |
| | | int i; |
| | | fprintf(fp, "[connected]\n"); |
| | | if(count == 0){ |
| | | fprintf(fp, "batch=%d\n" |
| | | "input=%d\n" |
| | | "learning_rate=%g\n" |
| | | "momentum=%g\n" |
| | | "decay=%g\n", |
| | | l->batch, l->inputs, l->learning_rate, l->momentum, l->decay); |
| | | } else { |
| | | if(l->learning_rate != net.learning_rate) |
| | | fprintf(fp, "learning_rate=%g\n", l->learning_rate); |
| | | if(l->momentum != net.momentum) |
| | | fprintf(fp, "momentum=%g\n", l->momentum); |
| | | if(l->decay != net.decay) |
| | | fprintf(fp, "decay=%g\n", l->decay); |
| | | } |
| | | fprintf(fp, "output=%d\n" |
| | | "activation=%s\n", |
| | | l->outputs, |
| | | get_activation_string(l->activation)); |
| | | fprintf(fp, "data="); |
| | | for(i = 0; i < l->outputs; ++i) fprintf(fp, "%g,", l->biases[i]); |
| | | for(i = 0; i < l->inputs*l->outputs; ++i) fprintf(fp, "%g,", l->weights[i]); |
| | | fprintf(fp, "\n\n"); |
| | | } |
| | | |
| | | void print_maxpool_cfg(FILE *fp, maxpool_layer *l, network net, int count) |
| | | { |
| | | fprintf(fp, "[maxpool]\n"); |
| | | if(count == 0) fprintf(fp, "batch=%d\n" |
| | | "height=%d\n" |
| | | "width=%d\n" |
| | | "channels=%d\n", |
| | | l->batch,l->h, l->w, l->c); |
| | | fprintf(fp, "size=%d\nstride=%d\n\n", l->size, l->stride); |
| | | } |
| | | |
| | | void print_normalization_cfg(FILE *fp, normalization_layer *l, network net, int count) |
| | | { |
| | | fprintf(fp, "[localresponsenormalization]\n"); |
| | | if(count == 0) fprintf(fp, "batch=%d\n" |
| | | "height=%d\n" |
| | | "width=%d\n" |
| | | "channels=%d\n", |
| | | l->batch,l->h, l->w, l->c); |
| | | fprintf(fp, "size=%d\n" |
| | | "alpha=%g\n" |
| | | "beta=%g\n" |
| | | "kappa=%g\n\n", l->size, l->alpha, l->beta, l->kappa); |
| | | } |
| | | |
| | | void print_softmax_cfg(FILE *fp, softmax_layer *l, network net, int count) |
| | | { |
| | | fprintf(fp, "[softmax]\n"); |
| | | if(count == 0) fprintf(fp, "batch=%d\ninput=%d\n", l->batch, l->inputs); |
| | | fprintf(fp, "\n"); |
| | | } |
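Note that no print_dropout_cfg is defined and save_network below has no DROPOUT branch, so dropout sections are silently lost when a network is written back out. A sketch of the missing printer in the style of its neighbors -- hypothetical, not part of this commit, and it assumes the dropout_layer struct exposes batch, inputs, and probability:

    void print_dropout_cfg(FILE *fp, dropout_layer *l, network net, int count)
    {
        fprintf(fp, "[dropout]\n");
        if(count == 0) fprintf(fp, "batch=%d\ninput=%d\n", l->batch, l->inputs);
        fprintf(fp, "probability=%g\n\n", l->probability);
    }

together with a matching `else if(net.types[i] == DROPOUT)` case in the save_network loop.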
| | | |
| | | void save_network(network net, char *filename) |
| | | { |
| | | FILE *fp = fopen(filename, "w"); |
| | | if(!fp) file_error(filename); |
| | | int i; |
| | | for(i = 0; i < net.n; ++i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL) |
| | | print_convolutional_cfg(fp, (convolutional_layer *)net.layers[i], net, i); |
| | | else if(net.types[i] == CONNECTED) |
| | | print_connected_cfg(fp, (connected_layer *)net.layers[i], net, i); |
| | | else if(net.types[i] == MAXPOOL) |
| | | print_maxpool_cfg(fp, (maxpool_layer *)net.layers[i], net, i); |
| | | else if(net.types[i] == NORMALIZATION) |
| | | print_normalization_cfg(fp, (normalization_layer *)net.layers[i], net, i); |
| | | else if(net.types[i] == SOFTMAX) |
| | | print_softmax_cfg(fp, (softmax_layer *)net.layers[i], net, i); |
| | | } |
| | | fclose(fp); |
| | | } |
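With the declarations in the header below, a config round trip is just a parse followed by a save. A minimal sketch -- the header name and cfg paths here are assumptions, and training is elided:

    #include "parser.h"

    int main()
    {
        network net = parse_network_cfg("cfg/example.cfg");  /* hypothetical path */
        /* ... training would go here ... */
        save_network(net, "cfg/example.saved.cfg");
        return 0;
    }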
| | | |
| | |
| | | #include "network.h" |
| | | |
| | | network parse_network_cfg(char *filename); |
| | | void save_network(network net, char *filename); |
| | | |
| | | #endif |
| | |
| | | #include "softmax_layer.h" |
| | | #include "mini_blas.h" |
| | | #include <math.h> |
| | | #include <stdlib.h> |
| | | #include <stdio.h> |
| | |
| | | layer->inputs = inputs; |
| | | layer->output = calloc(inputs*batch, sizeof(float)); |
| | | layer->delta = calloc(inputs*batch, sizeof(float)); |
| | | layer->jacobian = calloc(inputs*inputs*batch, sizeof(float)); |
| | | return layer; |
| | | } |
| | | |
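Note that the jacobian buffer holds inputs*inputs*batch floats, and it only serves the Jacobian-based backward pass that is commented out below: for a 1000-way softmax at batch 128 that is 1000*1000*128*4 bytes, roughly 512 MB.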
| | |
| | | |
| | | void backward_softmax_layer(const softmax_layer layer, float *input, float *delta) |
| | | { |
| | | /* |
| | | int i,j,b; |
| | | for(b = 0; b < layer.batch; ++b){ |
| | | for(i = 0; i < layer.inputs; ++i){ |
| | | for(j = 0; j < layer.inputs; ++j){ |
| | | int d = (i==j); |
| | | layer.jacobian[b*layer.inputs*layer.inputs + i*layer.inputs + j] = |
| | | layer.output[b*layer.inputs + i] * (d - layer.output[b*layer.inputs + j]); |
| | | } |
| | | } |
| | | } |
| | | for(b = 0; b < layer.batch; ++b){ |
| | | int M = layer.inputs; |
| | | int N = 1; |
| | | int K = layer.inputs; |
| | | float *A = layer.jacobian + b*layer.inputs*layer.inputs; |
| | | float *B = layer.delta + b*layer.inputs; |
| | | float *C = delta + b*layer.inputs; |
| | | gemm(0,0,M,N,K,1,A,K,B,N,0,C,N); |
| | | } |
| | | */ |
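| | | /* For softmax output s, the Jacobian is ds_i/dx_j = s_i*((i==j) - s_j),
| | |  * which is exactly what the block above builds row by row before folding
| | |  * it into layer.delta with gemm. The live path below instead copies
| | |  * layer.delta straight through; that is the correct gradient whenever
| | |  * layer.delta already stores dE/dx rather than dE/ds -- e.g. softmax
| | |  * paired with cross-entropy, where dE/dx_i collapses to s_i - t_i. */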
| | | |
| | | int i; |
| | | for(i = 0; i < layer.inputs*layer.batch; ++i){ |
| | | delta[i] = layer.delta[i]; |
| | |
| | | int batch; |
| | | float *delta; |
| | | float *output; |
| | | float *jacobian; |
| | | } softmax_layer; |
| | | |
| | | softmax_layer *make_softmax_layer(int batch, int inputs); |