From 9b3c7136f34d4cad593467cd785f44ebb05bf878 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 16 Oct 2014 22:17:23 +0000
Subject: [PATCH] Fixing up maxpool layer; adding GPU connected layer and GPU SGD training

---
 Makefile              |  1 -
 src/cnn.c             | 27 +++------
 src/connected_layer.c | 72 +++++++++++++++++++++++-
 src/connected_layer.h |  8 ++-
 src/maxpool_layer.c   | 57 +++++++++----------
 src/maxpool_layer.h   |  2 +-
 src/network.c         | 72 +++++++++++++++-----
 src/network.h         |  5 ++-
 8 files changed, 173 insertions(+), 71 deletions(-)

diff --git a/Makefile b/Makefile
index c4abedd..315e626 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,6 @@
 endif
 UNAME = $(shell uname)
 OPTS=-Ofast -flto
-OPTS=-Ofast -flto
 ifeq ($(UNAME), Darwin)
 COMMON+= -isystem /usr/local/Cellar/opencv/2.4.6.1/include/opencv -isystem /usr/local/Cellar/opencv/2.4.6.1/include
 ifeq ($(GPU), 1)
diff --git a/src/cnn.c b/src/cnn.c
index df3efa6..bfba26a 100644
--- a/src/cnn.c
+++ b/src/cnn.c
@@ -278,29 +278,20 @@
 	free_data(train);
 }
 
-void train_full()
+void train_assira()
 {
-	network net = parse_network_cfg("cfg/imagenet.cfg");
+	network net = parse_network_cfg("cfg/assira.cfg");
 	srand(2222222);
 	int i = 0;
 	char *labels[] = {"cat","dog"};
-	float lr = .00001;
-	float momentum = .9;
-	float decay = 0.01;
 	while(1){
 		i += 1000;
-		data train = load_data_image_pathfile_random("images/assira/train.list", 1000, labels, 2, 256, 256);
-		//image im = float_to_image(256, 256, 3,train.X.vals[0]);
-		//visualize_network(net);
-		//cvWaitKey(100);
-		//show_image(im, "input");
-		//cvWaitKey(100);
-		//scale_data_rows(train, 1./255.);
+		data train = load_data_image_pathfile_random("data/assira/train.list", 1000, labels, 2, 256, 256);
 		normalize_data_rows(train);
 		clock_t start = clock(), end;
-		float loss = train_network_sgd(net, train, 1000);
+		float loss = train_network_sgd_gpu(net, train, 10);
 		end = clock();
-		printf("%d: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", i, loss, (float)(end-start)/CLOCKS_PER_SEC, lr, momentum, decay);
+		printf("%d: %f, Time: %lf seconds\n", i, loss, (float)(end-start)/CLOCKS_PER_SEC );
 		free_data(train);
 		if(i%10000==0){
 			char buff[256];
@@ -367,10 +358,10 @@
     data train = load_all_cifar10();
     while(++count <= 10000){
         clock_t start = clock(), end;
-        float loss = train_network_sgd(net, train, iters);
+        float loss = train_network_sgd_gpu(net, train, iters);
         end = clock();
-        visualize_network(net);
-        cvWaitKey(5000);
+        //visualize_network(net);
+        //cvWaitKey(5000);
 
         //float test_acc = network_accuracy(net, test);
         //printf("%d: Loss: %f, Test Acc: %f, Time: %lf seconds, LR: %f, Momentum: %f, Decay: %f\n", count, loss, test_acc,(float)(end-start)/CLOCKS_PER_SEC, net.learning_rate, net.momentum, net.decay);
@@ -902,7 +893,7 @@
 
 int main(int argc, char *argv[])
 {
-    //train_full();
+    //train_assira();
     //test_distribution();
     //feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
 
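For reference, the training loop that train_assira() now runs reduces to the following shape (a condensed sketch; the RNG seed, the timing code, and the periodic checkpoint block are left out):

    network net = parse_network_cfg("cfg/assira.cfg");
    char *labels[] = {"cat", "dog"};
    while(1){
        /* stream 1000 random labeled images per pass instead of holding
         * the whole Asirra set in memory */
        data train = load_data_image_pathfile_random("data/assira/train.list",
                                                     1000, labels, 2, 256, 256);
        normalize_data_rows(train);  /* per-row normalization of the inputs */
        float loss = train_network_sgd_gpu(net, train, 10);  /* 10 mini-batches on the GPU */
        free_data(train);
    }

Since train_network_sgd_gpu() divides the accumulated cost by n*batch (see the network.c hunk below), the printed loss is an average per example and stays comparable across batch sizes.
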
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 03590d6..ba83dc3 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -38,9 +38,17 @@
     for(i = 0; i < outputs; ++i){
         //layer->biases[i] = rand_normal()*scale + scale;
         layer->biases[i] = 1;
-        }
+    }
 
     #ifdef GPU
+    layer->weights_cl = cl_make_array(layer->weights, inputs*outputs);
+    layer->biases_cl = cl_make_array(layer->biases, outputs);
+
+    layer->weight_updates_cl = cl_make_array(layer->weight_updates, inputs*outputs);
+    layer->bias_updates_cl = cl_make_array(layer->bias_updates, outputs);
+
+    layer->output_cl = cl_make_array(layer->output, outputs*batch);
+    layer->delta_cl = cl_make_array(layer->delta, outputs*batch);
     #endif
     layer->activation = activation;
     return layer;
@@ -76,8 +84,8 @@
 {
     int i;
     gradient_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.delta);
-    for(i = 0; i < layer.outputs*layer.batch; ++i){
-        layer.bias_updates[i%layer.outputs] += layer.delta[i];
+    for(i = 0; i < layer.batch; ++i){
+        axpy_cpu(layer.outputs, 1, layer.delta + i*layer.outputs, 1, layer.bias_updates, 1);
     }
     int m = layer.inputs;
     int k = layer.batch;
@@ -98,3 +106,61 @@
     if(c) gemm(0,1,m,n,k,1,a,k,b,k,0,c,n);
 }
 
+#ifdef GPU
+
+void update_connected_layer_gpu(connected_layer layer)
+{
+    axpy_ongpu(layer.outputs, layer.learning_rate, layer.bias_updates_cl, 1, layer.biases_cl, 1);
+    scal_ongpu(layer.outputs, layer.momentum, layer.bias_updates_cl, 1);
+
+    scal_ongpu(layer.inputs*layer.outputs, 1.-layer.learning_rate*layer.decay, layer.weights_cl, 1);
+    axpy_ongpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates_cl, 1, layer.weights_cl, 1);
+    scal_ongpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates_cl, 1);
+}
+
+void forward_connected_layer_gpu(connected_layer layer, cl_mem input)
+{
+    int i;
+    for(i = 0; i < layer.batch; ++i){
+        cl_mem sub = cl_sub_array(layer.output_cl, i*layer.outputs, layer.outputs);
+        copy_ongpu(layer.outputs, layer.biases_cl, 1, sub, 1);
+        clReleaseMemObject(sub);
+    }
+    int m = layer.batch;
+    int k = layer.inputs;
+    int n = layer.outputs;
+    cl_mem a = input;
+    cl_mem b = layer.weights_cl;
+    cl_mem c = layer.output_cl;
+    gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
+    activate_array_ongpu(layer.output_cl, layer.outputs*layer.batch, layer.activation);
+}
+
+void backward_connected_layer_gpu(connected_layer layer, cl_mem input, cl_mem delta)
+{
+    int i;
+    gradient_array_ongpu(layer.output_cl, layer.outputs*layer.batch, layer.activation, layer.delta_cl);
+    for(i = 0; i < layer.batch; ++i){
+        cl_mem sub = cl_sub_array(layer.delta_cl, i*layer.outputs, layer.outputs);
+        axpy_ongpu(layer.outputs, 1, sub, 1, layer.bias_updates_cl, 1);
+        clReleaseMemObject(sub);
+    }
+    int m = layer.inputs;
+    int k = layer.batch;
+    int n = layer.outputs;
+    cl_mem a = input;
+    cl_mem b = layer.delta_cl;
+    cl_mem c = layer.weight_updates_cl;
+    gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
+
+    m = layer.batch;
+    k = layer.outputs;
+    n = layer.inputs;
+
+    a = layer.delta_cl;
+    b = layer.weights_cl;
+    c = delta;
+
+    if(c) gemm_ongpu(0,1,m,n,k,1,a,k,b,k,0,c,n);
+}
+#endif
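
For clarity, this is what the forward GEMM above computes, written as a plain CPU reference (a sketch assuming row-major storage, as elsewhere in the codebase; it is not a drop-in for the OpenCL path):

    /* m = batch, k = inputs, n = outputs:
     * output (batch x outputs) = input (batch x inputs) * weights (inputs x outputs),
     * with the biases broadcast into every output row first (hence beta = 1
     * in gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n)). */
    void connected_forward_reference(int batch, int inputs, int outputs,
                                     const float *input, const float *weights,
                                     const float *biases, float *output)
    {
        int b, o, i;
        for(b = 0; b < batch; ++b){
            for(o = 0; o < outputs; ++o){
                float sum = biases[o];
                for(i = 0; i < inputs; ++i){
                    sum += input[b*inputs + i] * weights[i*outputs + o];
                }
                output[b*outputs + o] = sum;
            }
        }
    }

The backward pass is the same data through two transposed GEMMs: input^T * delta accumulates into weight_updates_cl, and delta * weights^T overwrites the previous layer's delta (skipped entirely when there is no previous layer, hence the if(c) guard).
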
diff --git a/src/connected_layer.h b/src/connected_layer.h
index 9181fe2..19bcfa2 100644
--- a/src/connected_layer.h
+++ b/src/connected_layer.h
@@ -31,9 +31,6 @@
     cl_mem weight_updates_cl;
     cl_mem bias_updates_cl;
 
-    cl_mem weight_momentum_cl;
-    cl_mem bias_momentum_cl;
-
     cl_mem output_cl;
     cl_mem delta_cl;
     #endif
@@ -47,6 +44,11 @@
 void backward_connected_layer(connected_layer layer, float *input, float *delta);
 void update_connected_layer(connected_layer layer);
 
+#ifdef GPU
+void forward_connected_layer_gpu(connected_layer layer, cl_mem input);
+void backward_connected_layer_gpu(connected_layer layer, cl_mem input, cl_mem delta);
+void update_connected_layer_gpu(connected_layer layer);
+#endif
 
 #endif
 
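The intended call order for the new GPU entry points mirrors the CPU API; a usage sketch, where input_cl and delta_cl stand for whatever device buffers the caller owns:

    #ifdef GPU
    forward_connected_layer_gpu(layer, input_cl);            /* fills layer.output_cl */
    backward_connected_layer_gpu(layer, input_cl, delta_cl); /* accumulates the *_updates_cl
                                                              * buffers; delta_cl may be 0
                                                              * for the first layer */
    update_connected_layer_gpu(layer);                       /* SGD step with momentum
                                                              * and weight decay */
    #endif
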
diff --git a/src/maxpool_layer.c b/src/maxpool_layer.c
index 070eaba..01eed45 100644
--- a/src/maxpool_layer.c
+++ b/src/maxpool_layer.c
@@ -27,7 +27,7 @@
     layer->c = c;
     layer->size = size;
     layer->stride = stride;
-    layer->max_indexes = calloc(((h-1)/stride+1) * ((w-1)/stride+1) * c*batch, sizeof(int));
+    layer->indexes = calloc(((h-1)/stride+1) * ((w-1)/stride+1) * c*batch, sizeof(int));
     layer->output = calloc(((h-1)/stride+1) * ((w-1)/stride+1) * c*batch, sizeof(float));
     layer->delta = calloc(((h-1)/stride+1) * ((w-1)/stride+1) * c*batch, sizeof(float));
     return layer;
@@ -44,36 +44,35 @@
 
 void forward_maxpool_layer(const maxpool_layer layer, float *input)
 {
-    int b;
+    int b,i,j,k,l,m;
+    int w_offset = (-layer.size-1)/2 + 1;
+    int h_offset = (-layer.size-1)/2 + 1;
+
+    int h = (layer.h-1)/layer.stride + 1;
+    int w = (layer.w-1)/layer.stride + 1;
+    int c = layer.c;
+
     for(b = 0; b < layer.batch; ++b){
-        int h = (layer.h-1)/layer.stride + 1;
-        int w = (layer.w-1)/layer.stride + 1;
-        int c = layer.c;
-
-        int i,j,k,l,m;
-        for(k = 0; k < layer.c; ++k){
-            for(i = 0; i < layer.h; i += layer.stride){
-                for(j = 0; j < layer.w; j += layer.stride){
-                    int out_index = j/layer.stride + w*(i/layer.stride + h*(k + c*b));
-                    layer.output[out_index] = -FLT_MAX;
-                    int lower = (-layer.size-1)/2 + 1;
-                    int upper = layer.size/2 + 1;
-
-                    int lh = (i+lower < 0)       ? 0 : i+lower;
-                    int uh = (i+upper > layer.h) ? layer.h : i+upper;
-
-                    int lw = (j+lower < 0)       ? 0 : j+lower;
-                    int uw = (j+upper > layer.w) ? layer.w : j+upper;
-                    for(l = lh; l < uh; ++l){
-                        for(m = lw; m < uw; ++m){
-                            //printf("%d %d\n", l, m);
-                            int index = m + layer.w*(l + layer.h*(k + b*layer.c));
-                            if(input[index] > layer.output[out_index]){
-                                layer.output[out_index] = input[index];
-                                layer.max_indexes[out_index] = index;
-                            }
+        for(k = 0; k < c; ++k){
+            for(i = 0; i < h; ++i){
+                for(j = 0; j < w; ++j){
+                    int out_index = j + w*(i + h*(k + c*b));
+                    float max = -FLT_MAX;
+                    int max_i = -1;
+                    for(l = 0; l < layer.size; ++l){
+                        for(m = 0; m < layer.size; ++m){
+                            int cur_h = h_offset + i*layer.stride + l;
+                            int cur_w = w_offset + j*layer.stride + m;
+                            int index = cur_w + layer.w*(cur_h + layer.h*(k + b*layer.c));
+                            int valid = (cur_h >= 0 && cur_h < layer.h &&
+                                         cur_w >= 0 && cur_w < layer.w);
+                            float val = (valid != 0) ? input[index] : -INFINITY;
+                            max_i = (val > max) ? index : max_i;
+                            max   = (val > max) ? val   : max;
                         }
                     }
+                    layer.output[out_index] = max;
+                    layer.indexes[out_index] = max_i;
                 }
             }
         }
@@ -88,7 +87,7 @@
     int c = layer.c;
     memset(delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
     for(i = 0; i < h*w*c*layer.batch; ++i){
-        int index = layer.max_indexes[i];
+        int index = layer.indexes[i];
         delta[index] += layer.delta[i];
     }
 }
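
The rewrite stores, for every pooled output, the flat input index of the winning element in layer.indexes, so the backward pass becomes a pure scatter-add instead of recomputing window maxima. A toy 1-D version of the same trick (hypothetical helpers, not part of this patch):

    #include <float.h>

    void pool1d_forward(const float *in, int n, int size, float *out, int *indexes)
    {
        int o, l;
        for(o = 0; o*size < n; ++o){
            float max = -FLT_MAX;
            int max_i = -1;
            for(l = 0; l < size && o*size + l < n; ++l){
                if(in[o*size + l] > max){ max = in[o*size + l]; max_i = o*size + l; }
            }
            out[o] = max;
            indexes[o] = max_i;       /* remember where the max came from */
        }
    }

    void pool1d_backward(const float *out_delta, int n_out, const int *indexes, float *in_delta)
    {
        int o;
        /* assumes in_delta was zeroed first, cf. the memset above */
        for(o = 0; o < n_out; ++o){
            in_delta[indexes[o]] += out_delta[o];   /* gradient flows only to the max */
        }
    }

The branch-free (val > max) ? ... : ... form in the patch computes the same maximum while padding out-of-bounds taps with -INFINITY, presumably with a later GPU port of this loop in mind.
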
diff --git a/src/maxpool_layer.h b/src/maxpool_layer.h
index 9dd0482..9edb214 100644
--- a/src/maxpool_layer.h
+++ b/src/maxpool_layer.h
@@ -8,7 +8,7 @@
     int h,w,c;
     int stride;
     int size;
-    int *max_indexes;
+    int *indexes;
     float *delta;
     float *output;
 } maxpool_layer;
diff --git a/src/network.c b/src/network.c
index e4e4c8e..f9b4667 100644
--- a/src/network.c
+++ b/src/network.c
@@ -24,7 +24,8 @@
     net.outputs = 0;
     net.output = 0;
     #ifdef GPU
-    net.input_cl = 0;
+    net.input_cl = calloc(1, sizeof(cl_mem));
+    net.truth_cl = calloc(1, sizeof(cl_mem));
     #endif
     return net;
 }
@@ -43,12 +44,12 @@
             cost_layer layer = *(cost_layer *)net.layers[i];
             forward_cost_layer_gpu(layer, input, truth);
         }
-        /*
         else if(net.types[i] == CONNECTED){
             connected_layer layer = *(connected_layer *)net.layers[i];
-            forward_connected_layer(layer, input, train);
-            input = layer.output;
+            forward_connected_layer_gpu(layer, input);
+            input = layer.output_cl;
         }
+        /*
         else if(net.types[i] == SOFTMAX){
             softmax_layer layer = *(softmax_layer *)net.layers[i];
             forward_softmax_layer(layer, input);
@@ -94,6 +95,10 @@
             cost_layer layer = *(cost_layer *)net.layers[i];
             backward_cost_layer_gpu(layer, prev_input, prev_delta);
         }
+        else if(net.types[i] == CONNECTED){
+            connected_layer layer = *(connected_layer *)net.layers[i];
+            backward_connected_layer_gpu(layer, prev_input, prev_delta);
+        }
     }
 }
 
@@ -105,18 +110,9 @@
             convolutional_layer layer = *(convolutional_layer *)net.layers[i];
             update_convolutional_layer_gpu(layer);
         }
-        else if(net.types[i] == MAXPOOL){
-            //maxpool_layer layer = *(maxpool_layer *)net.layers[i];
-        }
-        else if(net.types[i] == SOFTMAX){
-            //maxpool_layer layer = *(maxpool_layer *)net.layers[i];
-        }
-        else if(net.types[i] == NORMALIZATION){
-            //maxpool_layer layer = *(maxpool_layer *)net.layers[i];
-        }
         else if(net.types[i] == CONNECTED){
             connected_layer layer = *(connected_layer *)net.layers[i];
-            update_connected_layer(layer);
+            update_connected_layer_gpu(layer);
         }
     }
 }
@@ -127,6 +123,10 @@
         convolutional_layer layer = *(convolutional_layer *)net.layers[i];
         return layer.output_cl;
     }
+    else if(net.types[i] == CONNECTED){
+        connected_layer layer = *(connected_layer *)net.layers[i];
+        return layer.output_cl;
+    }
     return 0;
 }
 
@@ -136,6 +136,10 @@
         convolutional_layer layer = *(convolutional_layer *)net.layers[i];
         return layer.delta_cl;
     }
+    else if(net.types[i] == CONNECTED){
+        connected_layer layer = *(connected_layer *)net.layers[i];
+        return layer.delta_cl;
+    }
     return 0;
 }
 
@@ -347,6 +351,46 @@
     }
 }
 
+#ifdef GPU
+float train_network_datum_gpu(network net, float *x, float *y)
+{
+    int x_size = get_network_input_size(net)*net.batch;
+    int y_size = get_network_output_size(net)*net.batch;
+    if(!*net.input_cl){
+        *net.input_cl = cl_make_array(x, x_size);
+        *net.truth_cl = cl_make_array(y, y_size);
+    }else{
+        cl_write_array(*net.input_cl, x, x_size);
+        cl_write_array(*net.truth_cl, y, y_size);
+    }
+    forward_network_gpu(net, *net.input_cl, *net.truth_cl, 1);
+    //int class = get_predicted_class_network(net);
+    backward_network_gpu(net, *net.input_cl);
+    float error = get_network_cost(net);
+    update_network_gpu(net);
+    //return (y[class]?1:0);
+    return error;
+}
+float train_network_sgd_gpu(network net, data d, int n)
+{
+    int batch = net.batch;
+    float *X = calloc(batch*d.X.cols, sizeof(float));
+    float *y = calloc(batch*d.y.cols, sizeof(float));
+
+    int i;
+    float sum = 0;
+    for(i = 0; i < n; ++i){
+        get_batch(d, batch, X, y);
+        float err = train_network_datum_gpu(net, X, y);
+        sum += err;
+    }
+    free(X);
+    free(y);
+    return (float)sum/(n*batch);
+}
+#endif
+
+
 float train_network_datum(network net, float *x, float *y)
 {
     forward_network(net, x, y, 1);
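
Usage-wise, the new entry point slots in wherever train_network_sgd() was called before; a sketch, with the cfg path and iteration count purely illustrative:

    network net = parse_network_cfg("cfg/assira.cfg");    /* needs a GPU=1 build */
    data d = load_all_cifar10();                          /* any loader that fills a data struct */
    float avg_cost = train_network_sgd_gpu(net, d, 100);  /* 100 mini-batches of net.batch examples */

train_network_datum_gpu() allocates the device-side input and truth buffers on the first call and only rewrites them afterwards; this is why make_network() now callocs the cl_mem slots, so that *net.input_cl starts out zero and the !*net.input_cl test doubles as a first-use check.
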
diff --git a/src/network.h b/src/network.h
index 37c145d..22e277c 100644
--- a/src/network.h
+++ b/src/network.h
@@ -30,8 +30,8 @@
     float *output;
 
     #ifdef GPU
-    cl_mem input_cl;
-    cl_mem output_cl;
+    cl_mem *input_cl;
+    cl_mem *truth_cl;
     #endif
 } network;
 
@@ -41,6 +41,7 @@
 void update_network_gpu(network net);
 cl_mem get_network_output_cl_layer(network net, int i);
 cl_mem get_network_delta_cl_layer(network net, int i);
+float train_network_sgd_gpu(network net, data d, int n);
 #endif
 
 network make_network(int n, int batch);

--
Gitblit v1.10.0