From 4bdf96bd6aafbec6bc3f0eab8739d6652878fd24 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 06 Dec 2013 21:26:09 +0000
Subject: [PATCH] New data format

---
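Note: a minimal sketch of how the new data API introduced below might be driven
end to end, mirroring test_nist() in src/tests.c. The "parser.h" include and the
.cfg/.csv paths are assumptions taken from the tests, not guaranteed by this
patch; everything else uses functions added or kept here.

    #include <stdio.h>
    #include "data.h"     /* data, load_categorical_data_csv, free_data */
    #include "network.h"  /* train_network_sgd, network_accuracy */
    #include "parser.h"   /* parse_network_cfg (assumed header name) */

    int main()
    {
        network net = parse_network_cfg("nist.cfg");
        /* column 0 of each CSV row is the label, one-hot encoded to 10 classes */
        data train = load_categorical_data_csv("mnist/mnist_train.csv", 0, 10);
        data test = load_categorical_data_csv("mnist/mnist_test.csv", 0, 10);
        normalize_data_rows(train);
        normalize_data_rows(test);
        randomize_data(train);

        /* one epoch of SGD with the step/momentum/decay used in test_nist() */
        double acc = train_network_sgd(net, train, .0005, .9, .001);
        printf("train (sampled): %f  test: %f\n", acc, network_accuracy(net, test));

        free_data(train);
        free_data(test);
        return 0;
    }
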
 src/network.c             |   97 +++++++---
 src/matrix.c              |   11 +
 src/utils.h               |    1 
 src/network.h             |   11 
 Makefile                  |    8 
 src/convolutional_layer.c |   13 -
 src/connected_layer.c     |    1 
 src/parser.c              |    2 
 src/data.c                |  179 +++++++++++++------
 src/data.h                |   24 +-
 src/tests.c               |  162 ++++--------------
 src/utils.c               |   13 +
 12 files changed, 279 insertions(+), 243 deletions(-)

diff --git a/Makefile b/Makefile
index 6cd3999..e1238d6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,11 @@
 CC=gcc
-COMMON=-Wall `pkg-config --cflags opencv` -isystem /usr/local/Cellar/opencv/2.4.6.1/include/opencv -isystem /usr/local/Cellar/opencv/2.4.6.1/include
+COMMON=-Wall `pkg-config --cflags opencv`
+UNAME = $(shell uname)
+ifeq ($(UNAME), Darwin)
+COMMON += -isystem /usr/local/Cellar/opencv/2.4.6.1/include/opencv -isystem /usr/local/Cellar/opencv/2.4.6.1/include
+else
+COMMON += -march=native
+endif
 CFLAGS= $(COMMON) -O3 -ffast-math -flto
 #CFLAGS= $(COMMON) -O0 -g 
 LDFLAGS=`pkg-config --libs opencv` -lm
diff --git a/src/connected_layer.c b/src/connected_layer.c
index d769e1f..0344c71 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -69,7 +69,6 @@
             int index = i*layer.inputs+j;
             layer.weight_momentum[index] = step*(layer.weight_updates[index] - decay*layer.weights[index]) + momentum*layer.weight_momentum[index];
             layer.weights[index] += layer.weight_momentum[index];
-            //layer.weights[index] = constrain(layer.weights[index], 100.);
         }
     }
     memset(layer.bias_updates, 0, layer.outputs*sizeof(double));
diff --git a/src/convolutional_layer.c b/src/convolutional_layer.c
index 45b55b8..5accaab 100644
--- a/src/convolutional_layer.c
+++ b/src/convolutional_layer.c
@@ -143,26 +143,22 @@
     for(i = 0; i < layer.n; ++i){
         kernel_update(in_image, layer.kernel_updates[i], layer.stride, i, out_delta, layer.edge);
         layer.bias_updates[i] += avg_image_layer(out_delta, i);
-        //printf("%30.20lf\n", layer.bias_updates[i]);
     }
 }
 
 void update_convolutional_layer(convolutional_layer layer, double step, double momentum, double decay)
 {
-    //step = .01;
     int i,j;
     for(i = 0; i < layer.n; ++i){
         layer.bias_momentum[i] = step*(layer.bias_updates[i]) 
                                 + momentum*layer.bias_momentum[i];
         layer.biases[i] += layer.bias_momentum[i];
-        //layer.biases[i] = constrain(layer.biases[i],1.);
         layer.bias_updates[i] = 0;
         int pixels = layer.kernels[i].h*layer.kernels[i].w*layer.kernels[i].c;
         for(j = 0; j < pixels; ++j){
             layer.kernel_momentum[i].data[j] = step*(layer.kernel_updates[i].data[j] - decay*layer.kernels[i].data[j]) 
                                                 + momentum*layer.kernel_momentum[i].data[j];
             layer.kernels[i].data[j] += layer.kernel_momentum[i].data[j];
-            //layer.kernels[i].data[j] = constrain(layer.kernels[i].data[j], 1.);
         }
         zero_image(layer.kernel_updates[i]);
     }
@@ -188,14 +184,6 @@
         int w_offset = i*(size+border);
         image k = layer.kernels[i];
         image copy = copy_image(k);
-        /*
-        printf("Kernel %d - Bias: %f, Channels:",i,layer.biases[i]);
-        for(j = 0; j < k.c; ++j){
-            double a = avg_image_layer(k, j);
-            printf("%f, ", a);
-        }
-        printf("\n");
-        */
         normalize_image(copy);
         for(j = 0; j < k.c; ++j){
             set_pixel(copy,0,0,j,layer.biases[i]);
@@ -227,7 +215,6 @@
 {
     int i;
     char buff[256];
-    //image vis = make_image(layer.n*layer.size, layer.size*layer.kernels[0].c, 3);
     for(i = 0; i < layer.n; ++i){
         image k = layer.kernels[i];
         sprintf(buff, "Kernel %d", i);
diff --git a/src/data.c b/src/data.c
index 9e5791f..b209197 100644
--- a/src/data.c
+++ b/src/data.c
@@ -1,23 +1,12 @@
 #include "data.h"
 #include "list.h"
 #include "utils.h"
+#include "image.h"
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 
-batch make_batch(int n, int k)
-{
-    batch b;
-    b.n = n;
-    if(k < 3) k = 1;
-    b.images = calloc(n, sizeof(image));
-    b.truth = calloc(n, sizeof(double *));
-    int i;
-    for(i =0 ; i < n; ++i) b.truth[i] = calloc(k, sizeof(double));
-    return b;
-}
-
 list *get_paths(char *filename)
 {
     char *path;
@@ -41,75 +30,145 @@
     }
 }
 
-batch load_list(list *paths, char **labels, int k)
-{
-    char *path;
-    batch data = make_batch(paths->size, 2);
-    node *n = paths->front;
-    int i;
-    for(i = 0; i < data.n; ++i){
-        path = (char *)n->val;
-        data.images[i] = load_image(path);
-        fill_truth(path, labels, k, data.truth[i]);
-        n = n->next;
-    }
-    return data;
-}
-
-batch get_all_data(char *filename, char **labels, int k)
-{
-    list *paths = get_paths(filename);
-    batch b = load_list(paths, labels, k);
-    free_list_contents(paths);
-    free_list(paths);
-    return b;
-}
-
-void free_batch(batch b)
+data load_data_image_paths(char **paths, int n, char **labels, int k)
 {
     int i;
-    for(i = 0; i < b.n; ++i){
-        free_image(b.images[i]);
-        free(b.truth[i]);
+    data d;
+    d.shallow = 0;
+    d.X.rows = n;
+    d.X.vals = calloc(d.X.rows, sizeof(double*));
+    d.y = make_matrix(n, k);
+
+    for(i = 0; i < n; ++i){
+        image im = load_image(paths[i]);
+        d.X.vals[i] = im.data;
+        d.X.cols = im.h*im.w*im.c;
+        fill_truth(paths[i], labels, k, d.y.vals[i]);
     }
-    free(b.images);
-    free(b.truth);
+    return d;
 }
 
-batch get_batch(char *filename, int curr, int total, char **labels, int k)
+data load_data_image_pathfile(char *filename, char **labels, int k)
 {
     list *plist = get_paths(filename);
     char **paths = (char **)list_to_array(plist);
-    int i;
-    int start = curr*plist->size/total;
-    int end = (curr+1)*plist->size/total;
-    batch b = make_batch(end-start, 2);
-    for(i = start; i < end; ++i){
-        b.images[i-start] = load_image(paths[i]);
-        fill_truth(paths[i], labels, k, b.truth[i-start]);
-    }
+    data d = load_data_image_paths(paths, plist->size, labels, k);
     free_list_contents(plist);
     free_list(plist);
     free(paths);
-    return b;
+    return d;
 }
 
-batch random_batch(char *filename, int n, char **labels, int k)
+void free_data(data d)
+{
+    if(!d.shallow){
+        free_matrix(d.X);
+        free_matrix(d.y);
+    }else{
+        free(d.X.vals);
+        free(d.y.vals);
+    }
+}
+
+data load_data_image_pathfile_part(char *filename, int part, int total, char **labels, int k)
 {
     list *plist = get_paths(filename);
     char **paths = (char **)list_to_array(plist);
+    int start = part*plist->size/total;
+    int end = (part+1)*plist->size/total;
+    data d = load_data_image_paths(paths+start, end-start, labels, k);
+    free_list_contents(plist);
+    free_list(plist);
+    free(paths);
+    return d;
+}
+
+data load_data_image_pathfile_random(char *filename, int n, char **labels, int k)
+{
     int i;
-    batch b = make_batch(n, 2);
+    list *plist = get_paths(filename);
+    char **paths = (char **)list_to_array(plist);
+    char **random_paths = calloc(n, sizeof(char*));
     for(i = 0; i < n; ++i){
         int index = rand()%plist->size;
-        b.images[i] = load_image(paths[index]);
-        //scale_image(b.images[i], 1./255.);
-        z_normalize_image(b.images[i]);
-        fill_truth(paths[index], labels, k, b.truth[i]);
-        //print_image(b.images[i]);
+        random_paths[i] = paths[index];
     }
+    data d = load_data_image_paths(random_paths, n, labels, k);
     free_list_contents(plist);
     free_list(plist);
     free(paths);
-    return b;
+    free(random_paths);
+    return d;
 }
+
+data load_categorical_data_csv(char *filename, int target, int k)
+{
+    data d;
+    d.shallow = 0;
+    matrix X = csv_to_matrix(filename);
+    double *truth_1d = pop_column(&X, target);
+    double **truth = one_hot_encode(truth_1d, X.rows, k);
+    matrix y;
+    y.rows = X.rows;
+    y.cols = k;
+    y.vals = truth;
+    d.X = X;
+    d.y = y;
+    free(truth_1d);
+    return d;
+}
+
+void randomize_data(data d)
+{
+    int i;
+    for(i = d.X.rows-1; i > 0; --i){
+        int index = rand()%i;
+        double *swap = d.X.vals[index];
+        d.X.vals[index] = d.X.vals[i];
+        d.X.vals[i] = swap;
+
+        swap = d.y.vals[index];
+        d.y.vals[index] = d.y.vals[i];
+        d.y.vals[i] = swap;
+    }
+}
+
+void normalize_data_rows(data d)
+{
+    int i;
+    for(i = 0; i < d.X.rows; ++i){
+        normalize_array(d.X.vals[i], d.X.cols);
+    }
+}
+
+data *cv_split_data(data d, int part, int total)
+{
+    data *split = calloc(2, sizeof(data));
+    int i;
+    int start = part*d.X.rows/total;
+    int end = (part+1)*d.X.rows/total;
+    data train;
+    data test;
+    train.shallow = test.shallow = 1;
+
+    test.X.rows = test.y.rows = end-start;
+    train.X.rows = train.y.rows = d.X.rows - (end-start);
+    train.X.cols = test.X.cols = d.X.cols;
+    train.y.cols = test.y.cols = d.y.cols;
+
+    /* shallow split: rows still point into d, only the row-pointer arrays are allocated */
+    train.X.vals = calloc(train.X.rows, sizeof(double*));
+    train.y.vals = calloc(train.y.rows, sizeof(double*));
+    test.X.vals = calloc(test.X.rows, sizeof(double*));
+    test.y.vals = calloc(test.y.rows, sizeof(double*));
+
+    for(i = 0; i < start; ++i){
+        train.X.vals[i] = d.X.vals[i];
+        train.y.vals[i] = d.y.vals[i];
+    }
+    for(i = start; i < end; ++i){
+        test.X.vals[i-start] = d.X.vals[i];
+        test.y.vals[i-start] = d.y.vals[i];
+    }
+    for(i = end; i < d.X.rows; ++i){
+        train.X.vals[i-(end-start)] = d.X.vals[i];
+        train.y.vals[i-(end-start)] = d.y.vals[i];
+    }
+    split[0] = train;
+    split[1] = test;
+    return split;
+}
+
diff --git a/src/data.h b/src/data.h
index c01384c..3c16574 100644
--- a/src/data.h
+++ b/src/data.h
@@ -1,18 +1,24 @@
 #ifndef DATA_H
 #define DATA_H
 
-#include "image.h"
+#include "matrix.h"
 
 typedef struct{
-    int n;
-    image *images;
-    double **truth;
-} batch;
+    matrix X;
+    matrix y;
+    int shallow;
+} data;
 
-batch get_all_data(char *filename, char **labels, int k);
-batch random_batch(char *filename, int n, char **labels, int k);
-batch get_batch(char *filename, int curr, int total, char **labels, int k);
-void free_batch(batch b);
 
+void free_data(data d);
+data load_data_image_pathfile(char *filename, char **labels, int k);
+data load_data_image_pathfile_part(char *filename, int part, int total, 
+                                                char **labels, int k);
+data load_data_image_pathfile_random(char *filename, int n, char **labels, int k);
+data load_categorical_data_csv(char *filename, int target, int k);
+void normalize_data_rows(data d);
+void randomize_data(data d);
+data *cv_split_data(data d, int part, int total);
 
 #endif
diff --git a/src/matrix.c b/src/matrix.c
index 562a364..5627b87 100644
--- a/src/matrix.c
+++ b/src/matrix.c
@@ -13,6 +13,17 @@
     free(m.vals);
 }
 
+void matrix_add_matrix(matrix from, matrix to)
+{
+    assert(from.rows == to.rows && from.cols == to.cols);
+    int i,j;
+    for(i = 0; i < from.rows; ++i){
+        for(j = 0; j < from.cols; ++j){
+            to.vals[i][j] += from.vals[i][j];
+        }
+    }
+}
+
 matrix make_matrix(int rows, int cols)
 {
     matrix m;
diff --git a/src/network.c b/src/network.c
index faedb8c..29234da 100644
--- a/src/network.c
+++ b/src/network.c
@@ -15,6 +15,8 @@
     net.n = n;
     net.layers = calloc(net.n, sizeof(void *));
     net.types = calloc(net.n, sizeof(LAYER_TYPE));
+    net.outputs = 0;
+    net.output = 0;
     return net;
 }
 
@@ -45,13 +47,13 @@
     }
 }
 
-void update_network(network net, double step)
+void update_network(network net, double step, double momentum, double decay)
 {
     int i;
     for(i = 0; i < net.n; ++i){
         if(net.types[i] == CONVOLUTIONAL){
             convolutional_layer layer = *(convolutional_layer *)net.layers[i];
-            update_convolutional_layer(layer, step, 0.9, .01);
+            update_convolutional_layer(layer, step, momentum, decay);
         }
         else if(net.types[i] == MAXPOOL){
             //maxpool_layer layer = *(maxpool_layer *)net.layers[i];
@@ -61,7 +63,7 @@
         }
         else if(net.types[i] == CONNECTED){
             connected_layer layer = *(connected_layer *)net.layers[i];
-            update_connected_layer(layer, step, .9, 0);
+            update_connected_layer(layer, step, momentum, decay);
         }
     }
 }
@@ -111,8 +113,26 @@
     return get_network_delta_layer(net, net.n-1);
 }
 
-void learn_network(network net, double *input)
+void calculate_error_network(network net, double *truth)
 {
+    double *delta = get_network_delta(net);
+    double *out = get_network_output(net);
+    int i, k = get_network_output_size(net);
+    for(i = 0; i < k; ++i){
+        delta[i] = truth[i] - out[i];
+    }
+}
+
+int get_predicted_class_network(network net)
+{
+    double *out = get_network_output(net);
+    int k = get_network_output_size(net);
+    return max_index(out, k);
+}
+
+void backward_network(network net, double *input, double *truth)
+{
+    calculate_error_network(net, truth);
     int i;
     double *prev_input;
     double *prev_delta;
@@ -145,40 +165,43 @@
     }
 }
 
-void train_network_batch(network net, batch b)
+int train_network_datum(network net, double *x, double *y, double step, double momentum, double decay)
 {
-    int i,j;
-    int k = get_network_output_size(net);
+    forward_network(net, x);
+    int class = get_predicted_class_network(net);
+    backward_network(net, x, y);
+    update_network(net, step, momentum, decay);
+    return (y[class]?1:0);
+}
+
+double train_network_sgd(network net, data d, double step, double momentum, double decay)
+{
+    int i;
     int correct = 0;
-    for(i = 0; i < b.n; ++i){
-        show_image(b.images[i], "Input");
-        forward_network(net, b.images[i].data);
-        image o = get_network_image(net);
-        if(o.h) show_image_collapsed(o, "Output");
-        double *output = get_network_output(net);
-        double *delta = get_network_delta(net);
-        int max_k = 0;
-        double max = 0;
-        for(j = 0; j < k; ++j){
-            delta[j] = b.truth[i][j]-output[j];
-            if(output[j] > max) {
-                max = output[j];
-                max_k = j;
-            }
+    for(i = 0; i < d.X.rows; ++i){
+        int index = rand()%d.X.rows;
+        correct += train_network_datum(net, d.X.vals[index], d.y.vals[index], step, momentum, decay);
+        if((i+1)%10 == 0){
+            printf("%d: %f\n", (i+1), (double)correct/(i+1));
         }
-        if(b.truth[i][max_k]) ++correct;
-        printf("%f\n", (double)correct/(i+1));
-        learn_network(net, b.images[i].data);
-        update_network(net, .001);
+    }
+    return (double)correct/d.X.rows;
+}
+
+void train_network(network net, data d, double step, double momentum, double decay)
+{
+    int i;
+    int correct = 0;
+    for(i = 0; i < d.X.rows; ++i){
+        correct += train_network_datum(net, d.X.vals[i], d.y.vals[i], step, momentum, decay);
         if(i%100 == 0){
             visualize_network(net);
-            cvWaitKey(100);
+            cvWaitKey(10);
         }
     }
     visualize_network(net);
-    print_network(net);
     cvWaitKey(100);
-    printf("Accuracy: %f\n", (double)correct/b.n);
+    printf("Accuracy: %f\n", (double)correct/d.X.rows);
 }
 
 int get_network_output_size_layer(network net, int i)
@@ -250,7 +273,7 @@
 {
     int i,j;
     for(i = 0; i < net.n; ++i){
-        double *output;
+        double *output = 0;
         int n = 0;
         if(net.types[i] == CONVOLUTIONAL){
             convolutional_layer layer = *(convolutional_layer *)net.layers[i];
@@ -283,3 +306,17 @@
         fprintf(stderr, "\n");
     }
 }
+double network_accuracy(network net, data d)
+{
+    int i;
+    int correct = 0;
+    int k = get_network_output_size(net);
+    for(i = 0; i < d.X.rows; ++i){
+        forward_network(net, d.X.vals[i]);
+        double *out = get_network_output(net);
+        int guess = max_index(out, k);
+        if(d.y.vals[i][guess]) ++correct;
+    }
+    return (double)correct/d.X.rows;
+}
+
diff --git a/src/network.h b/src/network.h
index c655c91..3614c52 100644
--- a/src/network.h
+++ b/src/network.h
@@ -16,13 +16,17 @@
     int n;
     void **layers;
     LAYER_TYPE *types;
+    int outputs;
+    double *output;
 } network;
 
 network make_network(int n);
 void forward_network(network net, double *input);
-void learn_network(network net, double *input);
-void update_network(network net, double step);
-void train_network_batch(network net, batch b);
+void backward_network(network net, double *input, double *truth);
+void update_network(network net, double step, double momentum, double decay);
+double train_network_sgd(network net, data d, double step, double momentum, double decay);
+void train_network(network net, data d, double step, double momentum, double decay);
+double network_accuracy(network net, data d);
 double *get_network_output(network net);
 double *get_network_output_layer(network net, int i);
 double *get_network_delta_layer(network net, int i);
@@ -31,6 +35,7 @@
 int get_network_output_size(network net);
 image get_network_image(network net);
 image get_network_image_layer(network net, int i);
+int get_predicted_class_network(network net);
 void print_network(network net);
 void visualize_network(network net);
 
diff --git a/src/parser.c b/src/parser.c
index dc1db2b..eeb6f93 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -107,6 +107,8 @@
         ++count;
         n = n->next;
     }   
+    net.outputs = get_network_output_size(net);
+    net.output = get_network_output(net);
     return net;
 }
 
diff --git a/src/tests.c b/src/tests.c
index c221042..d7d9389 100644
--- a/src/tests.c
+++ b/src/tests.c
@@ -166,19 +166,16 @@
         avgerr = .99 * avgerr + .01 * err;
         if(count % 1000000 == 0) printf("%f %f :%f AVG %f \n", truth, out[0], err, avgerr);
         delta[0] = truth - out[0];
-        learn_network(net, input);
-        update_network(net, .001);
+        backward_network(net, input, &truth);
+        update_network(net, .001, 0, 0);
     }
 }
 
 void test_data()
 {
     char *labels[] = {"cat","dog"};
-    batch train = random_batch("train_paths.txt", 101,labels, 2);
-    show_image(train.images[0], "Test Data Loading");
-    show_image(train.images[100], "Test Data Loading");
-    show_image(train.images[10], "Test Data Loading");
-    free_batch(train);
+    data train = load_data_image_pathfile_random("train_paths.txt", 101, labels, 2);
+    free_data(train);
 }
 
 void test_full()
@@ -188,110 +185,37 @@
     int i = 0;
     char *labels[] = {"cat","dog"};
     while(i++ < 1000 || 1){
-        batch train = random_batch("train_paths.txt", 1000, labels, 2);
-        train_network_batch(net, train);
-        free_batch(train);
+        data train = load_data_image_pathfile_random("train_paths.txt", 1000, labels, 2);
+        train_network(net, train, .0005, 0, 0);
+        free_data(train);
         printf("Round %d\n", i);
     }
 }
 
-double error_network(network net, matrix m, double **truth)
-{
-    int i;
-    int correct = 0;
-    int k = get_network_output_size(net);
-    for(i = 0; i < m.rows; ++i){
-        forward_network(net, m.vals[i]);
-        double *out = get_network_output(net);
-        int guess = max_index(out, k);
-        if(truth[i][guess]) ++correct;
-    }
-    return (double)correct/m.rows;
-}
-
-double **one_hot(double *a, int n, int k)
-{
-    int i;
-    double **t = calloc(n, sizeof(double*));
-    for(i = 0; i < n; ++i){
-        t[i] = calloc(k, sizeof(double));
-        int index = (int)a[i];
-        t[i][index] = 1;
-    }
-    return t;
-}
-
 void test_nist()
 {
-    srand(999999);
+    srand(444444);
     network net = parse_network_cfg("nist.cfg");
-    matrix m = csv_to_matrix("mnist/mnist_train.csv");
-    matrix test = csv_to_matrix("mnist/mnist_test.csv");
-    double *truth_1d = pop_column(&m, 0);
-    double **truth = one_hot(truth_1d, m.rows, 10);
-    double *test_truth_1d = pop_column(&test, 0);
-    double **test_truth = one_hot(test_truth_1d, test.rows, 10);
-    int i,j;
-    clock_t start = clock(), end;
-    for(i = 0; i < test.rows; ++i){
-        normalize_array(test.vals[i], 28*28);
-        //scale_array(m.vals[i], 28*28, 1./255.);
-        //translate_array(m.vals[i], 28*28, -.1);
-    }
-    for(i = 0; i < m.rows; ++i){
-        normalize_array(m.vals[i], 28*28);
-        //scale_array(m.vals[i], 28*28, 1./255.);
-        //translate_array(m.vals[i], 28*28, -.1);
-    }
+    data train = load_categorical_data_csv("mnist/mnist_train.csv", 0, 10);
+    data test = load_categorical_data_csv("mnist/mnist_test.csv", 0, 10);
+    normalize_data_rows(train);
+    normalize_data_rows(test);
+    randomize_data(train);
     int count = 0;
     double lr = .0005;
-    while(++count <= 300){
-        //lr *= .99;
-        int index = 0;
-        int correct = 0;
-        int number = 1000;
-        for(i = 0; i < number; ++i){
-            index = rand()%m.rows;
-            forward_network(net, m.vals[index]);
-            double *out = get_network_output(net);
-            double *delta = get_network_delta(net);
-            int max_i = 0;
-            double max = out[0];
-            for(j = 0; j < 10; ++j){
-                delta[j] = truth[index][j]-out[j];
-                if(out[j] > max){
-                    max = out[j];
-                    max_i = j;
-                }
-            }
-            if(truth[index][max_i]) ++correct;
-            learn_network(net, m.vals[index]);
-            update_network(net, lr);
-        }
-        print_network(net);
-        image input = double_to_image(28,28,1, m.vals[index]);
-        //show_image(input, "Input");
-        image o = get_network_image(net);
-        //show_image_collapsed(o, "Output");
-        visualize_network(net);
-        cvWaitKey(10);
-        //double test_acc = error_network(net, m, truth);
-        fprintf(stderr, "\n%5d: %f %f\n\n",count, (double)correct/number, lr);
-        if(count % 10 == 0 && 0){
-            double train_acc = error_network(net, m, truth);
-            fprintf(stderr, "\nTRAIN: %f\n", train_acc);
-            double test_acc = error_network(net, test, test_truth);
-            fprintf(stderr, "TEST: %f\n\n", test_acc);
-            printf("%d, %f, %f\n", count, train_acc, test_acc);
-        }
-        if(count % (m.rows/number) == 0) lr /= 2; 
+    while(++count <= 1){
+        double acc = train_network_sgd(net, train, lr, .9, .001);
+        printf("Training Accuracy: %lf", acc);
+        lr /= 2; 
     }
-            double train_acc = error_network(net, m, truth);
-            fprintf(stderr, "\nTRAIN: %f\n", train_acc);
-            double test_acc = error_network(net, test, test_truth);
-            fprintf(stderr, "TEST: %f\n\n", test_acc);
-            printf("%d, %f, %f\n", count, train_acc, test_acc);
-    end = clock();
+    /*
+    double train_acc = network_accuracy(net, train);
+    fprintf(stderr, "\nTRAIN: %f\n", train_acc);
+    double test_acc = network_accuracy(net, test);
+    fprintf(stderr, "TEST: %f\n\n", test_acc);
+    printf("%d, %f, %f\n", count, train_acc, test_acc);
+    */
+    //end = clock();
     //printf("Neural Net Learning: %lf seconds\n", (double)(end-start)/CLOCKS_PER_SEC);
 }
 
@@ -315,9 +239,9 @@
 {
     network net = parse_network_cfg("connected.cfg");
     matrix m = csv_to_matrix("train.csv");
-    matrix ho = hold_out_matrix(&m, 2500);
+    //matrix ho = hold_out_matrix(&m, 2500);
     double *truth = pop_column(&m, 0);
-    double *ho_truth = pop_column(&ho, 0);
+    //double *ho_truth = pop_column(&ho, 0);
     int i;
     clock_t start = clock(), end;
     int count = 0;
@@ -333,8 +257,8 @@
             delta[0] = truth[index] - out[0];
             // printf("%f\n", delta[0]);
             //printf("%f %f\n", truth[index], out[0]);
-            learn_network(net, m.vals[index]);
-            update_network(net, .00001);
+            //backward_network(net, m.vals[index], );
+            update_network(net, .00001, 0, 0);
         }
         //double test_acc = error_network(net, m, truth);
         //double valid_acc = error_network(net, ho, ho_truth);
@@ -356,33 +280,19 @@
     printf("Neural Net Learning: %lf seconds\n", (double)(end-start)/CLOCKS_PER_SEC);
 }
 
-void test_random_preprocess()
+void test_split()
 {
-    FILE *file = fopen("train.csv", "w");
-    char *labels[] = {"cat","dog"};
-    int i,j,k;
-    srand(0);
-    network net = parse_network_cfg("convolutional.cfg");
-    for(i = 0; i < 100; ++i){
-        printf("%d\n", i);
-        batch part = get_batch("train_paths.txt", i, 100, labels, 2);
-        for(j = 0; j < part.n; ++j){
-            forward_network(net, part.images[j].data);
-            double *out = get_network_output(net);
-            fprintf(file, "%f", part.truth[j][0]);
-            for(k = 0; k < get_network_output_size(net); ++k){
-                fprintf(file, ",%f", out[k]);
-            }
-            fprintf(file, "\n");
-        }
-        free_batch(part);
-    }
+    data train = load_categorical_data_csv("mnist/mnist_train.csv", 0, 10);
+    data *split = cv_split_data(train, 0, 13);
+    printf("%d, %d, %d\n", train.X.rows, split[0].X.rows, split[1].X.rows);
 }
 
+
 int main()
 {
     //test_kernel_update();
-    test_nist();
+    test_split();
+    //test_nist();
     //test_full();
     //test_random_preprocess();
     //test_random_classify();
diff --git a/src/utils.c b/src/utils.c
index 3b8b5a8..5180fe6 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -216,3 +216,16 @@
     for(i = 0; i < 12; ++i) sum += (double)rand()/RAND_MAX;
     return sum-6.;
 }
+
+double **one_hot_encode(double *a, int n, int k)
+{
+    int i;
+    double **t = calloc(n, sizeof(double*));
+    for(i = 0; i < n; ++i){
+        t[i] = calloc(k, sizeof(double));
+        int index = (int)a[i];
+        t[i][index] = 1;
+    }
+    return t;
+}
+
diff --git a/src/utils.h b/src/utils.h
index 04747a4..cf38016 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -22,5 +22,6 @@
 double rand_normal();
 double mean_array(double *a, int n);
 double variance_array(double *a, int n);
+double **one_hot_encode(double *a, int n, int k);
 #endif
 

--
Gitblit v1.10.0