From f88baf4a3a756140cef3ca07be98cabb803d80ae Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 18 Dec 2014 23:46:45 +0000
Subject: [PATCH] 99 problems
---
 Makefile             |  2 +-
 src/cnn.c            | 14 ++++++++------
 src/dropout_layer.c  |  9 +++++++--
 src/dropout_layer.cl |  4 ++--
 src/dropout_layer.h  |  2 ++
 src/network.c        |  5 ++++-
 src/network_gpu.c    |  5 ++++-
 src/parser.c         |  6 ++++++
8 files changed, 34 insertions(+), 13 deletions(-)
diff --git a/Makefile b/Makefile
index 3247999..a76c532 100644
--- a/Makefile
+++ b/Makefile
@@ -27,7 +27,7 @@
endif
endif
CFLAGS= $(COMMON) $(OPTS)
-#CFLAGS= $(COMMON) -O0 -g
+CFLAGS= $(COMMON) -O0 -g
LDFLAGS+=`pkg-config --libs opencv` -lm -pthread
VPATH=./src/
EXEC=cnn
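Note: un-commenting the debug line means this second assignment overrides the
optimized CFLAGS above it, so every build is now -O0 -g. If the intent is to
flip between the two without editing the Makefile, a conditional is one
option (a sketch; the DEBUG variable is hypothetical, not something this
Makefile currently defines):

    ifeq ($(DEBUG), 1)
    CFLAGS= $(COMMON) -O0 -g
    else
    CFLAGS= $(COMMON) $(OPTS)
    endif

Invoked as `make DEBUG=1` for the debug build, plain `make` otherwise.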
diff --git a/src/cnn.c b/src/cnn.c
index 7f163a4..fd83ee8 100644
--- a/src/cnn.c
+++ b/src/cnn.c
@@ -380,22 +380,24 @@
void train_nist(char *cfgfile)
{
srand(222222);
- srand(time(0));
- network net = parse_network_cfg(cfgfile);
+ // srand(time(0));
data train = load_categorical_data_csv("data/mnist/mnist_train.csv", 0, 10);
data test = load_categorical_data_csv("data/mnist/mnist_test.csv",0,10);
- normalize_data_rows(train);
- normalize_data_rows(test);
+ network net = parse_network_cfg(cfgfile);
int count = 0;
int iters = 60000/net.batch + 1;
while(++count <= 10){
clock_t start = clock(), end;
+ normalize_data_rows(train);
+ normalize_data_rows(test);
float loss = train_network_sgd(net, train, iters);
- end = clock();
float test_acc = 0;
- //if(count%1 == 0) test_acc = network_accuracy(net, test);
+ if(count%1 == 0) test_acc = network_accuracy(net, test);
+ end = clock();
printf("%d: Loss: %f, Test Acc: %f, Time: %lf seconds\n", count, loss, test_acc,(float)(end-start)/CLOCKS_PER_SEC);
}
+ free_data(train);
+ free_data(test);
char buff[256];
sprintf(buff, "%s.trained", cfgfile);
save_network(net, buff);
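Note: normalize_data_rows() has moved inside the while loop, so train and
test are re-standardized on every epoch, and that work now lands inside the
timed region. Assuming it normalizes each row to zero mean and unit variance
in place, a second pass is numerically a near no-op, so the cost is only
time; hoisting it back above the loop would keep the per-epoch timing to
training plus the accuracy check (a sketch of the alternative, not part of
the patch):

    normalize_data_rows(train);
    normalize_data_rows(test);
    while(++count <= 10){
        clock_t start = clock(), end;
        float loss = train_network_sgd(net, train, iters);
        ...
    }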
diff --git a/src/dropout_layer.c b/src/dropout_layer.c
index d4616d5..8104b56 100644
--- a/src/dropout_layer.c
+++ b/src/dropout_layer.c
@@ -10,9 +10,11 @@
layer->probability = probability;
layer->inputs = inputs;
layer->batch = batch;
+ layer->output = calloc(inputs*batch, sizeof(float));
layer->rand = calloc(inputs*batch, sizeof(float));
layer->scale = 1./(1.-probability);
#ifdef GPU
+ layer->output_cl = cl_make_array(layer->output, inputs*batch);
layer->rand_cl = cl_make_array(layer->rand, inputs*batch);
#endif
return layer;
@@ -24,14 +26,15 @@
for(i = 0; i < layer.batch * layer.inputs; ++i){
float r = rand_uniform();
layer.rand[i] = r;
- if(r < layer.probability) input[i] = 0;
- else input[i] *= layer.scale;
+ if(r < layer.probability) layer.output[i] = 0;
+ else layer.output[i] = input[i]*layer.scale;
}
}

void backward_dropout_layer(dropout_layer layer, float *delta)
{
int i;
+ if(!delta) return;
for(i = 0; i < layer.batch * layer.inputs; ++i){
float r = layer.rand[i];
if(r < layer.probability) delta[i] = 0;
@@ -66,6 +69,7 @@
cl.error = clSetKernelArg(kernel, i++, sizeof(layer.rand_cl), (void*) &layer.rand_cl);
cl.error = clSetKernelArg(kernel, i++, sizeof(layer.probability), (void*) &layer.probability);
cl.error = clSetKernelArg(kernel, i++, sizeof(layer.scale), (void*) &layer.scale);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(layer.output_cl), (void*) &layer.output_cl);
check_error(cl);

const size_t global_size[] = {size};
@@ -86,6 +90,7 @@
cl.error = clSetKernelArg(kernel, i++, sizeof(layer.rand_cl), (void*) &layer.rand_cl);
cl.error = clSetKernelArg(kernel, i++, sizeof(layer.probability), (void*) &layer.probability);
cl.error = clSetKernelArg(kernel, i++, sizeof(layer.scale), (void*) &layer.scale);
+ cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*) &delta);
check_error(cl);

const size_t global_size[] = {size};
diff --git a/src/dropout_layer.cl b/src/dropout_layer.cl
index 729dbc4..341b80f 100644
--- a/src/dropout_layer.cl
+++ b/src/dropout_layer.cl
@@ -1,5 +1,5 @@
-__kernel void yoloswag420blazeit360noscope(__global float *input, __global float *rand, float prob, float scale)
+__kernel void yoloswag420blazeit360noscope(__global float *input, __global float *rand, float prob, float scale, __global float *output)
{
int id = get_global_id(0);
- input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
+ output[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}
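Note: the substance of these hunks is that dropout stops overwriting its
input buffer (which belongs to the previous layer) and writes into an output
buffer of its own, on both the CPU and OpenCL paths; the network forward
pass is then re-pointed at that buffer. In isolation, inverted dropout with
a separate output looks like the following (a minimal standalone sketch;
rand_uniform is stubbed in here as a stand-in for the one in utils.c):

    #include <stdlib.h>

    /* stand-in for rand_uniform(): uniform float in [0,1) */
    static float rand_uniform() { return (float)rand()/((float)RAND_MAX + 1); }

    /* Keep each unit with probability 1-prob; scale survivors by
     * 1/(1-prob) so expected activations match test time, where
     * dropout is skipped entirely. The random draws are saved in
     * mask so the backward pass drops the same units. */
    void dropout_forward(float *input, float *output, float *mask,
                         int n, float prob)
    {
        float scale = 1.f/(1.f - prob);
        int i;
        for(i = 0; i < n; ++i){
            float r = rand_uniform();
            mask[i] = r;
            output[i] = (r < prob) ? 0 : input[i]*scale;
        }
    }

    /* Gradients flow only through the surviving units; a null delta
     * (no layer below to backpropagate into) is a no-op. */
    void dropout_backward(float *delta, float *mask, int n, float prob)
    {
        float scale = 1.f/(1.f - prob);
        int i;
        if(!delta) return;
        for(i = 0; i < n; ++i){
            delta[i] = (mask[i] < prob) ? 0 : delta[i]*scale;
        }
    }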
diff --git a/src/dropout_layer.h b/src/dropout_layer.h
index 0a6f034..12a819e 100644
--- a/src/dropout_layer.h
+++ b/src/dropout_layer.h
@@ -8,8 +8,10 @@
float probability;
float scale;
float *rand;
+ float *output;
#ifdef GPU
cl_mem rand_cl;
+ cl_mem output_cl;
#endif
} dropout_layer;
diff --git a/src/network.c b/src/network.c
index cffb2b9..0bf5357 100644
--- a/src/network.c
+++ b/src/network.c
@@ -74,6 +74,7 @@
if(!train) continue;
dropout_layer layer = *(dropout_layer *)net.layers[i];
forward_dropout_layer(layer, input);
+ input = layer.output;
}
else if(net.types[i] == FREEWEIGHT){
if(!train) continue;
@@ -119,7 +120,8 @@
softmax_layer layer = *(softmax_layer *)net.layers[i];
return layer.output;
} else if(net.types[i] == DROPOUT){
- return get_network_output_layer(net, i-1);
+ dropout_layer layer = *(dropout_layer *)net.layers[i];
+ return layer.output;
} else if(net.types[i] == FREEWEIGHT){
return get_network_output_layer(net, i-1);
} else if(net.types[i] == CONNECTED){
@@ -153,6 +155,7 @@
softmax_layer layer = *(softmax_layer *)net.layers[i];
return layer.delta;
} else if(net.types[i] == DROPOUT){
+ if(i == 0) return 0;
return get_network_delta_layer(net, i-1);
} else if(net.types[i] == FREEWEIGHT){
return get_network_delta_layer(net, i-1);
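Note: now that dropout owns an output buffer, the forward pass re-points
input at layer.output after each dropout layer instead of relying on the
input having been modified in place, and a dropout layer at index 0 has no
previous layer to take a delta from, hence the i == 0 guard here paired with
the null check added in backward_dropout_layer. The intended calling pattern
is roughly (an illustrative sketch of the flow, not code from the patch):

    /* 0 when the dropout layer is first in the network */
    float *prev_delta = get_network_delta_layer(net, i-1);
    backward_dropout_layer(layer, prev_delta); /* returns early on 0 */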
diff --git a/src/network_gpu.c b/src/network_gpu.c
index 969cd9d..6ff95c8 100644
--- a/src/network_gpu.c
+++ b/src/network_gpu.c
@@ -52,6 +52,7 @@
if(!train) continue;
dropout_layer layer = *(dropout_layer *)net.layers[i];
forward_dropout_layer_gpu(layer, input);
+ input = layer.output_cl;
}
else if(net.types[i] == CROP){
crop_layer layer = *(crop_layer *)net.layers[i];
@@ -138,7 +139,8 @@
softmax_layer layer = *(softmax_layer *)net.layers[i];
return layer.output_cl;
} else if(net.types[i] == DROPOUT){
- return get_network_output_cl_layer(net, i-1);
+ dropout_layer layer = *(dropout_layer *)net.layers[i];
+ return layer.output_cl;
}
return 0;
}
@@ -161,6 +163,7 @@
softmax_layer layer = *(softmax_layer *)net.layers[i];
return layer.delta_cl;
} else if(net.types[i] == DROPOUT){
+ if(i == 0) return 0;
return get_network_delta_cl_layer(net, i-1);
}
return 0;
diff --git a/src/parser.c b/src/parser.c
index 2069753..d53e87c 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -245,6 +245,12 @@
if(count == 0){
net->batch = option_find_int(options, "batch",1);
input = option_find_int(options, "input",1);
+ float learning_rate = option_find_float(options, "learning_rate", .001);
+ float momentum = option_find_float(options, "momentum", .9);
+ float decay = option_find_float(options, "decay", .0001);
+ net->learning_rate = learning_rate;
+ net->momentum = momentum;
+ net->decay = decay;
}else{
input = get_network_output_size_layer(*net, count-1);
}
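Note: the solver hyperparameters are now read from the options of the first
section in the cfg, with defaults of .001, .9, and .0001 when the keys are
absent. A first section could then carry them alongside batch and input,
something like this (hypothetical values, and the section name and remaining
keys depend on whatever layer the cfg actually starts with):

    [connected]
    batch=128
    input=784
    learning_rate=0.01
    momentum=0.9
    decay=0.0005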
--
Gitblit v1.10.0