From 16d06ec0db241261d0d030722e440206ed8aad77 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 29 Feb 2016 21:54:12 +0000
Subject: [PATCH] stuff
---
src/cifar.c | 95 +++
Makefile | 8
src/rnn.c | 1
src/nightmare.c | 88 +-
src/classifier.c | 169 +++++
src/image.c | 60 +
src/coco.c | 4
src/imagenet.c | 4
src/blas.c | 23
src/rnn_vid.c | 210 +++++++
src/crnn_layer.h | 24
src/convolutional_kernels.cu | 16
src/crnn_layer.c | 277 ++++++++++
src/image.h | 10
src/layer.h | 3
src/utils.c | 19
src/network.c | 11
src/coco_demo.c | 152 +++++
src/cost_layer.c | 26
src/utils.h | 1
src/network.h | 1
src/network_kernels.cu | 7
src/data.c | 89 +++
src/blas.h | 8
src/data.h | 5
src/rnn_layer.c | 2
src/tag.c | 144 +++++
src/parser.c | 102 ++-
src/blas_kernels.cu | 33 +
src/darknet.c | 9
30 files changed, 1453 insertions(+), 148 deletions(-)
diff --git a/Makefile b/Makefile
index c9b6eca..528437d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
-GPU=0
-OPENCV=0
+GPU=1
+OPENCV=1
DEBUG=0
ARCH= --gpu-architecture=compute_20 --gpu-code=compute_20
@@ -34,9 +34,9 @@
LDFLAGS+= -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand
endif
-OBJ=gemm.o utils.o cuda.o deconvolutional_layer.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o imagenet.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o rnn.o
+OBJ=gemm.o utils.o cuda.o deconvolutional_layer.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o imagenet.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o rnn.o rnn_vid.o crnn_layer.o coco_demo.o tag.o cifar.o
ifeq ($(GPU), 1)
-OBJ+=convolutional_kernels.o deconvolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o softmax_layer_kernels.o network_kernels.o avgpool_layer_kernels.o yolo_kernels.o coco_kernels.o
+OBJ+=convolutional_kernels.o deconvolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o softmax_layer_kernels.o network_kernels.o avgpool_layer_kernels.o yolo_kernels.o
endif
OBJS = $(addprefix $(OBJDIR), $(OBJ))
diff --git a/src/blas.c b/src/blas.c
index d7948bb..978f1ed 100644
--- a/src/blas.c
+++ b/src/blas.c
@@ -115,13 +115,30 @@
for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}
-void smooth_l1_cpu(int n, float *pred, float *truth, float *delta)
+void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
int i;
for(i = 0; i < n; ++i){
float diff = truth[i] - pred[i];
- if(fabs(diff) > 1) delta[i] = diff;
- else delta[i] = (diff > 0) ? 1 : -1;
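+ // smooth L1: quadratic for |diff| < 1, linear beyond; error[i] is the loss term, delta[i] the update direction toward the truth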
+ float abs_val = fabs(diff);
+ if(abs_val < 1) {
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+ else {
+ error[i] = 2*abs_val - 1;
+ delta[i] = (diff < 0) ? -1 : 1;
+ }
+ }
+}
+
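+// plain squared error: error[i] = (truth - pred)^2, delta[i] = truth - pred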
+void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ float diff = truth[i] - pred[i];
+ error[i] = diff * diff;
+ delta[i] = diff;
}
}
diff --git a/src/blas.h b/src/blas.h
index f5189e5..030ef66 100644
--- a/src/blas.h
+++ b/src/blas.h
@@ -17,7 +17,6 @@
float dot_cpu(int N, float *X, int INCX, float *Y, int INCY);
void test_gpu_blas();
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out);
-void smooth_l1_cpu(int n, float *pred, float *truth, float *delta);
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean);
void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
@@ -29,6 +28,9 @@
void variance_delta_cpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta);
void normalize_delta_cpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta);
+void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void l2_cpu(int n, float *pred, float *truth, float *delta, float *error);
+
#ifdef GPU
void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY);
void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY);
@@ -53,9 +55,11 @@
void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean);
void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out);
-void smooth_l1_gpu(int n, float *pred, float *truth, float *delta);
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size);
void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates);
void scale_bias_gpu(float *output, float *biases, int batch, int n, int size);
+
+void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void l2_gpu(int n, float *pred, float *truth, float *delta, float *error);
#endif
#endif
diff --git a/src/blas_kernels.cu b/src/blas_kernels.cu
index 61db29f..be0e553 100644
--- a/src/blas_kernels.cu
+++ b/src/blas_kernels.cu
@@ -410,18 +410,41 @@
check_error(cudaPeekAtLastError());
}
-__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta)
+__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
- if(abs(diff) > 1) delta[i] = diff;
- else delta[i] = (diff > 0) ? 1 : -1;
+ float abs_val = abs(diff);
+ if(abs_val < 1) {
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+ else {
+ error[i] = 2*abs_val - 1;
+ delta[i] = (diff < 0) ? -1 : 1;
+ }
}
}
-extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta)
+extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
- smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta);
+ smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+ error[i] = diff * diff; //I know this is technically wrong, deal with it.
+ delta[i] = diff;
+ }
+}
+
+extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
check_error(cudaPeekAtLastError());
}
diff --git a/src/cifar.c b/src/cifar.c
new file mode 100644
index 0000000..f887877
--- /dev/null
+++ b/src/cifar.c
@@ -0,0 +1,95 @@
+#include "network.h"
+#include "utils.h"
+#include "parser.h"
+#include "option_list.h"
+#include "blas.h"
+
+#ifdef OPENCV
+#include "opencv2/highgui/highgui_c.h"
+#endif
+
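+// invoked as: ./darknet cifar [train/test] [cfg] [weights (optional)], see run_cifar below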
+void train_cifar(char *cfgfile, char *weightfile)
+{
+ data_seed = time(0);
+ srand(time(0));
+ float avg_loss = -1;
+ char *base = basecfg(cfgfile);
+ printf("%s\n", base);
+ network net = parse_network_cfg(cfgfile);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
+
+ char *backup_directory = "/home/pjreddie/backup/";
+ int classes = 10;
+ int N = 50000;
+
+ char **labels = get_labels("data/cifar/labels.txt");
+ int epoch = (*net.seen)/N;
+ data train = load_all_cifar10();
+ while(get_current_batch(net) < net.max_batches || net.max_batches == 0){
+ clock_t time=clock();
+
+ float loss = train_network_sgd(net, train, 1);
+ if(avg_loss == -1) avg_loss = loss;
+ avg_loss = avg_loss*.9 + loss*.1;
+ printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+ if(*net.seen/N > epoch){
+ epoch = *net.seen/N;
+ char buff[256];
+ sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
+ save_weights(net, buff);
+ }
+ if(get_current_batch(net)%100 == 0){
+ char buff[256];
+ sprintf(buff, "%s/%s.backup",backup_directory,base);
+ save_weights(net, buff);
+ }
+ }
+ char buff[256];
+ sprintf(buff, "%s/%s.weights", backup_directory, base);
+ save_weights(net, buff);
+
+ free_network(net);
+ free_ptrs((void**)labels, classes);
+ free(base);
+ free_data(train);
+}
+
+void test_cifar(char *filename, char *weightfile)
+{
+ network net = parse_network_cfg(filename);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ srand(time(0));
+
+ clock_t time;
+ float avg_acc = 0;
+ float avg_top5 = 0;
+ data test = load_cifar10_data("data/cifar/cifar-10-batches-bin/test_batch.bin");
+
+ time=clock();
+
+ float *acc = network_accuracies(net, test, 2);
+ avg_acc += acc[0];
+ avg_top5 += acc[1];
+ printf("top1: %f, %lf seconds, %d images\n", avg_acc, sec(clock()-time), test.X.rows);
+ free_data(test);
+}
+
+void run_cifar(int argc, char **argv)
+{
+ if(argc < 4){
+ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
+ return;
+ }
+
+ char *cfg = argv[3];
+ char *weights = (argc > 4) ? argv[4] : 0;
+ if(0==strcmp(argv[2], "train")) train_cifar(cfg, weights);
+ else if(0==strcmp(argv[2], "test")) test_cifar(cfg, weights);
+}
+
+
diff --git a/src/classifier.c b/src/classifier.c
index 9924c37..fdbe534 100644
--- a/src/classifier.c
+++ b/src/classifier.c
@@ -70,6 +70,11 @@
load_args args = {0};
args.w = net.w;
args.h = net.h;
+
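+ // random-resize bounds for the augmented loader: shorter side drawn from [net.w, net.max_crop], then cropped to net.w x net.w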
+ args.min = net.w;
+ args.max = net.max_crop;
+ args.size = net.w;
+
args.paths = paths;
args.classes = classes;
args.n = imgs;
@@ -88,6 +93,16 @@
load_thread = load_data_in_thread(args);
printf("Loaded: %lf seconds\n", sec(clock()-time));
time=clock();
+
+/*
+ int u;
+ for(u = 0; u < net.batch; ++u){
+ image im = float_to_image(net.w, net.h, 3, train.X.vals[u]);
+ show_image(im, "loaded");
+ cvWaitKey(0);
+ }
+ */
+
float loss = train_network(net, train);
if(avg_loss == -1) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
@@ -99,7 +114,7 @@
sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
save_weights(net, buff);
}
- if(*net.seen%1000 == 0){
+ if(*net.seen%100 == 0){
char buff[256];
sprintf(buff, "%s/%s.backup",backup_directory,base);
save_weights(net, buff);
@@ -152,13 +167,14 @@
load_args args = {0};
args.w = net.w;
args.h = net.h;
+
args.paths = paths;
args.classes = classes;
args.n = num;
args.m = 0;
args.labels = labels;
args.d = &buffer;
- args.type = CLASSIFICATION_DATA;
+ args.type = OLD_CLASSIFICATION_DATA;
pthread_t load_thread = load_data_in_thread(args);
for(i = 1; i <= splits; ++i){
@@ -221,19 +237,22 @@
break;
}
}
- image im = load_image_color(paths[i], 256, 256);
+ int w = net.w;
+ int h = net.h;
+ image im = load_image_color(paths[i], w, h);
+ int shift = 32;
image images[10];
- images[0] = crop_image(im, -16, -16, 256, 256);
- images[1] = crop_image(im, 16, -16, 256, 256);
- images[2] = crop_image(im, 0, 0, 256, 256);
- images[3] = crop_image(im, -16, 16, 256, 256);
- images[4] = crop_image(im, 16, 16, 256, 256);
+ images[0] = crop_image(im, -shift, -shift, w, h);
+ images[1] = crop_image(im, shift, -shift, w, h);
+ images[2] = crop_image(im, 0, 0, w, h);
+ images[3] = crop_image(im, -shift, shift, w, h);
+ images[4] = crop_image(im, shift, shift, w, h);
flip_image(im);
- images[5] = crop_image(im, -16, -16, 256, 256);
- images[6] = crop_image(im, 16, -16, 256, 256);
- images[7] = crop_image(im, 0, 0, 256, 256);
- images[8] = crop_image(im, -16, 16, 256, 256);
- images[9] = crop_image(im, 16, 16, 256, 256);
+ images[5] = crop_image(im, -shift, -shift, w, h);
+ images[6] = crop_image(im, shift, -shift, w, h);
+ images[7] = crop_image(im, 0, 0, w, h);
+ images[8] = crop_image(im, -shift, shift, w, h);
+ images[9] = crop_image(im, shift, shift, w, h);
float *pred = calloc(classes, sizeof(float));
for(j = 0; j < 10; ++j){
float *p = network_predict(net, images[j].data);
@@ -252,6 +271,122 @@
}
}
+void validate_classifier_full(char *datacfg, char *filename, char *weightfile)
+{
+ int i, j;
+ network net = parse_network_cfg(filename);
+ set_batch_network(&net, 1);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ srand(time(0));
+
+ list *options = read_data_cfg(datacfg);
+
+ char *label_list = option_find_str(options, "labels", "data/labels.list");
+ char *valid_list = option_find_str(options, "valid", "data/train.list");
+ int classes = option_find_int(options, "classes", 2);
+ int topk = option_find_int(options, "top", 1);
+
+ char **labels = get_labels(label_list);
+ list *plist = get_paths(valid_list);
+
+ char **paths = (char **)list_to_array(plist);
+ int m = plist->size;
+ free_list(plist);
+
+ float avg_acc = 0;
+ float avg_topk = 0;
+ int *indexes = calloc(topk, sizeof(int));
+
+ for(i = 0; i < m; ++i){
+ int class = -1;
+ char *path = paths[i];
+ for(j = 0; j < classes; ++j){
+ if(strstr(path, labels[j])){
+ class = j;
+ break;
+ }
+ }
+ image im = load_image_color(paths[i], 0, 0);
+ resize_network(&net, im.w, im.h);
+ //show_image(im, "orig");
+ //show_image(crop, "cropped");
+ //cvWaitKey(0);
+ float *pred = network_predict(net, im.data);
+
+ free_image(im);
+ top_k(pred, classes, topk, indexes);
+
+ if(indexes[0] == class) avg_acc += 1;
+ for(j = 0; j < topk; ++j){
+ if(indexes[j] == class) avg_topk += 1;
+ }
+
+ printf("%d: top 1: %f, top %d: %f\n", i, avg_acc/(i+1), topk, avg_topk/(i+1));
+ }
+}
+
+
+void validate_classifier_single(char *datacfg, char *filename, char *weightfile)
+{
+ int i, j;
+ network net = parse_network_cfg(filename);
+ set_batch_network(&net, 1);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ srand(time(0));
+
+ list *options = read_data_cfg(datacfg);
+
+ char *label_list = option_find_str(options, "labels", "data/labels.list");
+ char *valid_list = option_find_str(options, "valid", "data/train.list");
+ int classes = option_find_int(options, "classes", 2);
+ int topk = option_find_int(options, "top", 1);
+
+ char **labels = get_labels(label_list);
+ list *plist = get_paths(valid_list);
+
+ char **paths = (char **)list_to_array(plist);
+ int m = plist->size;
+ free_list(plist);
+
+ float avg_acc = 0;
+ float avg_topk = 0;
+ int *indexes = calloc(topk, sizeof(int));
+
+ for(i = 0; i < m; ++i){
+ int class = -1;
+ char *path = paths[i];
+ for(j = 0; j < classes; ++j){
+ if(strstr(path, labels[j])){
+ class = j;
+ break;
+ }
+ }
+ image im = load_image_color(paths[i], 0, 0);
+ image resized = resize_min(im, net.w);
+ image crop = crop_image(resized, (resized.w - net.w)/2, (resized.h - net.h)/2, net.w, net.h);
+ //show_image(im, "orig");
+ //show_image(crop, "cropped");
+ //cvWaitKey(0);
+ float *pred = network_predict(net, crop.data);
+
+ free_image(im);
+ free_image(resized);
+ free_image(crop);
+ top_k(pred, classes, topk, indexes);
+
+ if(indexes[0] == class) avg_acc += 1;
+ for(j = 0; j < topk; ++j){
+ if(indexes[j] == class) avg_topk += 1;
+ }
+
+ printf("%d: top 1: %f, top %d: %f\n", i, avg_acc/(i+1), topk, avg_topk/(i+1));
+ }
+}
+
void validate_classifier_multi(char *datacfg, char *filename, char *weightfile)
{
int i, j;
@@ -271,7 +406,7 @@
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
- int scales[] = {224, 256, 384, 480, 640};
+ int scales[] = {224, 256, 384, 480, 512};
int nscales = sizeof(scales)/sizeof(scales[0]);
char **paths = (char **)list_to_array(plist);
@@ -402,7 +537,7 @@
args.m = 0;
args.labels = 0;
args.d = &buffer;
- args.type = CLASSIFICATION_DATA;
+ args.type = OLD_CLASSIFICATION_DATA;
pthread_t load_thread = load_data_in_thread(args);
for(curr = net.batch; curr < m; curr += net.batch){
@@ -420,7 +555,7 @@
time=clock();
matrix pred = network_predict_data(net, val);
-
+
int i, j;
if (target_layer >= 0){
//layer l = net.layers[target_layer];
@@ -461,6 +596,8 @@
else if(0==strcmp(argv[2], "valid")) validate_classifier(data, cfg, weights);
else if(0==strcmp(argv[2], "valid10")) validate_classifier_10(data, cfg, weights);
else if(0==strcmp(argv[2], "validmulti")) validate_classifier_multi(data, cfg, weights);
+ else if(0==strcmp(argv[2], "validsingle")) validate_classifier_single(data, cfg, weights);
+ else if(0==strcmp(argv[2], "validfull")) validate_classifier_full(data, cfg, weights);
}
diff --git a/src/coco.c b/src/coco.c
index 41c2d80..947bef2 100644
--- a/src/coco.c
+++ b/src/coco.c
@@ -389,10 +389,10 @@
void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename);
static void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, char* filename)
{
- #if defined(OPENCV) && defined(GPU)
+ #if defined(OPENCV)
demo_coco(cfgfile, weightfile, thresh, cam_index, filename);
#else
- fprintf(stderr, "Need to compile with GPU and OpenCV for demo.\n");
+ fprintf(stderr, "Need to compile with OpenCV for demo.\n");
#endif
}
diff --git a/src/coco_demo.c b/src/coco_demo.c
new file mode 100644
index 0000000..4ba8eef
--- /dev/null
+++ b/src/coco_demo.c
@@ -0,0 +1,152 @@
+#include "network.h"
+#include "detection_layer.h"
+#include "cost_layer.h"
+#include "utils.h"
+#include "parser.h"
+#include "box.h"
+#include "image.h"
+#include <sys/time.h>
+
+#define FRAMES 1
+
+#ifdef OPENCV
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+void convert_coco_detections(float *predictions, int classes, int num, int square, int side, int w, int h, float thresh, float **probs, box *boxes, int only_objectness);
+
+extern char *coco_classes[];
+extern image coco_labels[];
+
+static float **probs;
+static box *boxes;
+static network net;
+static image in ;
+static image in_s ;
+static image det ;
+static image det_s;
+static image disp ;
+static CvCapture * cap;
+static float fps = 0;
+static float demo_thresh = 0;
+
+static float *predictions[FRAMES];
+static int demo_index = 0;
+static image images[FRAMES];
+static float *avg;
+
+void *fetch_in_thread_coco(void *ptr)
+{
+ in = get_image_from_stream(cap);
+ in_s = resize_image(in, net.w, net.h);
+ return 0;
+}
+
+void *detect_in_thread_coco(void *ptr)
+{
+ float nms = .4;
+
+ detection_layer l = net.layers[net.n-1];
+ float *X = det_s.data;
+ float *prediction = network_predict(net, X);
+
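+ // keep a rolling average of the last FRAMES predictions to smooth detections between frames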
+ memcpy(predictions[demo_index], prediction, l.outputs*sizeof(float));
+ mean_arrays(predictions, FRAMES, l.outputs, avg);
+
+ free_image(det_s);
+ convert_coco_detections(avg, l.classes, l.n, l.sqrt, l.side, 1, 1, demo_thresh, probs, boxes, 0);
+ if (nms > 0) do_nms(boxes, probs, l.side*l.side*l.n, l.classes, nms);
+ printf("\033[2J");
+ printf("\033[1;1H");
+ printf("\nFPS:%.0f\n",fps);
+ printf("Objects:\n\n");
+
+ images[demo_index] = det;
+ det = images[(demo_index + FRAMES/2 + 1)%FRAMES];
+ demo_index = (demo_index + 1)%FRAMES;
+
+ draw_detections(det, l.side*l.side*l.n, demo_thresh, boxes, probs, coco_classes, coco_labels, 80);
+ return 0;
+}
+
+void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename)
+{
+ demo_thresh = thresh;
+ printf("YOLO demo\n");
+ net = parse_network_cfg(cfgfile);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ set_batch_network(&net, 1);
+
+ srand(2222222);
+
+ if(filename){
+ cap = cvCaptureFromFile(filename);
+ }else{
+ cap = cvCaptureFromCAM(cam_index);
+ }
+
+ if(!cap) error("Couldn't connect to webcam.\n");
+ cvNamedWindow("YOLO", CV_WINDOW_NORMAL);
+ cvResizeWindow("YOLO", 512, 512);
+
+ detection_layer l = net.layers[net.n-1];
+ int j;
+
+ avg = (float *) calloc(l.outputs, sizeof(float));
+ for(j = 0; j < FRAMES; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
+ for(j = 0; j < FRAMES; ++j) images[j] = make_image(1,1,3);
+
+ boxes = (box *)calloc(l.side*l.side*l.n, sizeof(box));
+ probs = (float **)calloc(l.side*l.side*l.n, sizeof(float *));
+ for(j = 0; j < l.side*l.side*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float *));
+
+ pthread_t fetch_thread;
+ pthread_t detect_thread;
+
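+ // prime the pipeline so frame fetching, detection, and display can then overlap in separate threads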
+ fetch_in_thread_coco(0);
+ det = in;
+ det_s = in_s;
+
+ fetch_in_thread_coco(0);
+ detect_in_thread_coco(0);
+ disp = det;
+ det = in;
+ det_s = in_s;
+
+ for(j = 0; j < FRAMES/2; ++j){
+ fetch_in_thread_coco(0);
+ detect_in_thread_coco(0);
+ disp = det;
+ det = in;
+ det_s = in_s;
+ }
+
+ while(1){
+ struct timeval tval_before, tval_after, tval_result;
+ gettimeofday(&tval_before, NULL);
+ if(pthread_create(&fetch_thread, 0, fetch_in_thread_coco, 0)) error("Thread creation failed");
+ if(pthread_create(&detect_thread, 0, detect_in_thread_coco, 0)) error("Thread creation failed");
+ show_image(disp, "YOLO");
+ save_image(disp, "YOLO");
+ free_image(disp);
+ cvWaitKey(10);
+ pthread_join(fetch_thread, 0);
+ pthread_join(detect_thread, 0);
+
+ disp = det;
+ det = in;
+ det_s = in_s;
+
+ gettimeofday(&tval_after, NULL);
+ timersub(&tval_after, &tval_before, &tval_result);
+ float curr = 1000000.f/((long int)tval_result.tv_usec);
+ fps = .9*fps + .1*curr;
+ }
+}
+#else
+void demo_coco(char *cfgfile, char *weightfile, float thresh, int cam_index, char *filename){
+ fprintf(stderr, "YOLO-COCO demo needs OpenCV for webcam images.\n");
+}
+#endif
+
diff --git a/src/convolutional_kernels.cu b/src/convolutional_kernels.cu
index 4fdc1a1..4f474d6 100644
--- a/src/convolutional_kernels.cu
+++ b/src/convolutional_kernels.cu
@@ -121,11 +121,11 @@
check_error(cudaPeekAtLastError());
}
-void swap_binary(convolutional_layer l)
+void swap_binary(convolutional_layer *l)
{
- float *swap = l.filters_gpu;
- l.filters_gpu = l.binary_filters_gpu;
- l.binary_filters_gpu = swap;
+ float *swap = l->filters_gpu;
+ l->filters_gpu = l->binary_filters_gpu;
+ l->binary_filters_gpu = swap;
}
void forward_convolutional_layer_gpu(convolutional_layer l, network_state state)
@@ -139,7 +139,7 @@
fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
if(l.binary){
binarize_filters_gpu(l.filters_gpu, l.n, l.c*l.size*l.size, l.binary_filters_gpu);
- swap_binary(l);
+ swap_binary(&l);
}
for(i = 0; i < l.batch; ++i){
@@ -172,7 +172,7 @@
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, n);
activate_array_ongpu(l.output_gpu, m*n*l.batch, l.activation);
- if(l.binary) swap_binary(l);
+ if(l.binary) swap_binary(&l);
}
void backward_convolutional_layer_gpu(convolutional_layer l, network_state state)
@@ -206,7 +206,7 @@
gemm_ongpu(0,1,m,n,k,1,a + i*m*k,k,b,k,1,c,n);
if(state.delta){
- if(l.binary) swap_binary(l);
+ if(l.binary) swap_binary(&l);
float * a = l.filters_gpu;
float * b = l.delta_gpu;
float * c = l.col_image_gpu;
@@ -214,7 +214,7 @@
gemm_ongpu(1,0,n,k,m,1,a,n,b + i*k*m,k,0,c,k);
col2im_ongpu(l.col_image_gpu, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta + i*l.c*l.h*l.w);
- if(l.binary) swap_binary(l);
+ if(l.binary) swap_binary(&l);
}
}
}
diff --git a/src/cost_layer.c b/src/cost_layer.c
index 39ae809..fdba777 100644
--- a/src/cost_layer.c
+++ b/src/cost_layer.c
@@ -41,9 +41,11 @@
l.outputs = inputs;
l.cost_type = cost_type;
l.delta = calloc(inputs*batch, sizeof(float));
- l.output = calloc(1, sizeof(float));
+ l.output = calloc(inputs*batch, sizeof(float));
+ l.cost = calloc(1, sizeof(float));
#ifdef GPU
- l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+ l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+ l.output_gpu = cuda_make_array(l.output, inputs*batch);
#endif
return l;
}
@@ -53,9 +55,12 @@
l->inputs = inputs;
l->outputs = inputs;
l->delta = realloc(l->delta, inputs*l->batch*sizeof(float));
+ l->output = realloc(l->output, inputs*l->batch*sizeof(float));
#ifdef GPU
cuda_free(l->delta_gpu);
+ cuda_free(l->output_gpu);
l->delta_gpu = cuda_make_array(l->delta, inputs*l->batch);
+ l->output_gpu = cuda_make_array(l->output, inputs*l->batch);
#endif
}
@@ -69,13 +74,11 @@
}
}
if(l.cost_type == SMOOTH){
- smooth_l1_cpu(l.batch*l.inputs, state.input, state.truth, l.delta);
+ smooth_l1_cpu(l.batch*l.inputs, state.input, state.truth, l.delta, l.output);
} else {
- copy_cpu(l.batch*l.inputs, state.truth, 1, l.delta, 1);
- axpy_cpu(l.batch*l.inputs, -1, state.input, 1, l.delta, 1);
+ l2_cpu(l.batch*l.inputs, state.input, state.truth, l.delta, l.output);
}
- *(l.output) = dot_cpu(l.batch*l.inputs, l.delta, 1, l.delta, 1);
- //printf("cost: %f\n", *l.output);
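+ // per-element errors now live in l.output; the scalar cost is their sum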
+ l.cost[0] = sum_array(l.output, l.batch*l.inputs);
}
void backward_cost_layer(const cost_layer l, network_state state)
@@ -103,14 +106,13 @@
}
if(l.cost_type == SMOOTH){
- smooth_l1_gpu(l.batch*l.inputs, state.input, state.truth, l.delta_gpu);
+ smooth_l1_gpu(l.batch*l.inputs, state.input, state.truth, l.delta_gpu, l.output_gpu);
} else {
- copy_ongpu(l.batch*l.inputs, state.truth, 1, l.delta_gpu, 1);
- axpy_ongpu(l.batch*l.inputs, -1, state.input, 1, l.delta_gpu, 1);
+ l2_gpu(l.batch*l.inputs, state.input, state.truth, l.delta_gpu, l.output_gpu);
}
- cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
- *(l.output) = dot_cpu(l.batch*l.inputs, l.delta, 1, l.delta, 1);
+ cuda_pull_array(l.output_gpu, l.output, l.batch*l.inputs);
+ l.cost[0] = sum_array(l.output, l.batch*l.inputs);
}
void backward_cost_layer_gpu(const cost_layer l, network_state state)
diff --git a/src/crnn_layer.c b/src/crnn_layer.c
new file mode 100644
index 0000000..ed65665
--- /dev/null
+++ b/src/crnn_layer.c
@@ -0,0 +1,277 @@
+#include "crnn_layer.h"
+#include "convolutional_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void increment_layer(layer *l, int steps)
+{
+ int num = l->outputs*l->batch*steps;
+ l->output += num;
+ l->delta += num;
+ l->x += num;
+ l->x_norm += num;
+
+#ifdef GPU
+ l->output_gpu += num;
+ l->delta_gpu += num;
+ l->x_gpu += num;
+ l->x_norm_gpu += num;
+#endif
+}
+
+layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize)
+{
+ fprintf(stderr, "CRNN Layer: %d x %d x %d image, %d filters\n", h,w,c,output_filters);
+ batch = batch / steps;
+ layer l = {0};
+ l.batch = batch;
+ l.type = CRNN;
+ l.steps = steps;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.out_h = h;
+ l.out_w = w;
+ l.out_c = output_filters;
+ l.inputs = h*w*c;
+ l.hidden = h * w * hidden_filters;
+ l.outputs = l.out_h * l.out_w * l.out_c;
+
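+ // state buffer holds steps+1 hidden-state slices so the backward pass can revisit every step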
+ l.state = calloc(l.hidden*batch*(steps+1), sizeof(float));
+
+ l.input_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.input_layer) = make_convolutional_layer(batch*steps, h, w, c, hidden_filters, 3, 1, 1, activation, batch_normalize, 0);
+ l.input_layer->batch = batch;
+
+ l.self_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.self_layer) = make_convolutional_layer(batch*steps, h, w, hidden_filters, hidden_filters, 3, 1, 1, activation, batch_normalize, 0);
+ l.self_layer->batch = batch;
+
+ l.output_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.output_layer) = make_convolutional_layer(batch*steps, h, w, hidden_filters, output_filters, 3, 1, 1, activation, batch_normalize, 0);
+ l.output_layer->batch = batch;
+
+ l.output = l.output_layer->output;
+ l.delta = l.output_layer->delta;
+
+#ifdef GPU
+ l.state_gpu = cuda_make_array(l.state, l.hidden*batch*(steps+1));
+ l.output_gpu = l.output_layer->output_gpu;
+ l.delta_gpu = l.output_layer->delta_gpu;
+#endif
+
+ return l;
+}
+
+void update_crnn_layer(layer l, int batch, float learning_rate, float momentum, float decay)
+{
+ update_convolutional_layer(*(l.input_layer), batch, learning_rate, momentum, decay);
+ update_convolutional_layer(*(l.self_layer), batch, learning_rate, momentum, decay);
+ update_convolutional_layer(*(l.output_layer), batch, learning_rate, momentum, decay);
+}
+
+void forward_crnn_layer(layer l, network_state state)
+{
+ network_state s = {0};
+ s.train = state.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, output_layer.delta, 1);
+ fill_cpu(l.hidden * l.batch * l.steps, 0, self_layer.delta, 1);
+ fill_cpu(l.hidden * l.batch * l.steps, 0, input_layer.delta, 1);
+ if(state.train) fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+
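+ // per step: state = (shortcut ? previous state : 0) + input_conv(x_t) + self_conv(previous state), then output_t = output_conv(state)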
+ for (i = 0; i < l.steps; ++i) {
+ s.input = state.input;
+ forward_convolutional_layer(input_layer, s);
+
+ s.input = l.state;
+ forward_convolutional_layer(self_layer, s);
+
+ float *old_state = l.state;
+ if(state.train) l.state += l.hidden*l.batch;
+ if(l.shortcut){
+ copy_cpu(l.hidden * l.batch, old_state, 1, l.state, 1);
+ }else{
+ fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+ }
+ axpy_cpu(l.hidden * l.batch, 1, input_layer.output, 1, l.state, 1);
+ axpy_cpu(l.hidden * l.batch, 1, self_layer.output, 1, l.state, 1);
+
+ s.input = l.state;
+ forward_convolutional_layer(output_layer, s);
+
+ state.input += l.inputs*l.batch;
+ increment_layer(&input_layer, 1);
+ increment_layer(&self_layer, 1);
+ increment_layer(&output_layer, 1);
+ }
+}
+
+void backward_crnn_layer(layer l, network_state state)
+{
+ network_state s = {0};
+ s.train = state.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ increment_layer(&input_layer, l.steps-1);
+ increment_layer(&self_layer, l.steps-1);
+ increment_layer(&output_layer, l.steps-1);
+
+ l.state += l.hidden*l.batch*l.steps;
+ for (i = l.steps-1; i >= 0; --i) {
+ copy_cpu(l.hidden * l.batch, input_layer.output, 1, l.state, 1);
+ axpy_cpu(l.hidden * l.batch, 1, self_layer.output, 1, l.state, 1);
+
+ s.input = l.state;
+ s.delta = self_layer.delta;
+ backward_convolutional_layer(output_layer, s);
+
+ l.state -= l.hidden*l.batch;
+ /*
+ if(i > 0){
+ copy_cpu(l.hidden * l.batch, input_layer.output - l.hidden*l.batch, 1, l.state, 1);
+ axpy_cpu(l.hidden * l.batch, 1, self_layer.output - l.hidden*l.batch, 1, l.state, 1);
+ }else{
+ fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+ }
+ */
+
+ s.input = l.state;
+ s.delta = self_layer.delta - l.hidden*l.batch;
+ if (i == 0) s.delta = 0;
+ backward_convolutional_layer(self_layer, s);
+
+ copy_cpu(l.hidden*l.batch, self_layer.delta, 1, input_layer.delta, 1);
+ if (i > 0 && l.shortcut) axpy_cpu(l.hidden*l.batch, 1, self_layer.delta, 1, self_layer.delta - l.hidden*l.batch, 1);
+ s.input = state.input + i*l.inputs*l.batch;
+ if(state.delta) s.delta = state.delta + i*l.inputs*l.batch;
+ else s.delta = 0;
+ backward_convolutional_layer(input_layer, s);
+
+ increment_layer(&input_layer, -1);
+ increment_layer(&self_layer, -1);
+ increment_layer(&output_layer, -1);
+ }
+}
+
+#ifdef GPU
+
+void pull_crnn_layer(layer l)
+{
+ pull_convolutional_layer(*(l.input_layer));
+ pull_convolutional_layer(*(l.self_layer));
+ pull_convolutional_layer(*(l.output_layer));
+}
+
+void push_crnn_layer(layer l)
+{
+ push_convolutional_layer(*(l.input_layer));
+ push_convolutional_layer(*(l.self_layer));
+ push_convolutional_layer(*(l.output_layer));
+}
+
+void update_crnn_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay)
+{
+ update_convolutional_layer_gpu(*(l.input_layer), batch, learning_rate, momentum, decay);
+ update_convolutional_layer_gpu(*(l.self_layer), batch, learning_rate, momentum, decay);
+ update_convolutional_layer_gpu(*(l.output_layer), batch, learning_rate, momentum, decay);
+}
+
+void forward_crnn_layer_gpu(layer l, network_state state)
+{
+ network_state s = {0};
+ s.train = state.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ fill_ongpu(l.outputs * l.batch * l.steps, 0, output_layer.delta_gpu, 1);
+ fill_ongpu(l.hidden * l.batch * l.steps, 0, self_layer.delta_gpu, 1);
+ fill_ongpu(l.hidden * l.batch * l.steps, 0, input_layer.delta_gpu, 1);
+ if(state.train) fill_ongpu(l.hidden * l.batch, 0, l.state_gpu, 1);
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input = state.input;
+ forward_convolutional_layer_gpu(input_layer, s);
+
+ s.input = l.state_gpu;
+ forward_convolutional_layer_gpu(self_layer, s);
+
+ float *old_state = l.state_gpu;
+ if(state.train) l.state_gpu += l.hidden*l.batch;
+ if(l.shortcut){
+ copy_ongpu(l.hidden * l.batch, old_state, 1, l.state_gpu, 1);
+ }else{
+ fill_ongpu(l.hidden * l.batch, 0, l.state_gpu, 1);
+ }
+ axpy_ongpu(l.hidden * l.batch, 1, input_layer.output_gpu, 1, l.state_gpu, 1);
+ axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
+ s.input = l.state_gpu;
+ forward_convolutional_layer_gpu(output_layer, s);
+
+ state.input += l.inputs*l.batch;
+ increment_layer(&input_layer, 1);
+ increment_layer(&self_layer, 1);
+ increment_layer(&output_layer, 1);
+ }
+}
+
+void backward_crnn_layer_gpu(layer l, network_state state)
+{
+ network_state s = {0};
+ s.train = state.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+ increment_layer(&input_layer, l.steps - 1);
+ increment_layer(&self_layer, l.steps - 1);
+ increment_layer(&output_layer, l.steps - 1);
+ l.state_gpu += l.hidden*l.batch*l.steps;
+ for (i = l.steps-1; i >= 0; --i) {
+ copy_ongpu(l.hidden * l.batch, input_layer.output_gpu, 1, l.state_gpu, 1);
+ axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
+ s.input = l.state_gpu;
+ s.delta = self_layer.delta_gpu;
+ backward_convolutional_layer_gpu(output_layer, s);
+
+ l.state_gpu -= l.hidden*l.batch;
+
+ s.input = l.state_gpu;
+ s.delta = self_layer.delta_gpu - l.hidden*l.batch;
+ if (i == 0) s.delta = 0;
+ backward_convolutional_layer_gpu(self_layer, s);
+
+ copy_ongpu(l.hidden*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);
+ if (i > 0 && l.shortcut) axpy_ongpu(l.hidden*l.batch, 1, self_layer.delta_gpu, 1, self_layer.delta_gpu - l.hidden*l.batch, 1);
+ s.input = state.input + i*l.inputs*l.batch;
+ if(state.delta) s.delta = state.delta + i*l.inputs*l.batch;
+ else s.delta = 0;
+ backward_convolutional_layer_gpu(input_layer, s);
+
+ increment_layer(&input_layer, -1);
+ increment_layer(&self_layer, -1);
+ increment_layer(&output_layer, -1);
+ }
+}
+#endif
diff --git a/src/crnn_layer.h b/src/crnn_layer.h
new file mode 100644
index 0000000..0da942e
--- /dev/null
+++ b/src/crnn_layer.h
@@ -0,0 +1,24 @@
+
+#ifndef CRNN_LAYER_H
+#define CRNN_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize);
+
+void forward_crnn_layer(layer l, network_state state);
+void backward_crnn_layer(layer l, network_state state);
+void update_crnn_layer(layer l, int batch, float learning_rate, float momentum, float decay);
+
+#ifdef GPU
+void forward_crnn_layer_gpu(layer l, network_state state);
+void backward_crnn_layer_gpu(layer l, network_state state);
+void update_crnn_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay);
+void push_crnn_layer(layer l);
+void pull_crnn_layer(layer l);
+#endif
+
+#endif
+
diff --git a/src/darknet.c b/src/darknet.c
index c4006ce..5722729 100644
--- a/src/darknet.c
+++ b/src/darknet.c
@@ -21,6 +21,9 @@
extern void run_compare(int argc, char **argv);
extern void run_classifier(int argc, char **argv);
extern void run_char_rnn(int argc, char **argv);
+extern void run_vid_rnn(int argc, char **argv);
+extern void run_tag(int argc, char **argv);
+extern void run_cifar(int argc, char **argv);
void change_rate(char *filename, float scale, float add)
{
@@ -223,12 +226,18 @@
average(argc, argv);
} else if (0 == strcmp(argv[1], "yolo")){
run_yolo(argc, argv);
+ } else if (0 == strcmp(argv[1], "cifar")){
+ run_cifar(argc, argv);
} else if (0 == strcmp(argv[1], "rnn")){
run_char_rnn(argc, argv);
+ } else if (0 == strcmp(argv[1], "vid")){
+ run_vid_rnn(argc, argv);
} else if (0 == strcmp(argv[1], "coco")){
run_coco(argc, argv);
} else if (0 == strcmp(argv[1], "classifier")){
run_classifier(argc, argv);
+ } else if (0 == strcmp(argv[1], "tag")){
+ run_tag(argc, argv);
} else if (0 == strcmp(argv[1], "compare")){
run_compare(argc, argv);
} else if (0 == strcmp(argv[1], "dice")){
diff --git a/src/data.c b/src/data.c
index 88c8991..c429a73 100644
--- a/src/data.c
+++ b/src/data.c
@@ -82,6 +82,27 @@
return X;
}
+matrix load_image_cropped_paths(char **paths, int n, int min, int max, int size)
+{
+ int i;
+ matrix X;
+ X.rows = n;
+ X.vals = calloc(X.rows, sizeof(float*));
+ X.cols = 0;
+
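+ // augmentation: resize the short side to a random value in [min, max], take a random size x size crop, flip half the images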
+ for(i = 0; i < n; ++i){
+ image im = load_image_color(paths[i], 0, 0);
+ image crop = random_crop_image(im, min, max, size);
+ int flip = rand_r(&data_seed)%2;
+ if (flip) flip_image(crop);
+ free_image(im);
+ X.vals[i] = crop.data;
+ X.cols = crop.h*crop.w*crop.c;
+ }
+ return X;
+}
+
+
box_label *read_boxes(char *filename, int *n)
{
box_label *boxes = calloc(1, sizeof(box_label));
@@ -386,6 +407,33 @@
return y;
}
+matrix load_tags_paths(char **paths, int n, int k)
+{
+ matrix y = make_matrix(n, k);
+ int i;
+ int count = 0;
+ for(i = 0; i < n; ++i){
+ char *label = find_replace(paths[i], "imgs", "labels");
+ label = find_replace(label, "_iconl.jpeg", ".txt");
+ FILE *file = fopen(label, "r");
+ if(!file){
+ label = find_replace(label, "labels", "labels2");
+ file = fopen(label, "r");
+ if(!file) continue;
+ }
+ ++count;
+ int tag;
+ while(fscanf(file, "%d", &tag) == 1){
+ if(tag < k){
+ y.vals[i][tag] = 1;
+ }
+ }
+ fclose(file);
+ }
+ printf("%d/%d\n", count, n);
+ return y;
+}
+
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
@@ -641,8 +689,10 @@
//printf("Loading data: %d\n", rand_r(&data_seed));
load_args a = *(struct load_args*)ptr;
- if (a.type == CLASSIFICATION_DATA){
+ if (a.type == OLD_CLASSIFICATION_DATA){
*a.d = load_data(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
+ } else if (a.type == CLASSIFICATION_DATA){
+ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.min, a.max, a.size);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.classes, a.w, a.h, a.num_boxes, a.background);
} else if (a.type == WRITING_DATA){
@@ -656,6 +706,9 @@
} else if (a.type == IMAGE_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = resize_image(*(a.im), a.w, a.h);
+ } else if (a.type == TAG_DATA){
+ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size);
+ //*a.d = load_data(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
}
free(ptr);
return 0;
@@ -696,6 +749,30 @@
return d;
}
+data load_data_augment(char **paths, int n, int m, char **labels, int k, int min, int max, int size)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d;
+ d.shallow = 0;
+ d.X = load_image_cropped_paths(paths, n, min, max, size);
+ d.y = load_labels_paths(paths, n, labels, k);
+ if(m) free(paths);
+ return d;
+}
+
+data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.w = size;
+ d.h = size;
+ d.shallow = 0;
+ d.X = load_image_cropped_paths(paths, n, min, max, size);
+ d.y = load_tags_paths(paths, n, k);
+ if(m) free(paths);
+ return d;
+}
+
matrix concat_matrix(matrix m1, matrix m2)
{
int i, count = 0;
@@ -759,8 +836,8 @@
X.vals[i][j] = (double)bytes[j+1];
}
}
- translate_data_rows(d, -128);
- scale_data_rows(d, 1./128);
+ //translate_data_rows(d, -128);
+ scale_data_rows(d, 1./255);
//normalize_data_rows(d);
fclose(fp);
return d;
@@ -800,7 +877,7 @@
for(b = 0; b < 5; ++b){
char buff[256];
- sprintf(buff, "data/cifar10/data_batch_%d.bin", b+1);
+ sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
FILE *fp = fopen(buff, "rb");
if(!fp) file_error(buff);
for(i = 0; i < 10000; ++i){
@@ -815,8 +892,8 @@
fclose(fp);
}
//normalize_data_rows(d);
- translate_data_rows(d, -128);
- scale_data_rows(d, 1./128);
+ //translate_data_rows(d, -128);
+ scale_data_rows(d, 1./255);
return d;
}
diff --git a/src/data.h b/src/data.h
index 0ebdfc3..a3036a8 100644
--- a/src/data.h
+++ b/src/data.h
@@ -27,7 +27,7 @@
} data;
typedef enum {
- CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA
+ CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA
} data_type;
typedef struct load_args{
@@ -43,6 +43,7 @@
int nh;
int nw;
int num_boxes;
+ int min, max, size;
int classes;
int background;
float jitter;
@@ -67,6 +68,8 @@
data load_data_captcha_encode(char **paths, int n, int m, int w, int h);
data load_data(char **paths, int n, int m, char **labels, int k, int w, int h);
data load_data_detection(int n, char **paths, int m, int classes, int w, int h, int num_boxes, int background);
+data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size);
+data load_data_augment(char **paths, int n, int m, char **labels, int k, int min, int max, int size);
box_label *read_boxes(char *filename, int *n);
data load_cifar10_data(char *filename);
diff --git a/src/image.c b/src/image.c
index 60ccfb8..e2cf97f 100644
--- a/src/image.c
+++ b/src/image.c
@@ -4,11 +4,6 @@
#include <stdio.h>
#include <math.h>
-#ifdef OPENCV
-#include "opencv2/highgui/highgui_c.h"
-#include "opencv2/imgproc/imgproc_c.h"
-#endif
-
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
@@ -330,6 +325,16 @@
}
#ifdef OPENCV
+image get_image_from_stream(CvCapture *cap)
+{
+ IplImage* src = cvQueryFrame(cap);
+ image im = ipl_to_image(src);
+ rgbgr_image(im);
+ return im;
+}
+#endif
+
+#ifdef OPENCV
void save_image_jpg(image p, char *name)
{
image copy = copy_image(p);
@@ -459,6 +464,39 @@
return cropped;
}
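+// scale an image so its shorter side equals min, preserving the aspect ratio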
+image resize_min(image im, int min)
+{
+ int w = im.w;
+ int h = im.h;
+ if(w < h){
+ h = (h * min) / w;
+ w = min;
+ } else {
+ w = (w * min) / h;
+ h = min;
+ }
+ image resized = resize_image(im, w, h);
+ return resized;
+}
+
+image random_crop_image(image im, int low, int high, int size)
+{
+ int r = rand_int(low, high);
+ image resized = resize_min(im, r);
+ int dx = rand_int(0, resized.w - size);
+ int dy = rand_int(0, resized.h - size);
+ image crop = crop_image(resized, dx, dy, size, size);
+
+ /*
+ show_image(im, "orig");
+ show_image(crop, "cropped");
+ cvWaitKey(0);
+ */
+
+ free_image(resized);
+ return crop;
+}
+
float three_way_max(float a, float b, float c)
{
return (a > b) ? ( (a > c) ? a : c) : ( (b > c) ? b : c) ;
@@ -724,7 +762,7 @@
image exp5 = copy_image(im);
exposure_image(exp5, .5);
- #ifdef GPU
+#ifdef GPU
image r = resize_image(im, im.w, im.h);
image black = make_image(im.w*2 + 3, im.h*2 + 3, 9);
image black2 = make_image(im.w, im.h, 3);
@@ -741,7 +779,7 @@
cuda_pull_array(black2_gpu, black2.data, black2.w*black2.h*black2.c);
show_image_layers(black, "Black");
show_image(black2, "Recreate");
- #endif
+#endif
show_image(im, "Original");
show_image(gray, "Gray");
@@ -788,8 +826,12 @@
if( (src = cvLoadImage(filename, flag)) == 0 )
{
- printf("Cannot load image \"%s\"\n", filename);
- exit(0);
+ fprintf(stderr, "Cannot load image \"%s\"\n", filename);
+ char buff[256];
+ sprintf(buff, "echo %s >> bad.list", filename);
+ system(buff);
+ return make_image(10,10,3);
+ //exit(0);
}
image out = ipl_to_image(src);
cvReleaseImage(&src);
diff --git a/src/image.h b/src/image.h
index 4846bc1..b4a7a23 100644
--- a/src/image.h
+++ b/src/image.h
@@ -8,6 +8,11 @@
#include <math.h>
#include "box.h"
+#ifdef OPENCV
+#include "opencv2/highgui/highgui_c.h"
+#include "opencv2/imgproc/imgproc_c.h"
+#endif
+
typedef struct {
int h;
int w;
@@ -25,8 +30,9 @@
image image_distance(image a, image b);
void scale_image(image m, float s);
image crop_image(image im, int dx, int dy, int w, int h);
+image random_crop_image(image im, int low, int high, int size);
image resize_image(image im, int w, int h);
-image resize_image2(image im, int w, int h);
+image resize_min(image im, int min);
void translate_image(image m, float s);
void normalize_image(image p);
image rotate_image(image m, float rad);
@@ -53,6 +59,8 @@
#ifdef OPENCV
void save_image_jpg(image p, char *name);
+image get_image_from_stream(CvCapture *cap);
+image ipl_to_image(IplImage* src);
#endif
void print_image(image m);
diff --git a/src/imagenet.c b/src/imagenet.c
index 4c4d2bd..1625526 100644
--- a/src/imagenet.c
+++ b/src/imagenet.c
@@ -39,7 +39,7 @@
args.m = N;
args.labels = labels;
args.d = &buffer;
- args.type = CLASSIFICATION_DATA;
+ args.type = OLD_CLASSIFICATION_DATA;
load_thread = load_data_in_thread(args);
int epoch = (*net.seen)/N;
@@ -115,7 +115,7 @@
args.m = 0;
args.labels = labels;
args.d = &buffer;
- args.type = CLASSIFICATION_DATA;
+ args.type = OLD_CLASSIFICATION_DATA;
pthread_t load_thread = load_data_in_thread(args);
for(i = 1; i <= splits; ++i){
diff --git a/src/layer.h b/src/layer.h
index 91042a2..9308370 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -22,7 +22,8 @@
LOCAL,
SHORTCUT,
ACTIVE,
- RNN
+ RNN,
+ CRNN
} LAYER_TYPE;
typedef enum{
diff --git a/src/network.c b/src/network.c
index 32c3ba1..e6fb51e 100644
--- a/src/network.c
+++ b/src/network.c
@@ -9,6 +9,7 @@
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
+#include "crnn_layer.h"
#include "local_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
@@ -85,6 +86,8 @@
return "connected";
case RNN:
return "rnn";
+ case CRNN:
+ return "crnn";
case MAXPOOL:
return "maxpool";
case AVGPOOL:
@@ -149,6 +152,8 @@
forward_connected_layer(l, state);
} else if(l.type == RNN){
forward_rnn_layer(l, state);
+ } else if(l.type == CRNN){
+ forward_crnn_layer(l, state);
} else if(l.type == CROP){
forward_crop_layer(l, state);
} else if(l.type == COST){
@@ -185,6 +190,8 @@
update_connected_layer(l, update_batch, rate, net.momentum, net.decay);
} else if(l.type == RNN){
update_rnn_layer(l, update_batch, rate, net.momentum, net.decay);
+ } else if(l.type == CRNN){
+ update_crnn_layer(l, update_batch, rate, net.momentum, net.decay);
} else if(l.type == LOCAL){
update_local_layer(l, update_batch, rate, net.momentum, net.decay);
}
@@ -205,7 +212,7 @@
int count = 0;
for(i = 0; i < net.n; ++i){
if(net.layers[i].type == COST){
- sum += net.layers[i].output[0];
+ sum += net.layers[i].cost[0];
++count;
}
if(net.layers[i].type == DETECTION){
@@ -261,6 +268,8 @@
backward_connected_layer(l, state);
} else if(l.type == RNN){
backward_rnn_layer(l, state);
+ } else if(l.type == CRNN){
+ backward_crnn_layer(l, state);
} else if(l.type == LOCAL){
backward_local_layer(l, state);
} else if(l.type == COST){
diff --git a/src/network.h b/src/network.h
index 3d7c574..f4f8b5c 100644
--- a/src/network.h
+++ b/src/network.h
@@ -36,6 +36,7 @@
int inputs;
int h, w, c;
+ int max_crop;
#ifdef GPU
float **input_gpu;
diff --git a/src/network_kernels.cu b/src/network_kernels.cu
index ea12819..730634e 100644
--- a/src/network_kernels.cu
+++ b/src/network_kernels.cu
@@ -16,6 +16,7 @@
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
+#include "crnn_layer.h"
#include "detection_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
@@ -59,6 +60,8 @@
forward_connected_layer_gpu(l, state);
} else if(l.type == RNN){
forward_rnn_layer_gpu(l, state);
+ } else if(l.type == CRNN){
+ forward_crnn_layer_gpu(l, state);
} else if(l.type == CROP){
forward_crop_layer_gpu(l, state);
} else if(l.type == COST){
@@ -122,6 +125,8 @@
backward_connected_layer_gpu(l, state);
} else if(l.type == RNN){
backward_rnn_layer_gpu(l, state);
+ } else if(l.type == CRNN){
+ backward_crnn_layer_gpu(l, state);
} else if(l.type == COST){
backward_cost_layer_gpu(l, state);
} else if(l.type == ROUTE){
@@ -147,6 +152,8 @@
update_connected_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
} else if(l.type == RNN){
update_rnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
+ } else if(l.type == CRNN){
+ update_crnn_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
} else if(l.type == LOCAL){
update_local_layer_gpu(l, update_batch, rate, net.momentum, net.decay);
}
diff --git a/src/nightmare.c b/src/nightmare.c
index 2b1c76c..ec7166c 100644
--- a/src/nightmare.c
+++ b/src/nightmare.c
@@ -8,6 +8,8 @@
#include "opencv2/highgui/highgui_c.h"
#endif
+// ./darknet nightmare cfg/extractor.recon.cfg ~/trained/yolo-coco.conv frame6.png -reconstruct -iters 500 -i 3 -lambda .1 -rate .01 -smooth 2
+
float abs_mean(float *x, int n)
{
int i;
@@ -31,8 +33,8 @@
void optimize_picture(network *net, image orig, int max_layer, float scale, float rate, float thresh, int norm)
{
- scale_image(orig, 2);
- translate_image(orig, -1);
+ //scale_image(orig, 2);
+ //translate_image(orig, -1);
net->n = max_layer + 1;
int dx = rand()%16 - 8;
@@ -98,8 +100,8 @@
translate_image(orig, mean);
*/
- translate_image(orig, 1);
- scale_image(orig, .5);
+ //translate_image(orig, 1);
+ //scale_image(orig, .5);
//normalize_image(orig);
constrain_image(orig);
@@ -133,50 +135,47 @@
}
}
-void reconstruct_picture(network net, float *features, image recon, image update, float rate, float momentum, float lambda, int smooth_size)
+void reconstruct_picture(network net, float *features, image recon, image update, float rate, float momentum, float lambda, int smooth_size, int iters)
{
- scale_image(recon, 2);
- translate_image(recon, -1);
+ int iter = 0;
+ for (iter = 0; iter < iters; ++iter) {
+ image delta = make_image(recon.w, recon.h, recon.c);
- image delta = make_image(recon.w, recon.h, recon.c);
-
- network_state state = {0};
+ network_state state = {0};
#ifdef GPU
- state.input = cuda_make_array(recon.data, recon.w*recon.h*recon.c);
- state.delta = cuda_make_array(delta.data, delta.w*delta.h*delta.c);
- state.truth = cuda_make_array(features, get_network_output_size(net));
+ state.input = cuda_make_array(recon.data, recon.w*recon.h*recon.c);
+ state.delta = cuda_make_array(delta.data, delta.w*delta.h*delta.c);
+ state.truth = cuda_make_array(features, get_network_output_size(net));
- forward_network_gpu(net, state);
- backward_network_gpu(net, state);
+ forward_network_gpu(net, state);
+ backward_network_gpu(net, state);
- cuda_pull_array(state.delta, delta.data, delta.w*delta.h*delta.c);
+ cuda_pull_array(state.delta, delta.data, delta.w*delta.h*delta.c);
- cuda_free(state.input);
- cuda_free(state.delta);
- cuda_free(state.truth);
+ cuda_free(state.input);
+ cuda_free(state.delta);
+ cuda_free(state.truth);
#else
- state.input = recon.data;
- state.delta = delta.data;
- state.truth = features;
+ state.input = recon.data;
+ state.delta = delta.data;
+ state.truth = features;
- forward_network(net, state);
- backward_network(net, state);
+ forward_network(net, state);
+ backward_network(net, state);
#endif
- axpy_cpu(recon.w*recon.h*recon.c, 1, delta.data, 1, update.data, 1);
- smooth(recon, update, lambda, smooth_size);
+ axpy_cpu(recon.w*recon.h*recon.c, 1, delta.data, 1, update.data, 1);
+ smooth(recon, update, lambda, smooth_size);
- axpy_cpu(recon.w*recon.h*recon.c, rate, update.data, 1, recon.data, 1);
- scal_cpu(recon.w*recon.h*recon.c, momentum, update.data, 1);
+ axpy_cpu(recon.w*recon.h*recon.c, rate, update.data, 1, recon.data, 1);
+ scal_cpu(recon.w*recon.h*recon.c, momentum, update.data, 1);
- translate_image(recon, 1);
- scale_image(recon, .5);
+ //float mag = mag_array(recon.data, recon.w*recon.h*recon.c);
+ //scal_cpu(recon.w*recon.h*recon.c, 600/mag, recon.data, 1);
- float mag = mag_array(recon.data, recon.w*recon.h*recon.c);
- scal_cpu(recon.w*recon.h*recon.c, 600/mag, recon.data, 1);
-
- constrain_image(recon);
- free_image(delta);
+ constrain_image(recon);
+ free_image(delta);
+ }
}
@@ -226,7 +225,7 @@
im = resized;
}
- float *features;
+ float *features = 0;
image update;
if (reconstruct){
resize_network(&net, im.w, im.h);
@@ -241,13 +240,19 @@
printf("%d features\n", out_im.w*out_im.h*out_im.c);
- im = resize_image(im, im.w*2, im.h);
- f_im = resize_image(f_im, f_im.w*2, f_im.h);
+ im = resize_image(im, im.w, im.h);
+ f_im = resize_image(f_im, f_im.w, f_im.h);
features = f_im.data;
+ int i;
+ for(i = 0; i < 14*14*512; ++i){
+ features[i] += rand_uniform(-.19, .19);
+ }
+
free_image(im);
im = make_random_image(im.w, im.h, im.c);
update = make_image(im.w, im.h, im.c);
+
}
int e;
@@ -259,11 +264,12 @@
fprintf(stderr, "%d, ", n);
fflush(stderr);
if(reconstruct){
- reconstruct_picture(net, features, im, update, rate, momentum, lambda, smooth_size);
+ reconstruct_picture(net, features, im, update, rate, momentum, lambda, smooth_size, 1);
+ //if ((n+1)%30 == 0) rate *= .5;
show_image(im, "reconstruction");
- #ifdef OPENCV
+#ifdef OPENCV
cvWaitKey(10);
- #endif
+#endif
}else{
int layer = max_layer + rand()%range - range/2;
int octave = rand()%octaves;
diff --git a/src/parser.c b/src/parser.c
index 8051fd7..97ce7a1 100644
--- a/src/parser.c
+++ b/src/parser.c
@@ -12,6 +12,7 @@
#include "deconvolutional_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
+#include "crnn_layer.h"
#include "maxpool_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
@@ -36,6 +37,7 @@
int is_deconvolutional(section *s);
int is_connected(section *s);
int is_rnn(section *s);
+int is_crnn(section *s);
int is_maxpool(section *s);
int is_avgpool(section *s);
int is_dropout(section *s);
@@ -169,6 +171,21 @@
return layer;
}
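+// build a CRNN layer from a [crnn] section; recognized keys: output_filters, hidden_filters, activation, batch_normalize, shortcut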
+layer parse_crnn(list *options, size_params params)
+{
+ int output_filters = option_find_int(options, "output_filters",1);
+ int hidden_filters = option_find_int(options, "hidden_filters",1);
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
+ layer l = make_crnn_layer(params.batch, params.w, params.h, params.c, hidden_filters, output_filters, params.time_steps, activation, batch_normalize);
+
+ l.shortcut = option_find_int_quiet(options, "shortcut", 0);
+
+ return l;
+}
+
layer parse_rnn(list *options, size_params params)
{
int output = option_find_int(options, "output",1);
@@ -419,6 +436,7 @@
net->w = option_find_int_quiet(options, "width",0);
net->c = option_find_int_quiet(options, "channels",0);
net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
+ net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
@@ -501,6 +519,8 @@
l = parse_deconvolutional(options, params);
}else if(is_rnn(s)){
l = parse_rnn(options, params);
+ }else if(is_crnn(s)){
+ l = parse_crnn(options, params);
}else if(is_connected(s)){
l = parse_connected(options, params);
}else if(is_crop(s)){
@@ -591,6 +611,10 @@
return (strcmp(s->type, "[net]")==0
|| strcmp(s->type, "[network]")==0);
}
+int is_crnn(section *s)
+{
+ return (strcmp(s->type, "[crnn]")==0);
+}
int is_rnn(section *s)
{
return (strcmp(s->type, "[rnn]")==0);
@@ -705,6 +729,23 @@
fclose(fp);
}
+void save_convolutional_weights(layer l, FILE *fp)
+{
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_convolutional_layer(l);
+ }
+#endif
+ int num = l.n*l.c*l.size*l.size;
+ fwrite(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize){
+ fwrite(l.scales, sizeof(float), l.n, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.n, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.n, fp);
+ }
+ fwrite(l.filters, sizeof(float), num, fp);
+}
+
void save_connected_weights(layer l, FILE *fp)
{
#ifdef GPU
@@ -739,25 +780,17 @@
for(i = 0; i < net.n && i < cutoff; ++i){
layer l = net.layers[i];
if(l.type == CONVOLUTIONAL){
-#ifdef GPU
- if(gpu_index >= 0){
- pull_convolutional_layer(l);
- }
-#endif
- int num = l.n*l.c*l.size*l.size;
- fwrite(l.biases, sizeof(float), l.n, fp);
- if (l.batch_normalize){
- fwrite(l.scales, sizeof(float), l.n, fp);
- fwrite(l.rolling_mean, sizeof(float), l.n, fp);
- fwrite(l.rolling_variance, sizeof(float), l.n, fp);
- }
- fwrite(l.filters, sizeof(float), num, fp);
+ save_convolutional_weights(l, fp);
} if(l.type == CONNECTED){
save_connected_weights(l, fp);
} if(l.type == RNN){
save_connected_weights(*(l.input_layer), fp);
save_connected_weights(*(l.self_layer), fp);
save_connected_weights(*(l.output_layer), fp);
+ } if(l.type == CRNN){
+ save_convolutional_weights(*(l.input_layer), fp);
+ save_convolutional_weights(*(l.self_layer), fp);
+ save_convolutional_weights(*(l.output_layer), fp);
} if(l.type == LOCAL){
#ifdef GPU
if(gpu_index >= 0){
@@ -809,6 +842,27 @@
#endif
}
+void load_convolutional_weights(layer l, FILE *fp)
+{
+ int num = l.n*l.c*l.size*l.size;
+ fread(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize && (!l.dontloadscales)){
+ fread(l.scales, sizeof(float), l.n, fp);
+ fread(l.rolling_mean, sizeof(float), l.n, fp);
+ fread(l.rolling_variance, sizeof(float), l.n, fp);
+ }
+ fread(l.filters, sizeof(float), num, fp);
+ if (l.flipped) {
+ transpose_matrix(l.filters, l.c*l.size*l.size, l.n);
+ }
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_convolutional_layer(l);
+ }
+#endif
+}
+
+
void load_weights_upto(network *net, char *filename, int cutoff)
{
fprintf(stderr, "Loading weights from %s...", filename);
@@ -830,22 +884,7 @@
layer l = net->layers[i];
if (l.dontload) continue;
if(l.type == CONVOLUTIONAL){
- int num = l.n*l.c*l.size*l.size;
- fread(l.biases, sizeof(float), l.n, fp);
- if (l.batch_normalize && (!l.dontloadscales)){
- fread(l.scales, sizeof(float), l.n, fp);
- fread(l.rolling_mean, sizeof(float), l.n, fp);
- fread(l.rolling_variance, sizeof(float), l.n, fp);
- }
- fread(l.filters, sizeof(float), num, fp);
- if (l.flipped) {
- transpose_matrix(l.filters, l.c*l.size*l.size, l.n);
- }
-#ifdef GPU
- if(gpu_index >= 0){
- push_convolutional_layer(l);
- }
-#endif
+ load_convolutional_weights(l, fp);
}
if(l.type == DECONVOLUTIONAL){
int num = l.n*l.c*l.size*l.size;
@@ -860,6 +899,11 @@
if(l.type == CONNECTED){
load_connected_weights(l, fp, transpose);
}
+ if(l.type == CRNN){
+ load_convolutional_weights(*(l.input_layer), fp);
+ load_convolutional_weights(*(l.self_layer), fp);
+ load_convolutional_weights(*(l.output_layer), fp);
+ }
if(l.type == RNN){
load_connected_weights(*(l.input_layer), fp, transpose);
load_connected_weights(*(l.self_layer), fp, transpose);
diff --git a/src/rnn.c b/src/rnn.c
index 3865209..30fa4bd 100644
--- a/src/rnn.c
+++ b/src/rnn.c
@@ -71,6 +71,7 @@
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
int batch = net.batch;
int steps = net.time_steps;
+ //*net.seen = 0;
int i = (*net.seen)/net.batch;
clock_t time;
diff --git a/src/rnn_layer.c b/src/rnn_layer.c
index 384169a..35cf992 100644
--- a/src/rnn_layer.c
+++ b/src/rnn_layer.c
@@ -10,7 +10,7 @@
#include <stdlib.h>
#include <string.h>
-void increment_layer(layer *l, int steps)
+static void increment_layer(layer *l, int steps)
{
int num = l->outputs*l->batch*steps;
l->output += num;
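Making increment_layer static keeps the helper private to rnn_layer.c, most likely so it does not collide with the similarly named helper carried by the new crnn_layer.c added elsewhere in this patch. The idea behind the helper, as a rough sketch using the fields visible above: each sub-layer's output and delta buffers hold one slab per time step, and bumping the pointers by outputs*batch*steps re-aims the layer at another step without copying anything.

    #include <stddef.h>

    /* Sketch only: address of the buffer slab for a given time step. */
    float *step_slab(float *base, int outputs, int batch, int step)
    {
        return base + (size_t)outputs*batch*step;
    }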
diff --git a/src/rnn_vid.c b/src/rnn_vid.c
new file mode 100644
index 0000000..183ae77
--- /dev/null
+++ b/src/rnn_vid.c
@@ -0,0 +1,210 @@
+#include "network.h"
+#include "cost_layer.h"
+#include "utils.h"
+#include "parser.h"
+#include "blas.h"
+
+#ifdef OPENCV
+#include "opencv2/highgui/highgui_c.h"
+
+void reconstruct_picture(network net, float *features, image recon, image update, float rate, float momentum, float lambda, int smooth_size, int iters);
+
+
+typedef struct {
+ float *x;
+ float *y;
+} float_pair;
+
+float_pair get_rnn_vid_data(network net, char **files, int n, int batch, int steps)
+{
+ int b;
+ assert(net.batch == steps + 1);
+ image out_im = get_network_image(net);
+ int output_size = out_im.w*out_im.h*out_im.c;
+ printf("%d %d %d\n", out_im.w, out_im.h, out_im.c);
+ float *feats = calloc(net.batch*batch*output_size, sizeof(float));
+ for(b = 0; b < batch; ++b){
+ int input_size = net.w*net.h*net.c;
+ float *input = calloc(input_size*net.batch, sizeof(float));
+ char *filename = files[rand()%n];
+ CvCapture *cap = cvCaptureFromFile(filename);
+ int frames = cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT);
+ if (frames < (steps + 4)){
+ --b;
+ free(input);
+ continue;
+ }
+ int index = rand() % (frames - steps - 2);
+
+ printf("frames: %d, index: %d\n", frames, index);
+ cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, index);
+
+ int i;
+ for(i = 0; i < net.batch; ++i){
+ IplImage* src = cvQueryFrame(cap);
+ image im = ipl_to_image(src);
+ rgbgr_image(im);
+ image re = resize_image(im, net.w, net.h);
+ //show_image(re, "loaded");
+ //cvWaitKey(10);
+ memcpy(input + i*input_size, re.data, input_size*sizeof(float));
+ free_image(im);
+ free_image(re);
+ }
+ float *output = network_predict(net, input);
+
+ free(input);
+
+ for(i = 0; i < net.batch; ++i){
+ memcpy(feats + (b + i*batch)*output_size, output + i*output_size, output_size*sizeof(float));
+ }
+
+ cvReleaseCapture(&cap);
+ }
+
+ //printf("%d %d %d\n", out_im.w, out_im.h, out_im.c);
+ float_pair p = {0};
+ p.x = feats;
+ p.y = feats + output_size*batch; //+ out_im.w*out_im.h*out_im.c;
+
+ return p;
+}
+
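The pointer arithmetic at the end of get_rnn_vid_data is what turns the feature buffer into a self-supervised dataset. Because features are stored at offset (b + i*batch)*output_size, feats is laid out time-step major, one slab of batch*output_size floats per step; offsetting y by a single slab therefore pairs the features of frame t (input) with the features of frame t+1 (target). A small index sketch, reusing the names above for illustration:

    #include <stddef.h>

    /* Sketch: feats holds steps+1 slabs, each batch*output_size floats. */
    float *feat_at(float *feats, int output_size, int batch, int step, int b)
    {
        return feats + ((size_t)step*batch + b)*output_size;
    }
    /* p.x == feat_at(feats, output_size, batch, 0, 0)   (frame t)    */
    /* p.y == feat_at(feats, output_size, batch, 1, 0)   (frame t+1)  */
    /*      == p.x + output_size*batch                                */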
+
+void train_vid_rnn(char *cfgfile, char *weightfile)
+{
+ char *train_videos = "data/vid/train.txt";
+ char *backup_directory = "/home/pjreddie/backup/";
+ srand(time(0));
+ data_seed = time(0);
+ char *base = basecfg(cfgfile);
+ printf("%s\n", base);
+ float avg_loss = -1;
+ network net = parse_network_cfg(cfgfile);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
+ int imgs = net.batch*net.subdivisions;
+ int i = *net.seen/imgs;
+
+ list *plist = get_paths(train_videos);
+ int N = plist->size;
+ char **paths = (char **)list_to_array(plist);
+ clock_t time;
+ int steps = net.time_steps;
+ int batch = net.batch / net.time_steps;
+
+ network extractor = parse_network_cfg("cfg/extractor.cfg");
+ load_weights(&extractor, "/home/pjreddie/trained/yolo-coco.conv");
+
+ while(get_current_batch(net) < net.max_batches){
+ i += 1;
+ time=clock();
+ float_pair p = get_rnn_vid_data(extractor, paths, N, batch, steps);
+
+ float loss = train_network_datum(net, p.x, p.y) / (net.batch);
+
+
+ free(p.x);
+ if (avg_loss < 0) avg_loss = loss;
+ avg_loss = avg_loss*.9 + loss*.1;
+
+ fprintf(stderr, "%d: %f, %f avg, %f rate, %lf seconds\n", i, loss, avg_loss, get_current_rate(net), sec(clock()-time));
+ if(i%100==0){
+ char buff[256];
+ sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
+ save_weights(net, buff);
+ }
+ if(i%10==0){
+ char buff[256];
+ sprintf(buff, "%s/%s.backup", backup_directory, base);
+ save_weights(net, buff);
+ }
+ }
+ char buff[256];
+ sprintf(buff, "%s/%s_final.weights", backup_directory, base);
+ save_weights(net, buff);
+}
+
+
+image save_reconstruction(network net, image *init, float *feat, char *name, int i)
+{
+ image recon;
+ if (init) {
+ recon = copy_image(*init);
+ } else {
+ recon = make_random_image(net.w, net.h, 3);
+ }
+
+ image update = make_image(net.w, net.h, 3);
+ reconstruct_picture(net, feat, recon, update, .01, .9, .1, 2, 50);
+ char buff[256];
+ sprintf(buff, "%s%d", name, i);
+ save_image(recon, buff);
+ free_image(update);
+ return recon;
+}
+
+void generate_vid_rnn(char *cfgfile, char *weightfile)
+{
+ network extractor = parse_network_cfg("cfg/extractor.recon.cfg");
+ load_weights(&extractor, "/home/pjreddie/trained/yolo-coco.conv");
+
+ network net = parse_network_cfg(cfgfile);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ set_batch_network(&extractor, 1);
+ set_batch_network(&net, 1);
+
+ int i;
+ CvCapture *cap = cvCaptureFromFile("/extra/vid/ILSVRC2015/Data/VID/snippets/val/ILSVRC2015_val_00007030.mp4");
+ float *feat;
+ float *next;
+ image last;
+ for(i = 0; i < 25; ++i){
+ image im = get_image_from_stream(cap);
+ image re = resize_image(im, extractor.w, extractor.h);
+ feat = network_predict(extractor, re.data);
+ if(i > 0){
+ printf("%f %f\n", mean_array(feat, 14*14*512), variance_array(feat, 14*14*512));
+ printf("%f %f\n", mean_array(next, 14*14*512), variance_array(next, 14*14*512));
+ printf("%f\n", mse_array(feat, 14*14*512));
+ axpy_cpu(14*14*512, -1, feat, 1, next, 1);
+ printf("%f\n", mse_array(next, 14*14*512));
+ }
+ next = network_predict(net, feat);
+
+ free_image(im);
+
+ free_image(save_reconstruction(extractor, 0, feat, "feat", i));
+ free_image(save_reconstruction(extractor, 0, next, "next", i));
+ if (i==24) last = copy_image(re);
+ free_image(re);
+ }
+ for(i = 0; i < 30; ++i){
+ next = network_predict(net, next);
+ image new = save_reconstruction(extractor, &last, next, "new", i);
+ free_image(last);
+ last = new;
+ }
+}
+
+void run_vid_rnn(int argc, char **argv)
+{
+ if(argc < 4){
+ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
+ return;
+ }
+
+ char *cfg = argv[3];
+ char *weights = (argc > 4) ? argv[4] : 0;
+ //char *filename = (argc > 5) ? argv[5]: 0;
+ if(0==strcmp(argv[2], "train")) train_vid_rnn(cfg, weights);
+ else if(0==strcmp(argv[2], "generate")) generate_vid_rnn(cfg, weights);
+}
+#else
+void run_vid_rnn(int argc, char **argv){}
+#endif
+
diff --git a/src/tag.c b/src/tag.c
new file mode 100644
index 0000000..8b63d31
--- /dev/null
+++ b/src/tag.c
@@ -0,0 +1,144 @@
+#include "network.h"
+#include "utils.h"
+#include "parser.h"
+
+#ifdef OPENCV
+#include "opencv2/highgui/highgui_c.h"
+#endif
+
+void train_tag(char *cfgfile, char *weightfile)
+{
+ data_seed = time(0);
+ srand(time(0));
+ float avg_loss = -1;
+ char *base = basecfg(cfgfile);
+ char *backup_directory = "/home/pjreddie/backup/";
+ printf("%s\n", base);
+ network net = parse_network_cfg(cfgfile);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
+ int imgs = 1024;
+ list *plist = get_paths("/home/pjreddie/tag/train.list");
+ char **paths = (char **)list_to_array(plist);
+ printf("%d\n", plist->size);
+ int N = plist->size;
+ clock_t time;
+ pthread_t load_thread;
+ data train;
+ data buffer;
+
+ load_args args = {0};
+ args.w = net.w;
+ args.h = net.h;
+
+ args.min = net.w;
+ args.max = net.max_crop;
+ args.size = net.w;
+
+ args.paths = paths;
+ args.classes = net.outputs;
+ args.n = imgs;
+ args.m = N;
+ args.d = &buffer;
+ args.type = TAG_DATA;
+
+ fprintf(stderr, "%d classes\n", net.outputs);
+
+ load_thread = load_data_in_thread(args);
+ int epoch = (*net.seen)/N;
+ while(get_current_batch(net) < net.max_batches || net.max_batches == 0){
+ time=clock();
+ pthread_join(load_thread, 0);
+ train = buffer;
+
+ load_thread = load_data_in_thread(args);
+ printf("Loaded: %lf seconds\n", sec(clock()-time));
+ time=clock();
+ float loss = train_network(net, train);
+ if(avg_loss == -1) avg_loss = loss;
+ avg_loss = avg_loss*.9 + loss*.1;
+ printf("%d, %.3f: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), (float)(*net.seen)/N, loss, avg_loss, get_current_rate(net), sec(clock()-time), *net.seen);
+ free_data(train);
+ if(*net.seen/N > epoch){
+ epoch = *net.seen/N;
+ char buff[256];
+ sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
+ save_weights(net, buff);
+ }
+ if(get_current_batch(net)%100 == 0){
+ char buff[256];
+ sprintf(buff, "%s/%s.backup",backup_directory,base);
+ save_weights(net, buff);
+ }
+ }
+ char buff[256];
+ sprintf(buff, "%s/%s.weights", backup_directory, base);
+ save_weights(net, buff);
+
+ pthread_join(load_thread, 0);
+ free_data(buffer);
+ free_network(net);
+ free_ptrs((void**)paths, plist->size);
+ free_list(plist);
+ free(base);
+}
+
+void test_tag(char *cfgfile, char *weightfile, char *filename)
+{
+ network net = parse_network_cfg(cfgfile);
+ if(weightfile){
+ load_weights(&net, weightfile);
+ }
+ set_batch_network(&net, 1);
+ srand(2222222);
+ int i = 0;
+ char **names = get_labels("data/tags.txt");
+ clock_t time;
+ int indexes[10];
+ char buff[256];
+ char *input = buff;
+ while(1){
+ if(filename){
+ strncpy(input, filename, 256);
+ }else{
+ printf("Enter Image Path: ");
+ fflush(stdout);
+ input = fgets(input, 256, stdin);
+ if(!input) return;
+ strtok(input, "\n");
+ }
+ image im = load_image_color(input, net.w, net.h);
+ //resize_network(&net, im.w, im.h);
+ printf("%d %d\n", im.w, im.h);
+
+ float *X = im.data;
+ time=clock();
+ float *predictions = network_predict(net, X);
+ top_predictions(net, 10, indexes);
+ printf("%s: Predicted in %f seconds.\n", input, sec(clock()-time));
+ for(i = 0; i < 10; ++i){
+ int index = indexes[i];
+ printf("%.1f%%: %s\n", predictions[index]*100, names[index]);
+ }
+ free_image(im);
+ if (filename) break;
+ }
+}
+
+
+void run_tag(int argc, char **argv)
+{
+ if(argc < 4){
+ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
+ return;
+ }
+
+ char *cfg = argv[3];
+ char *weights = (argc > 4) ? argv[4] : 0;
+ char *filename = (argc > 5) ? argv[5] : 0;
+ if(0==strcmp(argv[2], "train")) train_tag(cfg, weights);
+ else if(0==strcmp(argv[2], "test")) test_tag(cfg, weights, filename);
+}
+
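Assuming the new commands are registered under a tag subcommand in darknet.c (that hunk is not shown here), invocations matching run_tag's argument parsing would look like these hypothetical examples, with the cfg, weights, and image paths made up for illustration:

    ./darknet tag train cfg/tag.cfg
    ./darknet tag test cfg/tag.cfg tag.weights data/dog.jpg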
diff --git a/src/utils.c b/src/utils.c
index ec87a26..398d18a 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -2,6 +2,7 @@
#include <stdlib.h>
#include <string.h>
#include <math.h>
+#include <assert.h>
#include <unistd.h>
#include <float.h>
#include <limits.h>
@@ -137,15 +138,18 @@
char *find_replace(char *str, char *orig, char *rep)
{
static char buffer[4096];
+ static char buffer2[4096];
+ static char buffer3[4096];
char *p;
if(!(p = strstr(str, orig))) // Is 'orig' even in 'str'?
return str;
- strncpy(buffer, str, p-str); // Copy characters from 'str' start to 'orig' st$
- buffer[p-str] = '\0';
+ strncpy(buffer2, str, p-str); // Copy characters from 'str' start to 'orig' st$
+ buffer2[p-str] = '\0';
- sprintf(buffer+(p-str), "%s%s", rep, p+strlen(orig));
+ sprintf(buffer3, "%s%s%s", buffer2, rep, p+strlen(orig));
+ sprintf(buffer, "%s", buffer3);
return buffer;
}
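The staging through buffer2 and buffer3 matters when find_replace is chained: str may itself be the static buffer returned by a previous call, and the old sprintf into buffer+(p-str) then read from and wrote to overlapping memory whenever rep and orig differ in length. Copying into separate buffers keeps every source and destination disjoint. A minimal usage sketch of the chaining pattern, assuming the prototype from utils.h and a made-up path:

    #include <stdio.h>

    char *find_replace(char *str, char *orig, char *rep);   /* from utils.h */

    void label_path_example(void)
    {
        char *path = "data/images/dog.jpg";                 /* hypothetical input */
        char *labels = find_replace(path, "images", "labels");
        labels = find_replace(labels, ".jpg", ".txt");      /* second call reads the static buffer */
        printf("%s\n", labels);                             /* data/labels/dog.txt */
    }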
@@ -174,7 +178,8 @@
void error(const char *s)
{
perror(s);
- exit(0);
+ assert(0);
+ exit(-1);
}
void malloc_error()
@@ -450,6 +455,12 @@
return max_i;
}
+int rand_int(int min, int max)
+{
+ int r = (rand()%(max - min + 1)) + min;
+ return r;
+}
+
// From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
#define TWO_PI 6.2831853071795864769252866
float rand_normal()
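rand_int is inclusive at both ends, so rand_int(0, 9) can return any of the ten values 0 through 9; it uses the usual modulo construction, which carries a slight bias for large ranges but is harmless for most uses. A quick illustrative check, assuming it is linked against utils.c:

    #include <stdio.h>

    int rand_int(int min, int max);   /* declared in utils.h by this patch */

    int main()
    {
        int counts[10] = {0};
        int i;
        for(i = 0; i < 100000; ++i) counts[rand_int(0, 9)] += 1;
        for(i = 0; i < 10; ++i) printf("%d: %d\n", i, counts[i]);
        return 0;
    }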
diff --git a/src/utils.h b/src/utils.h
index 96bd6cf..3af85d3 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -35,6 +35,7 @@
float mse_array(float *a, int n);
float rand_normal();
float rand_uniform(float min, float max);
+int rand_int(int min, int max);
float sum_array(float *a, int n);
float mean_array(float *a, int n);
void mean_arrays(float **a, int n, int els, float *avg);
--
Gitblit v1.10.0