Can validate on imagenet now
14 files modified
1 file added
| | |
| | | CC=gcc |
| | | GPU=1 |
| | | CLBLAS=0 |
| | | |
| | | CC=gcc |
| | | COMMON=-Wall -Wfatal-errors `pkg-config --cflags opencv` -I/usr/local/cuda/include/ |
| | | ifeq ($(GPU), 1) |
| | | COMMON+=-DGPU |
| | | else |
| | | endif |
| | | |
| | | ifeq ($(CLBLAS), 1) |
| | | COMMON+=-DCLBLAS |
| | | LDFLAGS=-lclBLAS |
| | | endif |
| | | |
| | | UNAME = $(shell uname) |
| | | OPTS=-Ofast -flto |
| | | ifeq ($(UNAME), Darwin) |
| | |
| | | else |
| | | OPTS+= -march=native |
| | | ifeq ($(GPU), 1) |
| | | LDFLAGS= -lOpenCL |
| | | LDFLAGS+= -lOpenCL |
| | | endif |
| | | endif |
| | | CFLAGS= $(COMMON) $(OPTS) |
| | |
| | | EXEC=cnn |
| | | OBJDIR=./obj/ |
| | | |
| | | OBJ=network.o image.o cnn.o connected_layer.o maxpool_layer.o activations.o list.o option_list.o parser.o utils.o data.o matrix.o softmax_layer.o mini_blas.o convolutional_layer.o gemm.o normalization_layer.o opencl.o im2col.o col2im.o axpy.o dropout_layer.o crop_layer.o freeweight_layer.o cost_layer.o |
| | | OBJ=network.o network_gpu.o image.o cnn.o connected_layer.o maxpool_layer.o activations.o list.o option_list.o parser.o utils.o data.o matrix.o softmax_layer.o mini_blas.o convolutional_layer.o gemm.o normalization_layer.o opencl.o im2col.o col2im.o axpy.o dropout_layer.o crop_layer.o freeweight_layer.o cost_layer.o |
| | | OBJS = $(addprefix $(OBJDIR), $(OBJ)) |
| | | |
| | | all: $(EXEC) |
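Aside (not part of the diff): GPU=1 and CLBLAS=1 above only append -DGPU and -DCLBLAS to COMMON, so the actual feature selection happens in the C sources via preprocessor guards. A minimal sketch of that pattern, assuming the two training entry points declared later in this diff (the real train_imagenet compiles only the GPU branch, with no CPU fallback):

    #include "network.h"
    #include "data.h"

    /* Sketch, not code from this commit: how the -DGPU flag set in the Makefile
       selects between the OpenCL and CPU training paths. */
    float train_one_chunk(network net, data train, int imgs)
    {
    #ifdef GPU
        return train_network_data_gpu(net, train, imgs);   /* built with GPU=1 */
    #else
        return train_network_data_cpu(net, train, imgs);   /* CPU-only build */
    #endif
    }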
| | |
| | | free_data(train); |
| | | } |
| | | |
| | | void train_assira() |
| | | void train_asirra() |
| | | { |
| | | network net = parse_network_cfg("cfg/assira.cfg"); |
| | | network net = parse_network_cfg("cfg/imagenet.cfg"); |
| | | int imgs = 1000/net.batch+1; |
| | | //imgs = 1; |
| | | srand(2222222); |
| | |
| | | char *labels[] = {"cat","dog"}; |
| | | clock_t time; |
| | | while(1){ |
| | | i += 1000; |
| | | i += 1; |
| | | time=clock(); |
| | | data train = load_data_image_pathfile_random("data/assira/train.list", imgs*net.batch, labels, 2, 256, 256); |
| | | normalize_data_rows(train); |
| | | printf("Loaded: %lf seconds\n", sec(clock()-time)); |
| | | time=clock(); |
| | | float loss = train_network_sgd(net, train, imgs); |
| | | printf("%d: %f, Time: %lf seconds\n", i, loss, sec(clock()-time)); |
| | | float loss = train_network_data_gpu(net, train, imgs); |
| | | printf("%d: %f, Time: %lf seconds\n", i*net.batch*imgs, loss, sec(clock()-time)); |
| | | free_data(train); |
| | | if(i%10000==0){ |
| | | if(i%10==0){ |
| | | char buff[256]; |
| | | sprintf(buff, "cfg/assira_backup_%d.cfg", i); |
| | | sprintf(buff, "cfg/asirra_backup_%d.cfg", i); |
| | | save_network(net, buff); |
| | | } |
| | | //lr *= .99; |
| | |
| | | |
| | | void train_imagenet() |
| | | { |
| | | network net = parse_network_cfg("cfg/imagenet_small_830.cfg"); |
| | | float avg_loss = 1; |
| | | network net = parse_network_cfg("/home/pjreddie/imagenet_backup/imagenet_nin_2680.cfg"); |
| | | printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay); |
| | | int imgs = 1000/net.batch+1; |
| | | srand(6472345); |
| | | srand(time(0)); |
| | | int i = 0; |
| | | char **labels = get_labels("/home/pjreddie/data/imagenet/cls.labels.list"); |
| | | list *plist = get_paths("/data/imagenet/cls.train.list"); |
| | |
| | | i += 1; |
| | | time=clock(); |
| | | data train = load_data_random(imgs*net.batch, paths, plist->size, labels, 1000, 256, 256); |
| | | //translate_data_rows(train, -144); |
| | | normalize_data_rows(train); |
| | | printf("Loaded: %lf seconds\n", sec(clock()-time)); |
| | | time=clock(); |
| | | #ifdef GPU |
| | | float loss = train_network_data_gpu(net, train, imgs); |
| | | printf("%d: %f, %lf seconds, %d images\n", i, loss, sec(clock()-time), i*imgs*net.batch); |
| | | avg_loss = avg_loss*.9 + loss*.1; |
| | | printf("%d: %f, %f avg, %lf seconds, %d images\n", i, loss, avg_loss, sec(clock()-time), i*imgs*net.batch); |
| | | #endif |
| | | free_data(train); |
| | | if(i%10==0){ |
| | | char buff[256]; |
| | | sprintf(buff, "/home/pjreddie/imagenet_backup/imagenet_small_%d.cfg", i); |
| | | sprintf(buff, "/home/pjreddie/imagenet_backup/imagenet_nin_%d.cfg", i); |
| | | save_network(net, buff); |
| | | } |
| | | } |
| | | } |
| | | |
| | | void validate_imagenet(char *filename) |
| | | { |
| | | int i; |
| | | network net = parse_network_cfg(filename); |
| | | srand(time(0)); |
| | | |
| | | char **labels = get_labels("/home/pjreddie/data/imagenet/cls.val.labels.list"); |
| | | char *path = "/home/pjreddie/data/imagenet/cls.val.list"; |
| | | |
| | | clock_t time; |
| | | float avg_acc = 0; |
| | | int splits = 50; |
| | | for(i = 0; i < splits; ++i){ |
| | | time=clock(); |
| | | data val = load_data_image_pathfile_part(path, i, splits, labels, 1000, 256, 256); |
| | | normalize_data_rows(val); |
| | | printf("Loaded: %d images in %lf seconds\n", val.X.rows, sec(clock()-time)); |
| | | time=clock(); |
| | | #ifdef GPU |
| | | float acc = network_accuracy_gpu(net, val); |
| | | avg_acc += acc; |
| | | printf("%d: %f, %f avg, %lf seconds, %d images\n", i, acc, avg_acc/(i+1), sec(clock()-time), val.X.rows); |
| | | #endif |
| | | free_data(val); |
| | | } |
| | | } |
| | | |
| | | void train_imagenet_small() |
| | | { |
| | | network net = parse_network_cfg("cfg/imagenet_small.cfg"); |
| | |
| | | while(1){ |
| | | gets(filename); |
| | | image im = load_image_color(filename, 256, 256); |
| | | normalize_image(im); |
| | | z_normalize_image(im); |
| | | printf("%d %d %d\n", im.h, im.w, im.c); |
| | | float *X = im.data; |
| | | time=clock(); |
| | |
| | | } |
| | | } |
| | | |
| | | void test_visualize() |
| | | void test_visualize(char *filename) |
| | | { |
| | | network net = parse_network_cfg("cfg/imagenet.cfg"); |
| | | network net = parse_network_cfg(filename); |
| | | visualize_network(net); |
| | | cvWaitKey(0); |
| | | } |
| | |
| | | |
| | | int main(int argc, char *argv[]) |
| | | { |
| | | int i; |
| | | int ksize = 3; |
| | | int stride = 4; |
| | | int width_col = 20; |
| | | for(i = 0; i < 10; ++i){ |
| | | int start = (i<ksize)?0:(i-ksize)/stride + 1; |
| | | int start2 = (i-ksize+stride)/stride; |
| | | int end = i/stride + 1; |
| | | end = (width_col < end) ? width_col : end; |
| | | printf("%d: %d vs %d, %d\n", i, start,start2, end); |
| | | } |
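Aside (not part of the diff): the debug loop above appears to spot-check an integer-division identity used when computing im2col output indices. For a non-negative numerator, (i - ksize)/stride + 1 == (i - ksize + stride)/stride, so the branchy start and the closed-form start2 agree whenever i >= ksize. A self-contained sketch of that check:

    #include <assert.h>

    /* Sketch: verify (i - ksize)/stride + 1 == (i - ksize + stride)/stride
       for i >= ksize (non-negative numerator, C integer division). */
    static void check_start_identity(int ksize, int stride, int n)
    {
        int i;
        for(i = ksize; i < n; ++i){
            assert((i - ksize)/stride + 1 == (i - ksize + stride)/stride);
        }
    }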
| | | if(argc != 2){ |
| | | if(argc < 2){ |
| | | fprintf(stderr, "usage: %s <function>\n", argv[0]); |
| | | return 0; |
| | | } |
| | | if(0==strcmp(argv[1], "train")) train_imagenet(); |
| | | else if(0==strcmp(argv[1], "asirra")) train_asirra(); |
| | | else if(0==strcmp(argv[1], "train_small")) train_imagenet_small(); |
| | | else if(0==strcmp(argv[1], "test_correct")) test_gpu_net(); |
| | | else if(0==strcmp(argv[1], "test")) test_imagenet(); |
| | | else if(0==strcmp(argv[1], "visualize")) test_visualize(); |
| | | else if(0==strcmp(argv[1], "visualize")) test_visualize(argv[2]); |
| | | else if(0==strcmp(argv[1], "valid")) validate_imagenet(argv[2]); |
| | | #ifdef GPU |
| | | else if(0==strcmp(argv[1], "test_gpu")) test_gpu_blas(); |
| | | #endif |
| | |
| | | //layer->weight_adapt = calloc(inputs*outputs, sizeof(float)); |
| | | layer->weights = calloc(inputs*outputs, sizeof(float)); |
| | | float scale = 1./inputs; |
| | | scale = .05; |
| | | scale = .01; |
| | | for(i = 0; i < inputs*outputs; ++i) |
| | | layer->weights[i] = scale*2*(rand_uniform()-.5); |
| | | |
| | |
| | | layer->bias_updates = calloc(n, sizeof(float)); |
| | | layer->bias_momentum = calloc(n, sizeof(float)); |
| | | float scale = 1./(size*size*c); |
| | | scale = .05; |
| | | scale = .01; |
| | | for(i = 0; i < c*n*size*size; ++i) layer->filters[i] = scale*2*(rand_uniform()-.5); |
| | | for(i = 0; i < n; ++i){ |
| | | //layer->biases[i] = rand_normal()*scale + scale; |
| | |
| | | void forward_cost_layer_gpu(cost_layer layer, cl_mem input, cl_mem truth) |
| | | { |
| | | if (!truth) return; |
| | | |
| | | |
| | | copy_ongpu(layer.batch*layer.inputs, truth, 1, layer.delta_cl, 1); |
| | | axpy_ongpu(layer.batch*layer.inputs, -1, input, 1, layer.delta_cl, 1); |
| | | cl_read_array(layer.delta_cl, layer.delta, layer.batch*layer.inputs); |
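For reference (a sketch, not part of this commit): the copy_ongpu/axpy_ongpu pair above computes the cost layer's delta as truth minus prediction before reading it back to the host; the CPU equivalent on plain float buffers would be:

    /* delta <- truth, then delta <- delta - input, i.e. delta[i] = truth[i] - input[i];
       called with n = layer.batch*layer.inputs. */
    static void cost_delta_cpu(int n, const float *input, const float *truth, float *delta)
    {
        int i;
        for(i = 0; i < n; ++i) delta[i] = truth[i] - input[i];
    }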
| | |
| | | |
| | | data load_data_image_pathfile_part(char *filename, int part, int total, char **labels, int k, int h, int w) |
| | | { |
| | | clock_t time = clock(); |
| | | list *plist = get_paths(filename); |
| | | char **paths = (char **)list_to_array(plist); |
| | | int start = part*plist->size/total; |
| | |
| | | |
| | | #include "opencl.h" |
| | | #include <math.h> |
| | | //#include <clBLAS.h> |
| | | |
| | | #ifdef CLBLAS |
| | | #include <clBLAS.h> |
| | | #endif |
| | | |
| | | #define STR_HELPER(x) #x |
| | | #define STR(x) STR_HELPER(x) |
| | |
| | | float BETA, |
| | | cl_mem C_gpu, int ldc) |
| | | { |
| | | /* |
| | | cl_setup(); |
| | | cl_command_queue queue = cl.queue; |
| | | cl_event event; |
| | | cl.error = clblasSgemm(clblasRowMajor, TA?clblasTrans:clblasNoTrans, TB?clblasTrans:clblasNoTrans,M, N, K,ALPHA, A_gpu, 0, lda,B_gpu, 0, ldb,BETA, C_gpu, 0, ldc,1, &queue, 0, NULL, &event); |
| | | */ |
| | | |
| | | gemm_ongpu_offset(TA, TB, M, N, K, ALPHA, A_gpu, 0, lda, B_gpu, 0, ldb, BETA, C_gpu, 0, ldc); |
| | | } |
| | | |
| | |
| | | float BETA, |
| | | cl_mem C_gpu, int c_off, int ldc) |
| | | { |
| | | #ifdef CLBLAS |
| | | cl_setup(); |
| | | cl_command_queue queue = cl.queue; |
| | | cl_event event; |
| | | cl.error = clblasSgemm(clblasRowMajor, TA?clblasTrans:clblasNoTrans, TB?clblasTrans:clblasNoTrans,M, N, K,ALPHA, A_gpu, a_off, lda,B_gpu, b_off, ldb,BETA, C_gpu, c_off, ldc,1, &queue, 0, NULL, &event); |
| | | check_error(cl); |
| | | #else |
| | | //printf("gpu: %d %d %d %d %d\n",TA, TB, M, N, K); |
| | | cl_setup(); |
| | | cl_kernel gemm_kernel = get_gemm_kernel(); |
| | |
| | | |
| | | clEnqueueNDRangeKernel(queue, gemm_kernel, 2, 0, global_size, local_size, 0, 0, 0); |
| | | check_error(cl); |
| | | #endif |
| | | } |
| | | |
| | | void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, |
| | |
| | | |
| | | void time_ongpu(int TA, int TB, int m, int k, int n) |
| | | { |
| | | int iter = 128; |
| | | int iter = 10; |
| | | float *a = random_matrix(m,k); |
| | | float *b = random_matrix(k,n); |
| | | |
| | |
| | | for(i = 0; i<iter; ++i){ |
| | | gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n); |
| | | } |
| | | double flop = m*n*(2.*k+3.)*iter; |
| | | double flop = m*n*k*iter; |
| | | double gflop = flop/pow(10., 9); |
| | | end = clock(); |
| | | double seconds = sec(end-start); |
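Aside (not part of the diff): the conventional operation count for a GEMM of this shape is about 2*M*N*K floating-point operations per call (one multiply and one add per inner-product term), so the throughput this benchmark reports is roughly

    GFLOPS ≈ 2*M*N*K*iter / (1e9 * elapsed seconds)

The two flop expressions in the diff, m*n*(2.*k+3.) and m*n*k, sit slightly above and well below that count, respectively.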
| | |
| | | test_gpu_accuracy(0,1,1000,10,100); |
| | | test_gpu_accuracy(1,1,1000,10,100); |
| | | */ |
| | | time_ongpu(0,0,128,1200,4096); |
| | | time_ongpu(0,0,128,1200,4096); |
| | | time_ongpu(0,0,128,1200,4096); |
| | | |
| | | time_ongpu(0,1,128,1200,4096); |
| | | time_ongpu(1,0,1200,4096,128); |
| | | time_ongpu(1,0,4096,1200,128); |
| | | time_ongpu(1,0,1200,128,4096); |
| | | |
| | | test_gpu_accuracy(0,0,131,4093,1199); |
| | | test_gpu_accuracy(0,1,131,4093,1199); |
| | | test_gpu_accuracy(1,0,131,4093,1199); |
| | | test_gpu_accuracy(1,1,131,4093,1199); |
| | | /* |
| | | |
| | | time_ongpu(0,0,1024,1024,1024); |
| | | time_ongpu(0,1,1024,1024,1024); |
| | |
| | | time_ongpu(0,1,128,4096,1200); |
| | | time_ongpu(1,0,128,4096,1200); |
| | | time_ongpu(1,1,128,4096,1200); |
| | | */ |
| | | |
| | | /* |
| | | time_gpu_random_matrix(0,0,1000,1000,100); |
| | |
| | | exit(0); |
| | | } |
| | | if(h && w && (src->height != h || src->width != w)){ |
| | | printf("Resized!\n"); |
| | | //printf("Resized!\n"); |
| | | IplImage *resized = resizeImage(src, h, w, 1); |
| | | cvReleaseImage(&src); |
| | | src = resized; |
| | |
| | | return net; |
| | | } |
| | | |
| | | #ifdef GPU |
| | | |
| | | void forward_network_gpu(network net, cl_mem input, cl_mem truth, int train) |
| | | { |
| | | //printf("start\n"); |
| | | int i; |
| | | for(i = 0; i < net.n; ++i){ |
| | | //clock_t time = clock(); |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | forward_convolutional_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | else if(net.types[i] == COST){ |
| | | cost_layer layer = *(cost_layer *)net.layers[i]; |
| | | forward_cost_layer_gpu(layer, input, truth); |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | forward_connected_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | forward_maxpool_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | forward_softmax_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | //printf("%d %f\n", i, sec(clock()-time)); |
| | | /* |
| | | else if(net.types[i] == CROP){ |
| | | crop_layer layer = *(crop_layer *)net.layers[i]; |
| | | forward_crop_layer(layer, input); |
| | | input = layer.output; |
| | | } |
| | | else if(net.types[i] == NORMALIZATION){ |
| | | normalization_layer layer = *(normalization_layer *)net.layers[i]; |
| | | forward_normalization_layer(layer, input); |
| | | input = layer.output; |
| | | } |
| | | */ |
| | | } |
| | | } |
| | | |
| | | void backward_network_gpu(network net, cl_mem input) |
| | | { |
| | | int i; |
| | | cl_mem prev_input; |
| | | cl_mem prev_delta; |
| | | for(i = net.n-1; i >= 0; --i){ |
| | | //clock_t time = clock(); |
| | | if(i == 0){ |
| | | prev_input = input; |
| | | prev_delta = 0; |
| | | }else{ |
| | | prev_input = get_network_output_cl_layer(net, i-1); |
| | | prev_delta = get_network_delta_cl_layer(net, i-1); |
| | | } |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | backward_convolutional_layer_gpu(layer, prev_delta); |
| | | } |
| | | else if(net.types[i] == COST){ |
| | | cost_layer layer = *(cost_layer *)net.layers[i]; |
| | | backward_cost_layer_gpu(layer, prev_input, prev_delta); |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | backward_connected_layer_gpu(layer, prev_input, prev_delta); |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | backward_maxpool_layer_gpu(layer, prev_delta); |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | backward_softmax_layer_gpu(layer, prev_delta); |
| | | } |
| | | //printf("back: %d %f\n", i, sec(clock()-time)); |
| | | } |
| | | } |
| | | |
| | | void update_network_gpu(network net) |
| | | { |
| | | int i; |
| | | for(i = 0; i < net.n; ++i){ |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | update_convolutional_layer_gpu(layer); |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | update_connected_layer_gpu(layer); |
| | | } |
| | | } |
| | | } |
| | | |
| | | cl_mem get_network_output_cl_layer(network net, int i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | cl_mem get_network_delta_cl_layer(network net, int i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | #endif |
| | | |
| | | void forward_network(network net, float *input, float *truth, int train) |
| | | { |
| | |
| | | } |
| | | |
| | | |
| | | #ifdef GPU |
| | | float train_network_datum_gpu(network net, float *x, float *y) |
| | | { |
| | | int x_size = get_network_input_size(net)*net.batch; |
| | | int y_size = get_network_output_size(net)*net.batch; |
| | | clock_t time = clock(); |
| | | if(!*net.input_cl){ |
| | | *net.input_cl = cl_make_array(x, x_size); |
| | | *net.truth_cl = cl_make_array(y, y_size); |
| | | }else{ |
| | | cl_write_array(*net.input_cl, x, x_size); |
| | | cl_write_array(*net.truth_cl, y, y_size); |
| | | } |
| | | //printf("trans %f\n", sec(clock()-time)); |
| | | time = clock(); |
| | | forward_network_gpu(net, *net.input_cl, *net.truth_cl, 1); |
| | | //printf("forw %f\n", sec(clock()-time)); |
| | | time = clock(); |
| | | backward_network_gpu(net, *net.input_cl); |
| | | //printf("back %f\n", sec(clock()-time)); |
| | | time = clock(); |
| | | float error = get_network_cost(net); |
| | | update_network_gpu(net); |
| | | //printf("updt %f\n", sec(clock()-time)); |
| | | time = clock(); |
| | | return error; |
| | | } |
| | | |
| | | float train_network_sgd_gpu(network net, data d, int n) |
| | | { |
| | | int batch = net.batch; |
| | | float *X = calloc(batch*d.X.cols, sizeof(float)); |
| | | float *y = calloc(batch*d.y.cols, sizeof(float)); |
| | | |
| | | int i; |
| | | float sum = 0; |
| | | for(i = 0; i < n; ++i){ |
| | | get_random_batch(d, batch, X, y); |
| | | float err = train_network_datum_gpu(net, X, y); |
| | | sum += err; |
| | | } |
| | | free(X); |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | float train_network_data_gpu(network net, data d, int n) |
| | | { |
| | | int batch = net.batch; |
| | | float *X = calloc(batch*d.X.cols, sizeof(float)); |
| | | float *y = calloc(batch*d.y.cols, sizeof(float)); |
| | | |
| | | int i; |
| | | float sum = 0; |
| | | for(i = 0; i < n; ++i){ |
| | | get_next_batch(d, batch, i*batch, X, y); |
| | | float err = train_network_datum_gpu(net, X, y); |
| | | sum += err; |
| | | } |
| | | free(X); |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | #endif |
| | | |
| | | |
| | | float train_network_datum(network net, float *x, float *y) |
| | |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | float train_network_batch(network net, data d, int n) |
| | | { |
| | | int i,j; |
| | |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | float train_network_data_cpu(network net, data d, int n) |
| | | { |
| | | int batch = net.batch; |
| | | float *X = calloc(batch*d.X.cols, sizeof(float)); |
| | | float *y = calloc(batch*d.y.cols, sizeof(float)); |
| | | |
| | | int i; |
| | | float sum = 0; |
| | | for(i = 0; i < n; ++i){ |
| | | get_next_batch(d, batch, i*batch, X, y); |
| | | float err = train_network_datum(net, X, y); |
| | | sum += err; |
| | | } |
| | | free(X); |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | void train_network(network net, data d) |
| | | { |
| | |
| | | } |
| | | } |
| | | |
| | | |
| | | float *network_predict(network net, float *input) |
| | | { |
| | | forward_network(net, input, 0, 0); |
| | |
| | | int i,j,b; |
| | | int k = get_network_output_size(net); |
| | | matrix pred = make_matrix(test.X.rows, k); |
| | | float *X = calloc(net.batch*test.X.rows, sizeof(float)); |
| | | float *X = calloc(net.batch*test.X.cols, sizeof(float)); |
| | | for(i = 0; i < test.X.rows; i += net.batch){ |
| | | for(b = 0; b < net.batch; ++b){ |
| | | if(i+b == test.X.rows) break; |
| | |
| | | cl_mem get_network_delta_cl_layer(network net, int i); |
| | | float train_network_sgd_gpu(network net, data d, int n); |
| | | float train_network_data_gpu(network net, data d, int n); |
| | | float *network_predict_gpu(network net, float *input); |
| | | float network_accuracy_gpu(network net, data d); |
| | | #endif |
| | | |
| | | network make_network(int n, int batch); |
| | |
| | | void update_network(network net); |
| | | float train_network_sgd(network net, data d, int n); |
| | | float train_network_batch(network net, data d, int n); |
| | | float train_network_data_cpu(network net, data d, int n); |
| | | void train_network(network net, data d); |
| | | matrix network_predict_data(network net, data test); |
| | | float *network_predict(network net, float *input); |
| New file |
| | |
| | | #include <stdio.h> |
| | | #include <time.h> |
| | | |
| | | #include "network.h" |
| | | #include "image.h" |
| | | #include "data.h" |
| | | #include "utils.h" |
| | | |
| | | #include "crop_layer.h" |
| | | #include "connected_layer.h" |
| | | #include "convolutional_layer.h" |
| | | #include "maxpool_layer.h" |
| | | #include "cost_layer.h" |
| | | #include "normalization_layer.h" |
| | | #include "freeweight_layer.h" |
| | | #include "softmax_layer.h" |
| | | #include "dropout_layer.h" |
| | | |
| | | #ifdef GPU |
| | | |
| | | void forward_network_gpu(network net, cl_mem input, cl_mem truth, int train) |
| | | { |
| | | //printf("start\n"); |
| | | int i; |
| | | for(i = 0; i < net.n; ++i){ |
| | | //clock_t time = clock(); |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | forward_convolutional_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | else if(net.types[i] == COST){ |
| | | cost_layer layer = *(cost_layer *)net.layers[i]; |
| | | forward_cost_layer_gpu(layer, input, truth); |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | forward_connected_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | forward_maxpool_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | forward_softmax_layer_gpu(layer, input); |
| | | input = layer.output_cl; |
| | | } |
| | | //printf("%d %f\n", i, sec(clock()-time)); |
| | | /* |
| | | else if(net.types[i] == CROP){ |
| | | crop_layer layer = *(crop_layer *)net.layers[i]; |
| | | forward_crop_layer(layer, input); |
| | | input = layer.output; |
| | | } |
| | | else if(net.types[i] == NORMALIZATION){ |
| | | normalization_layer layer = *(normalization_layer *)net.layers[i]; |
| | | forward_normalization_layer(layer, input); |
| | | input = layer.output; |
| | | } |
| | | */ |
| | | } |
| | | } |
| | | |
| | | void backward_network_gpu(network net, cl_mem input) |
| | | { |
| | | int i; |
| | | cl_mem prev_input; |
| | | cl_mem prev_delta; |
| | | for(i = net.n-1; i >= 0; --i){ |
| | | //clock_t time = clock(); |
| | | if(i == 0){ |
| | | prev_input = input; |
| | | prev_delta = 0; |
| | | }else{ |
| | | prev_input = get_network_output_cl_layer(net, i-1); |
| | | prev_delta = get_network_delta_cl_layer(net, i-1); |
| | | } |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | backward_convolutional_layer_gpu(layer, prev_delta); |
| | | } |
| | | else if(net.types[i] == COST){ |
| | | cost_layer layer = *(cost_layer *)net.layers[i]; |
| | | backward_cost_layer_gpu(layer, prev_input, prev_delta); |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | backward_connected_layer_gpu(layer, prev_input, prev_delta); |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | backward_maxpool_layer_gpu(layer, prev_delta); |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | backward_softmax_layer_gpu(layer, prev_delta); |
| | | } |
| | | //printf("back: %d %f\n", i, sec(clock()-time)); |
| | | } |
| | | } |
| | | |
| | | void update_network_gpu(network net) |
| | | { |
| | | int i; |
| | | for(i = 0; i < net.n; ++i){ |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | update_convolutional_layer_gpu(layer); |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | update_connected_layer_gpu(layer); |
| | | } |
| | | } |
| | | } |
| | | |
| | | cl_mem get_network_output_cl_layer(network net, int i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | return layer.output_cl; |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | cl_mem get_network_delta_cl_layer(network net, int i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | return layer.delta_cl; |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | float train_network_datum_gpu(network net, float *x, float *y) |
| | | { |
| | | int x_size = get_network_input_size(net)*net.batch; |
| | | int y_size = get_network_output_size(net)*net.batch; |
| | | //clock_t time = clock(); |
| | | if(!*net.input_cl){ |
| | | *net.input_cl = cl_make_array(x, x_size); |
| | | *net.truth_cl = cl_make_array(y, y_size); |
| | | }else{ |
| | | cl_write_array(*net.input_cl, x, x_size); |
| | | cl_write_array(*net.truth_cl, y, y_size); |
| | | } |
| | | //printf("trans %f\n", sec(clock()-time)); |
| | | //time = clock(); |
| | | forward_network_gpu(net, *net.input_cl, *net.truth_cl, 1); |
| | | //printf("forw %f\n", sec(clock()-time)); |
| | | //time = clock(); |
| | | backward_network_gpu(net, *net.input_cl); |
| | | //printf("back %f\n", sec(clock()-time)); |
| | | //time = clock(); |
| | | update_network_gpu(net); |
| | | float error = get_network_cost(net); |
| | | //printf("updt %f\n", sec(clock()-time)); |
| | | //time = clock(); |
| | | return error; |
| | | } |
| | | |
| | | float train_network_sgd_gpu(network net, data d, int n) |
| | | { |
| | | int batch = net.batch; |
| | | float *X = calloc(batch*d.X.cols, sizeof(float)); |
| | | float *y = calloc(batch*d.y.cols, sizeof(float)); |
| | | |
| | | int i; |
| | | float sum = 0; |
| | | for(i = 0; i < n; ++i){ |
| | | get_random_batch(d, batch, X, y); |
| | | float err = train_network_datum_gpu(net, X, y); |
| | | sum += err; |
| | | } |
| | | free(X); |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | float train_network_data_gpu(network net, data d, int n) |
| | | { |
| | | int batch = net.batch; |
| | | float *X = calloc(batch*d.X.cols, sizeof(float)); |
| | | float *y = calloc(batch*d.y.cols, sizeof(float)); |
| | | |
| | | int i; |
| | | float sum = 0; |
| | | for(i = 0; i < n; ++i){ |
| | | get_next_batch(d, batch, i*batch, X, y); |
| | | float err = train_network_datum_gpu(net, X, y); |
| | | sum += err; |
| | | } |
| | | free(X); |
| | | free(y); |
| | | return (float)sum/(n*batch); |
| | | } |
| | | |
| | | float *get_network_output_layer_gpu(network net, int i) |
| | | { |
| | | if(net.types[i] == CONVOLUTIONAL){ |
| | | convolutional_layer layer = *(convolutional_layer *)net.layers[i]; |
| | | return layer.output; |
| | | } |
| | | else if(net.types[i] == CONNECTED){ |
| | | connected_layer layer = *(connected_layer *)net.layers[i]; |
| | | return layer.output; |
| | | } |
| | | else if(net.types[i] == MAXPOOL){ |
| | | maxpool_layer layer = *(maxpool_layer *)net.layers[i]; |
| | | return layer.output; |
| | | } |
| | | else if(net.types[i] == SOFTMAX){ |
| | | softmax_layer layer = *(softmax_layer *)net.layers[i]; |
| | | pull_softmax_layer_output(layer); |
| | | return layer.output; |
| | | } |
| | | return 0; |
| | | } |
| | | |
| | | float *get_network_output_gpu(network net) |
| | | { |
| | | int i; |
| | | for(i = net.n-1; i > 0; --i) if(net.types[i] != COST) break; |
| | | return get_network_output_layer_gpu(net, i); |
| | | } |
| | | |
| | | float *network_predict_gpu(network net, float *input) |
| | | { |
| | | |
| | | int size = get_network_input_size(net) * net.batch; |
| | | cl_mem input_cl = cl_make_array(input, size); |
| | | forward_network_gpu(net, input_cl, 0, 0); |
| | | float *out = get_network_output_gpu(net); |
| | | clReleaseMemObject(input_cl); |
| | | return out; |
| | | } |
| | | |
| | | matrix network_predict_data_gpu(network net, data test) |
| | | { |
| | | int i,j,b; |
| | | int k = get_network_output_size(net); |
| | | matrix pred = make_matrix(test.X.rows, k); |
| | | float *X = calloc(net.batch*test.X.cols, sizeof(float)); |
| | | for(i = 0; i < test.X.rows; i += net.batch){ |
| | | for(b = 0; b < net.batch; ++b){ |
| | | if(i+b == test.X.rows) break; |
| | | memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float)); |
| | | } |
| | | float *out = network_predict_gpu(net, X); |
| | | for(b = 0; b < net.batch; ++b){ |
| | | if(i+b == test.X.rows) break; |
| | | for(j = 0; j < k; ++j){ |
| | | pred.vals[i+b][j] = out[j+b*k]; |
| | | } |
| | | } |
| | | } |
| | | free(X); |
| | | return pred; |
| | | } |
| | | float network_accuracy_gpu(network net, data d) |
| | | { |
| | | matrix guess = network_predict_data_gpu(net, d); |
| | | float acc = matrix_accuracy(d.y, guess); |
| | | free_matrix(guess); |
| | | return acc; |
| | | } |
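matrix_accuracy is not shown in this diff; for reference, a hypothetical top-1 accuracy over the returned prediction matrix could look like the sketch below (the helper and its name are assumptions, not code from this commit):

    /* Hypothetical sketch: fraction of rows where the argmax of the prediction
       matches the argmax of the one-hot truth row. */
    float top1_accuracy(matrix truth, matrix guess)
    {
        int i, j, correct = 0;
        for(i = 0; i < truth.rows; ++i){
            int t = 0, g = 0;
            for(j = 0; j < truth.cols; ++j){
                if(truth.vals[i][j] > truth.vals[i][t]) t = j;
                if(guess.vals[i][j] > guess.vals[i][g]) g = j;
            }
            if(t == g) ++correct;
        }
        return (float)correct / truth.rows;
    }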
| | | |
| | | |
| | | |
| | | #endif |
| | |
| | | #include <string.h> |
| | | #include <time.h> |
| | | #include <unistd.h> |
| | | //#include <clBLAS.h> |
| | | |
| | | #ifdef CLBLAS |
| | | #include <clBLAS.h> |
| | | #endif |
| | | |
| | | #include "opencl.h" |
| | | #include "utils.h" |
| | |
| | | |
| | | } |
| | | int index = getpid()%num_devices; |
| | | index = 1; |
| | | index = 0; |
| | | printf("%d rand, %d devices, %d index\n", getpid(), num_devices, index); |
| | | info.device = devices[index]; |
| | | fprintf(stderr, "Found %d device(s)\n", num_devices); |
| | |
| | | check_error(info); |
| | | info.queue = clCreateCommandQueue(info.context, info.device, 0, &info.error); |
| | | check_error(info); |
| | | for(i = 0; i < NUM_QUEUES; ++i){ |
| | | info.queues[i] = clCreateCommandQueue(info.context, info.device, 0, &info.error); |
| | | check_error(info); |
| | | } |
| | | //info.error = clblasSetup(); |
| | | #ifdef CLBLAS |
| | | info.error = clblasSetup(); |
| | | #endif |
| | | check_error(info); |
| | | info.initialized = 1; |
| | | return info; |
| | | } |
| | | |
| | | void wait_for_queues() |
| | | { |
| | | int i; |
| | | for(i = 0; i < NUM_QUEUES; ++i){ |
| | | clFinish(cl.queues[i]); |
| | | } |
| | | } |
| | | |
| | | cl_program cl_fprog(char *filename, char *options, cl_info info) |
| | | { |
| | | size_t srcsize; |
| | |
| | | #include <CL/cl.h> |
| | | #endif |
| | | |
| | | #define NUM_QUEUES 8 |
| | | |
| | | typedef struct { |
| | | int initialized; |
| | |
| | | cl_device_id device; |
| | | cl_context context; |
| | | cl_command_queue queue; |
| | | cl_command_queue queues[NUM_QUEUES]; |
| | | }cl_info; |
| | | |
| | | extern cl_info cl; |
| | | |
| | | void cl_setup(); |
| | | void wait_for_queues(); |
| | | void check_error(cl_info info); |
| | | cl_kernel get_kernel(char *filename, char *kernelname, char *options); |
| | | void cl_read_array(cl_mem mem, float *x, int n); |
| | |
| | | } |
| | | |
| | | #ifdef GPU |
| | | |
| | | void pull_softmax_layer_output(const softmax_layer layer) |
| | | { |
| | | cl_read_array(layer.output_cl, layer.output, layer.inputs*layer.batch); |
| | | } |
| | | |
| | | cl_kernel get_softmax_forward_kernel() |
| | | { |
| | | static int init = 0; |
| | |
| | | |
| | | clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0); |
| | | check_error(cl); |
| | | |
| | | /* |
| | | cl_read_array(layer.output_cl, layer.output, layer.inputs*layer.batch); |
| | | int z; |
| | | for(z = 0; z < layer.inputs*layer.batch; ++z) printf("%f,",layer.output[z]); |
| | | */ |
| | | } |
| | | |
| | | void backward_softmax_layer_gpu(const softmax_layer layer, cl_mem delta) |
| | |
| | | void backward_softmax_layer(const softmax_layer layer, float *delta); |
| | | |
| | | #ifdef GPU |
| | | void pull_softmax_layer_output(const softmax_layer layer); |
| | | void forward_softmax_layer_gpu(const softmax_layer layer, cl_mem input); |
| | | void backward_softmax_layer_gpu(const softmax_layer layer, cl_mem delta); |
| | | #endif |