Joseph Redmon
2015-02-07 2f62fe33c913cd9484fe7f2486889d12292c66e0
src/parser.c
@@ -86,6 +86,7 @@
        net->learning_rate = learning_rate;
        net->momentum = momentum;
        net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
    }else{
        learning_rate = option_find_float_quiet(options, "learning_rate", net->learning_rate);
        momentum = option_find_float_quiet(options, "momentum", net->momentum);
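The same one-line addition recurs in the parse_* functions throughout this diff: when layer 0 initializes the network, a "seen" key is read from the cfg so the count of images already trained on survives a save/reload cycle. As a rough sketch of what a keyed integer lookup with a default does (the kv struct and lookup_int below are hypothetical stand-ins, not darknet's option list or option_find_int):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical key/value pair; the repository's real option list differs. */
typedef struct { const char *key; const char *val; } kv;

/* Return the integer stored under key, or def if the key is absent. */
static int lookup_int(const kv *opts, int n, const char *key, int def)
{
    for(int i = 0; i < n; ++i){
        if(strcmp(opts[i].key, key) == 0) return atoi(opts[i].val);
    }
    return def;
}

int main(void)
{
    kv opts[] = { {"batch", "128"}, {"momentum", "0.9"} };
    /* "seen" is absent, so the default 0 comes back, mirroring option_find_int(options, "seen", 0). */
    printf("seen = %d\n", lookup_int(opts, 2, "seen", 0));
    return 0;
}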
@@ -102,7 +103,7 @@
    parse_data(weights, layer->filters, c*n*size*size);
    parse_data(biases, layer->biases, n);
    #ifdef GPU
-    push_convolutional_layer(*layer);
+    if(weights || biases) push_convolutional_layer(*layer);
    #endif
    option_unused(options);
    return layer;
@@ -136,7 +137,7 @@
    parse_data(biases, layer->biases, output);
    parse_data(weights, layer->weights, input*output);
    #ifdef GPU
-    push_connected_layer(*layer);
+    if(weights || biases) push_connected_layer(*layer);
    #endif
    option_unused(options);
    return layer;
@@ -148,6 +149,7 @@
    if(count == 0){
        input = option_find_int(options, "input",1);
        net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
    }else{
        input =  get_network_output_size_layer(*net, count-1);
    }
@@ -162,6 +164,7 @@
    if(count == 0){
        input = option_find_int(options, "input",1);
        net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
    }else{
        input =  get_network_output_size_layer(*net, count-1);
    }
@@ -190,6 +193,7 @@
        net->learning_rate = learning_rate;
        net->momentum = momentum;
        net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
    }else{
        image m =  get_network_image_layer(*net, count-1);
        h = m.h;
@@ -212,6 +216,7 @@
        w = option_find_int(options, "width",1);
        c = option_find_int(options, "channels",1);
        net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
    }else{
        image m =  get_network_image_layer(*net, count-1);
        h = m.h;
@@ -224,6 +229,7 @@
    return layer;
}
+/*
freeweight_layer *parse_freeweight(list *options, network *net, int count)
{
    int input;
@@ -237,6 +243,7 @@
    option_unused(options);
    return layer;
}
+*/
dropout_layer *parse_dropout(list *options, network *net, int count)
{
@@ -251,6 +258,7 @@
        net->learning_rate = learning_rate;
        net->momentum = momentum;
        net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
    }else{
        input =  get_network_output_size_layer(*net, count-1);
    }
@@ -271,6 +279,7 @@
        w = option_find_int(options, "width",1);
        c = option_find_int(options, "channels",1);
        net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
    }else{
        image m =  get_network_image_layer(*net, count-1);
        h = m.h;
@@ -326,9 +335,10 @@
            net.types[count] = DROPOUT;
            net.layers[count] = layer;
        }else if(is_freeweight(s)){
-            freeweight_layer *layer = parse_freeweight(options, &net, count);
-            net.types[count] = FREEWEIGHT;
-            net.layers[count] = layer;
+            //freeweight_layer *layer = parse_freeweight(options, &net, count);
+            //net.types[count] = FREEWEIGHT;
+            //net.layers[count] = layer;
+            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }else{
            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }
@@ -387,8 +397,8 @@
int read_option(char *s, list *options)
{
-    int i;
-    int len = strlen(s);
+    size_t i;
+    size_t len = strlen(s);
    char *val = 0;
    for(i = 0; i < len; ++i){
        if(s[i] == '='){
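read_option now indexes the string with size_t rather than int, matching the return type of strlen() and silencing sign-compare warnings. A standalone sketch of the same key=value scan, illustrative only and not the full read_option:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *s = "momentum=0.9";
    size_t len = strlen(s);            /* strlen returns size_t */
    for(size_t i = 0; i < len; ++i){   /* matching index type avoids the signed/unsigned mismatch */
        if(s[i] == '='){
            printf("key is %zu chars, value starts at index %zu\n", i, i + 1);
            break;
        }
    }
    return 0;
}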
@@ -440,6 +450,9 @@
void print_convolutional_cfg(FILE *fp, convolutional_layer *l, network net, int count)
{
+    #ifdef GPU
+    if(gpu_index >= 0)  pull_convolutional_layer(*l);
+    #endif
    int i;
    fprintf(fp, "[convolutional]\n");
    if(count == 0) {
@@ -449,8 +462,9 @@
                "channels=%d\n"
                "learning_rate=%g\n"
                "momentum=%g\n"
                "decay=%g\n",
                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay);
                "decay=%g\n"
                "seen=%d\n",
                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay, net.seen);
    } else {
        if(l->learning_rate != net.learning_rate)
            fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -494,6 +508,9 @@
void print_connected_cfg(FILE *fp, connected_layer *l, network net, int count)
{
+    #ifdef GPU
+    if(gpu_index >= 0) pull_connected_layer(*l);
+    #endif
    int i;
    fprintf(fp, "[connected]\n");
    if(count == 0){
@@ -501,8 +518,9 @@
                "input=%d\n"
                "learning_rate=%g\n"
                "momentum=%g\n"
                "decay=%g\n",
                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay);
                "decay=%g\n"
                "seen=%d\n",
                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay, net.seen);
    } else {
        if(l->learning_rate != net.learning_rate)
            fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -533,8 +551,9 @@
                "channels=%d\n"
                "learning_rate=%g\n"
                "momentum=%g\n"
                "decay=%g\n",
                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay);
                "decay=%g\n"
                "seen=%d\n",
                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay, net.seen);
    }
    fprintf(fp, "crop_height=%d\ncrop_width=%d\nflip=%d\n\n", l->crop_height, l->crop_width, l->flip);
}
@@ -578,6 +597,82 @@
    fprintf(fp, "\n");
}
+void save_weights(network net, char *filename)
+{
+    printf("Saving weights to %s\n", filename);
+    FILE *fp = fopen(filename, "w");
+    if(!fp) file_error(filename);
+    fwrite(&net.learning_rate, sizeof(float), 1, fp);
+    fwrite(&net.momentum, sizeof(float), 1, fp);
+    fwrite(&net.decay, sizeof(float), 1, fp);
+    fwrite(&net.seen, sizeof(int), 1, fp);
+    int i;
+    for(i = 0; i < net.n; ++i){
+        if(net.types[i] == CONVOLUTIONAL){
+            convolutional_layer layer = *(convolutional_layer *) net.layers[i];
+            #ifdef GPU
+            if(gpu_index >= 0){
+                pull_convolutional_layer(layer);
+            }
+            #endif
+            int num = layer.n*layer.c*layer.size*layer.size;
+            fwrite(layer.biases, sizeof(float), layer.n, fp);
+            fwrite(layer.filters, sizeof(float), num, fp);
+        }
+        if(net.types[i] == CONNECTED){
+            connected_layer layer = *(connected_layer *) net.layers[i];
+            #ifdef GPU
+            if(gpu_index >= 0){
+                pull_connected_layer(layer);
+            }
+            #endif
+            fwrite(layer.biases, sizeof(float), layer.outputs, fp);
+            fwrite(layer.weights, sizeof(float), layer.outputs*layer.inputs, fp);
+        }
+    }
+    fclose(fp);
+}
+void load_weights(network *net, char *filename)
+{
+    printf("Loading weights from %s\n", filename);
+    FILE *fp = fopen(filename, "r");
+    if(!fp) file_error(filename);
+    fread(&net->learning_rate, sizeof(float), 1, fp);
+    fread(&net->momentum, sizeof(float), 1, fp);
+    fread(&net->decay, sizeof(float), 1, fp);
+    fread(&net->seen, sizeof(int), 1, fp);
+    set_learning_network(net, net->learning_rate, net->momentum, net->decay);
+    int i;
+    for(i = 0; i < net->n; ++i){
+        if(net->types[i] == CONVOLUTIONAL){
+            convolutional_layer layer = *(convolutional_layer *) net->layers[i];
+            int num = layer.n*layer.c*layer.size*layer.size;
+            fread(layer.biases, sizeof(float), layer.n, fp);
+            fread(layer.filters, sizeof(float), num, fp);
+            #ifdef GPU
+            if(gpu_index >= 0){
+                push_convolutional_layer(layer);
+            }
+            #endif
+        }
+        if(net->types[i] == CONNECTED){
+            connected_layer layer = *(connected_layer *) net->layers[i];
+            fread(layer.biases, sizeof(float), layer.outputs, fp);
+            fread(layer.weights, sizeof(float), layer.outputs*layer.inputs, fp);
+            #ifdef GPU
+            if(gpu_index >= 0){
+                push_connected_layer(layer);
+            }
+            #endif
+        }
+    }
+    fclose(fp);
+}
void save_network(network net, char *filename)
{
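The new save_weights/load_weights pair fixes the on-disk layout through the fwrite calls above: a header of three floats (learning_rate, momentum, decay) and one int (seen), followed by biases then filters/weights for each convolutional and connected layer in network order. Below is a minimal sketch of a header dumper for such a file; the standalone program, its name, and the assumption that the file is read on a machine with the same float/int width and endianness as the one that wrote it (the values are raw fwrite()s of in-memory fields) are illustrative additions, not repository code:

#include <stdio.h>

/* Print the header that save_weights() writes at the start of a weight file. */
int main(int argc, char **argv)
{
    if(argc < 2){
        fprintf(stderr, "usage: %s weights_file\n", argv[0]);
        return 1;
    }
    FILE *fp = fopen(argv[1], "rb");
    if(!fp){ perror(argv[1]); return 1; }

    float learning_rate, momentum, decay;
    int seen;
    if(fread(&learning_rate, sizeof(float), 1, fp) != 1 ||
       fread(&momentum,      sizeof(float), 1, fp) != 1 ||
       fread(&decay,         sizeof(float), 1, fp) != 1 ||
       fread(&seen,          sizeof(int),   1, fp) != 1){
        fprintf(stderr, "file too short for a weight header\n");
        fclose(fp);
        return 1;
    }
    printf("learning_rate=%g momentum=%g decay=%g seen=%d\n",
           learning_rate, momentum, decay, seen);
    fclose(fp);
    return 0;
}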