Joseph Redmon
2015-01-27 153705226d8ca746478b69eeac9bc854766daa11
src/parser.c
@@ -86,6 +86,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         learning_rate = option_find_float_quiet(options, "learning_rate", net->learning_rate);
         momentum = option_find_float_quiet(options, "momentum", net->momentum);
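
Editor's note: the same one-line change recurs in the parse_* hunks below; when the first cfg section is read, a new "seen" key (images already trained on) is loaded into net->seen, defaulting to 0 when the key is absent. As a rough illustration of the lookup-with-default semantics relied on here, a minimal stand-alone sketch follows; kvpair and the kv_* names are hypothetical, not darknet's option_list API.

/* Minimal sketch (not darknet's option_list.c): a key/value lookup with an
 * integer default, mirroring how option_find_int(options, "seen", 0) is used
 * in the hunks above. kvpair and the kv_* names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { const char *key; const char *val; } kvpair;

/* Return the value stored under `key`, or NULL when it is absent. */
static const char *kv_find(kvpair *opts, int n, const char *key)
{
    for(int i = 0; i < n; ++i){
        if(strcmp(opts[i].key, key) == 0) return opts[i].val;
    }
    return NULL;
}

/* Parse the value as an int, falling back to `def` when the key is missing. */
static int kv_find_int(kvpair *opts, int n, const char *key, int def)
{
    const char *v = kv_find(opts, n, key);
    return v ? atoi(v) : def;
}

int main(void)
{
    kvpair opts[] = { {"batch", "128"}, {"seen", "6400"} };
    printf("seen=%d\n", kv_find_int(opts, 2, "seen", 0));   /* prints 6400 */
    printf("input=%d\n", kv_find_int(opts, 2, "input", 1)); /* key absent: 1 */
    return 0;
}
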
@@ -148,6 +149,7 @@
     if(count == 0){
         input = option_find_int(options, "input",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -162,6 +164,7 @@
     if(count == 0){
         input = option_find_int(options, "input",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -190,6 +193,7 @@
         net->learning_rate = learning_rate;
         net->momentum = momentum;
         net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -212,6 +216,7 @@
         w = option_find_int(options, "width",1);
         c = option_find_int(options, "channels",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -224,6 +229,7 @@
     return layer;
 }
+/*
 freeweight_layer *parse_freeweight(list *options, network *net, int count)
 {
     int input;
@@ -237,6 +243,7 @@
     option_unused(options);
     return layer;
 }
+*/
 dropout_layer *parse_dropout(list *options, network *net, int count)
 {
@@ -245,6 +252,13 @@
     if(count == 0){
         net->batch = option_find_int(options, "batch",1);
         input = option_find_int(options, "input",1);
+        float learning_rate = option_find_float(options, "learning_rate", .001);
+        float momentum = option_find_float(options, "momentum", .9);
+        float decay = option_find_float(options, "decay", .0001);
+        net->learning_rate = learning_rate;
+        net->momentum = momentum;
+        net->decay = decay;
+        net->seen = option_find_int(options, "seen",0);
     }else{
         input =  get_network_output_size_layer(*net, count-1);
     }
@@ -265,6 +279,7 @@
         w = option_find_int(options, "width",1);
         c = option_find_int(options, "channels",1);
         net->batch = option_find_int(options, "batch",1);
+        net->seen = option_find_int(options, "seen",0);
     }else{
         image m =  get_network_image_layer(*net, count-1);
         h = m.h;
@@ -320,9 +335,10 @@
             net.types[count] = DROPOUT;
             net.layers[count] = layer;
         }else if(is_freeweight(s)){
-            freeweight_layer *layer = parse_freeweight(options, &net, count);
-            net.types[count] = FREEWEIGHT;
-            net.layers[count] = layer;
+            //freeweight_layer *layer = parse_freeweight(options, &net, count);
+            //net.types[count] = FREEWEIGHT;
+            //net.layers[count] = layer;
+            fprintf(stderr, "Type not recognized: %s\n", s->type);
         }else{
             fprintf(stderr, "Type not recognized: %s\n", s->type);
         }
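
Editor's note: for context on the dispatch edited here, parse_network_cfg walks the cfg sections and picks a parser from each section's bracketed type string; with the freeweight branch commented out, such sections now fall through to the "Type not recognized" warning. A rough stand-alone sketch of that kind of string dispatch (the section struct and enum are simplified stand-ins, not parser.c's types):

/* Sketch of section-name dispatch in the style of parse_network_cfg();
 * the section struct and LAYER_TYPE enum here are simplified stand-ins. */
#include <stdio.h>
#include <string.h>

typedef struct { const char *type; } section;
typedef enum { CONVOLUTIONAL, CONNECTED, DROPOUT, UNKNOWN } LAYER_TYPE;

static LAYER_TYPE dispatch(section *s)
{
    if(strcmp(s->type, "[convolutional]") == 0) return CONVOLUTIONAL;
    if(strcmp(s->type, "[connected]") == 0)     return CONNECTED;
    if(strcmp(s->type, "[dropout]") == 0)       return DROPOUT;
    fprintf(stderr, "Type not recognized: %s\n", s->type);
    return UNKNOWN;
}

int main(void)
{
    section a = {"[dropout]"}, b = {"[freeweight]"};
    printf("%d %d\n", dispatch(&a), dispatch(&b));  /* 2, then warning and 3 */
    return 0;
}
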
@@ -381,8 +397,8 @@
 int read_option(char *s, list *options)
 {
-    int i;
-    int len = strlen(s);
+    size_t i;
+    size_t len = strlen(s);
     char *val = 0;
     for(i = 0; i < len; ++i){
         if(s[i] == '='){
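
Editor's note: this hunk switches the loop index and length from int to size_t, matching strlen's return type and avoiding a signed/unsigned comparison. A stand-alone sketch of the same "key=value" split with size_t indices (illustrative only, not the parser's read_option):

/* Split `s` in place at the first '='; returns 0 if no '=' is found.
 * strlen() returns size_t, so the index is size_t as well. */
#include <stdio.h>
#include <string.h>

static int split_option(char *s, char **key, char **val)
{
    size_t len = strlen(s);
    for(size_t i = 0; i < len; ++i){
        if(s[i] == '='){
            s[i] = '\0';
            *key = s;
            *val = s + i + 1;
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    char line[] = "learning_rate=0.001";
    char *key, *val;
    if(split_option(line, &key, &val)) printf("%s -> %s\n", key, val);
    return 0;
}
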
@@ -434,6 +450,9 @@
 void print_convolutional_cfg(FILE *fp, convolutional_layer *l, network net, int count)
 {
+    #ifdef GPU
+    if(gpu_index >= 0)  pull_convolutional_layer(*l);
+    #endif
     int i;
     fprintf(fp, "[convolutional]\n");
     if(count == 0) {
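
Editor's note: the print_*_cfg functions now pull a layer's parameters back from the GPU before serializing, guarded at compile time (#ifdef GPU) and at run time (gpu_index >= 0). A minimal sketch of that guard pattern follows; sync_layer_to_host() is a hypothetical stand-in for darknet's pull_*_layer calls.

/* Guard-pattern sketch: device-to-host copy before writing parameters out,
 * compiled only with GPU support and taken only when a GPU is in use.
 * sync_layer_to_host() is hypothetical. */
#include <stdio.h>

int gpu_index = -1;   /* -1 means "run on CPU", as in darknet */

#ifdef GPU
void sync_layer_to_host(void *layer);   /* hypothetical device->host copy */
#endif

void save_layer(FILE *fp, void *layer, float learning_rate)
{
    #ifdef GPU
    if(gpu_index >= 0) sync_layer_to_host(layer);
    #endif
    fprintf(fp, "learning_rate=%g\n", learning_rate);
}

int main(void)
{
    save_layer(stdout, NULL, .001);
    return 0;
}
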
@@ -443,8 +462,9 @@
                "channels=%d\n"
                "learning_rate=%g\n"
                "momentum=%g\n"
                "decay=%g\n",
                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay);
                "decay=%g\n"
                "seen=%d\n",
                l->batch,l->h, l->w, l->c, l->learning_rate, l->momentum, l->decay, net.seen);
    } else {
        if(l->learning_rate != net.learning_rate)
            fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -488,6 +508,9 @@
 void print_connected_cfg(FILE *fp, connected_layer *l, network net, int count)
 {
+    #ifdef GPU
+    if(gpu_index >= 0) pull_connected_layer(*l);
+    #endif
     int i;
     fprintf(fp, "[connected]\n");
     if(count == 0){
@@ -495,8 +518,9 @@
                "input=%d\n"
                "learning_rate=%g\n"
                "momentum=%g\n"
                "decay=%g\n",
                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay);
                "decay=%g\n"
                "seen=%d\n",
                l->batch, l->inputs, l->learning_rate, l->momentum, l->decay, net.seen);
    } else {
        if(l->learning_rate != net.learning_rate)
            fprintf(fp, "learning_rate=%g\n", l->learning_rate);
@@ -527,8 +551,9 @@
                "channels=%d\n"
                "learning_rate=%g\n"
                "momentum=%g\n"
                "decay=%g\n",
                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay);
                "decay=%g\n"
                "seen=%d\n",
                l->batch,l->h, l->w, l->c, net.learning_rate, net.momentum, net.decay, net.seen);
    }
    fprintf(fp, "crop_height=%d\ncrop_width=%d\nflip=%d\n\n", l->crop_height, l->crop_width, l->flip);
}
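
Editor's note: taken together, the print_*_cfg hunks write seen=%d into the first section of a saved cfg and the parse_* hunks read it back with a default of 0, so the count of images already trained on survives a save/load cycle. A stand-alone round-trip sketch (buffer handling and values are illustrative, not parser.c itself):

/* Round-trip sketch for the new "seen" field: write it into a cfg-style
 * section, then recover it with a default of 0 when absent. */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
    /* Write, as the first-layer branch of the printers does. */
    char cfg[128];
    int seen_out = 128000;
    snprintf(cfg, sizeof cfg, "[crop]\nbatch=%d\nseen=%d\n", 128, seen_out);

    /* Read back: find "seen=" and parse the integer, defaulting to 0. */
    int seen_in = 0;
    const char *p = strstr(cfg, "seen=");
    if(p) seen_in = atoi(p + 5);

    printf("saved seen=%d, restored seen=%d\n", seen_out, seen_in);
    return 0;
}
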