#include "softmax_layer.h"
#include "dropout_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "avgpool_layer.h"
#include "local_layer.h"
#include "route_layer.h"

int is_shortcut(section *s);
int is_cost(section *s);
int is_detection(section *s);
int is_region(section *s);
int is_route(section *s);
list *read_cfg(char *filename);

    return layer;
}

layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);
    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);

    l.softmax = option_find_int(options, "softmax", 0);
    l.max_boxes = option_find_int_quiet(options, "max", 30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore", 0);

    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    return l;
}
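/*
 * Illustrative only: a [region] section of a .cfg file that the options list
 * handed to parse_region() above could contain. The keys mirror the
 * option_find_* calls in the function; the values are made-up placeholders,
 * not defaults taken from any shipped configuration.
 *
 *   [region]
 *   coords=4
 *   classes=20
 *   num=5
 *   softmax=1
 *   jitter=.2
 *   rescore=1
 *   coord_scale=1
 *   object_scale=5
 *   noobject_scale=1
 *   class_scale=1
 */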
detection_layer parse_detection(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 1);
    int classes = option_find_int(options, "classes", 1);
    int rescore = option_find_int(options, "rescore", 0);
    int num = option_find_int(options, "num", 1);
    int side = option_find_int(options, "side", 7);
    /* assumed reconstruction of the elided constructor call; the option
       defaults above may differ from the original source */
    detection_layer layer = make_detection_layer(params.batch, params.inputs, num, side, classes, coords, rescore);

    layer.softmax = option_find_int(options, "softmax", 0);
    layer.sqrt = option_find_int(options, "sqrt", 0);

    layer.max_boxes = option_find_int_quiet(options, "max", 30);
    layer.coord_scale = option_find_float(options, "coord_scale", 1);
    layer.forced = option_find_int(options, "forced", 0);
    layer.object_scale = option_find_float(options, "object_scale", 1);
    layer.noobject_scale = option_find_float(options, "noobject_scale", 1);
    layer.class_scale = option_find_float(options, "class_scale", 1);
    layer.jitter = option_find_float(options, "jitter", .2);
    layer.random = option_find_int_quiet(options, "random", 0);
    return layer;
}
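/*
 * Rough sketch (illustrative values only) of the matching [detection] cfg
 * section these options would come from; each key corresponds to an
 * option_find_* call in parse_detection() above.
 *
 *   [detection]
 *   coords=4
 *   classes=20
 *   num=3
 *   side=7
 *   softmax=0
 *   sqrt=1
 *   max=30
 *   jitter=.2
 *   object_scale=1
 *   noobject_scale=.5
 *   class_scale=1
 *   coord_scale=5
 */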

learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random")==0) return RANDOM;
    if (strcmp(s, "poly")==0) return POLY;
    if (strcmp(s, "constant")==0) return CONSTANT;
    if (strcmp(s, "step")==0) return STEP;


    char *policy_s = option_find_str(options, "policy", "constant");
    net->policy = get_policy(policy_s);
    net->burn_in = option_find_int_quiet(options, "burn_in", 0);
    if(net->policy == STEP){
        net->step = option_find_int(options, "step", 1);
        net->scale = option_find_float(options, "scale", 1);

    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    } else if (net->policy == POLY || net->policy == RANDOM){
        net->power = option_find_float(options, "power", 1);
    }
    net->max_batches = option_find_int(options, "max_batches", 0);
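/*
 * Note (hedged): this function only stores the schedule parameters. In darknet
 * the learning-rate value itself is computed elsewhere (typically
 * get_current_rate() in network.c), roughly: STEP scales the base rate by
 * scale^(batch/step), SIG follows a sigmoid controlled by gamma and step, and
 * POLY/RANDOM use power together with max_batches. Treat the exact formulas
 * as an assumption; they are not shown in this file.
 */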

    params.batch = net.batch;
    params.time_steps = net.time_steps;

    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);

            l = parse_crop(options, params);
        }else if(is_cost(s)){
            l = parse_cost(options, params);
        }else if(is_region(s)){
            l = parse_region(options, params);
        }else if(is_detection(s)){
            l = parse_detection(options, params);
        }else if(is_softmax(s)){

        l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
        option_unused(options);
        net.layers[count] = l;
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;

    free_list(sections);
    net.outputs = get_network_output_size(net);
    net.output = get_network_output(net);
    if(workspace_size){
        //printf("%ld\n", workspace_size);
#ifdef GPU
        net.workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
#else
        net.workspace = calloc(1, workspace_size);
#endif
    }
    return net;
}
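/*
 * Minimal usage sketch, assuming the usual darknet entry points
 * (parse_network_cfg, load_weights, set_batch_network, network_predict);
 * the file paths are placeholders:
 *
 *   network net = parse_network_cfg("cfg/example.cfg");
 *   load_weights(&net, "example.weights");
 *   set_batch_network(&net, 1);
 *   float *out = network_predict(net, input);
 */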

    if (strcmp(type, "[crop]")==0) return CROP;
    if (strcmp(type, "[cost]")==0) return COST;
    if (strcmp(type, "[detection]")==0) return DETECTION;
    if (strcmp(type, "[region]")==0) return REGION;
    if (strcmp(type, "[local]")==0) return LOCAL;
    if (strcmp(type, "[deconv]")==0
            || strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL;

{
    return (strcmp(s->type, "[cost]")==0);
}
int is_region(section *s)
{
    return (strcmp(s->type, "[region]")==0);
}
int is_detection(section *s)
{
    return (strcmp(s->type, "[detection]")==0);
    fwrite(l.filters, sizeof(float), num, fp);
}

void save_batchnorm_weights(layer l, FILE *fp)
{
#ifdef GPU
    if(gpu_index >= 0){
        pull_batchnorm_layer(l);
    }
#endif
    fwrite(l.scales, sizeof(float), l.c, fp);
    fwrite(l.rolling_mean, sizeof(float), l.c, fp);
    fwrite(l.rolling_variance, sizeof(float), l.c, fp);
}
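/*
 * On-disk layout written by save_batchnorm_weights() above: three blocks of
 * l.c floats in the order scales, rolling_mean, rolling_variance.
 * load_batchnorm_weights() further down reads them back in the same order,
 * so the two functions must stay in sync.
 */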

void save_connected_weights(layer l, FILE *fp)
{
#ifdef GPU

            save_convolutional_weights(l, fp);
        } if(l.type == CONNECTED){
            save_connected_weights(l, fp);
        } if(l.type == BATCHNORM){
            save_batchnorm_weights(l, fp);
        } if(l.type == RNN){
            save_connected_weights(*(l.input_layer), fp);
            save_connected_weights(*(l.self_layer), fp);

    if(transpose){
        transpose_matrix(l.weights, l.inputs, l.outputs);
    }
| | | //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs)); |
| | | //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs)); |
| | | //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs)); |
| | | //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs)); |
    if (l.batch_normalize && (!l.dontloadscales)){
        fread(l.scales, sizeof(float), l.outputs, fp);
        fread(l.rolling_mean, sizeof(float), l.outputs, fp);

#endif
}

void load_batchnorm_weights(layer l, FILE *fp)
{
    fread(l.scales, sizeof(float), l.c, fp);
    fread(l.rolling_mean, sizeof(float), l.c, fp);
    fread(l.rolling_variance, sizeof(float), l.c, fp);
#ifdef GPU
    if(gpu_index >= 0){
        push_batchnorm_layer(l);
    }
#endif
}

void load_convolutional_weights_binary(layer l, FILE *fp)
{
    fread(l.biases, sizeof(float), l.n, fp);

            }
        }
    }
    binarize_filters2(l.filters, l.n, l.c*l.size*l.size, l.cfilters, l.scales);
#ifdef GPU
    if(gpu_index >= 0){
        push_convolutional_layer(l);

    if (l.flipped) {
        transpose_matrix(l.filters, l.c*l.size*l.size, l.n);
    }
    //if (l.binary) binarize_filters(l.filters, l.n, l.c*l.size*l.size, l.filters);
#ifdef GPU
    if(gpu_index >= 0){
        push_convolutional_layer(l);

        if(l.type == CONNECTED){
            load_connected_weights(l, fp, transpose);
        }
        if(l.type == BATCHNORM){
            load_batchnorm_weights(l, fp);
        }
        if(l.type == CRNN){
            load_convolutional_weights(*(l.input_layer), fp);
            load_convolutional_weights(*(l.self_layer), fp);