
    layer.softmax = option_find_int(options, "softmax", 0);
    layer.sqrt = option_find_int(options, "sqrt", 0);

    layer.max_boxes = option_find_int_quiet(options, "max", 30);
    layer.coord_scale = option_find_float(options, "coord_scale", 1);
    layer.forced = option_find_int(options, "forced", 0);
    layer.object_scale = option_find_float(options, "object_scale", 1);

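/* Map the learning-rate policy name read from the cfg [net] section onto the
 * learning_rate_policy enum. */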
learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random")==0) return RANDOM;
    if (strcmp(s, "poly")==0) return POLY;
    if (strcmp(s, "constant")==0) return CONSTANT;
    if (strcmp(s, "step")==0) return STEP;

    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    } else if (net->policy == POLY || net->policy == RANDOM){
        net->power = option_find_float(options, "power", 1);
    }
    net->max_batches = option_find_int(options, "max_batches", 0);

    params.batch = net.batch;
    params.time_steps = net.time_steps;

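    // A single scratch workspace is shared by every layer; track the largest
    // per-layer requirement while the layers are being built.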
    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);

        l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
        option_unused(options);
        net.layers[count] = l;
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;

    free_list(sections);
    net.outputs = get_network_output_size(net);
    net.output = get_network_output(net);
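    // Allocate the shared scratch buffer once, sized for the most demanding
    // layer (on the GPU when one is in use, otherwise on the host).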
    if(workspace_size){
        //printf("%ld\n", workspace_size);
#ifdef GPU
        net.workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
#else
        net.workspace = calloc(1, workspace_size);
#endif
    }
    return net;
}
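/* A minimal usage sketch (not part of this file), assuming the usual darknet
 * entry points parse_network_cfg() and load_weights() declared in parser.h;
 * the file names below are placeholders:
 *
 *     network net = parse_network_cfg("cfg/example.cfg");
 *     load_weights(&net, "example.weights");
 */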

    fwrite(l.filters, sizeof(float), num, fp);
}

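/* Write a batchnorm layer's learned scales and rolling statistics, one float
 * per channel (l.c values each), pulling them from the GPU first when one is
 * in use. */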
void save_batchnorm_weights(layer l, FILE *fp)
{
#ifdef GPU
    if(gpu_index >= 0){
        pull_batchnorm_layer(l);
    }
#endif
    fwrite(l.scales, sizeof(float), l.c, fp);
    fwrite(l.rolling_mean, sizeof(float), l.c, fp);
    fwrite(l.rolling_variance, sizeof(float), l.c, fp);
}

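/* Write a connected (fully-connected) layer's parameters to the weights file. */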
void save_connected_weights(layer l, FILE *fp)
{
#ifdef GPU

            save_convolutional_weights(l, fp);
        } if(l.type == CONNECTED){
            save_connected_weights(l, fp);
        } if(l.type == BATCHNORM){
            save_batchnorm_weights(l, fp);
        } if(l.type == RNN){
            save_connected_weights(*(l.input_layer), fp);
            save_connected_weights(*(l.self_layer), fp);

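    // Weight files flagged as transposed store the connected-layer weight
    // matrix in the opposite orientation; flip it into the layout the rest of
    // the code expects.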
    if(transpose){
        transpose_matrix(l.weights, l.inputs, l.outputs);
    }
    //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs));
    //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs));
| | | //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs)); |
| | | //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs)); |
    if (l.batch_normalize && (!l.dontloadscales)){
        fread(l.scales, sizeof(float), l.outputs, fp);
        fread(l.rolling_mean, sizeof(float), l.outputs, fp);

#endif
}

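/* Read a batchnorm layer's scales and rolling statistics (l.c floats each)
 * and push them to the GPU when one is in use. */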
void load_batchnorm_weights(layer l, FILE *fp)
{
    fread(l.scales, sizeof(float), l.c, fp);
    fread(l.rolling_mean, sizeof(float), l.c, fp);
    fread(l.rolling_variance, sizeof(float), l.c, fp);
#ifdef GPU
    if(gpu_index >= 0){
        push_batchnorm_layer(l);
    }
#endif
}

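/* Read convolutional filters stored in darknet's packed binary format, where
 * each filter is reconstructed from a per-filter mean plus one sign bit per
 * weight. */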
void load_convolutional_weights_binary(layer l, FILE *fp)
{
    fread(l.biases, sizeof(float), l.n, fp);

            }
        }
    }
    binarize_filters2(l.filters, l.n, l.c*l.size*l.size, l.cfilters, l.scales);
#ifdef GPU
    if(gpu_index >= 0){
        push_convolutional_layer(l);

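    // Filters written by a "flipped" saver are stored transposed; undo that
    // before (optionally) re-binarizing the filters.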
    if (l.flipped) {
        transpose_matrix(l.filters, l.c*l.size*l.size, l.n);
    }
    if (l.binary) binarize_filters(l.filters, l.n, l.c*l.size*l.size, l.filters);
#ifdef GPU
    if(gpu_index >= 0){
        push_convolutional_layer(l);

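        // Dispatch on layer type: each type knows the on-disk layout of its
        // own parameters.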
        if(l.type == CONNECTED){
            load_connected_weights(l, fp, transpose);
        }
        if(l.type == BATCHNORM){
            load_batchnorm_weights(l, fp);
        }
        if(l.type == CRNN){
            load_convolutional_weights(*(l.input_layer), fp);
            load_convolutional_weights(*(l.self_layer), fp);