learning_rate_policy get_policy(char *s)
{
    if (strcmp(s, "random")==0) return RANDOM;
    if (strcmp(s, "poly")==0) return POLY;
    if (strcmp(s, "constant")==0) return CONSTANT;
    if (strcmp(s, "step")==0) return STEP;
    if (strcmp(s, "exp")==0) return EXP;
    if (strcmp(s, "sigmoid")==0) return SIG;
    if (strcmp(s, "steps")==0) return STEPS;
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}

    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    } else if (net->policy == POLY || net->policy == RANDOM){
        net->power = option_find_float(options, "power", 1);
    }
    net->max_batches = option_find_int(options, "max_batches", 0);
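
/* Illustrative only, not from this file: these options are read from the
 * [net] section of a darknet .cfg file. A minimal sketch of a poly-policy
 * configuration this code would parse (the values are assumptions):
 *
 *   [net]
 *   learning_rate=0.001
 *   policy=poly
 *   power=4
 *   max_batches=500200
 */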

    fwrite(l.filters, sizeof(float), num, fp);
}

void save_batchnorm_weights(layer l, FILE *fp)
{
#ifdef GPU
    if(gpu_index >= 0){
        pull_batchnorm_layer(l);
    }
#endif
    fwrite(l.scales, sizeof(float), l.c, fp);
    fwrite(l.rolling_mean, sizeof(float), l.c, fp);
    fwrite(l.rolling_variance, sizeof(float), l.c, fp);
}

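/* Note, added for clarity: save_batchnorm_weights and load_batchnorm_weights
 * (below) must agree on the on-disk layout, which is three consecutive blocks
 * of l.c floats in the order scales, rolling_mean, rolling_variance. */
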
void save_connected_weights(layer l, FILE *fp)
{
#ifdef GPU
    if(gpu_index >= 0){
        pull_connected_layer(l);
    }
#endif

        save_convolutional_weights(l, fp);
    }
    if(l.type == CONNECTED){
        save_connected_weights(l, fp);
    }
    if(l.type == BATCHNORM){
        save_batchnorm_weights(l, fp);
    }
    if(l.type == RNN){
        save_connected_weights(*(l.input_layer), fp);
        save_connected_weights(*(l.self_layer), fp);

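/* Note, added for clarity: the save path above and the load path below must
 * visit layers, and each layer's sub-layers, in the same fixed order, since
 * the weight file is a raw stream of floats with no per-layer framing. */
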
    if(transpose){
        transpose_matrix(l.weights, l.inputs, l.outputs);
    }
    //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs));
    //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs));
    if (l.batch_normalize && (!l.dontloadscales)){
        fread(l.scales, sizeof(float), l.outputs, fp);
        fread(l.rolling_mean, sizeof(float), l.outputs, fp);
        fread(l.rolling_variance, sizeof(float), l.outputs, fp);
    }
#ifdef GPU
    if(gpu_index >= 0){
        push_connected_layer(l);
    }
#endif
}

void load_batchnorm_weights(layer l, FILE *fp)
{
    fread(l.scales, sizeof(float), l.c, fp);
    fread(l.rolling_mean, sizeof(float), l.c, fp);
    fread(l.rolling_variance, sizeof(float), l.c, fp);
#ifdef GPU
    if(gpu_index >= 0){
        push_batchnorm_layer(l);
    }
#endif
}

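/* Illustrative usage sketch, not part of the original file: round-trips one
 * batchnorm layer's statistics through a temporary file using the two
 * functions above. The helper name and file name are assumptions; this
 * assumes <stdio.h> is included, as elsewhere in this file. */
static void batchnorm_weights_roundtrip(layer l)
{
    FILE *fp = fopen("batchnorm.tmp", "wb");
    if(!fp) return;
    save_batchnorm_weights(l, fp);   /* writes 3*l.c floats: scales, means, variances */
    fclose(fp);

    fp = fopen("batchnorm.tmp", "rb");
    if(!fp) return;
    load_batchnorm_weights(l, fp);   /* reads them back in the same order */
    fclose(fp);
}
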
void load_convolutional_weights_binary(layer l, FILE *fp)
{
    fread(l.biases, sizeof(float), l.n, fp);

    if(l.type == CONNECTED){
        load_connected_weights(l, fp, transpose);
    }
    if(l.type == BATCHNORM){
        load_batchnorm_weights(l, fp);
    }
    if(l.type == CRNN){
        load_convolutional_weights(*(l.input_layer), fp);
        load_convolutional_weights(*(l.self_layer), fp);