Joseph Redmon
2014-12-16 884045091b3a22d4dda3a9d743d076367c840ef7
src/network.c
@@ -125,6 +125,9 @@
    } else if(net.types[i] == CONNECTED){
        connected_layer layer = *(connected_layer *)net.layers[i];
        return layer.output;
    } else if(net.types[i] == CROP){
        crop_layer layer = *(crop_layer *)net.layers[i];
        return layer.output;
    } else if(net.types[i] == NORMALIZATION){
        normalization_layer layer = *(normalization_layer *)net.layers[i];
        return layer.output;
@@ -213,12 +216,16 @@
        }
        if(net.types[i] == CONVOLUTIONAL){
            convolutional_layer layer = *(convolutional_layer *)net.layers[i];
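            /* Backward through a conv layer now also takes prev_input (the
               output of the layer below): the weight gradient needs that input,
               so the incoming delta buffer alone is no longer enough. */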
            backward_convolutional_layer(layer, prev_input, prev_delta);
        }
        else if(net.types[i] == MAXPOOL){
            maxpool_layer layer = *(maxpool_layer *)net.layers[i];
            if(i != 0) backward_maxpool_layer(layer, prev_delta);
        }
        else if(net.types[i] == DROPOUT){
            dropout_layer layer = *(dropout_layer *)net.layers[i];
            backward_dropout_layer(layer, prev_delta);
        }
        else if(net.types[i] == NORMALIZATION){
            normalization_layer layer = *(normalization_layer *)net.layers[i];
            if(i != 0) backward_normalization_layer(layer, prev_input, prev_delta);
@@ -238,17 +245,15 @@
    }
}
float train_network_datum(network net, float *x, float *y)
{
    #ifdef GPU
    if(gpu_index >= 0) return train_network_datum_gpu(net, x, y);
    #endif
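    /* GPU dispatch pattern: support is compiled in with #ifdef GPU and chosen
       at run time by the gpu_index >= 0 check, so a GPU-enabled build with a
       negative gpu_index still falls through to the CPU path below. */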
    forward_network(net, x, y, 1);
    //int class = get_predicted_class_network(net);
    backward_network(net, x);
    float error = get_network_cost(net);
    update_network(net);
    //return (y[class]?1:0);
    return error;
}
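/* Illustrative sketch, not part of this commit: training on one hand-built
   example. Assumes net.batch is 1 and that a get_network_output_size()
   accessor is available for sizing the one-hot truth vector; both are
   assumptions for the example, not guarantees from this diff. */
float train_one_example(network net, float *pixels, int label)
{
    float *truth = calloc(get_network_output_size(net), sizeof(float));
    truth[label] = 1;                        /* one-hot truth vector */
    float err = train_network_datum(net, pixels, truth);
    free(truth);
    return err;
}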
@@ -270,6 +275,25 @@
    return (float)sum/(n*batch);
}
float train_network(network net, data d)
{
    int batch = net.batch;
    int n = d.X.rows / batch;
    float *X = calloc(batch*d.X.cols, sizeof(float));
    float *y = calloc(batch*d.y.cols, sizeof(float));
    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_next_batch(d, batch, i*batch, X, y);
        float err = train_network_datum(net, X, y);
        sum += err;
    }
    free(X);
    free(y);
    return (float)sum/(n*batch);
}
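/* Illustrative sketch, not part of this commit: running train_network() for a
   fixed number of passes over the same data and printing the averaged cost it
   returns. The epoch count is an arbitrary choice for the example. */
void train_epochs(network net, data d, int epochs)
{
    int e;
    for(e = 0; e < epochs; ++e){
        float avg_cost = train_network(net, d);
        fprintf(stderr, "epoch %d: average cost %f\n", e, avg_cost);
    }
}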
float train_network_batch(network net, data d, int n)
{
    int i,j;
@@ -289,40 +313,65 @@
    return (float)sum/(n*batch);
}
void set_learning_network(network *net, float rate, float momentum, float decay)
{
    int i;
    net->learning_rate=rate;
    net->momentum = momentum;
    net->decay = decay;
    for(i = 0; i < net->n; ++i){
        if(net->types[i] == CONVOLUTIONAL){
            convolutional_layer *layer = (convolutional_layer *)net->layers[i];
            layer->learning_rate=rate;
            layer->momentum = momentum;
            layer->decay = decay;
        }
        else if(net->types[i] == CONNECTED){
            connected_layer *layer = (connected_layer *)net->layers[i];
            layer->learning_rate=rate;
            layer->momentum = momentum;
            layer->decay = decay;
        }
    }
}
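/* Illustrative sketch, not part of this commit: a step schedule that cuts the
   learning rate by 10x every `step` passes using the new setter. The schedule
   constants are assumptions for the example; momentum and decay are left at
   the values already stored on the network. */
void step_schedule(network *net, int epoch, int step, float base_rate)
{
    float rate = base_rate;
    int i;
    for(i = 0; i < epoch/step; ++i) rate *= .1;
    set_learning_network(net, rate, net->momentum, net->decay);
}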
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int i;
    for(i = 0; i < net->n; ++i){
        if(net->types[i] == CONVOLUTIONAL){
            convolutional_layer *layer = (convolutional_layer *)net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == MAXPOOL){
            maxpool_layer *layer = (maxpool_layer *)net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == CONNECTED){
            connected_layer *layer = (connected_layer *)net->layers[i];
            layer->batch = b;
        } else if(net->types[i] == DROPOUT){
            dropout_layer *layer = (dropout_layer *) net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == FREEWEIGHT){
            freeweight_layer *layer = (freeweight_layer *) net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == SOFTMAX){
            softmax_layer *layer = (softmax_layer *)net->layers[i];
            layer->batch = b;
        }
        else if(net->types[i] == COST){
            cost_layer *layer = (cost_layer *)net->layers[i];
            layer->batch = b;
        }
    }
}
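/* Illustrative sketch, not part of this commit: a common reason to change the
   batch size is running a trained network on one image at a time, so every
   layer's batch field matches the single input that network_predict() sees.
   The caller and its ownership of `input` are assumptions for the example. */
float *predict_single(network *net, float *input)
{
    set_batch_network(net, 1);    /* every layer now expects a batch of one */
    return network_predict(*net, input);
}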
int get_network_input_size_layer(network net, int i)
{
    if(net.types[i] == CONVOLUTIONAL){
@@ -339,6 +388,9 @@
    } else if(net.types[i] == DROPOUT){
        dropout_layer layer = *(dropout_layer *) net.layers[i];
        return layer.inputs;
    } else if(net.types[i] == CROP){
        crop_layer layer = *(crop_layer *) net.layers[i];
        return layer.c*layer.h*layer.w;
    }
    else if(net.types[i] == FREEWEIGHT){
        freeweight_layer layer = *(freeweight_layer *) net.layers[i];
@@ -348,6 +400,7 @@
        softmax_layer layer = *(softmax_layer *)net.layers[i];
        return layer.inputs;
    }
    printf("Can't find input size\n");
    return 0;
}
@@ -363,6 +416,10 @@
        image output = get_maxpool_image(layer);
        return output.h*output.w*output.c;
    }
    else if(net.types[i] == CROP){
        crop_layer layer = *(crop_layer *) net.layers[i];
        return layer.c*layer.crop_height*layer.crop_width;
    }
    else if(net.types[i] == CONNECTED){
        connected_layer layer = *(connected_layer *)net.layers[i];
        return layer.outputs;
@@ -379,6 +436,7 @@
        softmax_layer layer = *(softmax_layer *)net.layers[i];
        return layer.inputs;
    }
    printf("Can't find output size\n");
    return 0;
}
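/* Worked example (illustrative): for a CROP layer with c = 3, h = w = 256 and
   crop_height = crop_width = 224, the helpers above report 3*256*256 = 196608
   input values and 3*224*224 = 150528 output values. */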
@@ -486,6 +544,10 @@
float *network_predict(network net, float *input)
{
    #ifdef GPU
        if(gpu_index >= 0) return network_predict_gpu(net, input);
    #endif
    forward_network(net, input, 0, 0);
    float *out = get_network_output(net);
    return out;
@@ -586,15 +648,26 @@
float network_accuracy(network net, data d)
{
    matrix guess = network_predict_data(net, d);
    float acc = matrix_topk_accuracy(d.y, guess,1);
    free_matrix(guess);
    return acc;
}
float *network_accuracies(network net, data d)
{
    static float acc[2];
    matrix guess = network_predict_data(net, d);
    acc[0] = matrix_topk_accuracy(d.y, guess,1);
    acc[1] = matrix_topk_accuracy(d.y, guess,5);
    free_matrix(guess);
    return acc;
}
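/* Illustrative sketch, not part of this commit: network_accuracies() returns a
   pointer to static storage, so the two values are read (or copied) before the
   next call rather than freed. */
void report_accuracies(network net, data d)
{
    float *acc = network_accuracies(net, d);
    fprintf(stderr, "top-1: %f  top-5: %f\n", acc[0], acc[1]);
}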
float network_accuracy_multi(network net, data d, int n)
{
    matrix guess = network_predict_data_multi(net, d, n);
    float acc = matrix_topk_accuracy(d.y, guess,1);
    free_matrix(guess);
    return acc;
}
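/* Conceptual sketch of top-k accuracy, not the library's matrix_topk_accuracy()
   implementation: a row counts as correct when fewer than k classes score above
   the true class. Assumes one-hot truth rows and darknet's float **vals
   row-pointer matrix layout; ties are ignored for simplicity. */
float topk_accuracy_sketch(matrix truth, matrix guess, int k)
{
    int i, j, correct = 0;
    for(i = 0; i < truth.rows; ++i){
        int t = 0;                               /* index of the true class */
        for(j = 0; j < truth.cols; ++j) if(truth.vals[i][j] > truth.vals[i][t]) t = j;
        int above = 0;                           /* classes scoring above the true one */
        for(j = 0; j < guess.cols; ++j) if(guess.vals[i][j] > guess.vals[i][t]) ++above;
        if(above < k) ++correct;
    }
    return (float)correct/truth.rows;
}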