From b714004546b97e9a43fae3e385dbefb56cecafb6 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Thu, 26 Oct 2017 15:04:26 +0000
Subject: [PATCH] Fixed bug with: net->seen

---
 src/layer.h |   67 ++++++++++++++++++++++++++++++---
 1 file changed, 60 insertions(+), 7 deletions(-)

diff --git a/src/layer.h b/src/layer.h
index 2376929..eb480c0 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -2,6 +2,10 @@
 #define BASE_LAYER_H
 
 #include "activations.h"
+#include "stddef.h"
+#include "tree.h"
+
+struct network_state;
 
 struct layer;
 typedef struct layer layer;
@@ -27,6 +31,9 @@
     CRNN,
     BATCHNORM,
     NETWORK,
+    XNOR,
+    REGION,
+    REORG,
     BLANK
 } LAYER_TYPE;
 
@@ -38,6 +45,12 @@
     LAYER_TYPE type;
     ACTIVATION activation;
     COST_TYPE cost_type;
+    void (*forward)   (struct layer, struct network_state);
+    void (*backward)  (struct layer, struct network_state);
+    void (*update)    (struct layer, int, float, float, float);
+    void (*forward_gpu)   (struct layer, struct network_state);
+    void (*backward_gpu)  (struct layer, struct network_state);
+    void (*update_gpu)    (struct layer, int, float, float, float);
     int batch_normalize;
     int shortcut;
     int batch;
@@ -49,10 +62,12 @@
     int h,w,c;
     int out_h, out_w, out_c;
     int n;
+    int max_boxes;
     int groups;
     int size;
     int side;
     int stride;
+    int reverse;
     int pad;
     int sqrt;
     int flip;
@@ -67,6 +82,7 @@
     float saturation;
     float exposure;
     float shift;
+    float ratio;
     int softmax;
     int classes;
     int coords;
@@ -76,6 +92,21 @@
     int does_cost;
     int joint;
     int noadjust;
+    int reorg;
+    int log;
+
+    int adam;
+    float B1;
+    float B2;
+    float eps;
+    float *m_gpu;
+    float *v_gpu;
+    int t;
+    float *m;
+    float *v;
+
+    tree *softmax_tree;
+    int  *map;
 
     float alpha;
     float beta;
@@ -85,6 +116,11 @@
     float object_scale;
     float noobject_scale;
     float class_scale;
+    int bias_match;
+    int random;
+    float thresh;
+    int classfix;
+    int absolute;
 
     int dontload;
     int dontloadscales;
@@ -96,16 +132,17 @@
     int *indexes;
     float *rand;
     float *cost;
-    float *filters;
-    char  *cfilters;
-    float *filter_updates;
+    char  *cweights;
     float *state;
+    float *prev_state;
+    float *forgot_state;
+    float *forgot_delta;
     float *state_delta;
 
     float *concat;
     float *concat_delta;
 
-    float *binary_filters;
+    float *binary_weights;
 
     float *biases;
     float *bias_updates;
@@ -157,6 +194,14 @@
     struct layer *input_h_layer;
     struct layer *state_h_layer;
 
+    float *z_cpu;
+    float *r_cpu;
+    float *h_cpu;
+
+    float *binary_input;
+
+    size_t workspace_size;
+
     #ifdef GPU
     float *z_gpu;
     float *r_gpu;
@@ -174,11 +219,9 @@
     float * save_delta_gpu;
     float * concat_gpu;
     float * concat_delta_gpu;
-    float * filters_gpu;
-    float * filter_updates_gpu;
 
     float *binary_input_gpu;
-    float *binary_filters_gpu;
+    float *binary_weights_gpu;
 
     float * mean_gpu;
     float * variance_gpu;
@@ -207,6 +250,16 @@
     float * rand_gpu;
     float * squared_gpu;
     float * norms_gpu;
+    #ifdef CUDNN
+    cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
+    cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
+    cudnnFilterDescriptor_t weightDesc;
+    cudnnFilterDescriptor_t dweightDesc;
+    cudnnConvolutionDescriptor_t convDesc;
+    cudnnConvolutionFwdAlgo_t fw_algo;
+    cudnnConvolutionBwdDataAlgo_t bd_algo;
+    cudnnConvolutionBwdFilterAlgo_t bf_algo;
+    #endif
     #endif
 };
 

--
Gitblit v1.10.0