From e6c97a53a7b5ac4014d30d236ea2bf5adb4bb521 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Tue, 07 Aug 2018 20:19:50 +0000
Subject: [PATCH] Maxpool fixes

---
 src/layer.h |   71 +++++++++++++++++++++++++++++++----
 1 file changed, 62 insertions(+), 9 deletions(-)

diff --git a/src/layer.h b/src/layer.h
index 5e9dc3a..8a58c92 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -3,6 +3,9 @@
 
 #include "activations.h"
 #include "stddef.h"
+#include "tree.h"
+
+struct network_state;
 
 struct layer;
 typedef struct layer layer;
@@ -30,7 +33,10 @@
     NETWORK,
     XNOR,
     REGION,
+	YOLO,
     REORG,
+	UPSAMPLE,
+	REORG_OLD,
     BLANK
 } LAYER_TYPE;
 
@@ -38,10 +44,28 @@
     SSE, MASKED, SMOOTH
 } COST_TYPE;
 
+typedef struct {
+	int batch;
+	float learning_rate;
+	float momentum;
+	float decay;
+	int adam;
+	float B1;
+	float B2;
+	float eps;
+	int t;
+} update_args;
+
 struct layer{
     LAYER_TYPE type;
     ACTIVATION activation;
     COST_TYPE cost_type;
+    void (*forward)   (struct layer, struct network_state);
+    void (*backward)  (struct layer, struct network_state);
+    void (*update)    (struct layer, int, float, float, float);
+    void (*forward_gpu)   (struct layer, struct network_state);
+    void (*backward_gpu)  (struct layer, struct network_state);
+    void (*update_gpu)    (struct layer, int, float, float, float);
     int batch_normalize;
     int shortcut;
     int batch;
@@ -58,6 +82,7 @@
     int size;
     int side;
     int stride;
+    int reverse;
     int pad;
     int sqrt;
     int flip;
@@ -73,6 +98,7 @@
     float exposure;
     float shift;
     float ratio;
+	int focal_loss;
     int softmax;
     int classes;
     int coords;
@@ -84,6 +110,23 @@
     int noadjust;
     int reorg;
     int log;
+	int tanh;
+	int *mask;
+	int total;
+	float bflops;
+
+    int adam;
+    float B1;
+    float B2;
+    float eps;
+    float *m_gpu;
+    float *v_gpu;
+    int t;
+    float *m;
+    float *v;
+
+    tree *softmax_tree;
+    int  *map;
 
     float alpha;
     float beta;
@@ -92,9 +135,19 @@
     float coord_scale;
     float object_scale;
     float noobject_scale;
+	float mask_scale;
     float class_scale;
+    int bias_match;
     int random;
+	float ignore_thresh;
+	float truth_thresh;
+    float thresh;
+	float focus;
+    int classfix;
+    int absolute;
 
+    int onlyforward;
+    int stopbackward;
     int dontload;
     int dontloadscales;
 
@@ -105,9 +158,7 @@
     int *indexes;
     float *rand;
     float *cost;
-    float *filters;
-    char  *cfilters;
-    float *filter_updates;
+    char  *cweights;
     float *state;
     float *prev_state;
     float *forgot_state;
@@ -117,7 +168,7 @@
     float *concat;
     float *concat_delta;
 
-    float *binary_filters;
+    float *binary_weights;
 
     float *biases;
     float *bias_updates;
@@ -194,11 +245,9 @@
     float * save_delta_gpu;
     float * concat_gpu;
     float * concat_delta_gpu;
-    float * filters_gpu;
-    float * filter_updates_gpu;
 
     float *binary_input_gpu;
-    float *binary_filters_gpu;
+    float *binary_weights_gpu;
 
     float * mean_gpu;
     float * variance_gpu;
@@ -216,6 +265,9 @@
     float * weights_gpu;
     float * weight_updates_gpu;
 
+	float * weights_gpu16;
+	float * weight_updates_gpu16;
+
     float * biases_gpu;
     float * bias_updates_gpu;
 
@@ -230,8 +282,9 @@
     #ifdef CUDNN
     cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
     cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
-    cudnnFilterDescriptor_t filterDesc;
-    cudnnFilterDescriptor_t dfilterDesc;
+	cudnnTensorDescriptor_t normTensorDesc, normDstTensorDesc, normDstTensorDescF16;
+    cudnnFilterDescriptor_t weightDesc;
+    cudnnFilterDescriptor_t dweightDesc;
     cudnnConvolutionDescriptor_t convDesc;
     cudnnConvolutionFwdAlgo_t fw_algo;
     cudnnConvolutionBwdDataAlgo_t bd_algo;

--
Gitblit v1.10.0