From ae1768e5831caa95214b93b08ee711aede36df07 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Mon, 05 Mar 2018 20:26:09 +0000
Subject: [PATCH] Removed random=1 from resnet152_yolo.cfg, because resize_network() does not yet support the [shortcut] layer

---
 src/layer.h |   65 ++++++++++++++++++++++++++++----
 1 file changed, 56 insertions(+), 9 deletions(-)

diff --git a/src/layer.h b/src/layer.h
index d53fe38..4e0af56 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -3,6 +3,9 @@
 
 #include "activations.h"
 #include "stddef.h"
+#include "tree.h"
+
+struct network_state;
 
 struct layer;
 typedef struct layer layer;
@@ -28,6 +31,10 @@
     CRNN,
     BATCHNORM,
     NETWORK,
+    XNOR,
+    REGION,
+    REORG,
+	REORG_OLD,
     BLANK
 } LAYER_TYPE;
 
@@ -39,6 +46,12 @@
     LAYER_TYPE type;
     ACTIVATION activation;
     COST_TYPE cost_type;
+    void (*forward)   (struct layer, struct network_state);
+    void (*backward)  (struct layer, struct network_state);
+    void (*update)    (struct layer, int, float, float, float);
+    void (*forward_gpu)   (struct layer, struct network_state);
+    void (*backward_gpu)  (struct layer, struct network_state);
+    void (*update_gpu)    (struct layer, int, float, float, float);
     int batch_normalize;
     int shortcut;
     int batch;
@@ -51,10 +64,12 @@
     int out_h, out_w, out_c;
     int n;
     int max_boxes;
+	int small_object;
     int groups;
     int size;
     int side;
     int stride;
+    int reverse;
     int pad;
     int sqrt;
     int flip;
@@ -69,6 +84,8 @@
     float saturation;
     float exposure;
     float shift;
+    float ratio;
+	int focal_loss;
     int softmax;
     int classes;
     int coords;
@@ -78,6 +95,21 @@
     int does_cost;
     int joint;
     int noadjust;
+    int reorg;
+    int log;
+
+    int adam;
+    float B1;
+    float B2;
+    float eps;
+    float *m_gpu;
+    float *v_gpu;
+    int t;
+    float *m;
+    float *v;
+
+    tree *softmax_tree;
+    int  *map;
 
     float alpha;
     float beta;
@@ -87,7 +119,14 @@
     float object_scale;
     float noobject_scale;
     float class_scale;
+    int bias_match;
+    int random;
+    float thresh;
+    int classfix;
+    int absolute;
 
+    int onlyforward;
+    int stopbackward;
     int dontload;
     int dontloadscales;
 
@@ -98,16 +137,17 @@
     int *indexes;
     float *rand;
     float *cost;
-    float *filters;
-    char  *cfilters;
-    float *filter_updates;
+    char  *cweights;
     float *state;
+    float *prev_state;
+    float *forgot_state;
+    float *forgot_delta;
     float *state_delta;
 
     float *concat;
     float *concat_delta;
 
-    float *binary_filters;
+    float *binary_weights;
 
     float *biases;
     float *bias_updates;
@@ -159,6 +199,12 @@
     struct layer *input_h_layer;
     struct layer *state_h_layer;
 
+    float *z_cpu;
+    float *r_cpu;
+    float *h_cpu;
+
+    float *binary_input;
+
     size_t workspace_size;
 
     #ifdef GPU
@@ -178,11 +224,9 @@
     float * save_delta_gpu;
     float * concat_gpu;
     float * concat_delta_gpu;
-    float * filters_gpu;
-    float * filter_updates_gpu;
 
     float *binary_input_gpu;
-    float *binary_filters_gpu;
+    float *binary_weights_gpu;
 
     float * mean_gpu;
     float * variance_gpu;
@@ -200,6 +244,9 @@
     float * weights_gpu;
     float * weight_updates_gpu;
 
+	float * weights_gpu16;
+	float * weight_updates_gpu16;
+
     float * biases_gpu;
     float * bias_updates_gpu;
 
@@ -214,8 +261,8 @@
     #ifdef CUDNN
     cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
     cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
-    cudnnFilterDescriptor_t filterDesc;
-    cudnnFilterDescriptor_t dfilterDesc;
+    cudnnFilterDescriptor_t weightDesc;
+    cudnnFilterDescriptor_t dweightDesc;
     cudnnConvolutionDescriptor_t convDesc;
     cudnnConvolutionFwdAlgo_t fw_algo;
     cudnnConvolutionBwdDataAlgo_t bd_algo;

--
Gitblit v1.10.0