From 481b57a96a9ef29b112caec1bb3e17ffb043ceae Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Sun, 25 Sep 2016 06:12:54 +0000
Subject: [PATCH] So I have this new programming paradigm.......
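
struct layer is now a small vtable: every layer carries its own
forward/backward/update function pointers (plus _gpu variants), so the
code that runs a network can call a layer directly instead of switching
on its type. The header also adds the XNOR, REGION and REORG layer
types, renames the binary filter buffers to weights (cfilters ->
cweights, binary_filters -> binary_weights, binary_filters_gpu ->
binary_weights_gpu) while dropping the old filters/filter_updates
pointers, and adds extra state buffers, a per-layer workspace_size, and
cuDNN descriptors behind #ifdef CUDNN.

Below is a minimal, self-contained sketch of how the dispatch is meant
to be used. The noop layer, make_noop_layer() and the trimmed
network_state definition are illustrative stand-ins, not code from this
patch; only struct layer and the function-pointer signatures come from
layer.h.

    #include <stdio.h>
    #include "layer.h"

    /* Stand-in for struct network_state, which layer.h only
     * forward-declares; trimmed to what this sketch needs. */
    struct network_state { float *input; int train; };

    /* Illustrative forward pass; a real layer would compute its
     * outputs from state.input here. */
    static void forward_noop(layer l, struct network_state state)
    {
        (void)state;
        printf("forward pass for layer type %d\n", l.type);
    }

    /* Hypothetical constructor in the make_*_layer() style: the layer
     * registers its own passes instead of being named in a switch
     * somewhere else. */
    static layer make_noop_layer(void)
    {
        layer l = {0};
        l.type = BLANK;
        l.forward = forward_noop; /* void (*)(struct layer, struct network_state) */
        return l;
    }

    int main(void)
    {
        struct network_state state = {0};
        layer net[2];
        int i;

        net[0] = make_noop_layer();
        net[1] = make_noop_layer();

        /* Running the "network" is just a loop; no switch on l.type. */
        for(i = 0; i < 2; ++i){
            net[i].forward(net[i], state);
        }
        return 0;
    }

The payoff is that a new layer type only has to register its functions
in its constructor; the loop that drives the network never grows
another case.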

---
 src/layer.h |   48 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 41 insertions(+), 7 deletions(-)

diff --git a/src/layer.h b/src/layer.h
index 2376929..ea6862b 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -2,6 +2,9 @@
 #define BASE_LAYER_H
 
 #include "activations.h"
+#include "stddef.h"
+
+struct network_state;
 
 struct layer;
 typedef struct layer layer;
@@ -27,6 +30,9 @@
     CRNN,
     BATCHNORM,
     NETWORK,
+    XNOR,
+    REGION,
+    REORG,
     BLANK
 } LAYER_TYPE;
 
@@ -38,6 +44,12 @@
     LAYER_TYPE type;
     ACTIVATION activation;
     COST_TYPE cost_type;
+    void (*forward)   (struct layer, struct network_state);
+    void (*backward)  (struct layer, struct network_state);
+    void (*update)    (struct layer, int, float, float, float);
+    void (*forward_gpu)   (struct layer, struct network_state);
+    void (*backward_gpu)  (struct layer, struct network_state);
+    void (*update_gpu)    (struct layer, int, float, float, float);
     int batch_normalize;
     int shortcut;
     int batch;
@@ -49,6 +61,7 @@
     int h,w,c;
     int out_h, out_w, out_c;
     int n;
+    int max_boxes;
     int groups;
     int size;
     int side;
@@ -67,6 +80,7 @@
     float saturation;
     float exposure;
     float shift;
+    float ratio;
     int softmax;
     int classes;
     int coords;
@@ -76,6 +90,8 @@
     int does_cost;
     int joint;
     int noadjust;
+    int reorg;
+    int log;
 
     float alpha;
     float beta;
@@ -85,6 +101,7 @@
     float object_scale;
     float noobject_scale;
     float class_scale;
+    int random;
 
     int dontload;
     int dontloadscales;
@@ -96,16 +113,17 @@
     int *indexes;
     float *rand;
     float *cost;
-    float *filters;
-    char  *cfilters;
-    float *filter_updates;
+    char  *cweights;
     float *state;
+    float *prev_state;
+    float *forgot_state;
+    float *forgot_delta;
     float *state_delta;
 
     float *concat;
     float *concat_delta;
 
-    float *binary_filters;
+    float *binary_weights;
 
     float *biases;
     float *bias_updates;
@@ -157,6 +175,14 @@
     struct layer *input_h_layer;
     struct layer *state_h_layer;
 
+    float *z_cpu;
+    float *r_cpu;
+    float *h_cpu;
+
+    float *binary_input;
+
+    size_t workspace_size;
+
     #ifdef GPU
     float *z_gpu;
     float *r_gpu;
@@ -174,11 +200,9 @@
     float * save_delta_gpu;
     float * concat_gpu;
     float * concat_delta_gpu;
-    float * filters_gpu;
-    float * filter_updates_gpu;
 
     float *binary_input_gpu;
-    float *binary_filters_gpu;
+    float *binary_weights_gpu;
 
     float * mean_gpu;
     float * variance_gpu;
@@ -207,6 +231,16 @@
     float * rand_gpu;
     float * squared_gpu;
     float * norms_gpu;
+    #ifdef CUDNN
+    cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
+    cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
+    cudnnFilterDescriptor_t weightDesc;
+    cudnnFilterDescriptor_t dweightDesc;
+    cudnnConvolutionDescriptor_t convDesc;
+    cudnnConvolutionFwdAlgo_t fw_algo;
+    cudnnConvolutionBwdDataAlgo_t bd_algo;
+    cudnnConvolutionBwdFilterAlgo_t bf_algo;
+    #endif
     #endif
 };
 

--
Gitblit v1.10.0