From b5938098d12d5c9fe48e7cc71ae3d75b7306833f Mon Sep 17 00:00:00 2001
From: Alexey <AlexeyAB@users.noreply.github.com>
Date: Mon, 02 Jan 2017 12:33:31 +0000
Subject: [PATCH] layer.h: add per-layer forward/backward/update function pointers, Adam fields and new layer types; rename filters to weights

---
 src/layer.h |   57 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 9 deletions(-)
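
Note (not applied by git, kept here as patch commentary): the hunks below add forward/backward/update function pointers (plus *_gpu variants) to struct layer, presumably so the network loop can dispatch through the layer itself rather than switching on LAYER_TYPE. The following is a minimal sketch of how such a layer could be wired up and driven. It assumes darknet's layer.h and network.h are on the include path and that network.h defines the tagged struct network_state with input/delta buffers; the names forward_identity_layer, make_identity_layer and run_forward are illustrative only, not functions introduced by this patch.

    /* Sketch only: illustrates the new function-pointer fields; not part of the patch. */
    #include <stdlib.h>
    #include <string.h>
    #include "layer.h"
    #include "network.h"   /* assumed to define struct network_state, as in darknet */

    /* hypothetical identity layer: copies its input straight to its output */
    static void forward_identity_layer(struct layer l, struct network_state state)
    {
        memcpy(l.output, state.input, l.batch * l.outputs * sizeof(float));
    }

    /* pass the incoming gradient straight back to the previous layer */
    static void backward_identity_layer(struct layer l, struct network_state state)
    {
        int i;
        if (!state.delta) return;
        for (i = 0; i < l.batch * l.outputs; ++i) state.delta[i] += l.delta[i];
    }

    static void update_identity_layer(struct layer l, int batch, float lr, float momentum, float decay)
    {
        /* nothing to learn in an identity layer */
    }

    static layer make_identity_layer(int batch, int inputs)
    {
        layer l = {0};
        l.type = BLANK;                      /* placeholder LAYER_TYPE */
        l.batch = batch;
        l.inputs = inputs;
        l.outputs = inputs;
        l.output = calloc(batch * inputs, sizeof(float));
        l.delta  = calloc(batch * inputs, sizeof(float));
        /* the new fields: the network loop calls these instead of branching on l.type */
        l.forward  = forward_identity_layer;
        l.backward = backward_identity_layer;
        l.update   = update_identity_layer;
        return l;
    }

    /* hypothetical dispatch loop over a network's layers */
    static void run_forward(layer *layers, int n, struct network_state state)
    {
        int i;
        for (i = 0; i < n; ++i) {
            layers[i].forward(layers[i], state);
            state.input = layers[i].output;  /* feed this layer's output to the next */
        }
    }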

diff --git a/src/layer.h b/src/layer.h
index d53fe38..eb480c0 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -3,6 +3,9 @@
 
 #include "activations.h"
 #include "stddef.h"
+#include "tree.h"
+
+struct network_state;
 
 struct layer;
 typedef struct layer layer;
@@ -28,6 +31,9 @@
     CRNN,
     BATCHNORM,
     NETWORK,
+    XNOR,
+    REGION,
+    REORG,
     BLANK
 } LAYER_TYPE;
 
@@ -39,6 +45,12 @@
     LAYER_TYPE type;
     ACTIVATION activation;
     COST_TYPE cost_type;
+    void (*forward)   (struct layer, struct network_state);
+    void (*backward)  (struct layer, struct network_state);
+    void (*update)    (struct layer, int, float, float, float);
+    void (*forward_gpu)   (struct layer, struct network_state);
+    void (*backward_gpu)  (struct layer, struct network_state);
+    void (*update_gpu)    (struct layer, int, float, float, float);
     int batch_normalize;
     int shortcut;
     int batch;
@@ -55,6 +67,7 @@
     int size;
     int side;
     int stride;
+    int reverse;
     int pad;
     int sqrt;
     int flip;
@@ -69,6 +82,7 @@
     float saturation;
     float exposure;
     float shift;
+    float ratio;
     int softmax;
     int classes;
     int coords;
@@ -78,6 +92,21 @@
     int does_cost;
     int joint;
     int noadjust;
+    int reorg;
+    int log;
+
+    int adam;
+    float B1;
+    float B2;
+    float eps;
+    float *m_gpu;
+    float *v_gpu;
+    int t;
+    float *m;
+    float *v;
+
+    tree *softmax_tree;
+    int  *map;
 
     float alpha;
     float beta;
@@ -87,6 +116,11 @@
     float object_scale;
     float noobject_scale;
     float class_scale;
+    int bias_match;
+    int random;
+    float thresh;
+    int classfix;
+    int absolute;
 
     int dontload;
     int dontloadscales;
@@ -98,16 +132,17 @@
     int *indexes;
     float *rand;
     float *cost;
-    float *filters;
-    char  *cfilters;
-    float *filter_updates;
+    char  *cweights;
     float *state;
+    float *prev_state;
+    float *forgot_state;
+    float *forgot_delta;
     float *state_delta;
 
     float *concat;
     float *concat_delta;
 
-    float *binary_filters;
+    float *binary_weights;
 
     float *biases;
     float *bias_updates;
@@ -159,6 +194,12 @@
     struct layer *input_h_layer;
     struct layer *state_h_layer;
 
+    float *z_cpu;
+    float *r_cpu;
+    float *h_cpu;
+
+    float *binary_input;
+
     size_t workspace_size;
 
     #ifdef GPU
@@ -178,11 +219,9 @@
     float * save_delta_gpu;
     float * concat_gpu;
     float * concat_delta_gpu;
-    float * filters_gpu;
-    float * filter_updates_gpu;
 
     float *binary_input_gpu;
-    float *binary_filters_gpu;
+    float *binary_weights_gpu;
 
     float * mean_gpu;
     float * variance_gpu;
@@ -214,8 +253,8 @@
     #ifdef CUDNN
     cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
     cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
-    cudnnFilterDescriptor_t filterDesc;
-    cudnnFilterDescriptor_t dfilterDesc;
+    cudnnFilterDescriptor_t weightDesc;
+    cudnnFilterDescriptor_t dweightDesc;
     cudnnConvolutionDescriptor_t convDesc;
     cudnnConvolutionFwdAlgo_t fw_algo;
     cudnnConvolutionBwdDataAlgo_t bd_algo;
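
Note (patch commentary, not part of the diff): the adam, B1, B2, eps, t, m and v fields added above give each layer the state needed for an Adam-style update. The sketch below shows how those fields map onto the standard Adam rule; adam_step is an illustrative name, not a function introduced by this patch, and the weights/gradients arrays stand in for whichever per-layer parameter and gradient buffers the update is applied to.

    /* Sketch only: the usual Adam update expressed with the new layer fields. */
    #include <math.h>
    #include "layer.h"

    static void adam_step(layer *l, float *weights, float *gradients, int n, float lr)
    {
        int i;
        l->t += 1;                                           /* time step for bias correction */
        for (i = 0; i < n; ++i) {
            /* exponential moving averages of the gradient and its square */
            l->m[i] = l->B1 * l->m[i] + (1.0f - l->B1) * gradients[i];
            l->v[i] = l->B2 * l->v[i] + (1.0f - l->B2) * gradients[i] * gradients[i];
            /* bias-corrected estimates */
            float m_hat = l->m[i] / (1.0f - powf(l->B1, l->t));
            float v_hat = l->v[i] / (1.0f - powf(l->B2, l->t));
            weights[i] -= lr * m_hat / (sqrtf(v_hat) + l->eps);
        }
    }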

--
Gitblit v1.10.0