From 1c05ebf522f0bb5776ba51a46d94aa101220fea1 Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Thu, 07 Jun 2018 00:39:30 +0000
Subject: [PATCH] Minor fix
---
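Notes:

This patch extends struct layer with per-layer forward/backward/update
callbacks (plus *_gpu variants), new LAYER_TYPE values (RNN, GRU, CRNN,
BATCHNORM, REGION, YOLO, REORG, UPSAMPLE, ...), the SMOOTH cost, Adam
optimizer state (B1/B2/eps/t and the m/v moment buffers), recurrent-layer
state buffers, FP16 weight copies (weights_gpu16) and cuDNN descriptors.

The sketch below shows how the new function pointers are meant to be
dispatched by the driver loop. It is illustrative only: the function name
forward_network_sketch is made up, and the fields net.n, net.layers,
state.index, state.input and l.output come from the existing network,
network_state and layer definitions elsewhere in the tree, not from this
patch.

    /* illustrative sketch; assumes the surrounding darknet headers (network.h) */
    void forward_network_sketch(network net, network_state state)
    {
        int i;
        for (i = 0; i < net.n; ++i) {
            state.index = i;
            layer l = net.layers[i];
            l.forward(l, state);     /* per-layer callback added by this patch */
            state.input = l.output;  /* layer i's output feeds layer i+1;      */
        }                            /* GPU builds use l.forward_gpu instead   */
    }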
src/layer.h | 171 ++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 156 insertions(+), 15 deletions(-)
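The new update_args struct and the per-layer m/v (and m_gpu/v_gpu) buffers
hold everything a textbook Adam step needs. For reference, that step looks
roughly like the sketch below; the helper name adam_step_sketch and its exact
form are illustrative, not code from this tree (a.t is assumed to be the
1-based step counter).

    #include <math.h>

    /* w: weights, g: loss gradient, m/v: the per-layer moment buffers,
     * a: the hyper-parameters collected in update_args */
    static void adam_step_sketch(float *w, const float *g, float *m, float *v,
                                 int n, update_args a)
    {
        int i;
        for (i = 0; i < n; ++i) {
            m[i] = a.B1 * m[i] + (1.f - a.B1) * g[i];         /* 1st moment */
            v[i] = a.B2 * v[i] + (1.f - a.B2) * g[i] * g[i];  /* 2nd moment */
            float mhat = m[i] / (1.f - powf(a.B1, (float)a.t));
            float vhat = v[i] / (1.f - powf(a.B2, (float)a.t));
            w[i] -= a.learning_rate * mhat / (sqrtf(vhat) + a.eps);
        }
    }

The cuDNN descriptor and algorithm fields added at the end of the struct are
the ones a convolutional layer needs for cudnnConvolutionForward. A minimal
setup sketch for one layer l (cuDNN v7 API; the descriptors are assumed to
have already been created with cudnnCreate*Descriptor, and handle is an
existing cudnnHandle_t):

    cudnnSetTensor4dDescriptor(l.srcTensorDesc, CUDNN_TENSOR_NCHW,
                               CUDNN_DATA_FLOAT, l.batch, l.c, l.h, l.w);
    cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW,
                               CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
    cudnnSetFilter4dDescriptor(l.weightDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW,
                               l.n, l.c, l.size, l.size);
    cudnnSetConvolution2dDescriptor(l.convDesc, l.pad, l.pad, l.stride, l.stride,
                                    1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
    cudnnGetConvolutionForwardAlgorithm(handle, l.srcTensorDesc, l.weightDesc,
                                        l.convDesc, l.dstTensorDesc,
                                        CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
                                        0, &l.fw_algo);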
diff --git a/src/layer.h b/src/layer.h
index 1b12009..8a58c92 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -2,6 +2,10 @@
#define BASE_LAYER_H
#include "activations.h"
+#include "stddef.h"
+#include "tree.h"
+
+struct network_state;
struct layer;
typedef struct layer layer;
@@ -20,18 +24,50 @@
NORMALIZATION,
AVGPOOL,
LOCAL,
- SHORTCUT
+ SHORTCUT,
+ ACTIVE,
+ RNN,
+ GRU,
+ CRNN,
+ BATCHNORM,
+ NETWORK,
+ XNOR,
+ REGION,
+ YOLO,
+ REORG,
+ UPSAMPLE,
+ REORG_OLD,
+ BLANK
} LAYER_TYPE;
typedef enum{
- SSE, MASKED
+ SSE, MASKED, SMOOTH
} COST_TYPE;
+typedef struct {
+ int batch;
+ float learning_rate;
+ float momentum;
+ float decay;
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+ int t;
+} update_args;
+
struct layer{
LAYER_TYPE type;
ACTIVATION activation;
COST_TYPE cost_type;
+ void (*forward) (struct layer, struct network_state);
+ void (*backward) (struct layer, struct network_state);
+ void (*update) (struct layer, int, float, float, float);
+ void (*forward_gpu) (struct layer, struct network_state);
+ void (*backward_gpu) (struct layer, struct network_state);
+ void (*update_gpu) (struct layer, int, float, float, float);
int batch_normalize;
+ int shortcut;
int batch;
int forced;
int flipped;
@@ -41,21 +77,28 @@
int h,w,c;
int out_h, out_w, out_c;
int n;
+ int max_boxes;
int groups;
int size;
int side;
int stride;
+ int reverse;
int pad;
- int crop_width;
- int crop_height;
int sqrt;
int flip;
int index;
+ int binary;
+ int xnor;
+ int steps;
+ int hidden;
+ float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
+ float ratio;
+ int focal_loss;
int softmax;
int classes;
int coords;
@@ -65,6 +108,25 @@
int does_cost;
int joint;
int noadjust;
+ int reorg;
+ int log;
+ int tanh;
+ int *mask;
+ int total;
+ float bflops;
+
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+ float *m_gpu;
+ float *v_gpu;
+ int t;
+ float *m;
+ float *v;
+
+ tree *softmax_tree;
+ int *map;
float alpha;
float beta;
@@ -73,19 +135,40 @@
float coord_scale;
float object_scale;
float noobject_scale;
+ float mask_scale;
float class_scale;
+ int bias_match;
+ int random;
+ float ignore_thresh;
+ float truth_thresh;
+ float thresh;
+ float focus;
+ int classfix;
+ int absolute;
+ int onlyforward;
+ int stopbackward;
int dontload;
int dontloadscales;
+ float temperature;
float probability;
float scale;
int *indexes;
float *rand;
float *cost;
- float *filters;
- float *filter_updates;
+ char *cweights;
+ float *state;
+ float *prev_state;
+ float *forgot_state;
+ float *forgot_delta;
+ float *state_delta;
+
+ float *concat;
+ float *concat_delta;
+
+ float *binary_weights;
float *biases;
float *bias_updates;
@@ -108,16 +191,63 @@
float * mean;
float * variance;
+ float * mean_delta;
+ float * variance_delta;
+
float * rolling_mean;
float * rolling_variance;
- #ifdef GPU
- int *indexes_gpu;
- float * filters_gpu;
- float * filter_updates_gpu;
+ float * x;
+ float * x_norm;
- float * spatial_mean_gpu;
- float * spatial_variance_gpu;
+ struct layer *input_layer;
+ struct layer *self_layer;
+ struct layer *output_layer;
+
+ struct layer *input_gate_layer;
+ struct layer *state_gate_layer;
+ struct layer *input_save_layer;
+ struct layer *state_save_layer;
+ struct layer *input_state_layer;
+ struct layer *state_state_layer;
+
+ struct layer *input_z_layer;
+ struct layer *state_z_layer;
+
+ struct layer *input_r_layer;
+ struct layer *state_r_layer;
+
+ struct layer *input_h_layer;
+ struct layer *state_h_layer;
+
+ float *z_cpu;
+ float *r_cpu;
+ float *h_cpu;
+
+ float *binary_input;
+
+ size_t workspace_size;
+
+ #ifdef GPU
+ float *z_gpu;
+ float *r_gpu;
+ float *h_gpu;
+
+ int *indexes_gpu;
+ float * prev_state_gpu;
+ float * forgot_state_gpu;
+ float * forgot_delta_gpu;
+ float * state_gpu;
+ float * state_delta_gpu;
+ float * gate_gpu;
+ float * gate_delta_gpu;
+ float * save_gpu;
+ float * save_delta_gpu;
+ float * concat_gpu;
+ float * concat_delta_gpu;
+
+ float *binary_input_gpu;
+ float *binary_weights_gpu;
float * mean_gpu;
float * variance_gpu;
@@ -125,9 +255,6 @@
float * rolling_mean_gpu;
float * rolling_variance_gpu;
- float * spatial_mean_delta_gpu;
- float * spatial_variance_delta_gpu;
-
float * variance_delta_gpu;
float * mean_delta_gpu;
@@ -138,6 +265,9 @@
float * weights_gpu;
float * weight_updates_gpu;
+ float * weights_gpu16;
+ float * weight_updates_gpu16;
+
float * biases_gpu;
float * bias_updates_gpu;
@@ -149,6 +279,17 @@
float * rand_gpu;
float * squared_gpu;
float * norms_gpu;
+ #ifdef CUDNN
+ cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
+ cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
+ cudnnTensorDescriptor_t normTensorDesc, normDstTensorDesc, normDstTensorDescF16;
+ cudnnFilterDescriptor_t weightDesc;
+ cudnnFilterDescriptor_t dweightDesc;
+ cudnnConvolutionDescriptor_t convDesc;
+ cudnnConvolutionFwdAlgo_t fw_algo;
+ cudnnConvolutionBwdDataAlgo_t bd_algo;
+ cudnnConvolutionBwdFilterAlgo_t bf_algo;
+ #endif
#endif
};
--
Gitblit v1.10.0