From cd8a3dcb4ca42f22ad8f46a95e00977c92be6bbd Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Thu, 08 Feb 2018 23:22:42 +0000
Subject: [PATCH] Compile fixes
---
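Reviewer note, not part of the committed change: a minimal C sketch of how the
forward/backward function pointers added to struct layer below are presumably
dispatched by the network loop. The full definition of struct network_state and
the fields state.input and l.output are assumed to come from network.h in this
repository; they are not declared in layer.h itself.

    #include "layer.h"
    #include "network.h"   /* assumed to supply the full struct network_state */

    /* Run each layer's forward pass in order, feeding its output to the next
     * layer.  A sketch under the assumptions above; the network code in
     * network.c remains the authoritative version. */
    static void forward_layers_sketch(layer *layers, int n, struct network_state state)
    {
        int i;
        for(i = 0; i < n; ++i){
            layer l = layers[i];
            if(l.forward) l.forward(l, state);  /* e.g. forward_convolutional_layer */
            state.input = l.output;             /* assumed field from network.h */
        }
    }
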
src/layer.h | 203 +++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 191 insertions(+), 12 deletions(-)
diff --git a/src/layer.h b/src/layer.h
index a591f03..db012f1 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -2,6 +2,13 @@
#define BASE_LAYER_H
#include "activations.h"
+#include "stddef.h"
+#include "tree.h"
+
+struct network_state;
+
+struct layer;
+typedef struct layer layer;
typedef enum {
CONVOLUTIONAL,
@@ -13,33 +20,71 @@
DROPOUT,
CROP,
ROUTE,
- COST
+ COST,
+ NORMALIZATION,
+ AVGPOOL,
+ LOCAL,
+ SHORTCUT,
+ ACTIVE,
+ RNN,
+ GRU,
+ CRNN,
+ BATCHNORM,
+ NETWORK,
+ XNOR,
+ REGION,
+ REORG,
+ BLANK
} LAYER_TYPE;
typedef enum{
- SSE, MASKED
+ SSE, MASKED, SMOOTH
} COST_TYPE;
-typedef struct {
+struct layer{
LAYER_TYPE type;
ACTIVATION activation;
COST_TYPE cost_type;
+ void (*forward) (struct layer, struct network_state);
+ void (*backward) (struct layer, struct network_state);
+ void (*update) (struct layer, int, float, float, float);
+ void (*forward_gpu) (struct layer, struct network_state);
+ void (*backward_gpu) (struct layer, struct network_state);
+ void (*update_gpu) (struct layer, int, float, float, float);
+ int batch_normalize;
+ int shortcut;
int batch;
+ int forced;
+ int flipped;
int inputs;
int outputs;
+ int truths;
int h,w,c;
int out_h, out_w, out_c;
int n;
+ int max_boxes;
+ int small_object;
int groups;
int size;
+ int side;
int stride;
+ int reverse;
int pad;
- int crop_width;
- int crop_height;
+ int sqrt;
int flip;
+ int index;
+ int binary;
+ int xnor;
+ int steps;
+ int hidden;
+ float dot;
float angle;
+ float jitter;
float saturation;
float exposure;
+ float shift;
+ float ratio;
+ int softmax;
int classes;
int coords;
int background;
@@ -47,20 +92,67 @@
int objectness;
int does_cost;
int joint;
+ int noadjust;
+ int reorg;
+ int log;
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+ float *m_gpu;
+ float *v_gpu;
+ int t;
+ float *m;
+ float *v;
+
+ tree *softmax_tree;
+ int *map;
+
+ float alpha;
+ float beta;
+ float kappa;
+
+ float coord_scale;
+ float object_scale;
+ float noobject_scale;
+ float class_scale;
+ int bias_match;
+ int random;
+ float thresh;
+ int classfix;
+ int absolute;
+
+ int onlyforward;
+ int stopbackward;
int dontload;
+ int dontloadscales;
+ float temperature;
float probability;
float scale;
+
int *indexes;
float *rand;
float *cost;
- float *filters;
- float *filter_updates;
+ char *cweights;
+ float *state;
+ float *prev_state;
+ float *forgot_state;
+ float *forgot_delta;
+ float *state_delta;
+
+ float *concat;
+ float *concat_delta;
+
+ float *binary_weights;
float *biases;
float *bias_updates;
+ float *scales;
+ float *scale_updates;
+
float *weights;
float *weight_updates;
@@ -69,24 +161,111 @@
int * input_sizes;
float * delta;
float * output;
+ float * squared;
+ float * norms;
+
+ float * spatial_mean;
+ float * mean;
+ float * variance;
+
+ float * mean_delta;
+ float * variance_delta;
+
+ float * rolling_mean;
+ float * rolling_variance;
+
+ float * x;
+ float * x_norm;
+
+ struct layer *input_layer;
+ struct layer *self_layer;
+ struct layer *output_layer;
+
+ struct layer *input_gate_layer;
+ struct layer *state_gate_layer;
+ struct layer *input_save_layer;
+ struct layer *state_save_layer;
+ struct layer *input_state_layer;
+ struct layer *state_state_layer;
+
+ struct layer *input_z_layer;
+ struct layer *state_z_layer;
+
+ struct layer *input_r_layer;
+ struct layer *state_r_layer;
+
+ struct layer *input_h_layer;
+ struct layer *state_h_layer;
+
+ float *z_cpu;
+ float *r_cpu;
+ float *h_cpu;
+
+ float *binary_input;
+
+ size_t workspace_size;
#ifdef GPU
+ float *z_gpu;
+ float *r_gpu;
+ float *h_gpu;
+
int *indexes_gpu;
- float * filters_gpu;
- float * filter_updates_gpu;
+ float * prev_state_gpu;
+ float * forgot_state_gpu;
+ float * forgot_delta_gpu;
+ float * state_gpu;
+ float * state_delta_gpu;
+ float * gate_gpu;
+ float * gate_delta_gpu;
+ float * save_gpu;
+ float * save_delta_gpu;
+ float * concat_gpu;
+ float * concat_delta_gpu;
+
+ float *binary_input_gpu;
+ float *binary_weights_gpu;
+
+ float * mean_gpu;
+ float * variance_gpu;
+
+ float * rolling_mean_gpu;
+ float * rolling_variance_gpu;
+
+ float * variance_delta_gpu;
+ float * mean_delta_gpu;
float * col_image_gpu;
+ float * x_gpu;
+ float * x_norm_gpu;
float * weights_gpu;
- float * biases_gpu;
-
float * weight_updates_gpu;
+
+ float * biases_gpu;
float * bias_updates_gpu;
+ float * scales_gpu;
+ float * scale_updates_gpu;
+
float * output_gpu;
float * delta_gpu;
float * rand_gpu;
+ float * squared_gpu;
+ float * norms_gpu;
+ #ifdef CUDNN
+ cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
+ cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
+ cudnnFilterDescriptor_t weightDesc;
+ cudnnFilterDescriptor_t dweightDesc;
+ cudnnConvolutionDescriptor_t convDesc;
+ cudnnConvolutionFwdAlgo_t fw_algo;
+ cudnnConvolutionBwdDataAlgo_t bd_algo;
+ cudnnConvolutionBwdFilterAlgo_t bf_algo;
#endif
-} layer;
+ #endif
+};
+
+void free_layer(layer);
#endif
--
Gitblit v1.10.0