From c6ecf1e0420737eafeb99b27b1d716b46a6cbb7a Mon Sep 17 00:00:00 2001
From: Jud White <github@judsonwhite.com>
Date: Sun, 25 Mar 2018 20:41:48 +0000
Subject: [PATCH] src/layer.h: add per-layer forward/backward/update callbacks, rename filters to weights
---
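Notes (kept out of the commit message): this patch gives struct layer its own
forward/backward/update function pointers (CPU and GPU variants), declares the
update_args hyper-parameter struct, adds the REORG_OLD layer type plus several
region/Adam-related fields, and renames the filter* members to weight*
(cfilters -> cweights, binary_filters -> binary_weights, and the cuDNN
filterDesc/dfilterDesc descriptors to weightDesc/dweightDesc). update_args is
only declared here; the update callbacks in this patch still take the scalar
batch/learning_rate/momentum/decay arguments.

A minimal sketch of how a caller might dispatch through the new per-layer
callbacks follows. The network struct, its layers/n members, state.input, and
l.output are assumptions drawn from the surrounding codebase and are not part
of this patch:

    #include "layer.h"
    #include "network.h"   /* assumed to define network and struct network_state */

    /* Sketch: run each layer's registered forward callback in order. */
    static void forward_all_layers(network net, struct network_state state)
    {
        int i;
        for (i = 0; i < net.n; ++i) {    /* assumed: net.n = number of layers */
            layer l = net.layers[i];     /* assumed: per-network layer array */
            l.forward(l, state);         /* callback added by this patch */
            state.input = l.output;      /* assumed: chain output to next input */
        }
    }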
src/layer.h | 64 +++++++++++++++++++++++++++----
1 file changed, 55 insertions(+), 9 deletions(-)
diff --git a/src/layer.h b/src/layer.h
index 10d64e5..3a0e03d 100644
--- a/src/layer.h
+++ b/src/layer.h
@@ -3,6 +3,9 @@
#include "activations.h"
#include "stddef.h"
+#include "tree.h"
+
+struct network_state;
struct layer;
typedef struct layer layer;
@@ -31,6 +34,7 @@
XNOR,
REGION,
REORG,
+ REORG_OLD,
BLANK
} LAYER_TYPE;
@@ -38,10 +42,28 @@
SSE, MASKED, SMOOTH
} COST_TYPE;
+typedef struct {
+ int batch;
+ float learning_rate;
+ float momentum;
+ float decay;
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+ int t;
+} update_args;
+
struct layer{
LAYER_TYPE type;
ACTIVATION activation;
COST_TYPE cost_type;
+ void (*forward) (struct layer, struct network_state);
+ void (*backward) (struct layer, struct network_state);
+ void (*update) (struct layer, int, float, float, float);
+ void (*forward_gpu) (struct layer, struct network_state);
+ void (*backward_gpu) (struct layer, struct network_state);
+ void (*update_gpu) (struct layer, int, float, float, float);
int batch_normalize;
int shortcut;
int batch;
@@ -54,10 +76,12 @@
int out_h, out_w, out_c;
int n;
int max_boxes;
+ int small_object;
int groups;
int size;
int side;
int stride;
+ int reverse;
int pad;
int sqrt;
int flip;
@@ -72,6 +96,8 @@
float saturation;
float exposure;
float shift;
+ float ratio;
+ int focal_loss;
int softmax;
int classes;
int coords;
@@ -82,6 +108,20 @@
int joint;
int noadjust;
int reorg;
+ int log;
+
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+ float *m_gpu;
+ float *v_gpu;
+ int t;
+ float *m;
+ float *v;
+
+ tree *softmax_tree;
+ int *map;
float alpha;
float beta;
@@ -91,8 +131,14 @@
float object_scale;
float noobject_scale;
float class_scale;
+ int bias_match;
int random;
+ float thresh;
+ int classfix;
+ int absolute;
+ int onlyforward;
+ int stopbackward;
int dontload;
int dontloadscales;
@@ -103,9 +149,7 @@
int *indexes;
float *rand;
float *cost;
- float *filters;
- char *cfilters;
- float *filter_updates;
+ char *cweights;
float *state;
float *prev_state;
float *forgot_state;
@@ -115,7 +159,7 @@
float *concat;
float *concat_delta;
- float *binary_filters;
+ float *binary_weights;
float *biases;
float *bias_updates;
@@ -192,11 +236,9 @@
float * save_delta_gpu;
float * concat_gpu;
float * concat_delta_gpu;
- float * filters_gpu;
- float * filter_updates_gpu;
float *binary_input_gpu;
- float *binary_filters_gpu;
+ float *binary_weights_gpu;
float * mean_gpu;
float * variance_gpu;
@@ -214,6 +256,9 @@
float * weights_gpu;
float * weight_updates_gpu;
+ float * weights_gpu16;
+ float * weight_updates_gpu16;
+
float * biases_gpu;
float * bias_updates_gpu;
@@ -228,8 +273,9 @@
#ifdef CUDNN
cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
- cudnnFilterDescriptor_t filterDesc;
- cudnnFilterDescriptor_t dfilterDesc;
+ cudnnTensorDescriptor_t normTensorDesc;
+ cudnnFilterDescriptor_t weightDesc;
+ cudnnFilterDescriptor_t dweightDesc;
cudnnConvolutionDescriptor_t convDesc;
cudnnConvolutionFwdAlgo_t fw_algo;
cudnnConvolutionBwdDataAlgo_t bd_algo;
--
Gitblit v1.10.0