From ae43c2bc32fbb838bfebeeaf2c2b058ccab5c83c Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@burninator.cs.washington.edu>
Date: Thu, 23 Jun 2016 05:31:14 +0000
Subject: [PATCH] Wire GRU and batchnorm layers into network.c; add LR burn-in and RANDOM policies

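Wire the new GRU and batch normalization layers into the network core:
include their headers, name them in get_layer_string(), and dispatch to
their forward, backward, and update functions. get_current_rate() gains
a burn-in ramp for the POLY policy and a new RANDOM policy. Both passes
now hand layers the shared net.workspace scratch buffer, GPU builds
return the network output from device memory, cuDNN convolution
descriptors are rebuilt when the batch size changes, and resizing the
network reallocates the workspace to the largest per-layer requirement.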
---
 src/network.c |   47 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+), 0 deletions(-)

diff --git a/src/network.c b/src/network.c
index e6fb51e..a9e5027 100644
--- a/src/network.c
+++ b/src/network.c
@@ -8,6 +8,7 @@
 
 #include "crop_layer.h"
 #include "connected_layer.h"
+#include "gru_layer.h"
 #include "rnn_layer.h"
 #include "crnn_layer.h"
 #include "local_layer.h"
@@ -16,6 +17,7 @@
 #include "deconvolutional_layer.h"
 #include "detection_layer.h"
 #include "normalization_layer.h"
+#include "batchnorm_layer.h"
 #include "maxpool_layer.h"
 #include "avgpool_layer.h"
 #include "cost_layer.h"
@@ -62,7 +64,12 @@
         case EXP:
             return net.learning_rate * pow(net.gamma, batch_num);
         case POLY:
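+            /* Burn-in: ramp the learning rate up from zero before the usual polynomial decay. */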
+            if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power);
             return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
+        case RANDOM:
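+            /* Draw a fresh uniform rate each call, shaped by net.power. */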
+            return net.learning_rate * pow(rand_uniform(0,1), net.power);
         case SIG:
             return net.learning_rate * (1./(1.+exp(net.gamma*(batch_num - net.step))));
         default:
@@ -86,6 +93,8 @@
             return "connected";
         case RNN:
             return "rnn";
+        case GRU:
+            return "gru";
         case CRNN:
             return "crnn";
         case MAXPOOL:
@@ -108,6 +117,8 @@
             return "shortcut";
         case NORMALIZATION:
             return "normalization";
+        case BATCHNORM:
+            return "batchnorm";
         default:
             break;
     }
@@ -129,6 +140,8 @@
 
 void forward_network(network net, network_state state)
 {
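+    /* Give every layer access to the network's shared scratch workspace for this pass. */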
+    state.workspace = net.workspace;
     int i;
     for(i = 0; i < net.n; ++i){
         state.index = i;
@@ -146,12 +159,16 @@
             forward_local_layer(l, state);
         } else if(l.type == NORMALIZATION){
             forward_normalization_layer(l, state);
+        } else if(l.type == BATCHNORM){
+            forward_batchnorm_layer(l, state);
         } else if(l.type == DETECTION){
             forward_detection_layer(l, state);
         } else if(l.type == CONNECTED){
             forward_connected_layer(l, state);
         } else if(l.type == RNN){
             forward_rnn_layer(l, state);
+        } else if(l.type == GRU){
+            forward_gru_layer(l, state);
         } else if(l.type == CRNN){
             forward_crnn_layer(l, state);
         } else if(l.type == CROP){
@@ -190,6 +207,8 @@
             update_connected_layer(l, update_batch, rate, net.momentum, net.decay);
         } else if(l.type == RNN){
             update_rnn_layer(l, update_batch, rate, net.momentum, net.decay);
+        } else if(l.type == GRU){
+            update_gru_layer(l, update_batch, rate, net.momentum, net.decay);
         } else if(l.type == CRNN){
             update_crnn_layer(l, update_batch, rate, net.momentum, net.decay);
         } else if(l.type == LOCAL){
@@ -200,6 +219,10 @@
 
 float *get_network_output(network net)
 {
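+    /* GPU builds fetch the output back from device memory instead. */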
+    #ifdef GPU
+        return get_network_output_gpu(net);
+    #endif
     int i;
     for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
     return net.layers[i].output;
@@ -235,6 +258,8 @@
     int i;
     float *original_input = state.input;
     float *original_delta = state.delta;
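+    /* The backward pass shares the same scratch workspace. */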
+    state.workspace = net.workspace;
     for(i = net.n-1; i >= 0; --i){
         state.index = i;
         if(i == 0){
@@ -254,6 +279,8 @@
             backward_activation_layer(l, state);
         } else if(l.type == NORMALIZATION){
             backward_normalization_layer(l, state);
+        } else if(l.type == BATCHNORM){
+            backward_batchnorm_layer(l, state);
         } else if(l.type == MAXPOOL){
             if(i != 0) backward_maxpool_layer(l, state);
         } else if(l.type == AVGPOOL){
@@ -268,6 +295,8 @@
             backward_connected_layer(l, state);
         } else if(l.type == RNN){
             backward_rnn_layer(l, state);
+        } else if(l.type == GRU){
+            backward_gru_layer(l, state);
         } else if(l.type == CRNN){
             backward_crnn_layer(l, state);
         } else if(l.type == LOCAL){
@@ -369,6 +398,12 @@
     int i;
     for(i = 0; i < net->n; ++i){
         net->layers[i].batch = b;
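+        /* cuDNN descriptors encode the batch size, so convolutional layers must be reconfigured. */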
+        #ifdef CUDNN
+        if(net->layers[i].type == CONVOLUTIONAL){
+            cudnn_convolutional_setup(net->layers + i);
+        }
+        #endif
     }
 }
 
@@ -379,6 +414,8 @@
     net->w = w;
     net->h = h;
     int inputs = 0;
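+    /* Track the largest workspace any layer requests at the new size. */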
+    size_t workspace_size = 0;
     //fprintf(stderr, "Resizing to %d x %d...", w, h);
     //fflush(stderr);
     for (i = 0; i < net->n; ++i){
@@ -398,12 +435,22 @@
         }else{
             error("Cannot resize this type of layer");
         }
+        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
         inputs = l.outputs;
         net->layers[i] = l;
         w = l.out_w;
         h = l.out_h;
         if(l.type == AVGPOOL) break;
     }
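+    /* Reallocate the shared workspace to fit the largest per-layer request;
+     * the byte count is rounded up to a whole number of floats on the GPU. */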
+#ifdef GPU
+    cuda_free(net->workspace);
+    net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
+#else
+    free(net->workspace);
+    net->workspace = calloc(1, workspace_size);
+#endif
     //fprintf(stderr, " Done!\n");
     return 0;
 }

--
Gitblit v1.10.0