From b36512ea2e7e6d2ec36a3241c1bf751e4e074fe1 Mon Sep 17 00:00:00 2001 From: vinjn <vinjn.z@gmail.com> Date: Sat, 26 May 2018 13:42:53 +0000 Subject: [PATCH] gru_layer.h - rename the copy-pasted RNN include guard and rnn_* prototypes to their GRU equivalents, drop the leftover "#define USET", and update make_gru_layer's signature to (batch, inputs, outputs, steps, batch_normalize). --- src/gru_layer.h | 23 +++++++++++------------ 1 files changed, 11 insertions(+), 12 deletions(-) diff --git a/src/gru_layer.h b/src/gru_layer.h index bb9478b..9e19cee 100644 --- a/src/gru_layer.h +++ b/src/gru_layer.h @@ -1,24 +1,23 @@ -#ifndef RNN_LAYER_H -#define RNN_LAYER_H +#ifndef GRU_LAYER_H +#define GRU_LAYER_H #include "activations.h" #include "layer.h" #include "network.h" -#define USET -layer make_rnn_layer(int batch, int inputs, int hidden, int outputs, int steps, ACTIVATION activation, int batch_normalize, int log); +layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize); -void forward_rnn_layer(layer l, network_state state); -void backward_rnn_layer(layer l, network_state state); -void update_rnn_layer(layer l, int batch, float learning_rate, float momentum, float decay); +void forward_gru_layer(layer l, network_state state); +void backward_gru_layer(layer l, network_state state); +void update_gru_layer(layer l, int batch, float learning_rate, float momentum, float decay); #ifdef GPU -void forward_rnn_layer_gpu(layer l, network_state state); -void backward_rnn_layer_gpu(layer l, network_state state); -void update_rnn_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay); -void push_rnn_layer(layer l); -void pull_rnn_layer(layer l); +void forward_gru_layer_gpu(layer l, network_state state); +void backward_gru_layer_gpu(layer l, network_state state); +void update_gru_layer_gpu(layer l, int batch, float learning_rate, float momentum, float decay); +void push_gru_layer(layer l); +void pull_gru_layer(layer l); #endif #endif -- Gitblit v1.10.0