From dbdd31ee211fe8b1ac7e93ceadf7b34b8d304f34 Mon Sep 17 00:00:00 2001
From: Roland Singer <roland.singer@desertbit.com>
Date: Wed, 22 Aug 2018 11:56:41 +0000
Subject: [PATCH] refactored softmax_layer to use the shared layer struct and network_state API
---
src/softmax_layer.h | 28 ++++++++++------------------
 1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/src/softmax_layer.h b/src/softmax_layer.h
index 3632c74..821a8dd 100644
--- a/src/softmax_layer.h
+++ b/src/softmax_layer.h
@@ -1,27 +1,19 @@
#ifndef SOFTMAX_LAYER_H
#define SOFTMAX_LAYER_H
+#include "layer.h"
+#include "network.h"
-typedef struct {
- int inputs;
- int batch;
- int groups;
- float *delta;
- float *output;
- #ifdef GPU
- float * delta_gpu;
- float * output_gpu;
- #endif
-} softmax_layer;
+typedef layer softmax_layer;
-void softmax_array(float *input, int n, float *output);
-softmax_layer *make_softmax_layer(int batch, int groups, int inputs);
-void forward_softmax_layer(const softmax_layer layer, float *input);
-void backward_softmax_layer(const softmax_layer layer, float *delta);
+void softmax_array(float *input, int n, float temp, float *output);
+softmax_layer make_softmax_layer(int batch, int inputs, int groups);
+void forward_softmax_layer(const softmax_layer l, network_state state);
+void backward_softmax_layer(const softmax_layer l, network_state state);
#ifdef GPU
-void pull_softmax_layer_output(const softmax_layer layer);
-void forward_softmax_layer_gpu(const softmax_layer layer, float *input);
-void backward_softmax_layer_gpu(const softmax_layer layer, float *delta);
+void pull_softmax_layer_output(const softmax_layer l);
+void forward_softmax_layer_gpu(const softmax_layer l, network_state state);
+void backward_softmax_layer_gpu(const softmax_layer l, network_state state);
#endif
#endif
--
Gitblit v1.10.0