From 032acd408acd119a5bd5c132765fcdd6caedea6b Mon Sep 17 00:00:00 2001
From: AlexeyAB <alexeyab84@gmail.com>
Date: Tue, 22 May 2018 13:38:04 +0000
Subject: [PATCH] Experimental reinforcement learning.
---
src/yolo_layer.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/yolo_layer.c b/src/yolo_layer.c
index ad62426..a03c18b 100644
--- a/src/yolo_layer.c
+++ b/src/yolo_layer.c
@@ -55,7 +55,7 @@
l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif
- fprintf(stderr, "detection\n");
+ fprintf(stderr, "yolo\n");
srand(0);
return l;
@@ -112,7 +112,7 @@
void delta_yolo_class(float *output, float *delta, int index, int class_id, int classes, int stride, float *avg_cat, int focal_loss)
{
int n;
- if (delta[index]){
+ if (delta[index + stride*class_id]){
delta[index + stride*class_id] = 1 - output[index + stride*class_id];
if(avg_cat) *avg_cat += output[index + stride*class_id];
return;
@@ -121,12 +121,13 @@
if (focal_loss) {
// Focal Loss
float alpha = 0.5; // 0.25 or 0.5
- //float gamma = 2; // hardcoded in many places of the grad-formula
+ //float gamma = 2; // hardcoded in many places of the grad-formula
int ti = index + stride*class_id;
float pt = output[ti] + 0.000000000000001F;
- //float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832
- float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // https://github.com/unsky/focal-loss
+ // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
+ float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832
+ //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // https://github.com/unsky/focal-loss
for (n = 0; n < classes; ++n) {
delta[index + stride*n] = (((n == class_id) ? 1 : 0) - output[index + stride*n]);
--
Gitblit v1.10.0
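
Note (appended after the patch, not part of it): the sign change to grad in the last hunk can be sanity-checked numerically. The sketch below is an illustration only, under two assumptions the truncated hunk does not show: gamma = 2 (as the commented-out line suggests), and that delta_yolo_class then scales the plain (target - output) delta of the target class by alpha*grad, as in the surrounding darknet code. With grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1), that scaled delta agrees with the negative gradient of the focal loss -alpha*(1 - p)^2*log(p) taken with respect to the logit, i.e. the same delta = -dL/dx convention used by the plain logistic delta. All helper names below are hypothetical.

/* Standalone check; compile with: gcc check_focal.c -lm */
#include <math.h>
#include <stdio.h>

static float sigmoid(float x) { return 1.f / (1.f + expf(-x)); }

/* Positive-class focal loss with gamma = 2: FL(p) = -alpha * (1 - p)^2 * log(p) */
static float focal_loss(float x, float alpha)
{
    float p = sigmoid(x) + 0.000000000000001F;
    return -alpha * (1 - p) * (1 - p) * logf(p);
}

int main(void)
{
    float alpha = 0.5f;                       /* same alpha as in the patch */
    for (float x = -2.f; x <= 2.f; x += 0.5f) {
        float pt = sigmoid(x) + 0.000000000000001F;

        /* factor introduced by the patch, applied to the plain logistic
           delta (1 - pt) of the target class */
        float grad  = -(1 - pt) * (2 * pt * logf(pt) + pt - 1);
        float delta = (1 - pt) * alpha * grad;

        /* negative gradient of the focal loss w.r.t. the logit,
           estimated by central differences */
        float h   = 1e-3f;
        float num = -(focal_loss(x + h, alpha) - focal_loss(x - h, alpha)) / (2 * h);

        printf("x=% .1f  delta=% .6f  -dFL/dx=% .6f\n", x, delta, num);
    }
    return 0;
}

The two printed columns should match up to finite-difference error; with the previously active formula (no leading minus on grad) the analytic column comes out with the opposite sign, i.e. a delta that ascends rather than descends the focal loss.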