From 1b5afb45838e603fa6780762eb8cc59246dc2d81 Mon Sep 17 00:00:00 2001
From: IlyaOvodov <b@ovdv.ru>
Date: Tue, 08 May 2018 11:09:35 +0000
Subject: [PATCH] Output improvements for detector results

When printing detector results, detections were written in arbitrary
order, which made the output hard to interpret. Now:

1. Text output includes the coordinates of each rect (left, right,
   top, bottom, in pixels) along with its label and score.
2. Text output is sorted by the rects' left edges, to simplify finding
   the corresponding rects on the image.
3. If several class probabilities exceed the threshold for one
   detection, the most probable class is written first, and the
   coordinates are not repeated for the others.
4. Rects are drawn on the image in order of their best class
   probability, so the most probable rects are always on top and never
   overlaid by less probable ones (a sketch of this ordering follows
   the "---" separator below).
5. The most probable label for each rect is always written first.

Also:

6. The low-GPU-memory message now includes the required amount.
---
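Note: the diff below touches only src/activation_kernels.cu; the
detector output changes described above are not shown in this hunk. As
an illustration of item 4, here is a minimal sketch of drawing rects in
ascending order of best class probability. The det_t struct, its field
names, and the printf placeholder are assumptions for illustration, not
code from this patch:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical detection record; the real detector struct differs. */
    typedef struct {
        float left, right, top, bottom;  /* rect coordinates in pixels */
        float best_prob;                 /* highest class probability  */
    } det_t;

    static int by_best_prob(const void *a, const void *b)
    {
        float pa = ((const det_t *)a)->best_prob;
        float pb = ((const det_t *)b)->best_prob;
        return (pa > pb) - (pa < pb);    /* ascending: weakest first */
    }

    void draw_detections_sorted(det_t *dets, int n)
    {
        /* Weakest rects are drawn first and the most probable last,
         * so the best detections end up on top, never overlaid. */
        qsort(dets, n, sizeof(det_t), by_best_prob);
        for (int i = 0; i < n; ++i)
            printf("draw rect: l=%.0f r=%.0f t=%.0f b=%.0f p=%.2f\n",
                   dets[i].left, dets[i].right, dets[i].top,
                   dets[i].bottom, dets[i].best_prob);
    }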
src/activation_kernels.cu | 37 +++++++++++++++++++++++++++++++++++--
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/src/activation_kernels.cu b/src/activation_kernels.cu
index 3dc3af0..d5f25a0 100644
--- a/src/activation_kernels.cu
+++ b/src/activation_kernels.cu
@@ -7,12 +7,31 @@
#include "cuda.h"
}
+
+__device__ float lhtan_activate_kernel(float x)
+{
+ if(x < 0) return .001*x;
+ if(x > 1) return .001*(x-1) + 1;
+ return x;
+}
+__device__ float lhtan_gradient_kernel(float x)
+{
+ if(x > 0 && x < 1) return 1;
+ return .001;
+}
+
+__device__ float hardtan_activate_kernel(float x)
+{
+ if (x < -1) return -1;
+ if (x > 1) return 1;
+ return x;
+}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1./(1. + exp(-x));}
__device__ float loggy_activate_kernel(float x){return 2./(1. + exp(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
-__device__ float relie_activate_kernel(float x){return x*(x>0);}
+__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + exp(-2*x)) - 1);}
@@ -29,6 +48,12 @@
else return (x - n) + floor(x/2.);
}
+
+__device__ float hardtan_gradient_kernel(float x)
+{
+ if (x > -1 && x < 1) return 1;
+ return 0;
+}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
@@ -74,6 +99,10 @@
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
+ case HARDTAN:
+ return hardtan_activate_kernel(x);
+ case LHTAN:
+ return lhtan_activate_kernel(x);
}
return 0;
}
@@ -103,6 +132,10 @@
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
+ case HARDTAN:
+ return hardtan_gradient_kernel(x);
+ case LHTAN:
+ return lhtan_gradient_kernel(x);
}
return 0;
}
@@ -121,7 +154,7 @@
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
- activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a);
+ activate_array_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>(x, n, a);
check_error(cudaPeekAtLastError());
}
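
Note on the launch change above: passing a stream as the fourth launch
parameter lets this kernel be ordered with the rest of the network's
GPU work instead of running on the default stream. get_cuda_stream() is
presumably a shared stream accessor elsewhere in the tree (an
assumption; it is not shown in this patch). A standalone sketch of the
same pattern using only standard CUDA runtime calls:

    #include <cuda_runtime.h>

    __global__ void scale_kernel(float *x, int n, float a)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < n) x[i] *= a;            /* placeholder work */
    }

    int main(void)
    {
        const int n = 1 << 20;
        float *x;
        cudaMalloc(&x, n * sizeof(float));

        cudaStream_t stream;
        cudaStreamCreate(&stream);       /* stands in for get_cuda_stream() */

        /* third launch parameter is shared-memory bytes, fourth the stream */
        scale_kernel<<<(n + 255) / 256, 256, 0, stream>>>(x, n, 2.f);

        cudaStreamSynchronize(stream);   /* wait only for this stream */
        cudaStreamDestroy(stream);
        cudaFree(x);
        return 0;
    }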
--
Gitblit v1.10.0
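
For reference, host-side mirrors of the new hardtan/lhtan device
kernels added above, with a small self-check (the mirrors follow the
device code in the hunks; the test values are illustrative):

    #include <assert.h>

    /* hardtan clamps to [-1, 1]; identity in between. */
    static float hardtan(float x) { return x < -1 ? -1 : (x > 1 ? 1 : x); }

    /* lhtan is identity on [0, 1] with a small .001 slope outside,
     * so its gradient never vanishes entirely. */
    static float lhtan(float x)
    {
        if (x < 0) return .001f * x;
        if (x > 1) return .001f * (x - 1) + 1;
        return x;
    }

    int main(void)
    {
        assert(hardtan(-2.f) == -1.f && hardtan(.5f) == .5f && hardtan(3.f) == 1.f);
        assert(lhtan(.5f) == .5f);                      /* identity inside [0,1] */
        assert(lhtan(2.f) > 1.f && lhtan(2.f) < 1.01f); /* small slope outside  */
        return 0;
    }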