From 1b5afb45838e603fa6780762eb8cc59246dc2d81 Mon Sep 17 00:00:00 2001
From: IlyaOvodov <b@ovdv.ru>
Date: Tue, 08 May 2018 11:09:35 +0000
Subject: [PATCH] Output improvements for detector results

When printing detector results, detections were output in arbitrary order,
making the results hard to interpret. Now:

1. Text output includes the rect coordinates (left, right, top, bottom, in
   pixels) along with each label and score.
2. Text output is sorted by the rects' left coordinate to make it easier to
   find the corresponding rects on the image.
3. If several class probabilities exceed the threshold for one detection, the
   most probable class is written first and the coordinates are not repeated
   for the other classes.
4. Rects are drawn on the image ordered by their best class probability, so
   the most probable rects are always on top and are not overlaid by less
   probable ones.
5. The most probable label for a rect is always written first.

Also:

6. The low-GPU-memory message now includes the required amount.
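For reference, a minimal sketch of the sorting idea behind points 2 and 4:
detections can be ordered with qsort before printing (by left border) or
drawing (by best class probability, ascending, so the highest-scoring rect is
painted last and stays on top). The "det_t" struct and helper names below are
illustrative only, not the actual darknet types touched by this patch.

    /* Illustrative sketch; "det_t" and its fields are hypothetical. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        float left, right, top, bottom;  /* box borders in pixels */
        int best_class;                  /* index of most probable class */
        float best_prob;                 /* probability of that class */
    } det_t;

    /* Order by left border, as in point 2 (text output). */
    static int cmp_left(const void *a, const void *b) {
        float d = ((const det_t *)a)->left - ((const det_t *)b)->left;
        return (d > 0) - (d < 0);
    }

    /* Order by best class probability, ascending, as in point 4
       (drawing order), so the most probable rects are drawn last. */
    static int cmp_prob(const void *a, const void *b) {
        float d = ((const det_t *)a)->best_prob - ((const det_t *)b)->best_prob;
        return (d > 0) - (d < 0);
    }

    void print_sorted(det_t *dets, int n, char **names) {
        qsort(dets, n, sizeof(det_t), cmp_left);
        for (int i = 0; i < n; ++i)
            printf("%s: %.0f%% (%.0f, %.0f, %.0f, %.0f)\n",
                   names[dets[i].best_class], dets[i].best_prob * 100,
                   dets[i].left, dets[i].right, dets[i].top, dets[i].bottom);
    }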
---
src/deconvolutional_kernels.cu | 47 ++++++++++++++++++++++++++---------------------
1 file changed, 26 insertions(+), 21 deletions(-)
diff --git a/src/deconvolutional_kernels.cu b/src/deconvolutional_kernels.cu
index 1d05a80..d6259fb 100644
--- a/src/deconvolutional_kernels.cu
+++ b/src/deconvolutional_kernels.cu
@@ -1,3 +1,7 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
@@ -9,7 +13,7 @@
#include "cuda.h"
}
-extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, float *in)
+extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
@@ -20,21 +24,22 @@
int n = layer.h*layer.w;
int k = layer.c;
- bias_output_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
+ fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
- float *a = layer.filters_gpu;
- float *b = in + i*layer.c*layer.h*layer.w;
+ float *a = layer.weights_gpu;
+ float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
+ add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
-extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, float *in, float *delta_gpu)
+extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
@@ -45,29 +50,29 @@
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
- if(delta_gpu) memset(delta_gpu, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
+ if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
- float *a = in + i*m*n;
+ float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
- float *c = layer.filter_updates_gpu;
+ float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
- if(delta_gpu){
+ if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
- float *a = layer.filters_gpu;
+ float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
- float *c = delta_gpu + i*n*m;
+ float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
@@ -76,29 +81,29 @@
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
- cuda_pull_array(layer.filters_gpu, layer.filters, layer.c*layer.n*layer.size*layer.size);
+ cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
- cuda_pull_array(layer.filter_updates_gpu, layer.filter_updates, layer.c*layer.n*layer.size*layer.size);
+ cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
- cuda_push_array(layer.filters_gpu, layer.filters, layer.c*layer.n*layer.size*layer.size);
+ cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
- cuda_push_array(layer.filter_updates_gpu, layer.filter_updates, layer.c*layer.n*layer.size*layer.size);
+ cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
-extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer)
+extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
- axpy_ongpu(layer.n, layer.learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
- scal_ongpu(layer.n,layer.momentum, layer.bias_updates_gpu, 1);
+ axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
+ scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
- axpy_ongpu(size, -layer.decay, layer.filters_gpu, 1, layer.filter_updates_gpu, 1);
- axpy_ongpu(size, layer.learning_rate, layer.filter_updates_gpu, 1, layer.filters_gpu, 1);
- scal_ongpu(size, layer.momentum, layer.filter_updates_gpu, 1);
+ axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
+ axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
+ scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
--
Gitblit v1.10.0