From c2738835f0a2435ab03f411af3d168aec389d2a6 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Tue, 08 Dec 2015 01:18:04 +0000
Subject: [PATCH] Faster batch normalization
---
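Notes:
    This replaces the two-pass reductions (a per-(batch,filter)
    spatial_* kernel followed by accumulate_kernel, writing through an
    intermediate spatial_* buffer) with single fast_* kernels launched
    one block per filter: BLOCK threads stride across that filter's
    batch*spatial elements, accumulate partial sums in shared memory,
    and thread 0 finishes the reduction. The intermediate buffers drop
    out of the extern "C" entry points, so each call site loses one
    argument:

        before: fast_mean_gpu(x, batch, filters, spatial, spatial_mean, mean);
        after:  fast_mean_gpu(x, batch, filters, spatial, mean);

    Matching caller updates (presumably in the batch-normalization
    layer) are outside this file and not part of this diff.
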
src/blas_kernels.cu | 205 +++++++++++++++++++++++++++++++-------------------
1 file changed, 127 insertions(+), 78 deletions(-)
diff --git a/src/blas_kernels.cu b/src/blas_kernels.cu
index 4da31d1..17955e4 100644
--- a/src/blas_kernels.cu
+++ b/src/blas_kernels.cu
@@ -48,28 +48,6 @@
variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.));
}
-__global__ void spatial_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *spatial_variance_delta)
-{
- int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
- if (i >= batch*filters) return;
- int f = i%filters;
- int b = i/filters;
-
- int k;
- spatial_variance_delta[i] = 0;
- for (k = 0; k < spatial; ++k) {
- int index = b*filters*spatial + f*spatial + k;
- spatial_variance_delta[i] += delta[index]*(x[index] - mean[f]);
- }
- spatial_variance_delta[i] *= -.5 * pow(variance[f] + .00001f, (float)(-3./2.));
-}
-
-extern "C" void variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
-{
- variance_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
- check_error(cudaPeekAtLastError());
-}
-
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
int k;
@@ -81,38 +59,62 @@
}
}
-extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *spatial_variance_delta, float *variance_delta)
+__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
- spatial_variance_delta_kernel<<<cuda_gridsize(filters*batch), BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, spatial_variance_delta);
- check_error(cudaPeekAtLastError());
- accumulate_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_variance_delta, batch, filters, variance_delta);
- check_error(cudaPeekAtLastError());
-}
+ const int threads = BLOCK;
+ __shared__ float local[threads];
-__global__ void spatial_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *spatial_mean_delta)
-{
- int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
- if (i >= batch*filters) return;
- int f = i%filters;
- int b = i/filters;
+ int id = threadIdx.x;
+ local[id] = 0;
- int k;
- spatial_mean_delta[i] = 0;
- for (k = 0; k < spatial; ++k) {
- int index = b*filters*spatial + f*spatial + k;
- spatial_mean_delta[i] += delta[index];
+ int filter = blockIdx.x;
+
+ int i, j;
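+    // One block per filter: each thread strides over the spatial extent
+    // of every image in the batch, accumulating a partial sum of delta
+    // in shared memory.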
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+ local[id] += (i+id < spatial) ? delta[index] : 0;
+ }
}
- spatial_mean_delta[i] *= (-1./sqrt(variance[f] + .00001f));
+
+    // Make every thread's partial sum visible before thread 0 reduces.
+    __syncthreads();
+
+ if(id == 0){
+ mean_delta[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ mean_delta[filter] += local[i];
+ }
+ mean_delta[filter] *= (-1./sqrt(variance[filter] + .00001f));
+ }
}
-extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *spatial_mean_delta, float *mean_delta)
+__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
- spatial_mean_delta_kernel<<<cuda_gridsize(filters*batch), BLOCK>>>(delta, variance, batch, filters, spatial, spatial_mean_delta);
- check_error(cudaPeekAtLastError());
- accumulate_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_mean_delta, batch, filters, mean_delta);
- check_error(cudaPeekAtLastError());
+ const int threads = BLOCK;
+ __shared__ float local[threads];
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
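+    // Same per-filter block reduction: partial sums of delta*(x - mean).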
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+ local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
+ }
+ }
+
+    __syncthreads();
+
+ if(id == 0){
+ variance_delta[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ variance_delta[filter] += local[i];
+ }
+ variance_delta[filter] *= -.5 * pow(variance[filter] + .00001f, (float)(-3./2.));
+ }
}
+
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
@@ -134,6 +136,18 @@
check_error(cudaPeekAtLastError());
}
+extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
+{
+ fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
+{
+ fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
+ check_error(cudaPeekAtLastError());
+}
+
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
float scale = 1./(batch * spatial);
@@ -150,23 +164,6 @@
mean[i] *= scale;
}
-__global__ void spatial_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
-{
- float scale = 1./(spatial*batch-1);
- int k;
- int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
- if (i >= batch*filters) return;
- int f = i%filters;
- int b = i/filters;
-
- variance[i] = 0;
- for(k = 0; k < spatial; ++k){
- int index = b*filters*spatial + f*spatial + k;
- variance[i] += pow((x[index] - mean[f]), 2);
- }
- variance[i] *= scale;
-}
-
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
float scale = 1./(batch * spatial);
@@ -238,28 +235,80 @@
check_error(cudaPeekAtLastError());
}
+__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
+{
+ const int threads = BLOCK;
+ __shared__ float local[threads];
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
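+    // Per-filter block reduction of x; thread 0 divides the total by
+    // batch*spatial below.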
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+ local[id] += (i+id < spatial) ? x[index] : 0;
+ }
+ }
+
+    __syncthreads();
+
+ if(id == 0){
+ mean[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ mean[filter] += local[i];
+ }
+ mean[filter] /= spatial * batch;
+ }
+}
+
+__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ const int threads = BLOCK;
+ __shared__ float local[threads];
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
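+    // Per-filter block reduction of squared deviations from the mean.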
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+ local[id] += (i+id < spatial) ? pow((x[index] - mean[filter]), 2) : 0;
+ }
+ }
+
+    __syncthreads();
+
+ if(id == 0){
+ variance[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ variance[filter] += local[i];
+ }
+ variance[filter] /= spatial * batch;
+ }
+}
+
+extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
+{
+ fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
+ check_error(cudaPeekAtLastError());
+}
+
extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
check_error(cudaPeekAtLastError());
}
-extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *spatial_mean, float *mean)
-{
- mean_kernel<<<cuda_gridsize(filters*batch), BLOCK>>>(x, 1, filters*batch, spatial, spatial_mean);
- check_error(cudaPeekAtLastError());
- mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_mean, batch, filters, 1, mean);
- check_error(cudaPeekAtLastError());
-}
-
-extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *spatial_variance, float *variance)
-{
- spatial_variance_kernel<<<cuda_gridsize(batch*filters), BLOCK>>>(x, mean, batch, filters, spatial, spatial_variance);
- check_error(cudaPeekAtLastError());
- accumulate_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_variance, batch, filters, variance);
- check_error(cudaPeekAtLastError());
-}
-
extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
--
Gitblit v1.10.0