From c7b10ceadb1a78e7480d281444a31ae2a7dc1b05 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Fri, 06 May 2016 23:25:16 +0000
Subject: [PATCH] so much need to commit
---
src/blas_kernels.cu | 284 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 262 insertions(+), 22 deletions(-)
diff --git a/src/blas_kernels.cu b/src/blas_kernels.cu
index 8f05eb9..ac537d8 100644
--- a/src/blas_kernels.cu
+++ b/src/blas_kernels.cu
@@ -1,6 +1,7 @@
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
+#include <assert.h>
extern "C" {
#include "blas.h"
@@ -8,13 +9,144 @@
#include "utils.h"
}
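+// Scale every spatial element of each feature map by its per-filter factor.
+// Output is laid out as [batch][filter][spatial]; one thread handles one element,
+// with the filter and batch indices taken from the grid's y and z dimensions.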
+__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
+{
+ int offset = blockIdx.x * blockDim.x + threadIdx.x;
+ int filter = blockIdx.y;
+ int batch = blockIdx.z;
+
+ if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
+}
+
+void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
+{
+ dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
+ dim3 dimBlock(BLOCK, 1, 1);
+
+ scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
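+// Gradient of the per-filter scales: one block per filter. Each thread strides
+// across the spatial dimension accumulating delta * x_norm, the partial sums go
+// into shared memory, and thread 0 reduces them into scale_updates.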
+__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ __shared__ float part[BLOCK];
+ int i,b;
+ int filter = blockIdx.x;
+ int p = threadIdx.x;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; i += BLOCK){
+ int index = p + i + size*(filter + n*b);
+ sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
+ }
+ }
+ part[p] = sum;
+ __syncthreads();
+ if (p == 0) {
+ for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
+ }
+}
+
+void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
+ check_error(cudaPeekAtLastError());
+}
+
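+// Add the per-filter bias to every spatial element of each feature map.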
+__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
+{
+ int offset = blockIdx.x * blockDim.x + threadIdx.x;
+ int filter = blockIdx.y;
+ int batch = blockIdx.z;
+
+ if(offset < size) output[(batch*n+filter)*size + offset] += biases[filter];
+}
+
+void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
+{
+ dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
+ dim3 dimBlock(BLOCK, 1, 1);
+
+ add_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
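+// Gradient of the biases: same shared-memory reduction as backward_scale_kernel,
+// but summing delta alone into bias_updates.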
+__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ __shared__ float part[BLOCK];
+ int i,b;
+ int filter = blockIdx.x;
+ int p = threadIdx.x;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; i += BLOCK){
+ int index = p + i + size*(filter + n*b);
+ sum += (p+i < size) ? delta[index] : 0;
+ }
+ }
+ part[p] = sum;
+ __syncthreads();
+ if (p == 0) {
+ for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
+ }
+}
+
+/*
+__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ int f1 = index / n;
+ int f2 = index % n;
+ if (f2 <= f1) return;
+
+ float sum = 0;
+ float norm1 = 0;
+ float norm2 = 0;
+ int b, i;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int i1 = b * size * n + f1 * size + i;
+ int i2 = b * size * n + f2 * size + i;
+ sum += output[i1] * output[i2];
+ norm1 += output[i1] * output[i1];
+ norm2 += output[i2] * output[i2];
+ }
+ }
+ norm1 = sqrt(norm1);
+ norm2 = sqrt(norm2);
+ float norm = norm1 * norm2;
+ sum = sum / norm;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int i1 = b * size * n + f1 * size + i;
+ int i2 = b * size * n + f2 * size + i;
+ delta[i1] += - scale * sum * output[i2] / norm;
+ delta[i2] += - scale * sum * output[i1] / norm;
+ }
+ }
+}
+
+void dot_error_gpu(layer l)
+{
+ dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
+ check_error(cudaPeekAtLastError());
+}
+*/
+
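+// Launch one block of BLOCK threads per filter.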
+void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
+
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
- x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .00001f);
+ x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
@@ -23,7 +155,7 @@
if (index >= N) return;
int f = (index/spatial)%filters;
- delta[index] = delta[index] * 1./(sqrt(variance[f]) + .00001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
+ delta[index] = delta[index] * 1./(sqrt(variance[f]) + .000001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
@@ -45,7 +177,7 @@
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
- variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.));
+ variance_delta[i] *= -.5 * pow(variance[i] + .000001f, (float)(-3./2.));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
@@ -82,7 +214,7 @@
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
- mean_delta[filter] *= (-1./sqrt(variance[filter] + .00001f));
+ mean_delta[filter] *= (-1./sqrt(variance[filter] + .000001f));
}
}
@@ -110,7 +242,7 @@
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
- variance_delta[filter] *= -.5 * pow(variance[filter] + .00001f, (float)(-3./2.));
+ variance_delta[filter] *= -.5 * pow(variance[filter] + .000001f, (float)(-3./2.));
}
}
@@ -127,7 +259,7 @@
mean_delta[i] += delta[index];
}
}
- mean_delta[i] *= (-1./sqrt(variance[i] + .00001f));
+ mean_delta[i] *= (-1./sqrt(variance[i] + .000001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
@@ -166,7 +298,7 @@
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
- float scale = 1./(batch * spatial);
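+ // divide by N-1 (Bessel's correction) for an unbiased variance estimate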
+ float scale = 1./(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
@@ -198,6 +330,12 @@
if(i < N) X[i*INCX] = ALPHA;
}
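+// Clamp each element of X to the range [-ALPHA, ALPHA].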
+__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] = min(ALPHA, max(-ALPHA, X[i*INCX]));
+}
+
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
@@ -287,7 +425,7 @@
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
- variance[filter] /= spatial * batch;
+ variance[filter] /= (spatial * batch - 1);
}
}
@@ -362,6 +500,13 @@
check_error(cudaPeekAtLastError());
}
+extern "C" void constrain_ongpu(int N, float ALPHA, float * X, int INCX)
+{
+ constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+
extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
@@ -374,26 +519,121 @@
check_error(cudaPeekAtLastError());
}
-__global__ void shortcut_kernel(int size, float *out, int w, int h, int c, int batch, int sample, float *add, int stride, int c2, int min_c)
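+// Accumulate the overlapping region of `add` (w1 x h1 x c1) into `out`
+// (w2 x h2 x c2): `stride` steps through the larger source, `sample` spreads
+// into the larger destination, so layers of mismatched size can still be summed.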
+__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (id >= size) return;
- int i = id % (w/sample);
- id /= (w/sample);
- int j = id % (h/sample);
- id /= (h/sample);
- int k = id % min_c;
- id /= min_c;
- int b = id;
- int out_index = i*sample + w*(j*sample + h*(k + c*b));
- int add_index = b*w*stride/sample*h*stride/sample*c2 + i*stride + w*stride/sample*(j*stride + h*stride/sample*k);
+ int i = id % minw;
+ id /= minw;
+ int j = id % minh;
+ id /= minh;
+ int k = id % minc;
+ id /= minc;
+ int b = id % batch;
+
+ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
+ int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
-extern "C" void shortcut_gpu(float *out, int w, int h, int c, int batch, int sample, float *add, int stride, int c2)
+extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
- int min_c = (c < c2) ? c : c2;
- int size = batch * w/sample * h/sample * min_c;
- shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, out, w, h, c, batch, sample, add, stride, c2, min_c);
+ int minw = (w1 < w2) ? w1 : w2;
+ int minh = (h1 < h2) ? h1 : h2;
+ int minc = (c1 < c2) ? c1 : c2;
+
+ int stride = w1/w2;
+ int sample = w2/w1;
+ assert(stride == h1/h2);
+ assert(sample == h2/h1);
+ if(stride < 1) stride = 1;
+ if(sample < 1) sample = 1;
+
+ int size = batch * minw * minh * minc;
+ shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
+ check_error(cudaPeekAtLastError());
+}
+
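+// Smooth-L1 (Huber-style) loss: quadratic for |truth - pred| < 1, linear
+// otherwise. Writes the per-element error and the gradient into delta.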
+__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+ float abs_val = fabsf(diff);
+ if(abs_val < 1) {
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+ else {
+ error[i] = 2*abs_val - 1;
+ delta[i] = (diff < 0) ? -1 : 1;
+ }
+ }
+}
+
+extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
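+// Elementwise squared-error loss and its gradient.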
+__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+ error[i] = diff * diff; //I know this is technically wrong, deal with it.
+ delta[i] = diff;
+ }
+}
+
+extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+
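+// c = s*a + (1-s)*b elementwise; b may be NULL, in which case it is treated as 0.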
+__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
+ }
+}
+
+extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
+{
+ weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
+ check_error(cudaPeekAtLastError());
+}
+
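+// Backward pass of weighted_sum: given dc, accumulate gradients with respect
+// to a, b, and the mixing weight s (da may be NULL and is then skipped).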
+__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ if(da) da[i] += dc[i] * s[i];
+ db[i] += dc[i] * (1-s[i]);
+ ds[i] += dc[i] * a[i] + dc[i] * -b[i];
+ }
+}
+
+extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
+{
+ weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
+ check_error(cudaPeekAtLastError());
+}
+
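+// Elementwise multiply-accumulate: c += a*b.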
+__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ c[i] += a[i]*b[i];
+ }
+}
+
+extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
+{
+ mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
check_error(cudaPeekAtLastError());
}
--
Gitblit v1.10.0