From aebe937710ced03d03f73ab23f410f29685655c1 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Thu, 11 Aug 2016 18:54:24 +0000
Subject: [PATCH] what do you even write here?
---
src/blas_kernels.cu | 277 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 264 insertions(+), 13 deletions(-)
diff --git a/src/blas_kernels.cu b/src/blas_kernels.cu
index 61db29f..3f7f1f9 100644
--- a/src/blas_kernels.cu
+++ b/src/blas_kernels.cu
@@ -9,13 +9,144 @@
#include "utils.h"
}
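+// Multiply each spatial element of filter f by biases[f].
+// Grid layout: x = spatial blocks, y = filter, z = batch image.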
+__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
+{
+ int offset = blockIdx.x * blockDim.x + threadIdx.x;
+ int filter = blockIdx.y;
+ int batch = blockIdx.z;
+
+ if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
+}
+
+void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
+{
+ dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
+ dim3 dimBlock(BLOCK, 1, 1);
+
+ scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
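+// Gradient of the per-filter scale: one block per filter. Each thread strides
+// across the spatial positions of that filter accumulating delta * x_norm,
+// then thread 0 reduces the partial sums from shared memory into
+// scale_updates[filter].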
+__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ __shared__ float part[BLOCK];
+ int i,b;
+ int filter = blockIdx.x;
+ int p = threadIdx.x;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; i += BLOCK){
+ int index = p + i + size*(filter + n*b);
+ sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
+ }
+ }
+ part[p] = sum;
+ __syncthreads();
+ if (p == 0) {
+ for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
+ }
+}
+
+void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
+ check_error(cudaPeekAtLastError());
+}
+
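+// Add biases[f] to each spatial element of filter f; same grid layout as
+// scale_bias_kernel.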
+__global__ void add_bias_kernel(float *output, float *biases, int n, int size)
+{
+ int offset = blockIdx.x * blockDim.x + threadIdx.x;
+ int filter = blockIdx.y;
+ int batch = blockIdx.z;
+
+ if(offset < size) output[(batch*n+filter)*size + offset] += biases[filter];
+}
+
+void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
+{
+ dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
+ dim3 dimBlock(BLOCK, 1, 1);
+
+ add_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
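+// Gradient of the per-filter bias: same block-per-filter shared-memory
+// reduction as backward_scale_kernel, but summing delta alone.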
+__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ __shared__ float part[BLOCK];
+ int i,b;
+ int filter = blockIdx.x;
+ int p = threadIdx.x;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; i += BLOCK){
+ int index = p + i + size*(filter + n*b);
+ sum += (p+i < size) ? delta[index] : 0;
+ }
+ }
+ part[p] = sum;
+ __syncthreads();
+ if (p == 0) {
+ for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
+ }
+}
+
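+// Disabled experiment: appears to penalize the cosine similarity between each
+// pair of filter activations, pushing filters toward decorrelated outputs.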
+/*
+__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ int f1 = index / n;
+ int f2 = index % n;
+ if (f2 <= f1) return;
+
+ float sum = 0;
+ float norm1 = 0;
+ float norm2 = 0;
+ int b, i;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int i1 = b * size * n + f1 * size + i;
+ int i2 = b * size * n + f2 * size + i;
+ sum += output[i1] * output[i2];
+ norm1 += output[i1] * output[i1];
+ norm2 += output[i2] * output[i2];
+ }
+ }
+ norm1 = sqrt(norm1);
+ norm2 = sqrt(norm2);
+ float norm = norm1 * norm2;
+ sum = sum / norm;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int i1 = b * size * n + f1 * size + i;
+ int i2 = b * size * n + f2 * size + i;
+ delta[i1] += - scale * sum * output[i2] / norm;
+ delta[i2] += - scale * sum * output[i1] / norm;
+ }
+ }
+}
+
+void dot_error_gpu(layer l)
+{
+ dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
+ check_error(cudaPeekAtLastError());
+}
+*/
+
+void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (index >= N) return;
int f = (index/spatial)%filters;
- x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .00001f);
+ x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
@@ -24,7 +155,7 @@
if (index >= N) return;
int f = (index/spatial)%filters;
- delta[index] = delta[index] * 1./(sqrt(variance[f]) + .00001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
+ delta[index] = delta[index] * 1./(sqrt(variance[f]) + .000001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}
extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
@@ -46,7 +177,7 @@
variance_delta[i] += delta[index]*(x[index] - mean[i]);
}
}
- variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.));
+ variance_delta[i] *= -.5 * pow(variance[i] + .000001f, (float)(-3./2.));
}
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
@@ -83,7 +214,7 @@
for(i = 0; i < threads; ++i){
mean_delta[filter] += local[i];
}
- mean_delta[filter] *= (-1./sqrt(variance[filter] + .00001f));
+ mean_delta[filter] *= (-1./sqrt(variance[filter] + .000001f));
}
}
@@ -111,7 +242,7 @@
for(i = 0; i < threads; ++i){
variance_delta[filter] += local[i];
}
- variance_delta[filter] *= -.5 * pow(variance[filter] + .00001f, (float)(-3./2.));
+ variance_delta[filter] *= -.5 * pow(variance[filter] + .000001f, (float)(-3./2.));
}
}
@@ -128,7 +259,7 @@
mean_delta[i] += delta[index];
}
}
- mean_delta[i] *= (-1./sqrt(variance[i] + .00001f));
+ mean_delta[i] *= (-1./sqrt(variance[i] + .000001f));
}
extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
@@ -167,7 +298,7 @@
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
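+    // (n - 1) denominator: Bessel's correction for an unbiased sample variance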
- float scale = 1./(batch * spatial);
+ float scale = 1./(batch * spatial - 1);
int j,k;
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i >= filters) return;
@@ -181,6 +312,38 @@
variance[i] *= scale;
}
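+// Reorg (depth/space shuffle): decode the linear index into (b, c, h, w), then
+// map the channel index onto a stride x stride spatial offset, turning c
+// channels of w x h into c/(stride*stride) channels of (w*stride) x (h*stride).
+// forward scatters x into out; otherwise the same mapping gathers in reverse.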
+__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i >= N) return;
+ int in_index = i;
+ int in_w = i%w;
+ i = i/w;
+ int in_h = i%h;
+ i = i/h;
+ int in_c = i%c;
+ i = i/c;
+ int b = i%batch;
+
+ int out_c = c/(stride*stride);
+
+ int c2 = in_c % out_c;
+ int offset = in_c / out_c;
+ int w2 = in_w*stride + offset % stride;
+ int h2 = in_h*stride + offset / stride;
+ int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
+
+
+ if(forward) out[out_index] = x[in_index];
+ else out[in_index] = x[out_index];
+}
+
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
@@ -199,6 +362,12 @@
if(i < N) X[i*INCX] = ALPHA;
}
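+// Clamp every element of X to the range [-ALPHA, ALPHA].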
+__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] = min(ALPHA, max(-ALPHA, X[i*INCX]));
+}
+
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
@@ -288,7 +457,7 @@
for(i = 0; i < threads; ++i){
variance[filter] += local[i];
}
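+    // (n - 1) denominator here too, matching variance_kernel above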
- variance[filter] /= spatial * batch;
+ variance[filter] /= (spatial * batch - 1);
}
}
@@ -351,6 +520,13 @@
check_error(cudaPeekAtLastError());
}
+extern "C" void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
+{
+ int size = w*h*c*batch;
+ reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
+ check_error(cudaPeekAtLastError());
+}
+
extern "C" void mask_ongpu(int N, float * X, float mask_num, float * mask)
{
mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask);
@@ -363,6 +539,13 @@
check_error(cudaPeekAtLastError());
}
+extern "C" void constrain_ongpu(int N, float ALPHA, float * X, int INCX)
+{
+ constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
@@ -410,18 +593,86 @@
check_error(cudaPeekAtLastError());
}
-__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta)
+__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
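+    // Smooth L1: quadratic for |diff| < 1, linear beyond; now also records
+    // the per-element error alongside the gradient.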
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n){
float diff = truth[i] - pred[i];
- if(abs(diff) > 1) delta[i] = diff;
- else delta[i] = (diff > 0) ? 1 : -1;
+ float abs_val = abs(diff);
+ if(abs_val < 1) {
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+ else {
+ error[i] = 2*abs_val - 1;
+ delta[i] = (diff < 0) ? -1 : 1;
+ }
}
}
-extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta)
+extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
- smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta);
+ smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
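+// Per-element L2 loss: error = diff^2, delta = diff.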
+__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+        error[i] = diff * diff; // reports diff^2 while delta matches the gradient of (1/2)diff^2; off only by a constant factor
+ delta[i] = diff;
+ }
+}
+
+extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
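+// Elementwise convex combination: c = s*a + (1 - s)*b; b may be NULL and is
+// then treated as zero.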
+__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
+ }
+}
+
+extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
+{
+ weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
+ check_error(cudaPeekAtLastError());
+}
+
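+// Backward pass of weighted_sum: da += dc*s (da optional), db += dc*(1 - s),
+// ds += dc*(a - b).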
+__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ if(da) da[i] += dc[i] * s[i];
+ db[i] += dc[i] * (1-s[i]);
+        ds[i] += dc[i] * (a[i] - b[i]);
+ }
+}
+
+extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
+{
+ weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
+ check_error(cudaPeekAtLastError());
+}
+
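+// Elementwise multiply-accumulate: c += a * b.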
+__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ c[i] += a[i]*b[i];
+ }
+}
+
+extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
+{
+ mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
check_error(cudaPeekAtLastError());
}
--
Gitblit v1.10.0