#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include <assert.h>

extern "C" {
#include "blas.h"
#include "cuda.h"
#include "utils.h"
}

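// Batch normalization forward: normalize each element by its filter's
// statistics, x = (x - mean) / (sqrt(variance) + epsilon). One thread
// per element of x.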
__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= N) return;
    int f = (index/spatial)%filters;

    x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}

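// Batch normalization backward: fold the normalized gradient together with
// the variance-gradient and mean-gradient terms into delta. One thread per
// element.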
__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (index >= N) return;
    int f = (index/spatial)%filters;

    delta[index] = delta[index] * 1./(sqrt(variance[f]) + .000001f) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
}

extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
{
    size_t N = batch*filters*spatial;
    normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
    check_error(cudaPeekAtLastError());
}

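// Gradient of the loss with respect to the per-filter variance:
// dL/dvar = sum(delta * (x - mean)) * -1/2 * (var + eps)^(-3/2).
// One thread per filter; each thread reduces serially over batch and space.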
__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    int j,k;
    variance_delta[i] = 0;
    for(j = 0; j < batch; ++j){
        for(k = 0; k < spatial; ++k){
            int index = j*filters*spatial + i*spatial + k;
            variance_delta[i] += delta[index]*(x[index] - mean[i]);
        }
    }
    variance_delta[i] *= -.5 * pow(variance[i] + .000001f, (float)(-3./2.));
}

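// Sums n strided elements of x into sum, one thread per group.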
__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
    int k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= groups) return;
    sum[i] = 0;
    for(k = 0; k < n; ++k){
        sum[i] += x[k*groups + i];
    }
}

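// Block-parallel mean gradient: one block per filter, BLOCK threads striding
// over the spatial extent, partial sums reduced through shared memory.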
__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? delta[index] : 0;
        }
    }

    __syncthreads();    // make every thread's partial sum visible before thread 0 reduces

    if(id == 0){
        mean_delta[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean_delta[filter] += local[i];
        }
        mean_delta[filter] *= (-1./sqrt(variance[filter] + .000001f));
    }
}

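// Block-parallel variance gradient; same shared-memory reduction layout as
// fast_mean_delta_kernel.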
__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;

            local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
        }
    }

    __syncthreads();    // make every thread's partial sum visible before thread 0 reduces

    if(id == 0){
        variance_delta[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance_delta[filter] += local[i];
        }
        variance_delta[filter] *= -.5 * pow(variance[filter] + .000001f, (float)(-3./2.));
    }
}

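// Serial mean gradient (one thread per filter):
// dL/dmean = -sum(delta) / sqrt(variance + eps).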
__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    int j,k;
    mean_delta[i] = 0;
    for (j = 0; j < batch; ++j) {
        for (k = 0; k < spatial; ++k) {
            int index = j*filters*spatial + i*spatial + k;
            mean_delta[i] += delta[index];
        }
    }
    mean_delta[i] *= (-1./sqrt(variance[i] + .000001f));
}

extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
    check_error(cudaPeekAtLastError());
}

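// Per-filter mean over the batch and spatial dimensions, one thread per filter.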
__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    int j,k;
    mean[i] = 0;
    for(j = 0; j < batch; ++j){
        for(k = 0; k < spatial; ++k){
            int index = j*filters*spatial + i*spatial + k;
            mean[i] += x[index];
        }
    }
    mean[i] *= scale;
}

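// Per-filter sample variance; the (batch * spatial - 1) scale applies
// Bessel's correction.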
__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(batch * spatial - 1);
    int j,k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= filters) return;
    variance[i] = 0;
    for(j = 0; j < batch; ++j){
        for(k = 0; k < spatial; ++k){
            int index = j*filters*spatial + i*spatial + k;
            variance[i] += pow((x[index] - mean[i]), 2);
        }
    }
    variance[i] *= scale;
}

__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) Y[OFFY + i*INCY] += ALPHA*X[OFFX + i*INCX];
}

__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] *= ALPHA;
}

__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) X[i*INCX] = ALPHA;
}

__global__ void mask_kernel(int n, float *x, float mask_num, float *mask)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n && mask[i] == mask_num) x[i] = mask_num;
}

__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}

| | | extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) |
| | | { |
| | | size_t N = batch*filters*spatial; |
| | | normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial); |
| | | check_error(cudaPeekAtLastError()); |
| | | } |
| | | |
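// Block-parallel mean: one block per filter with a shared-memory reduction,
// the fast counterpart of mean_kernel.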
__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? x[index] : 0;
        }
    }

    __syncthreads();    // make every thread's partial sum visible before thread 0 reduces

    if(id == 0){
        mean[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean[filter] += local[i];
        }
        mean[filter] /= spatial * batch;
    }
}

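// Block-parallel sample variance, same layout as fast_mean_kernel.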
__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;

            local[id] += (i+id < spatial) ? pow((x[index] - mean[filter]), 2) : 0;
        }
    }

    __syncthreads();    // make every thread's partial sum visible before thread 0 reduces

    if(id == 0){
        variance[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance[filter] += local[i];
        }
        variance[filter] /= (spatial * batch - 1);
    }
}

extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
    fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
    check_error(cudaPeekAtLastError());
}

extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
    mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
    check_error(cudaPeekAtLastError());
}

extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
    check_error(cudaPeekAtLastError());
}

| | | extern "C" void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY) |
| | | { |
| | | axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY); |
| | |
| | | check_error(cudaPeekAtLastError()); |
| | | } |
| | | |
| | | extern "C" void mask_ongpu(int N, float * X, float * mask) |
| | | extern "C" void mask_ongpu(int N, float * X, float mask_num, float * mask) |
| | | { |
| | | mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask); |
| | | mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask); |
| | | check_error(cudaPeekAtLastError()); |
| | | } |
| | | |
| | |
| | | scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX); |
| | | check_error(cudaPeekAtLastError()); |
| | | } |

extern "C" void fill_ongpu(int N, float ALPHA, float * X, int INCX)
{
    fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
    check_error(cudaPeekAtLastError());
}

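// Residual shortcut: add layer `add` (w1 x h1 x c1) into `out` (w2 x h2 x c2),
// striding over whichever layer is larger so mismatched shapes line up over
// their shared (min) extent.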
__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (id >= size) return;
    int i = id % minw;
    id /= minw;
    int j = id % minh;
    id /= minh;
    int k = id % minc;
    id /= minc;
    int b = id % batch;

    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
    out[out_index] += add[add_index];
}

extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;

    int size = batch * minw * minh * minc;
    shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, out);
    check_error(cudaPeekAtLastError());
}

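// Smooth L1 (Huber-style) loss: quadratic error with linear gradient for
// |diff| < 1, linear error with a clipped gradient of +/-1 beyond.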
__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float diff = truth[i] - pred[i];
        float abs_val = fabsf(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff < 0) ? -1 : 1;
        }
    }
}

extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}

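// Elementwise squared error and its (unscaled) gradient.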
__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i < n){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff; //I know this is technically wrong, deal with it.
        delta[i] = diff;
    }
}

extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
{
    l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
    check_error(cudaPeekAtLastError());
}