Faster batch normalization
void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);

void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta);
void variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta);
void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta);

void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *spatial_mean_delta, float *mean_delta);
void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *spatial_variance_delta, float *variance_delta);
void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta);
void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta);

void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *spatial_variance, float *variance);
void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *spatial_mean, float *mean);
void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean);
#endif
#endif
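The blas.h hunk pairs each previous fast_* prototype, which still routed through a batch*filters staging buffer (spatial_mean, spatial_variance and their delta counterparts), with its replacement that writes per-filter statistics directly. All of these routines assume darknet's NCHW layout, with spatial being the per-filter plane size (out_h*out_w at the call sites further down). As a plain CPU reference for what fast_mean_gpu computes, a hedged sketch that is not part of the patch:

/* Hedged CPU reference for fast_mean_gpu: the per-filter mean over the batch
 * and spatial positions of an NCHW tensor. Not part of the patch. */
void mean_cpu_ref(float *x, int batch, int filters, int spatial, float *mean)
{
    int b, f, k;
    for(f = 0; f < filters; ++f){
        mean[f] = 0;
        for(b = 0; b < batch; ++b){
            for(k = 0; k < spatial; ++k){
                mean[f] += x[b*filters*spatial + f*spatial + k];
            }
        }
        mean[f] /= (float)(batch * spatial);
    }
}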
        variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.));
}

__global__ void spatial_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *spatial_variance_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= batch*filters) return;
    int f = i%filters;
    int b = i/filters;

    int k;
    spatial_variance_delta[i] = 0;
    for (k = 0; k < spatial; ++k) {
        int index = b*filters*spatial + f*spatial + k;
        spatial_variance_delta[i] += delta[index]*(x[index] - mean[f]);
    }
    spatial_variance_delta[i] *= -.5 * pow(variance[f] + .00001f, (float)(-3./2.));
}

extern "C" void variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    variance_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
    check_error(cudaPeekAtLastError());
}
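The -.5 * pow(variance + .00001f, -3./2.) scale in these kernels is the batch-norm gradient with respect to the per-filter variance, and the -1./sqrt(variance + .00001f) factor that appears in the mean-delta kernels below is the gradient with respect to the per-filter mean. With epsilon = 1e-5, N = batch, S = spatial, and delta the incoming gradient on the normalized output:

$$\frac{\partial L}{\partial \sigma_f^2} = \sum_{b=1}^{N}\sum_{k=1}^{S} \delta_{b,f,k}\,(x_{b,f,k}-\mu_f)\cdot\left(-\tfrac{1}{2}\right)\left(\sigma_f^2+\epsilon\right)^{-3/2},\qquad \frac{\partial L}{\partial \mu_f} = \frac{-1}{\sqrt{\sigma_f^2+\epsilon}}\sum_{b,k}\delta_{b,f,k}$$

The second-order term of the mean gradient (proportional to the sum of the residuals x - mu) vanishes exactly, because the residuals sum to zero around the batch mean, which is why the kernels only compute the first term.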

__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
{
    int k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= groups) return;
    sum[i] = 0;
    for(k = 0; k < n; ++k){
        sum[i] += x[k*groups + i];
    }
}
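accumulate_kernel is the second half of the two-pass scheme this commit removes: a first kernel writes one partial result per (image, filter) pair, filter-fastest, and this kernel folds that batch x filters array down to one value per filter. A tiny worked example with hypothetical sizes:

/* Hypothetical example: batch = 2, filters = 3.
 * x holds the per-image partial results, filter-fastest:
 *     x = [ p00 p01 p02 p10 p11 p12 ]    (pbf = partial result of image b, filter f)
 * accumulate_kernel(x, 2, 3, sum) writes sum[f] = p0f + p1f for f = 0, 1, 2. */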

__global__ void spatial_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *spatial_mean_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= batch*filters) return;
    int f = i%filters;
    int b = i/filters;

    int k;
    spatial_mean_delta[i] = 0;
    for (k = 0; k < spatial; ++k) {
        int index = b*filters*spatial + f*spatial + k;
        spatial_mean_delta[i] += delta[index];
    }
    spatial_mean_delta[i] *= (-1./sqrt(variance[f] + .00001f));
}

extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *spatial_mean_delta, float *mean_delta)
{
    spatial_mean_delta_kernel<<<cuda_gridsize(filters*batch), BLOCK>>>(delta, variance, batch, filters, spatial, spatial_mean_delta);
    check_error(cudaPeekAtLastError());
    accumulate_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_mean_delta, batch, filters, mean_delta);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *spatial_variance_delta, float *variance_delta)
{
    spatial_variance_delta_kernel<<<cuda_gridsize(filters*batch), BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, spatial_variance_delta);
    check_error(cudaPeekAtLastError());
    accumulate_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_variance_delta, batch, filters, variance_delta);
    check_error(cudaPeekAtLastError());
}

__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? delta[index] : 0;
        }
    }

    if(id == 0){
        mean_delta[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean_delta[filter] += local[i];
        }
        mean_delta[filter] *= (-1./sqrt(variance[filter] + .00001f));
    }
}

__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
        }
    }

    if(id == 0){
        variance_delta[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance_delta[filter] += local[i];
        }
        variance_delta[filter] *= -.5 * pow(variance[filter] + .00001f, (float)(-3./2.));
    }
}
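Comparing the two groups above shows the change in strategy: the spatial_* path launches one thread per (image, filter) pair to reduce over spatial positions, then needs accumulate_kernel as a second launch to fold the batch*filters partial results over the batch; the fast_*_delta_kernel variants do the whole per-filter reduction in one launch, with one block of BLOCK threads per filter accumulating into shared memory. As written, only thread 0 reads local[] and there is no __syncthreads() between the accumulation loop and that read. A hedged variant (not part of the patch) that adds the barrier and replaces thread 0's serial sweep with a pairwise tree reduction, assuming BLOCK is a power of two, could look like this:

/* Hedged sketch, not part of the patch: same per-filter reduction as
 * fast_mean_delta_kernel, but with an explicit barrier and a tree reduction
 * in shared memory. Assumes BLOCK (threads per block) is a power of two. */
__global__ void fast_mean_delta_kernel_tree(float *delta, float *variance,
        int batch, int filters, int spatial, float *mean_delta)
{
    __shared__ float local[BLOCK];
    int id = threadIdx.x;
    int filter = blockIdx.x;          // one block per filter

    // each thread privately sums its strided share of the (batch, spatial) elements
    float sum = 0;
    for(int j = 0; j < batch; ++j){
        for(int i = id; i < spatial; i += BLOCK){
            sum += delta[j*spatial*filters + filter*spatial + i];
        }
    }
    local[id] = sum;
    __syncthreads();

    // pairwise tree reduction: local[0] ends up holding the per-filter total
    for(int stride = BLOCK/2; stride > 0; stride >>= 1){
        if(id < stride) local[id] += local[id + stride];
        __syncthreads();
    }

    if(id == 0){
        mean_delta[filter] = local[0] * (-1.f/sqrtf(variance[filter] + .00001f));
    }
}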

__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;

    check_error(cudaPeekAtLastError());
}

extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
{
    fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
{
    fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
    check_error(cudaPeekAtLastError());
}

__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);

    mean[i] *= scale;
}

__global__ void spatial_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(spatial*batch-1);
    int k;
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if (i >= batch*filters) return;
    int f = i%filters;
    int b = i/filters;

    variance[i] = 0;
    for(k = 0; k < spatial; ++k){
        int index = b*filters*spatial + f*spatial + k;
        variance[i] += pow((x[index] - mean[f]), 2);
    }
    variance[i] *= scale;
}

__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(batch * spatial);

    check_error(cudaPeekAtLastError());
}

__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? x[index] : 0;
        }
    }

    if(id == 0){
        mean[filter] = 0;
        for(i = 0; i < threads; ++i){
            mean[filter] += local[i];
        }
        mean[filter] /= spatial * batch;
    }
}

__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    const int threads = BLOCK;
    __shared__ float local[threads];

    int id = threadIdx.x;
    local[id] = 0;

    int filter = blockIdx.x;

    int i, j;
    for(j = 0; j < batch; ++j){
        for(i = 0; i < spatial; i += threads){
            int index = j*spatial*filters + filter*spatial + i + id;
            local[id] += (i+id < spatial) ? pow((x[index] - mean[filter]), 2) : 0;
        }
    }

    if(id == 0){
        variance[filter] = 0;
        for(i = 0; i < threads; ++i){
            variance[filter] += local[i];
        }
        variance[filter] /= spatial * batch;
    }
}

extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
    fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
    check_error(cudaPeekAtLastError());
}
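Note the normalization: the removed spatial_variance_kernel applied Bessel's correction (scale = 1./(spatial*batch-1)), while fast_variance_kernel divides by spatial * batch, i.e. it computes the biased batch variance. With N = batch and S = spatial:

$$\sigma_f^2 = \frac{1}{NS}\sum_{b=1}^{N}\sum_{k=1}^{S}\left(x_{b,f,k}-\mu_f\right)^2 \quad\text{(fused kernel)}\qquad\text{vs.}\qquad \frac{1}{NS-1}\sum_{b,k}\left(x_{b,f,k}-\mu_f\right)^2 \quad\text{(spatial kernel)}$$

For the large NS of a typical convolutional feature map the difference between the two estimates is negligible.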

extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
{
    mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *spatial_mean, float *mean)
{
    mean_kernel<<<cuda_gridsize(filters*batch), BLOCK>>>(x, 1, filters*batch, spatial, spatial_mean);
    check_error(cudaPeekAtLastError());
    mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_mean, batch, filters, 1, mean);
    check_error(cudaPeekAtLastError());
}

extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *spatial_variance, float *variance)
{
    spatial_variance_kernel<<<cuda_gridsize(batch*filters), BLOCK>>>(x, mean, batch, filters, spatial, spatial_variance);
    check_error(cudaPeekAtLastError());
    accumulate_kernel<<<cuda_gridsize(filters), BLOCK>>>(spatial_variance, batch, filters, variance);
    check_error(cudaPeekAtLastError());
}

extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);

            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, epoch);
            save_weights(net, buff);
        }
        if(*net.seen%1000 == 0){
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
        }
    }
    char buff[256];
    sprintf(buff, "%s/%s.weights", backup_directory, base);

void train_coco(char *cfgfile, char *weightfile)
{
    //char *train_images = "/home/pjreddie/data/voc/test/train.txt";
    char *train_images = "/home/pjreddie/data/coco/train.txt";
    //char *train_images = "/home/pjreddie/data/coco/train.txt";
    char *train_images = "data/coco.trainval.txt";
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    data_seed = time(0);

    float * c = l.output_gpu;
    gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);

    /*
       cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
       float avg = mean_array(l.output, l.outputs*l.batch);
       printf("%f\n", avg);
     */
}

void backward_connected_layer_gpu(connected_layer l, network_state state)


    if(l.batch_normalize){
        if(state.train){
            fast_mean_gpu(l.output_gpu, l.batch, l.n, l.out_h*l.out_w, l.spatial_mean_gpu, l.mean_gpu);
            fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.n, l.out_h*l.out_w, l.spatial_variance_gpu, l.variance_gpu);
            fast_mean_gpu(l.output_gpu, l.batch, l.n, l.out_h*l.out_w, l.mean_gpu);
            fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.n, l.out_h*l.out_w, l.variance_gpu);

            scal_ongpu(l.n, .95, l.rolling_mean_gpu, 1);
            axpy_ongpu(l.n, .05, l.mean_gpu, 1, l.rolling_mean_gpu, 1);
            scal_ongpu(l.n, .95, l.rolling_variance_gpu, 1);
            axpy_ongpu(l.n, .05, l.variance_gpu, 1, l.rolling_variance_gpu, 1);

            //cuda_pull_array(l.variance_gpu, l.mean, l.n);
            //printf("%f\n", l.mean[0]);

            copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
            normalize_gpu(l.output_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.n, l.out_h*l.out_w);
            copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_norm_gpu, 1);

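The paired fast_mean_gpu/fast_variance_gpu calls above show the old spatial-buffer signature next to its replacement. In either case, during training the layer normalizes with the statistics of the current batch and keeps an exponential moving average of them for inference; scal_ongpu/axpy_ongpu with factors .95 and .05 implement that update, and normalize_gpu applies the standard transform (epsilon = 1e-5 in the kernels above):

$$\hat{x}_{b,f,k} = \frac{x_{b,f,k}-\mu_f}{\sqrt{\sigma_f^2+\epsilon}},\qquad \mu_f^{\mathrm{rolling}} \leftarrow 0.95\,\mu_f^{\mathrm{rolling}} + 0.05\,\mu_f,\qquad \sigma_f^{2,\mathrm{rolling}} \leftarrow 0.95\,\sigma_f^{2,\mathrm{rolling}} + 0.05\,\sigma_f^2$$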

        scale_bias_gpu(l.delta_gpu, l.scales_gpu, l.batch, l.n, l.out_h*l.out_w);

        fast_mean_delta_gpu(l.delta_gpu, l.variance_gpu, l.batch, l.n, l.out_w*l.out_h, l.spatial_mean_delta_gpu, l.mean_delta_gpu);
        fast_variance_delta_gpu(l.x_gpu, l.delta_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.n, l.out_w*l.out_h, l.spatial_variance_delta_gpu, l.variance_delta_gpu);
        fast_mean_delta_gpu(l.delta_gpu, l.variance_gpu, l.batch, l.n, l.out_w*l.out_h, l.mean_delta_gpu);
        fast_variance_delta_gpu(l.x_gpu, l.delta_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.n, l.out_w*l.out_h, l.variance_delta_gpu);
        normalize_delta_gpu(l.x_gpu, l.mean_gpu, l.variance_gpu, l.mean_delta_gpu, l.variance_delta_gpu, l.batch, l.n, l.out_w*l.out_h, l.delta_gpu);
    }


}
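normalize_delta_gpu then folds the per-filter gradients back into the per-element delta. Its kernel is not part of this hunk, but the standard batch-norm backward rule that combines these three quantities is, with N = batch and S = out_h*out_w:

$$\frac{\partial L}{\partial x_{b,f,k}} = \frac{\delta_{b,f,k}}{\sqrt{\sigma_f^2+\epsilon}} + \frac{\partial L}{\partial \sigma_f^2}\cdot\frac{2\,(x_{b,f,k}-\mu_f)}{NS} + \frac{\partial L}{\partial \mu_f}\cdot\frac{1}{NS}$$

where delta is l.delta_gpu after scale_bias_gpu has multiplied it by the learned per-filter scales.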

    l.mean = calloc(n, sizeof(float));
    l.spatial_mean = calloc(n*l.batch, sizeof(float));

    l.variance = calloc(n, sizeof(float));

    l.rolling_mean = calloc(n, sizeof(float));
    l.rolling_variance = calloc(n, sizeof(float));
}

    l.rolling_mean_gpu = cuda_make_array(l.mean, n);
    l.rolling_variance_gpu = cuda_make_array(l.variance, n);

    l.spatial_mean_gpu = cuda_make_array(l.spatial_mean, n*l.batch);
    l.spatial_variance_gpu = cuda_make_array(l.spatial_mean, n*l.batch);

    l.spatial_mean_delta_gpu = cuda_make_array(l.spatial_mean, n*l.batch);
    l.spatial_variance_delta_gpu = cuda_make_array(l.spatial_mean, n*l.batch);

    l.mean_delta_gpu = cuda_make_array(l.mean, n);
    l.variance_delta_gpu = cuda_make_array(l.variance, n);

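The spatial_* arrays above are the cost of the two-pass scheme: four extra GPU buffers of n*batch floats per batch-normalized layer, used only as staging space between the spatial kernels and accumulate_kernel. With hypothetical sizes of n = 512 filters and batch = 64, that is

$$4 \times n \times \mathrm{batch} \times 4\ \mathrm{B} = 4 \times 512 \times 64 \times 4\ \mathrm{B} = 512\ \mathrm{KiB}$$

per layer, which the fused fast_* kernels make unnecessary, since they write straight into the n-sized mean, variance, and delta buffers.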