#include "normalization_layer.h"
#include "blas.h"

#include <stdio.h>
#include <stdlib.h>

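/*
 * Local Response Normalization (LRN) across channels. The forward pass below
 * computes, for every spatial position (x,y) and channel k,
 *
 *     output[x,y,k] = input[x,y,k] * (kappa + alpha * sum_j input[x,y,j]^2)^(-beta)
 *
 * where j runs over a window of `size` adjacent channels around k.
 */
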
/* Wrap the layer output as an image (no copy) for visualization. */
image get_normalization_image(layer layer)
{
    int h = layer.h;
    int w = layer.w;
    int c = layer.c;
    return float_to_image(h,w,c,layer.output);
}

/* Wrap the layer delta as an image (no copy). */
image get_normalization_delta(layer layer)
{
    int h = layer.h;
    int w = layer.w;
    int c = layer.c;
    return float_to_image(h,w,c,layer.delta);
}

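/*
 * Illustrative call with hypothetical values, roughly the AlexNet-style LRN
 * hyper-parameters (5-channel window, alpha = 1e-4, beta = 0.75, kappa = 2)
 * on a 227x227x96 input:
 *
 *     layer l = make_normalization_layer(1, 227, 227, 96, 5, .0001, .75, 2);
 */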
layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa)
{
    fprintf(stderr, "Local Response Normalization Layer: %d x %d x %d image, %d size\n", w,h,c,size);
    layer layer = {0};
    layer.type = NORMALIZATION;
    layer.batch = batch;
    layer.h = layer.out_h = h;
    layer.w = layer.out_w = w;
    layer.c = layer.out_c = c;
    layer.kappa = kappa;
    layer.size = size;
    layer.alpha = alpha;
    layer.beta = beta;
    layer.output = calloc(h * w * c * batch, sizeof(float));
    layer.delta = calloc(h * w * c * batch, sizeof(float));
    layer.squared = calloc(h * w * c * batch, sizeof(float));
    layer.norms = calloc(h * w * c * batch, sizeof(float));
    layer.inputs = w*h*c;
    layer.outputs = layer.inputs;
#ifdef GPU
    layer.output_gpu = cuda_make_array(0, h * w * c * batch);
    layer.delta_gpu = cuda_make_array(0, h * w * c * batch);
    layer.squared_gpu = cuda_make_array(0, h * w * c * batch);
    layer.norms_gpu = cuda_make_array(0, h * w * c * batch);
#endif
    return layer;
}

void resize_normalization_layer(layer *layer, int w, int h)
{
    int c = layer->c;
    int batch = layer->batch;
    layer->h = h;
    layer->w = w;
    layer->out_h = h;
    layer->out_w = w;
    layer->inputs = w*h*c;
    layer->outputs = layer->inputs;
    layer->output = realloc(layer->output, h * w * c * batch * sizeof(float));
    layer->delta = realloc(layer->delta, h * w * c * batch * sizeof(float));
    layer->squared = realloc(layer->squared, h * w * c * batch * sizeof(float));
    layer->norms = realloc(layer->norms, h * w * c * batch * sizeof(float));
#ifdef GPU
    cuda_free(layer->output_gpu);
    cuda_free(layer->delta_gpu);
    cuda_free(layer->squared_gpu);
    cuda_free(layer->norms_gpu);
    layer->output_gpu = cuda_make_array(0, h * w * c * batch);
    layer->delta_gpu = cuda_make_array(0, h * w * c * batch);
    layer->squared_gpu = cuda_make_array(0, h * w * c * batch);
    layer->norms_gpu = cuda_make_array(0, h * w * c * batch);
#endif
}

/* Add the elementwise square of src into dest (running window-sum helper). */
void add_square_array(float *src, float *dest, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        dest[i] += src[i]*src[i];
    }
}

/* Subtract the elementwise square of src from dest (running window-sum helper). */
void sub_square_array(float *src, float *dest, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        dest[i] -= src[i]*src[i];
    }
}

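/*
 * The forward pass keeps a running sum of squared activations over a sliding
 * channel window: the normalizer for channel k is built from channel k-1 by
 * adding the channel entering the window and subtracting the one leaving it,
 * rather than re-summing all `size` channels at every step.
 */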
void forward_normalization_layer(const layer layer, network_state state)
{
    int k,b;
    int w = layer.w;
    int h = layer.h;
    int c = layer.c;
    scal_cpu(w*h*c*layer.batch, 0, layer.squared, 1);

    for(b = 0; b < layer.batch; ++b){
        float *squared = layer.squared + w*h*c*b;
        float *norms = layer.norms + w*h*c*b;
        float *input = state.input + w*h*c*b;
        pow_cpu(w*h*c, 2, input, 1, squared, 1);

        const_cpu(w*h, layer.kappa, norms, 1);
        for(k = 0; k < layer.size/2; ++k){
            axpy_cpu(w*h, layer.alpha, squared + w*h*k, 1, norms, 1);
        }

        for(k = 1; k < layer.c; ++k){
            copy_cpu(w*h, norms + w*h*(k-1), 1, norms + w*h*k, 1);
            int prev = k - ((layer.size-1)/2) - 1;
            int next = k + (layer.size/2);
            if(prev >= 0) axpy_cpu(w*h, -layer.alpha, squared + w*h*prev, 1, norms + w*h*k, 1);
            if(next < layer.c) axpy_cpu(w*h, layer.alpha, squared + w*h*next, 1, norms + w*h*k, 1);
        }
    }
    pow_cpu(w*h*c*layer.batch, -layer.beta, layer.norms, 1, layer.output, 1);
    mul_cpu(w*h*c*layer.batch, state.input, 1, layer.output, 1);
}

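/*
 * Note: the exact LRN gradient also has cross-channel terms from
 * differentiating the normalizer itself. As the TODO below says, this
 * backward pass is approximate; it scales the incoming delta by
 * norms^(-beta) only, treating the normalizer as a constant.
 */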
void backward_normalization_layer(const layer layer, network_state state)
{
    // TODO This is approximate ;-)

    int w = layer.w;
    int h = layer.h;
    int c = layer.c;
    pow_cpu(w*h*c*layer.batch, -layer.beta, layer.norms, 1, state.delta, 1);
    mul_cpu(w*h*c*layer.batch, layer.delta, 1, state.delta, 1);
}

void visualize_normalization_layer(layer layer, char *window)
{
    image output = get_normalization_image(layer);
    image dc = collapse_image_layers(output, 1);
    char buff[256];
    sprintf(buff, "%s: Output", window);
    show_image(dc, buff);
    save_image(dc, buff);
    free_image(dc);
}

#ifdef GPU
void forward_normalization_layer_gpu(const layer layer, network_state state)
{
    int k,b;
    int w = layer.w;
    int h = layer.h;
    int c = layer.c;
    scal_ongpu(w*h*c*layer.batch, 0, layer.squared_gpu, 1);

    for(b = 0; b < layer.batch; ++b){
        float *squared = layer.squared_gpu + w*h*c*b;
        float *norms = layer.norms_gpu + w*h*c*b;
        float *input = state.input + w*h*c*b;
        pow_ongpu(w*h*c, 2, input, 1, squared, 1);

        const_ongpu(w*h, layer.kappa, norms, 1);
        for(k = 0; k < layer.size/2; ++k){
            axpy_ongpu(w*h, layer.alpha, squared + w*h*k, 1, norms, 1);
        }

        for(k = 1; k < layer.c; ++k){
            copy_ongpu(w*h, norms + w*h*(k-1), 1, norms + w*h*k, 1);
            int prev = k - ((layer.size-1)/2) - 1;
            int next = k + (layer.size/2);
            if(prev >= 0) axpy_ongpu(w*h, -layer.alpha, squared + w*h*prev, 1, norms + w*h*k, 1);
            if(next < layer.c) axpy_ongpu(w*h, layer.alpha, squared + w*h*next, 1, norms + w*h*k, 1);
        }
    }
    pow_ongpu(w*h*c*layer.batch, -layer.beta, layer.norms_gpu, 1, layer.output_gpu, 1);
    mul_ongpu(w*h*c*layer.batch, state.input, 1, layer.output_gpu, 1);
}

void backward_normalization_layer_gpu(const layer layer, network_state state)
{
    // TODO This is approximate ;-)

    int w = layer.w;
    int h = layer.h;
    int c = layer.c;
    pow_ongpu(w*h*c*layer.batch, -layer.beta, layer.norms_gpu, 1, state.delta, 1);
    mul_ongpu(w*h*c*layer.batch, layer.delta_gpu, 1, state.delta, 1);
}
#endif