From e92f7d301c971b4d27aa3dcd1e4047e94f04b3fc Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Wed, 25 Mar 2015 01:27:12 +0000
Subject: [PATCH] port maxpool layer GPU path from OpenCL to CUDA

---
 src/maxpool_layer.c |  106 +++++++++++-----------------------------------------
 1 file changed, 23 insertions(+), 83 deletions(-)

diff --git a/src/maxpool_layer.c b/src/maxpool_layer.c
index c05e939..790cb28 100644
--- a/src/maxpool_layer.c
+++ b/src/maxpool_layer.c
@@ -1,4 +1,5 @@
 #include "maxpool_layer.h"
+#include "cuda.h"
 #include <stdio.h>
 
 image get_maxpool_image(maxpool_layer layer)
@@ -32,23 +33,33 @@
     layer->output =  calloc(output_size, sizeof(float));
     layer->delta =   calloc(output_size, sizeof(float));
     #ifdef GPU
-    layer->indexes_cl = cl_make_int_array(layer->indexes, output_size);
-    layer->output_cl  = cl_make_array(layer->output, output_size);
-    layer->delta_cl   = cl_make_array(layer->delta, output_size);
+    layer->indexes_gpu = cuda_make_int_array(output_size);
+    layer->output_gpu  = cuda_make_array(layer->output, output_size);
+    layer->delta_gpu   = cuda_make_array(layer->delta, output_size);
     #endif
     return layer;
 }
 
-void resize_maxpool_layer(maxpool_layer *layer, int h, int w, int c)
+void resize_maxpool_layer(maxpool_layer *layer, int h, int w)
 {
     layer->h = h;
     layer->w = w;
-    layer->c = c;
-    layer->output = realloc(layer->output, ((h-1)/layer->stride+1) * ((w-1)/layer->stride+1) * c * layer->batch* sizeof(float));
-    layer->delta = realloc(layer->delta, ((h-1)/layer->stride+1) * ((w-1)/layer->stride+1) * c * layer->batch*sizeof(float));
+    int output_size = ((h-1)/layer->stride+1) * ((w-1)/layer->stride+1) * layer->c * layer->batch;
+    layer->output = realloc(layer->output, output_size * sizeof(float));
+    layer->delta = realloc(layer->delta, output_size * sizeof(float));
+    layer->indexes = realloc(layer->indexes, output_size * sizeof(int));
+
+    #ifdef GPU
+    cuda_free((float *)layer->indexes_gpu);
+    cuda_free(layer->output_gpu);
+    cuda_free(layer->delta_gpu);
+    layer->indexes_gpu = cuda_make_int_array(output_size);
+    layer->output_gpu  = cuda_make_array(layer->output, output_size);
+    layer->delta_gpu   = cuda_make_array(layer->delta, output_size);
+    #endif
 }
 
-void forward_maxpool_layer(const maxpool_layer layer, float *input)
+void forward_maxpool_layer(const maxpool_layer layer, network_state state)
 {
     int b,i,j,k,l,m;
     int w_offset = (-layer.size-1)/2 + 1;
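The allocation calls above swap the OpenCL wrappers (cl_make_int_array, cl_make_array) for CUDA equivalents declared in the newly included cuda.h. Their definitions are not part of this diff; the following is a minimal sketch of what they plausibly look like, inferred from the call sites and assuming the standard CUDA runtime API:

    #include <cuda_runtime.h>

    /* Sketch only: allocate a device buffer of n floats and, when a host
     * array is given, copy it up. */
    float *cuda_make_array(float *x, size_t n)
    {
        float *x_gpu = 0;
        size_t size = sizeof(float) * n;
        cudaMalloc((void **)&x_gpu, size);
        if (x) cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
        return x_gpu;
    }

    /* Sketch only: uninitialized device buffer of n ints; unlike the old
     * cl_make_int_array it takes no host pointer, matching the call sites. */
    int *cuda_make_int_array(size_t n)
    {
        int *x_gpu = 0;
        cudaMalloc((void **)&x_gpu, sizeof(int) * n);
        return x_gpu;
    }

    void cuda_free(float *x_gpu)
    {
        cudaFree(x_gpu);
    }

A cuda_free that takes float * would also explain the (float *) cast when freeing indexes_gpu in resize_maxpool_layer above.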
@@ -72,7 +83,7 @@
                             int index = cur_w + layer.w*(cur_h + layer.h*(k + b*layer.c));
                             int valid = (cur_h >= 0 && cur_h < layer.h &&
                                          cur_w >= 0 && cur_w < layer.w);
-                            float val = (valid != 0) ? input[index] : -FLT_MAX;
+                            float val = (valid != 0) ? state.input[index] : -FLT_MAX;
                             max_i = (val > max) ? index : max_i;
                             max   = (val > max) ? val   : max;
                         }
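forward_maxpool_layer (and backward_maxpool_layer below) now receive a network_state instead of a bare float pointer, so a single struct can carry everything a layer needs during a pass. Its definition is not shown in this diff; judging only from the state.input and state.delta accesses in this patch, it contains at least:

    /* Inferred from the field accesses in this patch; the real definition
     * presumably lives in a shared header and may carry more fields
     * (ground truth, a train flag, and so on). */
    typedef struct {
        float *input;   /* activations feeding the current layer */
        float *delta;   /* gradient buffer to propagate into */
    } network_state;

A call site would then look something like this, with X a hypothetical batch pointer:

    network_state state = {0};
    state.input = X;
    forward_maxpool_layer(layer, state);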
@@ -85,87 +96,16 @@
     }
 }
 
-void backward_maxpool_layer(const maxpool_layer layer, float *delta)
+void backward_maxpool_layer(const maxpool_layer layer, network_state state)
 {
     int i;
     int h = (layer.h-1)/layer.stride + 1;
     int w = (layer.w-1)/layer.stride + 1;
     int c = layer.c;
-    memset(delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
+    memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
     for(i = 0; i < h*w*c*layer.batch; ++i){
         int index = layer.indexes[i];
-        delta[index] += layer.delta[i];
+        state.delta[index] += layer.delta[i];
     }
 }
 
-#ifdef GPU
-cl_kernel get_forward_kernel()
-{
-    static int init = 0;
-    static cl_kernel kernel;
-    if(!init){
-        kernel = get_kernel("src/maxpool_layer.cl", "forward", 0);
-        init = 1;
-    }
-    return kernel;
-}
-
-void forward_maxpool_layer_gpu(maxpool_layer layer, cl_mem input)
-{
-    int h = (layer.h-1)/layer.stride + 1;
-    int w = (layer.w-1)/layer.stride + 1;
-    int c = layer.c;
-    cl_kernel kernel = get_forward_kernel();
-    cl_command_queue queue = cl.queue;
-
-    cl_uint i = 0;
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.h), (void*) &layer.h);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.w), (void*) &layer.w);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.c), (void*) &layer.c);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.stride), (void*) &layer.stride);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.size), (void*) &layer.size);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(input), (void*) &input);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.output_cl), (void*) &layer.output_cl);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.indexes_cl), (void*) &layer.indexes_cl);
-    check_error(cl);
-
-    const size_t global_size[] = {h*w*c*layer.batch};
-
-    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
-    check_error(cl);
-}
-
-cl_kernel get_backward_kernel()
-{
-    static int init = 0;
-    static cl_kernel kernel;
-    if(!init){
-        kernel = get_kernel("src/maxpool_layer.cl", "backward", 0);
-        init = 1;
-    }
-    return kernel;
-}
-
-void backward_maxpool_layer_gpu(maxpool_layer layer, cl_mem delta)
-{
-    cl_kernel kernel = get_backward_kernel();
-    cl_command_queue queue = cl.queue;
-
-    cl_uint i = 0;
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.h), (void*) &layer.h);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.w), (void*) &layer.w);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.c), (void*) &layer.c);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.stride), (void*) &layer.stride);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.size), (void*) &layer.size);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.delta_cl), (void*) &layer.delta_cl);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(delta), (void*) &delta);
-    cl.error = clSetKernelArg(kernel, i++, sizeof(layer.indexes_cl), (void*) &layer.indexes_cl);
-    check_error(cl);
-
-    const size_t global_size[] = {layer.h*layer.w*layer.c*layer.batch};
-
-    cl.error = clEnqueueNDRangeKernel(queue, kernel, 1, 0, global_size, 0, 0, 0, 0);
-    check_error(cl);
-}
-
-#endif
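The deleted block is the entire OpenCL GPU path: lazily compiled kernels from src/maxpool_layer.cl, argument marshalling through clSetKernelArg, and clEnqueueNDRangeKernel launches with one work item per output element (forward) or per input element (backward). The CUDA replacements are not part of this diff and presumably live in a separate .cu file; as a sketch of what the forward kernel could look like, reusing the indexing of the CPU loop above (names and launch geometry are assumptions):

    #include <float.h>

    /* One thread per pooled output element; mirrors the CPU loop:
     * find the max in a size x size window and record its input index
     * so the backward pass can route the gradient to the winner. */
    __global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w,
            int in_c, int stride, int size, float *input, float *output,
            int *indexes)
    {
        int id = blockIdx.x * blockDim.x + threadIdx.x;
        if (id >= n) return;

        int h = (in_h - 1) / stride + 1;   /* output height */
        int w = (in_w - 1) / stride + 1;   /* output width */

        int j = id % w; id /= w;           /* output column */
        int i = id % h; id /= h;           /* output row */
        int k = id % in_c; id /= in_c;     /* channel */
        int b = id;                        /* batch item */

        int w_offset = (-size - 1) / 2 + 1;
        int h_offset = (-size - 1) / 2 + 1;

        float max = -FLT_MAX;
        int max_i = -1;
        for (int l = 0; l < size; ++l) {
            for (int m = 0; m < size; ++m) {
                int cur_h = h_offset + i * stride + l;
                int cur_w = w_offset + j * stride + m;
                int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
                int valid = (cur_h >= 0 && cur_h < in_h &&
                             cur_w >= 0 && cur_w < in_w);
                float val = valid ? input[index] : -FLT_MAX;
                max_i = (val > max) ? index : max_i;
                max   = (val > max) ? val   : max;
            }
        }
        int out_index = j + w * (i + h * (k + b * in_c));
        output[out_index] = max;
        indexes[out_index] = max_i;
    }

    /* Hypothetical launch, one thread per output element: */
    int out_h = (layer.h - 1) / layer.stride + 1;
    int out_w = (layer.w - 1) / layer.stride + 1;
    int n = out_h * out_w * layer.c * layer.batch;
    forward_maxpool_layer_kernel<<<(n + 255) / 256, 256>>>(n, layer.h, layer.w,
            layer.c, layer.stride, layer.size, state.input, layer.output_gpu,
            layer.indexes_gpu);

The backward kernel needs only the recorded indexes: each pooled output adds its stored delta to the single input position that won the max, which is exactly what the CPU backward_maxpool_layer above does.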

--
Gitblit v1.10.0