From 787d5345609459f21fd65d2d8b4fcd55201e21a1 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Mon, 13 Oct 2014 07:31:10 +0000
Subject: [PATCH] Convolutional layer working on GPU

---
 src/mini_blas.h |   52 +++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 41 insertions(+), 11 deletions(-)

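For reference, a usage sketch of the prototypes this patch declares:
convolution expressed as im2col followed by a single gemm call. This
sketch is illustrative only, is not part of the patch, and assumes pad
is a pixel count, a single image (batch = 1), and that im2col_cpu lays
data_col out as (channels*ksize*ksize) rows by (out_h*out_w) columns;
the n_filters parameter is made up for illustration.

    #include <stdlib.h>
    #include "mini_blas.h"

    /* Convolution as im2col followed by one GEMM (sketch only). */
    void convolve_sketch(float *image, float *filters, float *output,
            int channels, int height, int width,
            int n_filters, int ksize, int stride, int pad)
    {
        int out_h = (height + 2*pad - ksize)/stride + 1;
        int out_w = (width  + 2*pad - ksize)/stride + 1;
        int k = channels*ksize*ksize;   /* length of one unrolled patch */
        int n = out_h*out_w;            /* number of output positions   */

        /* Unroll every ksize x ksize patch of the image into a column. */
        float *data_col = calloc(k*n, sizeof(float));
        im2col_cpu(image, 1, channels, height, width, ksize, stride, pad, data_col);

        /* output[f][p] = sum over k of filters[f][k] * data_col[k][p] */
        gemm(0, 0, n_filters, n, k, 1,
                filters, k,
                data_col, n,
                0,
                output, n);
        free(data_col);
    }

The *_ongpu variants follow the same pattern but take cl_mem handles,
so the buffers can presumably stay on the OpenCL device between the
im2col and gemm steps instead of being copied back to the host.
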
diff --git a/src/mini_blas.h b/src/mini_blas.h
index 34f15de..a155c35 100644
--- a/src/mini_blas.h
+++ b/src/mini_blas.h
@@ -1,3 +1,5 @@
+#include "opencl.h"
+
 void pm(int M, int N, float *A);
 void gemm(int TA, int TB, int M, int N, int K, float ALPHA, 
                     float *A, int lda, 
@@ -6,15 +8,41 @@
                     float *C, int ldc);
 float *random_matrix(int rows, int cols);
 void time_random_matrix(int TA, int TB, int m, int k, int n);
-void im2col_gpu(float* data_im, const int channels,
-        const int height, const int width, const int ksize, const int stride,
-        float* data_col);
-void im2col_cpu(float* data_im, const int channels,
-        const int height, const int width, const int ksize, const int stride,
-        float* data_col);
-void col2im_cpu(float* data_col, const int channels,
-        const int height, const int width, const int ksize, const int stride,
-        float* data_im);
+
+#ifdef GPU
+void axpy_ongpu(int N, float ALPHA, cl_mem X, int INCX, cl_mem Y, int INCY);
+void copy_ongpu(int N, cl_mem X, int INCX, cl_mem Y, int INCY);
+void scal_ongpu(int N, float ALPHA, cl_mem X, int INCX);
+void im2col_ongpu(cl_mem data_im, int batch,
+         int channels, int height, int width,
+         int ksize, int stride, int pad, cl_mem data_col);
+
+void col2im_gpu(float *data_col, int batch,
+         int channels, int height, int width,
+         int ksize, int stride, int pad, float *data_im);
+void col2im_ongpu(cl_mem data_col, int batch,
+        int channels, int height, int width,
+        int ksize, int stride, int pad, cl_mem data_im);
+
+void im2col_gpu(float *data_im, int batch,
+         int channels, int height, int width,
+         int ksize, int stride, int pad, float *data_col);
+
+void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA, 
+        cl_mem A_gpu, int lda, 
+        cl_mem B_gpu, int ldb,
+        float BETA,
+        cl_mem C_gpu, int ldc);
+#endif
+
+void im2col_cpu(float* data_im, int batch,
+    int channels, int height, int width,
+    int ksize, int stride, int pad, float* data_col);
+
+void col2im_cpu(float* data_col, int batch,
+        int channels, int height, int width,
+        int ksize, int stride, int pad, float* data_im);
+
 void test_blas();
 
 void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA, 
@@ -27,6 +55,8 @@
                     float *B, int ldb,
                     float BETA,
                     float *C, int ldc);
-void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
-void scal_cpu(int N, float ALPHA, float *X, int INCX);
+inline void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
+inline void copy_cpu(int N, float *X, int INCX, float *Y, int INCY);
+inline void scal_cpu(int N, float ALPHA, float *X, int INCX);
+inline float dot_cpu(int N, float *X, int INCX, float *Y, int INCY);
 void test_gpu_blas();

--
Gitblit v1.10.0