From b36512ea2e7e6d2ec36a3241c1bf751e4e074fe1 Mon Sep 17 00:00:00 2001
From: vinjn <vinjn.z@gmail.com>
Date: Sat, 26 May 2018 13:42:53 +0000
Subject: [PATCH] classifier.c - add the awesome training chart and make sure
 "top" is not bigger than "classes" in datacfg file.

---
 src/blas.h | 18 ++++++++++++++++++
 1 files changed, 18 insertions(+), 0 deletions(-)

diff --git a/src/blas.h b/src/blas.h
index 47d930c..c40422a 100644
--- a/src/blas.h
+++ b/src/blas.h
@@ -1,8 +1,10 @@
 #ifndef BLAS_H
 #define BLAS_H
+void flatten(float *x, int size, int layers, int batch, int forward);
 void pm(int M, int N, float *A);
 float *random_matrix(int rows, int cols);
 void time_random_matrix(int TA, int TB, int m, int k, int n);
+void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out);
 
 void test_blas();
 
@@ -31,13 +33,20 @@ void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error);
 void l2_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void weighted_sum_cpu(float *a, float *b, float *s, int num, float *c);
+
+void softmax(float *input, int n, float temp, float *output, int stride);
+void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out);
 
 #ifdef GPU
+#include "cuda.h"
+
 void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY);
 void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY);
 void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY);
 void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY);
 void scal_ongpu(int N, float ALPHA, float * X, int INCX);
+void supp_ongpu(int N, float ALPHA, float * X, int INCX);
 void mask_ongpu(int N, float * X, float mask_num, float * mask);
 void const_ongpu(int N, float ALPHA, float *X, int INCX);
 void pow_ongpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
@@ -68,6 +77,15 @@ void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c);
 void mult_add_into_gpu(int num, float *a, float *b, float *c);
 
+void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out);
+
+void softmax_gpu(float *input, int n, int offset, int groups, float temp, float *output);
+void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t);
+void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t);
+
+void flatten_ongpu(float *x, int spatial, int layers, int batch, int forward, float *out);
+
+void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out);
 #endif
 #endif
-- 
Gitblit v1.10.0
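
Note for reviewers: the patch only adds declarations to blas.h; the matching definitions live in blas.c and are not shown here. As a rough guide to what two of the new prototypes stand for, the sketch below gives plausible bodies for weighted_sum_cpu and upsample_cpu, assuming NCHW layout and nearest-neighbour upsampling. These are illustrative guesses consistent with the declared signatures, not necessarily the code in this repository.

/* Hypothetical sketch of definitions matching two declarations added above.
 * Assumes NCHW memory layout; the real implementations in blas.c may differ. */

/* Element-wise blend: c[i] = s[i]*a[i] + (1 - s[i])*b[i]. */
void weighted_sum_cpu(float *a, float *b, float *s, int num, float *c)
{
    int i;
    for (i = 0; i < num; ++i) {
        c[i] = s[i] * a[i] + (1 - s[i]) * (b ? b[i] : 0);
    }
}

/* Nearest-neighbour upsample by `stride`. forward != 0 writes scale*in into
 * the enlarged `out`; otherwise the gradient stored in `out` is accumulated
 * back into `in` (the backward pass of an upsample layer). */
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int b, k, j, i;
    for (b = 0; b < batch; ++b) {
        for (k = 0; k < c; ++k) {
            for (j = 0; j < h * stride; ++j) {
                for (i = 0; i < w * stride; ++i) {
                    int in_index  = b*w*h*c + k*w*h + (j / stride)*w + i / stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if (forward) out[out_index] = scale * in[in_index];
                    else         in[in_index] += scale * out[out_index];
                }
            }
        }
    }
}

The single `forward` flag mirrors the other declarations in this hunk (flatten, reorg_cpu, upsample_gpu), which likewise reuse one routine for both the forward and backward passes.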