From dbdd31ee211fe8b1ac7e93ceadf7b34b8d304f34 Mon Sep 17 00:00:00 2001
From: Roland Singer <roland.singer@desertbit.com>
Date: Wed, 22 Aug 2018 11:56:41 +0000
Subject: [PATCH] gemm.h: add declarations for convolution_2d, im2col_cpu_custom_transpose, and forward_maxpool_layer_avx
---
src/gemm.h | 13 +++++++++++++
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/src/gemm.h b/src/gemm.h
index 4514f57..3cdca99 100644
--- a/src/gemm.h
+++ b/src/gemm.h
@@ -2,6 +2,10 @@
#define GEMM_H
#include "activations.h"
#include <stdint.h>
+#include <stddef.h>
+
+void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
+ float *weights, float *input, float *output, float *mean);
static inline void set_bit(unsigned char *const dst, size_t index) {
size_t dst_i = index / 8;
@@ -30,6 +34,10 @@
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col);
+void im2col_cpu_custom_transpose(float* data_im,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float* data_col, int ldb_align);
+
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a);
@@ -38,6 +46,11 @@
float *B, int ldb,
float *C, int ldc);
+
+void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
+ int pad, int stride, int batch);
+
+
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
--
Gitblit v1.10.0