From 0f645836f193e75c4c3b718369e6fab15b5d19c5 Mon Sep 17 00:00:00 2001
From: Joseph Redmon <pjreddie@gmail.com>
Date: Wed, 11 Feb 2015 03:41:03 +0000
Subject: [PATCH] Detection is back, baby!
---
src/connected_layer.c | 88 +++++++++++++++++++++++--------------------
1 file changed, 47 insertions(+), 41 deletions(-)
diff --git a/src/connected_layer.c b/src/connected_layer.c
index 938b8b8..642570c 100644
--- a/src/connected_layer.c
+++ b/src/connected_layer.c
@@ -1,6 +1,8 @@
#include "connected_layer.h"
#include "utils.h"
-#include "mini_blas.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
#include <math.h>
#include <stdio.h>
@@ -34,7 +36,6 @@
float scale = 1./sqrt(inputs);
- //scale = .01;
for(i = 0; i < inputs*outputs; ++i){
layer->weights[i] = scale*rand_normal();
}
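[Annotation, not part of the patch] With scale = 1./sqrt(inputs), each weight has variance 1/inputs, so a pre-activation summed over `inputs` roughly unit-variance inputs keeps variance near 1 regardless of layer width; the deleted scale = .01 override discarded that property. A minimal sketch of the reasoning, assuming rand_normal() draws from N(0,1) (the _sketch name and its Box-Muller body are illustrative stand-ins, not darknet's definition):

    #include <math.h>
    #include <stdlib.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    /* Illustrative stand-in for rand_normal(): one Box-Muller draw
     * from N(0,1). */
    static float rand_normal_sketch(void)
    {
        float u1 = (rand() + 1.0f) / ((float)RAND_MAX + 2.0f);
        float u2 = (rand() + 1.0f) / ((float)RAND_MAX + 2.0f);
        return sqrtf(-2.0f * logf(u1)) * cosf(2.0f * (float)M_PI * u2);
    }

    /* Then w = scale * rand_normal() gives Var(w) = 1/inputs, and
     * Var(sum_i w_i x_i) = inputs * (1/inputs) * Var(x) = Var(x). */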
@@ -44,14 +45,14 @@
}
#ifdef GPU
- layer->weights_cl = cl_make_array(layer->weights, inputs*outputs);
- layer->biases_cl = cl_make_array(layer->biases, outputs);
+ layer->weights_gpu = cuda_make_array(layer->weights, inputs*outputs);
+ layer->biases_gpu = cuda_make_array(layer->biases, outputs);
- layer->weight_updates_cl = cl_make_array(layer->weight_updates, inputs*outputs);
- layer->bias_updates_cl = cl_make_array(layer->bias_updates, outputs);
+ layer->weight_updates_gpu = cuda_make_array(layer->weight_updates, inputs*outputs);
+ layer->bias_updates_gpu = cuda_make_array(layer->bias_updates, outputs);
- layer->output_cl = cl_make_array(layer->output, outputs*batch);
- layer->delta_cl = cl_make_array(layer->delta, outputs*batch);
+ layer->output_gpu = cuda_make_array(layer->output, outputs*batch);
+ layer->delta_gpu = cuda_make_array(layer->delta, outputs*batch);
#endif
layer->activation = activation;
fprintf(stderr, "Connected Layer: %d inputs, %d outputs\n", inputs, outputs);
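[Annotation] The cl_make_array -> cuda_make_array renames swap OpenCL buffer handles (cl_mem) for raw CUDA device pointers. cuda_make_array itself is defined elsewhere (src/cuda.c is not in this patch); a plausible sketch, assuming the usual cudaMalloc-then-copy pattern, with simplified error handling and a _sketch suffix to mark the name as mine:

    #include <cuda_runtime.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: allocate n floats on the device, seeded from the host
     * buffer x when one is given. */
    float *cuda_make_array_sketch(float *x, size_t n)
    {
        float *x_gpu = 0;
        size_t size = sizeof(float)*n;
        if (cudaMalloc((void **)&x_gpu, size) != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed\n");
            exit(1);
        }
        if (x) cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
        return x_gpu;
    }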
@@ -76,8 +77,6 @@
axpy_cpu(layer->outputs, 1, layer->bias_updates, 1, layer->bias_prev, 1);
scal_cpu(layer->outputs, 0, layer->bias_updates, 1);
- //printf("rate: %f\n", layer->learning_rate);
-
axpy_cpu(layer->outputs, layer->learning_rate, layer->bias_prev, 1, layer->biases, 1);
axpy_cpu(layer->inputs*layer->outputs, -layer->decay, layer->weights, 1, layer->weight_prev, 1);
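[Annotation] The update is written entirely in BLAS-1 calls. Assuming darknet's axpy_cpu(n, a, x, incx, y, incy) computes y += a*x and scal_cpu(n, a, x, incx) computes x *= a (the conventional BLAS semantics), the bias step folds the fresh gradient into the running bias_prev buffer, clears the accumulator, and applies the smoothed step. A scalar sketch (the function name is illustrative):

    /* Scalar reading of the three bias-related BLAS-1 calls above. */
    void bias_step_sketch(int n, float lr, float *bias_updates,
                          float *bias_prev, float *biases)
    {
        int i;
        for (i = 0; i < n; ++i) {
            bias_prev[i] += bias_updates[i]; /* axpy(n, 1, updates, prev) */
            bias_updates[i] = 0;             /* scal(n, 0, updates)       */
            biases[i] += lr * bias_prev[i];  /* axpy(n, lr, prev, biases) */
        }
    }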
@@ -113,9 +112,10 @@
void backward_connected_layer(connected_layer layer, float *input, float *delta)
{
int i;
+ float alpha = 1./layer.batch;
gradient_array(layer.output, layer.outputs*layer.batch, layer.activation, layer.delta);
for(i = 0; i < layer.batch; ++i){
- axpy_cpu(layer.outputs, 1, layer.delta + i*layer.outputs, 1, layer.bias_updates, 1);
+ axpy_cpu(layer.outputs, alpha, layer.delta + i*layer.outputs, 1, layer.bias_updates, 1);
}
int m = layer.inputs;
int k = layer.batch;
@@ -123,7 +123,7 @@
float *a = input;
float *b = layer.delta;
float *c = layer.weight_updates;
- gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
+ gemm(1,0,m,n,k,alpha,a,m,b,n,1,c,n);
m = layer.batch;
k = layer.outputs;
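[Annotation] The substantive change in this hunk is alpha = 1./layer.batch, which turns the accumulated bias and weight gradients into per-example averages instead of per-batch sums, so learning_rate no longer has to be retuned when the batch size changes. Reading the GEMM arguments, gemm(1,0,m,n,k,alpha,a,m,b,n,1,c,n) computes C += alpha * A^T * B, i.e. weight_updates += (1/batch) * input^T * delta. A naive scalar sketch, assuming the row-major layout the leading dimensions suggest:

    /* C (m x n) += alpha * A^T * B, with A stored k x m row-major
     * (lda = m) and B stored k x n row-major (ldb = n). */
    void gemm_tn_sketch(int m, int n, int k, float alpha,
                        float *a, float *b, float *c)
    {
        int i, j, p;
        for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
                float sum = 0;
                for (p = 0; p < k; ++p) {
                    sum += a[p*m + i] * b[p*n + j];
                }
                c[i*n + j] += alpha * sum;
            }
        }
    }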
@@ -140,68 +140,74 @@
void pull_connected_layer(connected_layer layer)
{
- cl_read_array(layer.weights_cl, layer.weights, layer.inputs*layer.outputs);
- cl_read_array(layer.biases_cl, layer.biases, layer.outputs);
- cl_read_array(layer.weight_updates_cl, layer.weight_updates, layer.inputs*layer.outputs);
- cl_read_array(layer.bias_updates_cl, layer.bias_updates, layer.outputs);
+ cuda_pull_array(layer.weights_gpu, layer.weights, layer.inputs*layer.outputs);
+ cuda_pull_array(layer.biases_gpu, layer.biases, layer.outputs);
+ cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.inputs*layer.outputs);
+ cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.outputs);
}
void push_connected_layer(connected_layer layer)
{
- cl_write_array(layer.weights_cl, layer.weights, layer.inputs*layer.outputs);
- cl_write_array(layer.biases_cl, layer.biases, layer.outputs);
- cl_write_array(layer.weight_updates_cl, layer.weight_updates, layer.inputs*layer.outputs);
- cl_write_array(layer.bias_updates_cl, layer.bias_updates, layer.outputs);
+ cuda_push_array(layer.weights_gpu, layer.weights, layer.inputs*layer.outputs);
+ cuda_push_array(layer.biases_gpu, layer.biases, layer.outputs);
+ cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.inputs*layer.outputs);
+ cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.outputs);
}
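[Annotation] pull_connected_layer and push_connected_layer are host/device mirrors: pull copies parameters and accumulated updates device-to-host, push goes the other way. A sketch of a typical round trip, e.g. inspecting weights on the host before resuming GPU training (the wrapper name is illustrative, and this assumes the GPU build, since both helpers live under #ifdef GPU):

    #include <stdio.h>
    #include "connected_layer.h"

    /* Sketch: sync a layer to the host, peek at it, and push any
     * host-side edits back to the device. */
    void inspect_connected_layer(connected_layer layer)
    {
        pull_connected_layer(layer);  /* device -> host */
        printf("w[0] = %f, b[0] = %f\n", layer.weights[0], layer.biases[0]);
        push_connected_layer(layer);  /* host -> device */
    }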
void update_connected_layer_gpu(connected_layer layer)
{
- axpy_ongpu(layer.outputs, layer.learning_rate, layer.bias_updates_cl, 1, layer.biases_cl, 1);
- scal_ongpu(layer.outputs, layer.momentum, layer.bias_updates_cl, 1);
+/*
+ cuda_pull_array(layer.weights_gpu, layer.weights, layer.inputs*layer.outputs);
+ cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.inputs*layer.outputs);
+ printf("Weights: %f updates: %f\n", mag_array(layer.weights, layer.inputs*layer.outputs), layer.learning_rate*mag_array(layer.weight_updates, layer.inputs*layer.outputs));
+*/
- axpy_ongpu(layer.inputs*layer.outputs, -layer.decay, layer.weights_cl, 1, layer.weight_updates_cl, 1);
- axpy_ongpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates_cl, 1, layer.weights_cl, 1);
- scal_ongpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates_cl, 1);
- pull_connected_layer(layer);
+ axpy_ongpu(layer.outputs, layer.learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
+ scal_ongpu(layer.outputs, layer.momentum, layer.bias_updates_gpu, 1);
+
+ axpy_ongpu(layer.inputs*layer.outputs, -layer.decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
+ axpy_ongpu(layer.inputs*layer.outputs, layer.learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
+ scal_ongpu(layer.inputs*layer.outputs, layer.momentum, layer.weight_updates_gpu, 1);
}
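[Annotation] The GPU update uses the same axpy/scal vocabulary on device pointers, and the old unconditional pull_connected_layer(layer) at the end is gone, so parameters now stay resident on the GPU between steps. In scalar form the weight step reads (sketch, with the same assumed axpy/scal semantics as above):

    /* Scalar reading of the weight-update calls above: SGD with L2
     * decay and momentum folded into the running update buffer. */
    void weight_step_sketch(int n, float lr, float momentum, float decay,
                            float *weights, float *weight_updates)
    {
        int i;
        for (i = 0; i < n; ++i) {
            weight_updates[i] -= decay * weights[i]; /* L2 penalty       */
            weights[i] += lr * weight_updates[i];    /* take the step    */
            weight_updates[i] *= momentum;           /* decay the buffer */
        }
    }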
-void forward_connected_layer_gpu(connected_layer layer, cl_mem input)
+void forward_connected_layer_gpu(connected_layer layer, float * input)
{
int i;
for(i = 0; i < layer.batch; ++i){
- copy_ongpu_offset(layer.outputs, layer.biases_cl, 0, 1, layer.output_cl, i*layer.outputs, 1);
+ copy_ongpu_offset(layer.outputs, layer.biases_gpu, 0, 1, layer.output_gpu, i*layer.outputs, 1);
}
int m = layer.batch;
int k = layer.inputs;
int n = layer.outputs;
- cl_mem a = input;
- cl_mem b = layer.weights_cl;
- cl_mem c = layer.output_cl;
+ float * a = input;
+ float * b = layer.weights_gpu;
+ float * c = layer.output_gpu;
gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
- activate_array_ongpu(layer.output_cl, layer.outputs*layer.batch, layer.activation);
+ activate_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation);
}
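[Annotation] A dimension check for the forward pass: the biases are first broadcast into output_gpu one batch row at a time, then gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n) with m = batch, k = inputs, n = outputs accumulates input * weights on top of them (BETA = 1 preserves the biases). A CPU-equivalent sketch:

    /* output (batch x outputs) += input (batch x inputs) *
     * weights (inputs x outputs); output rows start at the biases. */
    void forward_gemm_sketch(int batch, int inputs, int outputs,
                             float *input, float *weights, float *output)
    {
        int b, o, i;
        for (b = 0; b < batch; ++b) {
            for (o = 0; o < outputs; ++o) {
                float sum = output[b*outputs + o];
                for (i = 0; i < inputs; ++i) {
                    sum += input[b*inputs + i] * weights[i*outputs + o];
                }
                output[b*outputs + o] = sum;
            }
        }
    }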
-void backward_connected_layer_gpu(connected_layer layer, cl_mem input, cl_mem delta)
+void backward_connected_layer_gpu(connected_layer layer, float * input, float * delta)
{
+ float alpha = 1./layer.batch;
int i;
- gradient_array_ongpu(layer.output_cl, layer.outputs*layer.batch, layer.activation, layer.delta_cl);
+ gradient_array_ongpu(layer.output_gpu, layer.outputs*layer.batch, layer.activation, layer.delta_gpu);
for(i = 0; i < layer.batch; ++i){
- axpy_ongpu_offset(layer.outputs, 1, layer.delta_cl, i*layer.outputs, 1, layer.bias_updates_cl, 0, 1);
+ axpy_ongpu_offset(layer.outputs, alpha, layer.delta_gpu, i*layer.outputs, 1, layer.bias_updates_gpu, 0, 1);
}
int m = layer.inputs;
int k = layer.batch;
int n = layer.outputs;
- cl_mem a = input;
- cl_mem b = layer.delta_cl;
- cl_mem c = layer.weight_updates_cl;
- gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
+ float * a = input;
+ float * b = layer.delta_gpu;
+ float * c = layer.weight_updates_gpu;
+ gemm_ongpu(1,0,m,n,k,alpha,a,m,b,n,1,c,n);
m = layer.batch;
k = layer.outputs;
n = layer.inputs;
- a = layer.delta_cl;
- b = layer.weights_cl;
+ a = layer.delta_gpu;
+ b = layer.weights_gpu;
c = delta;
if(c) gemm_ongpu(0,1,m,n,k,1,a,k,b,k,0,c,n);
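[Annotation] The final GEMM propagates the error one layer back: gemm_ongpu(0,1,m,n,k,1,a,k,b,k,0,c,n) with m = batch, k = outputs, n = inputs computes delta_prev = delta * weights^T, BETA = 0 overwriting the destination, and the if(c) guard skipping the first layer, which has no upstream delta to fill. A scalar sketch:

    /* prev_delta (batch x inputs) = delta (batch x outputs) *
     * weights^T, with weights stored inputs x outputs row-major. */
    void backward_delta_sketch(int batch, int inputs, int outputs,
                               float *delta, float *weights,
                               float *prev_delta)
    {
        int b, i, o;
        for (b = 0; b < batch; ++b) {
            for (i = 0; i < inputs; ++i) {
                float sum = 0;
                for (o = 0; o < outputs; ++o) {
                    sum += delta[b*outputs + o] * weights[i*outputs + o];
                }
                prev_delta[b*inputs + i] = sum; /* BETA = 0: overwrite */
            }
        }
    }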
--
Gitblit v1.10.0