    l.weights = calloc(outputs*inputs, sizeof(float));
    l.biases = calloc(outputs, sizeof(float));

    l.forward = forward_connected_layer;
    l.backward = backward_connected_layer;
    l.update = update_connected_layer;
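    /* He-style initialization: with ReLU-family activations, scaling random
     * weights by sqrt(2/fan_in) keeps activation variance roughly constant
     * from layer to layer (He et al., 2015). The commented-out line below is
     * the older Xavier-style 1/sqrt(fan_in) scale. */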
    //float scale = 1./sqrt(inputs);
    float scale = sqrt(2./inputs);
    for(i = 0; i < outputs*inputs; ++i){
        l.weights[i] = scale*rand_uniform(-1, 1);
    }
#ifdef GPU
    l.forward_gpu = forward_connected_layer_gpu;
    l.backward_gpu = backward_connected_layer_gpu;
    l.update_gpu = update_connected_layer_gpu;

    l.weights_gpu = cuda_make_array(l.weights, outputs*inputs);
    l.biases_gpu = cuda_make_array(l.biases, outputs);
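    /* cuda_make_array allocates a device buffer and, when given a host
     * pointer, copies the host contents into it, so each *_gpu pointer starts
     * out as a device-side mirror of the corresponding host array. */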
    l.weight_updates_gpu = cuda_make_array(l.weight_updates, outputs*inputs);
    l.bias_updates_gpu = cuda_make_array(l.bias_updates, outputs);

    l.output_gpu = cuda_make_array(l.output, outputs*batch);
    l.delta_gpu = cuda_make_array(l.delta, outputs*batch);

    if(batch_normalize){
        l.x_gpu = cuda_make_array(l.output, l.batch*outputs);
        l.x_norm_gpu = cuda_make_array(l.output, l.batch*outputs);
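        /* x_gpu keeps the pre-normalization activations and x_norm_gpu the
         * normalized ones; the batch-norm backward pass needs both. The cuDNN
         * descriptors below view the flat connected output as an NCHW tensor
         * of shape batch x out_c x out_h x out_w (out_c = outputs and
         * out_h = out_w = 1 for a connected layer), while normTensorDesc
         * describes the per-channel scale/bias/mean/variance parameters. */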
#ifdef CUDNN
        cudnnCreateTensorDescriptor(&l.normTensorDesc);
        cudnnCreateTensorDescriptor(&l.dstTensorDesc);
        cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
        cudnnSetTensor4dDescriptor(l.normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l.out_c, 1, 1);
#endif
    }
#endif
    l.activation = activation;
    fprintf(stderr, "connected %4d -> %4d\n", inputs, outputs);
    return l;
}
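
/* A minimal usage sketch with hypothetical parameter values. It assumes the
 * usual darknet-style signature
 *     make_connected_layer(batch, inputs, outputs, activation, batch_normalize, adam)
 * since the parameter list is not visible in this fragment:
 *
 *     layer l = make_connected_layer(64, 784, 256, RELU, 1, 0);
 *     // l.weights: 256*784 floats, He-initialized; l.biases: 256 zeros (calloc)
 *     // with batch_normalize = 1, the forward pass normalizes instead of
 *     // adding raw biases (see forward_connected_layer_gpu below)
 */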
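/* Prints summary statistics of the learned parameters; useful for eyeballing
 * whether training has blown up or collapsed. */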
void statistics_connected_layer(layer l)
{
    if(l.batch_normalize){
        printf("Scales ");
        print_statistics(l.scales, l.outputs);
        /*
        printf("Rolling Mean ");
        print_statistics(l.rolling_mean, l.outputs);
        printf("Rolling Variance ");
        print_statistics(l.rolling_variance, l.outputs);
        */
    }
    printf("Biases ");
    print_statistics(l.biases, l.outputs);
    printf("Weights ");
    print_statistics(l.weights, l.outputs);
}

void forward_connected_layer_gpu(connected_layer l, network_state state)
{
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);

    int m = l.batch;
    int k = l.inputs;
    int n = l.outputs;
    float * a = state.input;
    float * b = l.weights_gpu;
    float * c = l.output_gpu;
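    /* gemm_ongpu(TA=0, TB=1, ...): C (batch x outputs) += A (batch x inputs)
     * times B^T, where B is the outputs x inputs weight matrix -- one matrix
     * multiply computes the whole batch's pre-activations. BETA is 1, which is
     * why the output buffer is zeroed with fill_ongpu above. */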
    gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
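    /* With batch norm on, forward_batchnorm_layer_gpu applies scale and shift
     * itself (the shift plays the role of the bias), so the raw bias add is
     * only needed in the else branch. */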
    if (l.batch_normalize) {
        forward_batchnorm_layer_gpu(l, state);
    }
    else {
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.outputs, 1);
    }
    //for(i = 0; i < l.batch; ++i) axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}