    }
}

__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
{
    int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    /* one thread per ordered pair of filters (f1, f2) */
    int f1 = index / n;
    int f2 = index % n;
    /* handle each unordered pair once; this also culls out-of-range
     * threads, since index >= n*n implies f1 >= n > f2 */
    if (f2 <= f1) return;

    float sum = 0;
    float norm1 = 0;
    float norm2 = 0;
    int b, i;
    /* accumulate <o1,o2>, ||o1||^2 and ||o2||^2 over the whole batch */
    for(b = 0; b < batch; ++b){
        for(i = 0; i < size; ++i){
            int i1 = b * size * n + f1 * size + i;
            int i2 = b * size * n + f2 * size + i;
            sum += output[i1] * output[i2];
            norm1 += output[i1] * output[i1];
            norm2 += output[i2] * output[i2];
        }
    }
    norm1 = sqrtf(norm1);
    norm2 = sqrtf(norm2);
    float norm = norm1 * norm2;   /* zero if either map is all zeros, which would produce NaNs below */
    sum = sum / norm;             /* cosine similarity of the two filters' output maps */
    /* second pass: push both maps apart in proportion to their similarity */
    for(b = 0; b < batch; ++b){
        for(i = 0; i < size; ++i){
            int i1 = b * size * n + f1 * size + i;
            int i2 = b * size * n + f2 * size + i;
            delta[i1] += -scale * sum * output[i2] / norm;
            delta[i2] += -scale * sum * output[i1] / norm;
        }
    }
}

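/* For a pair of output maps o1, o2 the kernel implements
 *
 *     cos = <o1,o2> / (||o1|| * ||o2||)
 *     delta1[i] -= scale * cos * o2[i] / (||o1|| * ||o2||)
 *     delta2[i] -= scale * cos * o1[i] / (||o1|| * ||o2||)
 *
 * which is the cross term of d(0.5 * scale * cos^2)/d o1[i] (the
 * projection term of the full derivative is dropped). Under Darknet's
 * convention that delta accumulates the negative loss gradient, this
 * penalizes similar filter pairs, nudging them toward orthogonality. */
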
void dot_error_gpu(layer l)
{
    /* one thread per (f1, f2) pair: l.dot scales the penalty,
     * l.out_w*l.out_h is the spatial size of one output map */
    dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w*l.out_h, l.delta_gpu);
    check_error(cudaPeekAtLastError());
}

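/* The launch above relies on two helpers defined elsewhere in Darknet:
 * the BLOCK constant (threads per block, 512 in mainline Darknet) and
 * cuda_gridsize(), which spreads the required blocks over a 2-D grid
 * because grid.x alone is capped at 65535 blocks. The
 * (blockIdx.x + blockIdx.y*gridDim.x) indexing in dot_kernel flattens
 * that layout back out. A sketch, assuming the mainline definition: */
dim3 cuda_gridsize(size_t n)
{
    size_t k = (n - 1) / BLOCK + 1;   /* number of BLOCK-sized blocks for n threads */
    size_t x = k;
    size_t y = 1;
    if(x > 65535){
        x = ceil(sqrt(k));            /* roughly square 2-D grid */
        y = (n - 1) / (x * BLOCK) + 1;
    }
    dim3 d = {(unsigned int)x, (unsigned int)y, 1};
    return d;
}
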
void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
{
    /* n blocks of BLOCK threads: one block per filter */
    backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
    check_error(cudaPeekAtLastError());
}

/* ... tail of the GPU forward pass for the convolutional layer, where the
 * penalty is hooked in (here m is the filter count l.n and n is the
 * spatial output size l.out_w*l.out_h): */

    add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, n);

    activate_array_ongpu(l.output_gpu, m*n*l.batch, l.activation);
    if(l.dot > 0) dot_error_gpu(l);   /* apply the filter-similarity penalty */
    if(l.binary) swap_binary(&l);     /* swap the real-valued weights back in after a binarized forward pass */
}