#include #include "backpropagation.h" int min(int a, int b) { return ab?a:b; } // Euh..... tout peut être faux à cause de la source void rms_backward(float* input, float* input_z, float* output, int size) { /* Input et output ont la même taille On considère que la dernière couche a utilisée softmax */ float sum=0; for (int i=0; id_bias[j] = ouput[j]; } // Weights for (int i=0; id_weights[i][j] = input[i]*output[j]; } } // Input if (is_first==1) // Pas besoin de backpropager dans l'input return; for (int i=0; iweights[i][j]; } input[i] = tmp*derivative_function(input_z[i]); } } void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function) { // Bias for (int j=0; jd_bias[j] += output[j]; } // Weights int cpt = 0; for (int i=0; id_weights[cpt][j] += input[i][k][l]*output[j]; cpt++; } } } } // Input cpt = 0; for (int i=0; iweights[cpt][j]; } input[i][k][l] = tmp*derivative_function(input_z[i][k][l]); cpt++; } } } } void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first) { // Bias for (int i=0; id_bias[i][j][k] += output[i][j][k]; } } } // Weights int k_size = dim_input - dim_output +1; int var = dim_input - k_size +1 for (int h=0; hd_weights[h][i][j][k] += tmp; } } } } // Input if (is_first==1) // Pas besoin de backpropager dans l'input return; for (int i=0; iweights[i][l][m][n]; } } } input[i][j][k] = tmp*derivative_function(input_z[i][j][k]); } } } } // Only last_... have been done, we have to deal with the d_... part // It's EASY but it needs to be done // The first layer needs to be a convolution or a fully conneted one