From 698d5820cdc9146cc0ed390f6cc79304c220d520 Mon Sep 17 00:00:00 2001
From: julienChemillier
Date: Thu, 3 Nov 2022 17:50:11 +0100
Subject: [PATCH] Add backpropagation (.h and .c)

---
 src/cnn/backpropagation.c         | 171 ++++++++++++++++++++++++++++++
 src/cnn/cnn.c                     |  18 ++--
 src/cnn/creation.c                |   2 +-
 src/cnn/include/backpropagation.h |  40 +++++++
 4 files changed, 223 insertions(+), 8 deletions(-)
 create mode 100644 src/cnn/backpropagation.c
 create mode 100644 src/cnn/include/backpropagation.h

diff --git a/src/cnn/backpropagation.c b/src/cnn/backpropagation.c
new file mode 100644
index 0000000..85afc84
--- /dev/null
+++ b/src/cnn/backpropagation.c
@@ -0,0 +1,171 @@
+#include <math.h>
+#include "backpropagation.h"
+
+
+int min(int a, int b) {
+    return a<b?a:b;
+}
+
+int max(int a, int b) {
+    return a>b?a:b;
+}
+
+// Uh..... all of this may be wrong because of the source used
+void rms_backward(float* input, float* input_z, float* output, int size) {
+    /* Input and output have the same size
+    We assume the last layer used softmax */
+    float sum=0;
+    for (int i=0; i < size; i++) {
+        sum += exp(input_z[i]);
+    }
+    for (int i=0; i < size; i++) {
+        // Gradient of the squared error through softmax (diagonal term only)
+        float a = exp(input_z[i])/sum;
+        input[i] = 2*(a - output[i])*a*(1-a);
+    }
+}
+
+void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, ptr d_function, int is_first) {
+    // Bias
+    for (int j=0; j < size_output; j++) {
+        ker->d_bias[j] += output[j];
+    }
+
+    // Weights
+    for (int i=0; i < size_input; i++) {
+        for (int j=0; j < size_output; j++) {
+            ker->d_weights[i][j] += input[i]*output[j];
+        }
+    }
+
+    // Input
+    if (is_first==1) // No need to backpropagate into the input
+        return;
+
+    for (int i=0; i < size_input; i++) {
+        float tmp=0;
+        for (int j=0; j < size_output; j++) {
+            tmp += output[j]*ker->weights[i][j];
+        }
+        // Propagate the error through the activation derivative
+        input[i] = tmp*d_function(input_z[i]);
+    }
+}
+
+void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function) {
+    // Bias
+    for (int j=0; j < size_output; j++) {
+        ker->d_bias[j] += output[j];
+    }
+
+    // Weights
+    // 'cpt' indexes the flattened (depth, row, column) input
+    int cpt = 0;
+    for (int i=0; i < depth_input; i++) {
+        for (int k=0; k < dim_input; k++) {
+            for (int l=0; l < dim_input; l++) {
+                for (int j=0; j < size_output; j++) {
+                    ker->d_weights[cpt][j] += input[i][k][l]*output[j];
+                }
+                cpt++;
+            }
+        }
+    }
+
+    // Input
+    cpt = 0;
+    for (int i=0; i < depth_input; i++) {
+        for (int k=0; k < dim_input; k++) {
+            for (int l=0; l < dim_input; l++) {
+                float tmp=0;
+                for (int j=0; j < size_output; j++) {
+                    tmp += output[j]*ker->weights[cpt][j];
+                }
+                input[i][k][l] = tmp*d_function(input_z[i][k][l]);
+                cpt++;
+            }
+        }
+    }
+}
+
+void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first) {
+    // Bias
+    for (int i=0; i < depth_output; i++) {
+        for (int j=0; j < dim_output; j++) {
+            for (int k=0; k < dim_output; k++) {
+                ker->d_bias[i][j][k] += output[i][j][k];
+            }
+        }
+    }
+
+    // Weights
+    int k_size = dim_input - dim_output +1;
+    int var = dim_input - k_size +1; // = dim_output
+    for (int h=0; h < depth_input; h++) {
+        for (int i=0; i < depth_output; i++) {
+            for (int j=0; j < k_size; j++) {
+                for (int k=0; k < k_size; k++) {
+                    // Correlate the layer's input with the output error
+                    float tmp = 0;
+                    for (int l=0; l < var; l++) {
+                        for (int m=0; m < var; m++) {
+                            tmp += input[h][l+j][m+k]*output[i][l][m];
+                        }
+                    }
+                    ker->d_weights[h][i][j][k] += tmp;
+                }
+            }
+        }
+    }
+
+    // Input
+    if (is_first==1) // No need to backpropagate into the input
+        return;
+
+    for (int i=0; i < depth_input; i++) {
+        for (int j=0; j < dim_input; j++) {
+            for (int k=0; k < dim_input; k++) {
+                // Sum the contributions of every output cell this input cell touched
+                float tmp = 0;
+                for (int l=0; l < depth_output; l++) {
+                    for (int m=max(0, j-dim_output+1); m < min(k_size, j+1); m++) {
+                        for (int n=max(0, k-dim_output+1); n < min(k_size, k+1); n++) {
+                            tmp += output[l][j-m][k-n]*ker->weights[i][l][m][n];
+                        }
+                    }
+                }
+                input[i][j][k] = tmp*d_function(input_z[i][j][k]);
+            }
+        }
+    }
+}
+
+
+// Only last_... have been done, we have to deal with the d_... part
+// It's EASY but it needs to be done
+
+// The first layer needs to be a convolution or a fully connected one
\ No newline at end of file
diff --git a/src/cnn/cnn.c b/src/cnn/cnn.c
index 7581005..f7eb4e0 100644
--- a/src/cnn/cnn.c
+++ b/src/cnn/cnn.c
@@ -88,34 +88,38 @@ void backward_propagation(Network* network, float wanted_number) {
     int n = network->size;
     int activation, input_depth, input_width, output_depth, output_width;
     float*** input;
+    float*** input_z;
     float*** output;
     Kernel* k_i;
     Kernel* k_i_1;
 
-    // rms_backward(network->input[n-1][0][0], wanted_output); // Backward pass on the last layer
+    rms_backward(network->input[n-1][0][0], network->input_z[n-1][0][0], wanted_output, network->width[n-1]); // Backward pass on the last layer
 
-    for (int i=n-3; i >= 0; i--) {
+    for (int i=n-2; i >= 0; i--) {
         // Updates 'k_i' by comparing the information in 'input' and 'output'
         k_i = network->kernel[i];
         k_i_1 = network->kernel[i+1];
         input = network->input[i];
+        input_z = network->input_z[i];
         input_depth = network->depth[i];
         input_width = network->width[i];
         output = network->input[i+1];
         output_depth = network->depth[i+1];
         output_width = network->width[i+1];
-        activation = k_i->activation;
+        activation = i==0?SIGMOID:k_i->activation;
 
         if (k_i->cnn) { // Convolution
-
+            ptr d_f = get_function_activation(activation);
+            backward_convolution(k_i->cnn, input, input_z, output, input_depth, input_width, output_depth, output_width, d_f, i==0);
         } else if (k_i->nn) { // Full connection
+            ptr d_f = get_function_activation(activation);
             if (input_depth==1) { // Vector -> Vector
-
+                backward_fully_connected(k_i->nn, input[0][0], input_z[0][0], output[0][0], input_width, output_width, d_f, i==0);
             } else { // Matrix -> vector
-
+                backward_linearisation(k_i->nn, input, input_z, output[0][0], input_depth, input_width, output_width, d_f);
             }
         } else { // Pooling
-            // backward_2d_pooling(input, output, input_width, output_width, input_depth) // Input and output have the same depth
+            backward_2d_pooling(input, output, input_width, output_width, input_depth); // Input and output have the same depth
         }
     }
     free(wanted_output);
diff --git a/src/cnn/creation.c b/src/cnn/creation.c
index 582a224..5b8ec21 100644
--- a/src/cnn/creation.c
+++ b/src/cnn/creation.c
@@ -31,7 +31,7 @@ Network* create_network(int max_size, int learning_rate, int dropout, int initia
     create_a_cube_input_layer(network, 0, input_depth, input_dim);
     // create_a_cube_input_z_layer(network, 0, input_depth, input_dim);
     // This shouldn't be used (if I'm not mistaken) so to save space, we can do:
-    ntework->input_z[0] = NULL; // As we don't backpropagate the input
+    network->input_z[0] = NULL; // As we don't backpropagate the input
 
     return network;
 }
diff --git a/src/cnn/include/backpropagation.h b/src/cnn/include/backpropagation.h
new file mode 100644
index 0000000..6deec24
--- /dev/null
+++ b/src/cnn/include/backpropagation.h
@@ -0,0 +1,40 @@
+#include "function.h"
+#ifndef DEF_BACKPROPAGATION_H
+#define DEF_BACKPROPAGATION_H
+
+/*
+* Returns the minimum of a and b
+*/
+int min(int a, int b);
+
+/*
+* Returns the maximum of a and b
+*/
+int max(int a, int b);
+
+/*
+* Propagates the error information from the expected output to the actual output
+*/
+void rms_backward(float* input, float* input_z, float* output, int size);
+
+/*
+* Propagates the error information through an average pooling layer
+*/
+void backward_2d_pooling(float*** input, float*** output, int input_width, int output_width, int depth);
+
+/*
+* Propagates the error information through a fully connected layer
+*/
+void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, ptr d_function, int is_first);
+
+/*
+* Propagates the error information through a linearisation (flattening) layer
+*/
+void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function);
+
+/*
+* Propagates the error information through a convolution layer
+*/
+void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first);
+
+#endif
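
Note on backward_2d_pooling: the prototype above is declared and the function is called from cnn.c, but its definition is not shown in this patch. A minimal sketch of an average-pooling backward pass matching that prototype could look like the code below. It assumes square, non-overlapping pooling windows of side input_width/output_width and no activation on the pooling output (consistent with the call site, which passes neither input_z nor a derivative function); it is an illustration of the idea, not the repository's actual implementation.

    // Hypothetical sketch, not the repository's implementation.
    void backward_2d_pooling(float*** input, float*** output, int input_width, int output_width, int depth) {
        // Assumed: each output cell is the average of a size x size block of input cells,
        // so the incoming error in 'output' is spread back evenly over that block.
        int size = input_width / output_width;   // side of one pooling window (assumption)
        float n = (float)(size * size);          // number of input cells per output cell

        for (int d=0; d < depth; d++) {
            // Clear the input errors before accumulating
            for (int i=0; i < input_width; i++) {
                for (int j=0; j < input_width; j++) {
                    input[d][i][j] = 0;
                }
            }
            // Distribute each output error evenly over its pooling window
            for (int i=0; i < output_width; i++) {
                for (int j=0; j < output_width; j++) {
                    for (int a=0; a < size; a++) {
                        for (int b=0; b < size; b++) {
                            input[d][size*i + a][size*j + b] += output[d][i][j] / n;
                        }
                    }
                }
            }
        }
    }

The division by n mirrors the averaging done in the forward pass; if the forward pass folds that factor in differently, the scaling here would move accordingly.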