mirror of https://github.com/augustin64/projet-tipe (synced 2025-01-24 07:36:24 +01:00)
Merge branch 'main' of https://github.com/julienChemillier/TIPE.git
commit 9d03611744

171  src/cnn/backpropagation.c  (new file)

@@ -0,0 +1,171 @@
#include <math.h>

#include "backpropagation.h"

int min(int a, int b) {
    return a < b ? a : b;
}

int max(int a, int b) {
    return a > b ? a : b;
}

// Uh..... all of this may be wrong because of the source
void rms_backward(float* input, float* input_z, float* output, int size) {
    /* Input and output have the same size.
       We assume the last layer used softmax */
    float sum = 0;
    for (int i=0; i < size; i++)
        sum += exp(input_z[i]);
    float denom = sum*sum;
    for (int i=0; i < size; i++) {
        float e_i = exp(input_z[i]);
        input[i] = 2*(input[i]-output[i])*((e_i*(sum-e_i))/denom); // ∂E/∂out_i * ∂out_i/∂net_i = 𝛿_i
    }
}

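A quick standalone sanity check (not part of the file above) of the diagonal softmax derivative e_i*(sum-e_i)/sum² used in rms_backward, compared against a central finite difference; the test values are arbitrary and only <math.h>/<stdio.h> are assumed:

#include <math.h>
#include <stdio.h>

// softmax_i(z) for a 3-element vector
static float softmax_i(const float z[3], int i) {
    float sum = 0;
    for (int k = 0; k < 3; k++)
        sum += expf(z[k]);
    return expf(z[i]) / sum;
}

int main(void) {
    float z[3] = {0.3f, -1.2f, 0.8f};
    int i = 1;
    float sum = 0;
    for (int k = 0; k < 3; k++)
        sum += expf(z[k]);
    float e_i = expf(z[i]);
    // Analytic factor used in rms_backward
    float analytic = e_i * (sum - e_i) / (sum * sum);
    // Central finite difference on z[i]
    float eps = 1e-3f;
    float zp[3] = {z[0], z[1], z[2]}, zm[3] = {z[0], z[1], z[2]};
    zp[i] += eps;
    zm[i] -= eps;
    float numeric = (softmax_i(zp, i) - softmax_i(zm, i)) / (2 * eps);
    printf("analytic=%f numeric=%f\n", analytic, numeric);  // should agree to ~1e-4
    return 0;
}
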
void backward_2d_pooling(float*** input, float*** output, int input_width, int output_width, int depth) {
    /* Input and output have the same depth */
    // Invented by myself (and only me (really only me (lvdmm)))

    int size = input_width/output_width; // Size of the pooling window
    int n = size*size; // Number of elements in the pooling window

    for (int a=0; a < depth; a++)
        for (int b=0; b < input_width; b++)
            for (int c=0; c < input_width; c++)
                input[a][b][c] = 0;

    for (int i=0; i < depth; i++) {
        for (int j=0; j < output_width; j++) {
            for (int k=0; k < output_width; k++) {
                for (int a=0; a < size; a++) {
                    for (int b=0; b < size; b++) {
                        input[i][size*j +a][size*k +b] += output[i][j][k]/n;
                    }
                }
            }
        }
    }
}

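A minimal, hypothetical driver (not part of this commit) that wires up the float*** buffers expected by backward_2d_pooling for a single-channel 4x4 -> 2x2 case, checking that each output gradient is spread as output[j][k]/4 over its 2x2 window (with size = input_width/output_width as computed above):

#include <stdio.h>
#include "backpropagation.h"

int main(void) {
    float in_data[4][4] = {0};
    float out_data[2][2] = {{4.f, 8.f}, {12.f, 16.f}};

    // Build the float*** views for depth = 1
    float* in_rows[4] = {in_data[0], in_data[1], in_data[2], in_data[3]};
    float* out_rows[2] = {out_data[0], out_data[1]};
    float** in_plane = in_rows;
    float** out_plane = out_rows;
    float*** input = &in_plane;
    float*** output = &out_plane;

    backward_2d_pooling(input, output, 4, 2, 1);

    // Each 2x2 block of the input gradient now holds output[j][k]/4
    for (int b = 0; b < 4; b++) {
        for (int c = 0; c < 4; c++)
            printf("%5.2f ", in_data[b][c]);
        printf("\n");
    }
    return 0;
}
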
void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, ptr d_function, int is_first) {
    // Bias
    for (int j=0; j < size_output; j++) {
        ker->d_bias[j] = output[j];
    }

    // Weights
    for (int i=0; i < size_input; i++) {
        for (int j=0; j < size_output; j++) {
            ker->d_weights[i][j] = input[i]*output[j];
        }
    }

    // Input
    if (is_first==1) // No need to backpropagate into the input
        return;

    for (int i=0; i < size_input; i++) {
        float tmp=0;
        for (int j=0; j < size_output; j++) {
            tmp += output[j]*ker->weights[i][j];
        }
        input[i] = tmp*d_function(input_z[i]);
    }
}

void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function) {
    // Bias
    for (int j=0; j < size_output; j++) {
        ker->d_bias[j] += output[j];
    }

    // Weights
    int cpt = 0;
    for (int i=0; i < depth_input; i++) {
        for (int k=0; k < dim_input; k++) {
            for (int l=0; l < dim_input; l++) {
                for (int j=0; j < size_output; j++) {
                    ker->d_weights[cpt][j] += input[i][k][l]*output[j];
                }
                cpt++;
            }
        }
    }

    // Input
    cpt = 0;
    for (int i=0; i < depth_input; i++) {
        for (int k=0; k < dim_input; k++) {
            for (int l=0; l < dim_input; l++) {
                float tmp=0;
                for (int j=0; j < size_output; j++) {
                    tmp += output[j]*ker->weights[cpt][j];
                }
                input[i][k][l] = tmp*d_function(input_z[i][k][l]);
                cpt++;
            }
        }
    }
}

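A brief sketch (mine, not the commit's) of the flattening performed by the cpt counter above: the 3-D index (i, k, l) maps to row cpt = (i*dim_input + k)*dim_input + l of the weight matrix, which both passes over cpt rely on:

#include <stdio.h>

int main(void) {
    int depth_input = 2, dim_input = 3;
    int cpt = 0;
    for (int i = 0; i < depth_input; i++)
        for (int k = 0; k < dim_input; k++)
            for (int l = 0; l < dim_input; l++) {
                int flat = (i*dim_input + k)*dim_input + l; // closed form of cpt
                printf("(%d,%d,%d) -> cpt=%d flat=%d\n", i, k, l, cpt, flat);
                cpt++;
            }
    return 0;
}
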
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first) {
    // Bias
    for (int i=0; i < depth_output; i++) {
        for (int j=0; j < dim_output; j++) {
            for (int k=0; k < dim_output; k++) {
                ker->d_bias[i][j][k] += output[i][j][k];
            }
        }
    }

    // Weights
    int k_size = dim_input - dim_output +1;
    for (int h=0; h < depth_input; h++) {
        for (int i=0; i < depth_output; i++) {
            for (int j=0; j < k_size; j++) {
                for (int k=0; k < k_size; k++) {
                    float tmp = 0;
                    for (int l=0; l < dim_output; l++) {
                        for (int m=0; m < dim_output; m++) {
                            tmp += input[h][l+j][m+k]*output[i][l][m];
                        }
                    }
                    ker->d_weights[h][i][j][k] += tmp;
                }
            }
        }
    }

    // Input
    if (is_first==1) // No need to backpropagate into the input
        return;

    for (int i=0; i < depth_input; i++) {
        for (int j=0; j < dim_input; j++) {
            for (int k=0; k < dim_input; k++) {
                float tmp = 0;
                for (int l=0; l < depth_output; l++) {
                    int min_m = k_size - min(k_size, dim_input-j);
                    int max_m = min(k_size, j+1);
                    int min_n = k_size - min(k_size, dim_input-k);
                    int max_n = min(k_size, k+1);
                    for (int m=min_m; m < max_m; m++) {
                        for (int n=min_n; n < max_n; n++) {
                            tmp += output[l][j-m][k-n]*ker->weights[i][l][m][n];
                        }
                    }
                }
                input[i][j][k] = tmp*d_function(input_z[i][j][k]);
            }
        }
    }
}

// Only last_... have been done, we have to deal with the d_... part
// It's EASY but it needs to be done

// The first layer needs to be a convolution or a fully connected one

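A small illustrative sketch (not in this file) of the size relation the weight-gradient loops in backward_convolution rely on: for a valid convolution, k_size = dim_input - dim_output + 1, and each d_weights entry is a correlation between the layer input and the output gradient. Shown here in 1-D with made-up values:

#include <stdio.h>

int main(void) {
    int dim_input = 5, dim_output = 3;
    int k_size = dim_input - dim_output + 1;       // 3, as in backward_convolution
    float in[5] = {1, 2, 3, 4, 5};                 // layer input (1-D analogue)
    float dout[3] = {0.1f, -0.2f, 0.3f};           // gradient at the layer output

    for (int j = 0; j < k_size; j++) {             // one gradient per kernel position
        float tmp = 0;
        for (int l = 0; l < dim_output; l++)
            tmp += in[l + j] * dout[l];            // same pattern as input[h][l+j][m+k]*output[i][l][m]
        printf("d_weights[%d] = %f\n", j, tmp);
    }
    return 0;
}
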
@@ -88,34 +88,38 @@ void backward_propagation(Network* network, float wanted_number) {
    int n = network->size;
    int activation, input_depth, input_width, output_depth, output_width;
    float*** input;
    float*** input_z;
    float*** output;
    Kernel* k_i;
    Kernel* k_i_1;
-   // rms_backward(network->input[n-1][0][0], wanted_output); // Backward on the last column
+   rms_backward(network->input[n-1][0][0], network->input_z[n-1][0][0], wanted_output, network->width[n-1]); // Backward on the last column

-   for (int i=n-3; i >= 0; i--) {
+   for (int i=n-2; i >= 0; i--) {
        // Updates 'k_i' from a comparison of information between 'input' and 'output'
        k_i = network->kernel[i];
        k_i_1 = network->kernel[i+1];
        input = network->input[i];
        input_z = network->input_z[i];
        input_depth = network->depth[i];
        input_width = network->width[i];
        output = network->input[i+1];
        output_depth = network->depth[i+1];
        output_width = network->width[i+1];
-       activation = k_i->activation;
+       activation = i==0?SIGMOID:k_i->activation;

        if (k_i->cnn) { // Convolution
            ptr d_f = get_function_activation(activation);
            backward_convolution(k_i->cnn, input, input_z, output, input_depth, input_width, output_depth, output_width, d_f, i==0);
        } else if (k_i->nn) { // Fully connected
            ptr d_f = get_function_activation(activation);
            if (input_depth==1) { // Vector -> Vector
                backward_fully_connected(k_i->nn, input[0][0], input_z[0][0], output[0][0], input_width, output_width, d_f, i==0);
            } else { // Matrix -> Vector
                backward_linearisation(k_i->nn, input, input_z, output[0][0], input_depth, input_width, output_width, d_f);
            }
        } else { // Pooling
-           // backward_2d_pooling(input, output, input_width, output_width, input_depth) // input and output have the same depth
+           backward_2d_pooling(input, output, input_width, output_width, input_depth); // input and output have the same depth
        }
    }
    free(wanted_output);

@@ -31,7 +31,7 @@ Network* create_network(int max_size, int learning_rate, int dropout, int initia
    create_a_cube_input_layer(network, 0, input_depth, input_dim);
    // create_a_cube_input_z_layer(network, 0, input_depth, input_dim);
    // This shouldn't be used (if I'm not mistaken) so to save space, we can do:
-   ntework->input_z[0] = NULL; // As we don't backpropagate the input
+   network->input_z[0] = NULL; // As we don't backpropagate the input
    return network;
}

40  src/cnn/include/backpropagation.h  (new file)

@@ -0,0 +1,40 @@
#include "function.h"
#ifndef DEF_BACKPROPAGATION_H
#define DEF_BACKPROPAGATION_H

/*
* Returns the smaller of a and b
*/
int min(int a, int b);

/*
* Returns the larger of a and b
*/
int max(int a, int b);

/*
* Propagates the error information from the wanted output to the actual output
*/
void rms_backward(float* input, float* input_z, float* output, int size);

/*
* Propagates the error information through an average pooling layer
*/
void backward_2d_pooling(float*** input, float*** output, int input_width, int output_width, int depth);

/*
* Propagates the error information through a fully connected layer
*/
void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, ptr d_function, int is_first);

/*
* Propagates the error information through a linearisation layer
*/
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function);

/*
* Propagates the error information through a convolution layer
*/
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first);

#endif
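For context, a hypothetical call site for backward_fully_connected with a hand-built Kernel_nn. The field names (weights, d_weights, d_bias) are taken from the code above; the assumptions that `ptr` is a float (*)(float) function pointer and that Kernel_nn is a complete type reachable through these headers are mine, not the commit's:

#include <stdio.h>
#include "backpropagation.h"

// Assumed derivative of the identity activation, matching the assumed ptr signature.
static float d_identity(float x) {
    (void)x;
    return 1.0f;
}

int main(void) {
    // 2 inputs, 3 outputs
    float w0[3] = {0.1f, 0.2f, 0.3f}, w1[3] = {0.4f, 0.5f, 0.6f};
    float dw0[3] = {0}, dw1[3] = {0};
    float* weights[2] = {w0, w1};
    float* d_weights[2] = {dw0, dw1};
    float d_bias[3] = {0};

    Kernel_nn ker;
    ker.weights = weights;
    ker.d_weights = d_weights;
    ker.d_bias = d_bias;

    float input[2] = {1.0f, -1.0f};          // activations of the previous layer
    float input_z[2] = {1.0f, -1.0f};        // pre-activations of the previous layer
    float output[3] = {0.5f, -0.25f, 0.75f}; // error signal of this layer

    backward_fully_connected(&ker, input, input_z, output, 2, 3, d_identity, 0);

    printf("d_bias[0]=%f d_weights[0][1]=%f input[0]=%f\n",
           ker.d_bias[0], ker.d_weights[0][1], input[0]);
    return 0;
}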