Mirror of https://github.com/augustin64/projet-tipe

Update backprop

commit 65505858da (parent 4e8f440db7)
@@ -146,19 +146,19 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
     // Input
     if (is_first==1) // No need to backpropagate into the input
         return;
-
+    int min_m, max_m, min_n, max_n;
     for (int i=0; i < depth_input; i++) {
         for (int j=0; j < dim_input; j++) {
             for (int k=0; k < dim_input; k++) {
                 float tmp = 0;
                 for (int l=0; l < depth_output; l++) {
-                    int min_m = k_size - max(k_size, dim_input-i);
-                    int max_m = min(k_size, i+1);
-                    int min_n = k_size - max(k_size, dim_input-j);
-                    int max_n = min(k_size, j+1);
+                    min_m = max(0, k_size-1-j);
+                    max_m = min(k_size, dim_input - j);
+                    min_n = max(0, k_size-1-k);
+                    max_n = min(k_size, dim_input-k);
                     for (int m=min_m; m < max_m; m++) {
                         for (int n=min_n; n < max_n; n++) {
-                            tmp += output[l][i-m][j-n]*ker->w[i][l][m][n];
+                            tmp += output[l][j-k_size+m+1][k-k_size+n+1]*ker->w[i][l][m][n];
                         }
                     }
                 }
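The new loop bounds can be sanity-checked in isolation. Assuming a stride-1 "valid" convolution, so that dim_output = dim_input - k_size + 1 (an assumption; the dimensions are not shown in this hunk), a brute-force check that every output index touched by the new indexing stays in range:

#include <assert.h>
#include <stdio.h>

static int max(int a, int b) { return a > b ? a : b; }
static int min(int a, int b) { return a < b ? a : b; }

int main(void) {
    int dim_input = 28, k_size = 5;              // hypothetical sizes
    int dim_output = dim_input - k_size + 1;     // assumes stride-1 "valid" convolution
    for (int j = 0; j < dim_input; j++) {
        int min_m = max(0, k_size-1-j);          // bounds from the patch
        int max_m = min(k_size, dim_input - j);
        for (int m = min_m; m < max_m; m++) {
            int jo = j - k_size + m + 1;         // output row read for input row j
            assert(0 <= jo && jo < dim_output);  // never out of range
        }
    }
    printf("all output indices in [0, %d)\n", dim_output);
    return 0;
}

The same argument applies symmetrically to k, n and the column index.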
@@ -90,7 +90,6 @@ void forward_propagation(Network* network) {
 }
 
 void backward_propagation(Network* network, float wanted_number) {
-    printf_warning("Appel de backward_propagation, incomplet\n");
     float* wanted_output = generate_wanted_output(wanted_number);
     int n = network->size;
     int activation, input_depth, input_width, output_depth, output_width;
@@ -102,8 +102,8 @@ void add_2d_average_pooling(Network* network, int dim_output) {
     }
     network->kernel[k_pos]->cnn = NULL;
     network->kernel[k_pos]->nn = NULL;
-    network->kernel[k_pos]->activation = 100*kernel_size; // Does not contain an activation function
-    network->kernel[k_pos]->linearisation = 0;
+    network->kernel[k_pos]->activation = IDENTITY; // Does not contain an activation function
+    network->kernel[k_pos]->linearisation = kernel_size;
     create_a_cube_input_layer(network, n, network->depth[n-1], network->width[n-1]/2);
     create_a_cube_input_z_layer(network, n, network->depth[n-1], network->width[n-1]/2); // Will it be used ?
     network->size++;
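The pooling size, previously smuggled through activation as 100*kernel_size, now lives in the linearisation field, and activation becomes a plain IDENTITY. A hypothetical accessor under the new convention (get_pooling_size is not a repository function; the cnn == NULL && nn == NULL test follows the Kernel struct comments shown further down):

// Hypothetical helper, not in the repository: reads the pooling size
// under the new encoding (activation = IDENTITY, linearisation = kernel_size).
int get_pooling_size(Kernel* kernel) {
    if (kernel->cnn == NULL && kernel->nn == NULL) // pooling: neither cnn nor nn
        return kernel->linearisation;              // was: kernel->activation / 100
    return 0;                                      // not a pooling layer
}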
@@ -9,6 +9,15 @@ float max_float(float a, float b) {
     return a < b ? b:a;
 }
 
+float identity(float x) {
+    return x;
+}
+
+float identity_derivative(float x) {
+    (void)x;
+    return 1;
+}
+
 float sigmoid(float x) {
     return 1/(1 + exp(-x));
 }
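identity returns its argument unchanged and its derivative is the constant 1; the (void)x; cast is the standard C idiom for marking a parameter as deliberately unused while keeping the shared ptr signature. A minimal, self-contained check (the main is illustrative, not repository code):

#include <stdio.h>

typedef float (*ptr)(float);

float identity(float x) { return x; }
float identity_derivative(float x) { (void)x; return 1; } // d/dx x = 1

int main(void) {
    ptr f = &identity, df = &identity_derivative;
    printf("%f %f\n", f(3.5f), df(3.5f)); // prints 3.500000 1.000000
    return 0;
}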
@@ -105,26 +114,38 @@ void choose_apply_function_vector(int activation, float*** input, int dim) {
 ptr get_function_activation(int activation) {
     if (activation == RELU) {
         return &relu;
-    } else if (activation == -RELU) {
+    }
+    if (activation == -RELU) {
         return &relu_derivative;
-    } else if (activation == SIGMOID) {
+    }
+    if (activation == -IDENTITY) {
+        return &identity_derivative;
+    }
+    if (activation == IDENTITY) {
+        return &identity;
+    }
+    if (activation == SIGMOID) {
         return &sigmoid;
-    } else if (activation == -SIGMOID) {
+    }
+    if (activation == -SIGMOID) {
         return &sigmoid_derivative;
-    } else if (activation == SOFTMAX) {
+    }
+    if (activation == SOFTMAX) {
         printf("Erreur, impossible de renvoyer la fonction softmax\n");
         return NULL;
-    } else if (activation == -SOFTMAX) {
+    }
+    if (activation == -SOFTMAX) {
         printf("Erreur, impossible de renvoyer la dérivée de la fonction softmax\n");
         return NULL;
-    } else if (activation == TANH) {
+    }
+    if (activation == TANH) {
         return &tanh_;
-    } else if (activation == -TANH) {
+    }
+    if (activation == -TANH) {
         return &tanh_derivative;
-    } else {
+    }
     printf("Erreur, fonction d'activation inconnue (choose_apply_function_vector): %d\n", activation);
     return NULL;
-    }
 }
 // to use:
 // float a = 5; int activation;
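Usage follows the negated-identifier convention stated in the header below: passing -id selects the derivative. An illustrative fragment, assuming the repository's headers are included (not repository code):

ptr f  = get_function_activation(IDENTITY);   // &identity
ptr df = get_function_activation(-IDENTITY);  // &identity_derivative
printf("%f %f\n", f(2.0f), df(2.0f));         // 2.000000 1.000000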
@@ -3,10 +3,12 @@
 
 // Derivatives are the opposite (negated) identifiers
-#define TANH 1
-#define SIGMOID 2
-#define RELU 3
-#define SOFTMAX 4
+#define IDENTITY 1
+#define TANH 2
+#define SIGMOID 3
+#define RELU 4
+#define SOFTMAX 5
+
 
 
 typedef float (*ptr)(float);
 typedef ptr (*pm)();
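Note that the renumbering starts IDENTITY at 1 rather than 0: under the negated-identifier convention, an identifier of 0 would be indistinguishable from its own derivative, since -0 == 0. A guard one could add (illustrative, not in the patch):

// Illustrative guard: every identifier must differ from its negation.
_Static_assert(IDENTITY > 0, "activation ids must be nonzero so -id selects the derivative");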
@@ -16,6 +18,10 @@ typedef ptr (*pm)();
 */
 float max_float(float a, float b);
 
+float identity(float x);
+
+float identity_derivative(float x);
+
 float sigmoid(float x);
 
 float sigmoid_derivative(float x);
@@ -29,12 +35,12 @@ float tanh_(float x);
 float tanh_derivative(float x);
 
 /*
- * Applies softmax to ????
+ * Applies softmax to input[depth][rows][columns]
 */
 void apply_softmax_input(float ***input, int depth, int rows, int columns);
 
 /*
- * Applies the function f to ????
+ * Applies the function f to input[depth][rows][columns]
 */
 void apply_function_input(float (*f)(float), float*** input, int depth, int rows, int columns);
 
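The fixed doc comments pin down the shape convention. A sketch of the element-wise behavior apply_function_input describes, assuming in-place application (this mirrors the documented signature; it is not the repository's implementation):

// Sketch of the documented behavior, assuming in-place application of f
// to every element of input[depth][rows][columns].
void apply_function_input(float (*f)(float), float*** input,
                          int depth, int rows, int columns) {
    for (int i = 0; i < depth; i++)
        for (int j = 0; j < rows; j++)
            for (int k = 0; k < columns; k++)
                input[i][j][k] = f(input[i][j][k]);
}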
@@ -23,7 +23,7 @@ typedef struct Kernel_nn {
 typedef struct Kernel {
     Kernel_cnn* cnn; // NULL if this is not a cnn
     Kernel_nn* nn; // NULL if this is not an nn
-    int activation; // Holds the activation, except for a pooling layer where it holds pooling_size*100
+    int activation; // Holds the identifier of the activation function
     int linearisation; // 1 if this is the linearisation of a layer, 0 otherwise ?? Add to the others
 } Kernel;
 
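Taken together with the struct comments, a layer's kind can be read off the two pointers. A hypothetical classifier under those conventions (kernel_kind is not a repository function):

// Hypothetical helper, not in the repository: classifies a layer
// from the conventions recorded in the struct comments above.
const char* kernel_kind(Kernel* k) {
    if (k->cnn != NULL) return "convolution";
    if (k->nn != NULL)  return "fully connected";
    return "pooling";   // both NULL: pooling layer
}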
Loading…
Reference in New Issue
Block a user