Clean compiler warnings a bit

This commit is contained in:
augustin64 2022-11-03 18:45:38 +01:00
parent 4637d62e73
commit 88ff365c70
3 changed files with 8 additions and 8 deletions

View File

@ -74,7 +74,7 @@ void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, floa
for (int j=0; j < size_output; j++) { for (int j=0; j < size_output; j++) {
tmp += output[j]*ker->weights[i][j]; tmp += output[j]*ker->weights[i][j];
} }
input[i] = tmp*derivative_function(input_z[i]); input[i] = tmp*d_function(input_z[i]);
} }
} }
@ -106,7 +106,7 @@ void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, fl
for (int j=0; j < size_output; j++) { for (int j=0; j < size_output; j++) {
tmp += output[j]*ker->weights[cpt][j]; tmp += output[j]*ker->weights[cpt][j];
} }
input[i][k][l] = tmp*derivative_function(input_z[i][k][l]); input[i][k][l] = tmp*d_function(input_z[i][k][l]);
cpt++; cpt++;
} }
} }
@ -125,7 +125,7 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
// Weights // Weights
int k_size = dim_input - dim_output +1; int k_size = dim_input - dim_output +1;
int var = dim_input - k_size +1;
for (int h=0; h < depth_input; h++) { for (int h=0; h < depth_input; h++) {
for (int i=0; i < depth_output; i++) { for (int i=0; i < depth_output; i++) {
for (int j=0; j < k_size; j++) { for (int j=0; j < k_size; j++) {
@ -161,7 +161,7 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
} }
} }
} }
input[i][j][k] = tmp*derivative_function(input_z[i][j][k]); input[i][j][k] = tmp*d_function(input_z[i][j][k]);
} }
} }
} }

View File

@ -3,6 +3,7 @@
#include <math.h> #include <math.h>
#include <float.h> // Is it used ? #include <float.h> // Is it used ?
#include "include/backpropagation.h"
#include "include/initialisation.h" #include "include/initialisation.h"
#include "include/function.h" #include "include/function.h"
#include "include/creation.h" #include "include/creation.h"
@ -91,13 +92,12 @@ void backward_propagation(Network* network, float wanted_number) {
float*** input_z; float*** input_z;
float*** output; float*** output;
Kernel* k_i; Kernel* k_i;
Kernel* k_i_1;
rms_backward(network->input[n-1][0][0], network->input_z[n-1][0][0], wanted_output, network->width[n-1]); // Backward sur la dernière colonne rms_backward(network->input[n-1][0][0], network->input_z[n-1][0][0], wanted_output, network->width[n-1]); // Backward sur la dernière colonne
for (int i=n-2; i >= 0; i--) { for (int i=n-2; i >= 0; i--) {
// Modifie 'k_i' à partir d'une comparaison d'informations entre 'input' et 'output' // Modifie 'k_i' à partir d'une comparaison d'informations entre 'input' et 'output'
k_i = network->kernel[i]; k_i = network->kernel[i];
k_i_1 = network->kernel[i+1];
input = network->input[i]; input = network->input[i];
input_z = network->input_z[i]; input_z = network->input_z[i];
input_depth = network->depth[i]; input_depth = network->depth[i];

View File

@ -156,8 +156,8 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
} }
create_a_cube_input_layer(network, n, depth_output, bias_size); create_a_cube_input_layer(network, n, depth_output, bias_size);
create_a_cube_input_z_layer(network, n, depth_output, bias_size); create_a_cube_input_z_layer(network, n, depth_output, bias_size);
int n_int = network->width[n-1]*network->width[n-1]*network->depth[n-1]; // int n_int = network->width[n-1]*network->width[n-1]*network->depth[n-1];
int n_out = network->width[n]*network->width[n]*network->depth[n]; // int n_out = network->width[n]*network->width[n]*network->depth[n];
/* Not currently used /* Not currently used
initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, kernel_size, kernel_size, n_int+n_out); initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, kernel_size, kernel_size, n_int+n_out);
initialisation_3d_matrix(ZERO, cnn->d_bias, depth_output, kernel_size, kernel_size, n_int+n_out); initialisation_3d_matrix(ZERO, cnn->d_bias, depth_output, kernel_size, kernel_size, n_int+n_out);