Mirror of https://github.com/augustin64/projet-tipe (synced 2025-01-23 23:26:25 +01:00)

Merge branch 'julienChemillier:main' into main

Commit bc5f491f1f
@@ -53,11 +53,11 @@ void forward_propagation(Network* network) {
         output_width = network->width[i+1];
         activation = k_i->activation;
 
-        if (k_i->cnn!=NULL) { // Convolution
+        if (k_i->cnn) { // Convolution
             make_convolution(k_i->cnn, input, output, output_width);
             choose_apply_function_matrix(activation, output, output_depth, output_width);
         }
-        else if (k_i->nn!=NULL) { // Full connection
+        else if (k_i->nn) { // Full connection
             if (input_depth==1) { // Vector -> Vector
                 make_dense(k_i->nn, input[0][0], output[0][0], input_width, output_width);
             } else { // Matrix -> Vector
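In C, a pointer used as a condition compares unequal to NULL, so `if (k_i->cnn)` and `if (k_i->cnn != NULL)` behave identically; this change is purely stylistic.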
@@ -80,7 +80,7 @@ void backward_propagation(Network* network, float wanted_number) {
     printf_warning("Appel de backward_propagation, incomplet\n");
     float* wanted_output = generate_wanted_output(wanted_number);
     int n = network->size;
-    float loss = compute_cross_entropy_loss(network->input[n][0][0], wanted_output, network->width[n]);
+    float loss = compute_mean_squared_error(network->input[n][0][0], wanted_output, network->width[n]);
     int activation, input_depth, input_width, output_depth, output_width;
     float*** input;
     float*** output;
@@ -106,6 +106,18 @@ void backward_propagation(Network* network, float wanted_number) {
     free(wanted_output);
 }
 
+float compute_mean_squared_error(float* output, float* wanted_output, int len) {
+    if (len==0) {
+        printf("Erreur MSE: la longueur de la sortie est de 0 -> division par 0 impossible\n");
+        return 0.;
+    }
+    float loss=0.;
+    for (int i=0; i < len ; i++) {
+        loss += (output[i]-wanted_output[i])*(output[i]-wanted_output[i]);
+    }
+    return loss/len;
+}
+
 float compute_cross_entropy_loss(float* output, float* wanted_output, int len) {
     float loss=0.;
     for (int i=0; i < len ; i++) {
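The hunk ends inside compute_cross_entropy_loss, whose body lies outside the diff. For orientation only, a conventional cross-entropy loop would accumulate -wanted*log(predicted); this sketch is an assumption about code not shown here, not the project's actual implementation:

    #include <math.h>

    /* Hypothetical continuation: standard cross-entropy over a
       probability-like output vector; the clamp avoids log(0). */
    float compute_cross_entropy_loss_sketch(float* output, float* wanted_output, int len) {
        float loss = 0.;
        for (int i = 0; i < len; i++) {
            float p = output[i] > 1e-7f ? output[i] : 1e-7f; // clamp small values
            loss -= wanted_output[i] * logf(p);
        }
        return loss;
    }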
@@ -105,26 +105,33 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
     cnn->columns = depth_output;
     cnn->w = (float****)malloc(sizeof(float***)*depth_input);
     cnn->d_w = (float****)malloc(sizeof(float***)*depth_input);
+    cnn->last_d_w = (float****)malloc(sizeof(float***)*depth_input);
     for (int i=0; i < depth_input; i++) {
         cnn->w[i] = (float***)malloc(sizeof(float**)*depth_output);
         cnn->d_w[i] = (float***)malloc(sizeof(float**)*depth_output);
+        cnn->last_d_w[i] = (float***)malloc(sizeof(float**)*depth_output);
         for (int j=0; j < depth_output; j++) {
             cnn->w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             cnn->d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
+            cnn->last_d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             for (int k=0; k < kernel_size; k++) {
                 cnn->w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
                 cnn->d_w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
+                cnn->last_d_w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
             }
         }
     }
     cnn->bias = (float***)malloc(sizeof(float**)*depth_output);
     cnn->d_bias = (float***)malloc(sizeof(float**)*depth_output);
+    cnn->last_d_bias = (float***)malloc(sizeof(float**)*depth_output);
     for (int i=0; i < depth_output; i++) {
         cnn->bias[i] = (float**)malloc(sizeof(float*)*bias_size);
         cnn->d_bias[i] = (float**)malloc(sizeof(float*)*bias_size);
+        cnn->last_d_bias[i] = (float**)malloc(sizeof(float*)*bias_size);
         for (int j=0; j < bias_size; j++) {
             cnn->bias[i][j] = (float*)malloc(sizeof(float)*bias_size);
             cnn->d_bias[i][j] = (float*)malloc(sizeof(float)*bias_size);
+            cnn->last_d_bias[i][j] = (float*)malloc(sizeof(float)*bias_size);
         }
     }
     create_a_cube_input_layer(network, n, depth_output, bias_size);
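The same level-by-level allocation is now written out three times, for w, d_w and last_d_w. A hypothetical helper, not part of this commit, could factor the pattern out:

    #include <stdlib.h>

    /* Hypothetical helper: allocate a dim1 x dim2 x dim3 x dim4 float tensor
       the same way add_convolution does, one malloc per level. */
    float**** alloc_4d_float(int dim1, int dim2, int dim3, int dim4) {
        float**** t = (float****)malloc(sizeof(float***)*dim1);
        for (int i=0; i < dim1; i++) {
            t[i] = (float***)malloc(sizeof(float**)*dim2);
            for (int j=0; j < dim2; j++) {
                t[i][j] = (float**)malloc(sizeof(float*)*dim3);
                for (int k=0; k < dim3; k++) {
                    t[i][j][k] = (float*)malloc(sizeof(float)*dim4);
                }
            }
        }
        return t;
    }

With such a helper, the three weight tensors would each reduce to one call, e.g. cnn->w = alloc_4d_float(depth_input, depth_output, kernel_size, kernel_size), and likewise for d_w and last_d_w.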
@@ -155,11 +162,14 @@ void add_dense(Network* network, int output_units, int activation) {
     nn->output_units = output_units;
     nn->bias = (float*)malloc(sizeof(float)*output_units);
     nn->d_bias = (float*)malloc(sizeof(float)*output_units);
+    nn->last_d_bias = (float*)malloc(sizeof(float)*output_units);
     nn->weights = (float**)malloc(sizeof(float*)*input_units);
     nn->d_weights = (float**)malloc(sizeof(float*)*input_units);
+    nn->last_d_weights = (float**)malloc(sizeof(float*)*input_units);
     for (int i=0; i < input_units; i++) {
         nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
+        nn->last_d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
     create_a_line_input_layer(network, n, output_units);
     /* Not currently used
@@ -190,11 +200,14 @@ void add_dense_linearisation(Network* network, int output_units, int activation)
 
     nn->bias = (float*)malloc(sizeof(float)*output_units);
     nn->d_bias = (float*)malloc(sizeof(float)*output_units);
+    nn->last_d_bias = (float*)malloc(sizeof(float)*output_units);
     nn->weights = (float**)malloc(sizeof(float*)*input_units);
     nn->d_weights = (float**)malloc(sizeof(float*)*input_units);
+    nn->last_d_weights = (float**)malloc(sizeof(float*)*input_units);
     for (int i=0; i < input_units; i++) {
         nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
+        nn->last_d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
     /* Not currently used
     initialisation_1d_matrix(network->initialisation, nn->bias, output_units, output_units+input_units);
@@ -33,27 +33,34 @@ void free_convolution(Network* network, int pos) {
         for (int j=0; j < bias_size; j++) {
             free(k_pos->bias[i][j]);
             free(k_pos->d_bias[i][j]);
+            free(k_pos->last_d_bias[i][j]);
         }
         free(k_pos->bias[i]);
         free(k_pos->d_bias[i]);
+        free(k_pos->last_d_bias[i]);
     }
     free(k_pos->bias);
     free(k_pos->d_bias);
+    free(k_pos->last_d_bias);
 
     for (int i=0; i < r; i++) {
         for (int j=0; j < c; j++) {
             for (int k=0; k < k_size; k++) {
                 free(k_pos->w[i][j][k]);
                 free(k_pos->d_w[i][j][k]);
+                free(k_pos->last_d_w[i][j][k]);
             }
             free(k_pos->w[i][j]);
             free(k_pos->d_w[i][j]);
+            free(k_pos->last_d_w[i][j]);
         }
         free(k_pos->w[i]);
         free(k_pos->d_w[i]);
+        free(k_pos->last_d_w[i]);
     }
     free(k_pos->w);
     free(k_pos->d_w);
+    free(k_pos->last_d_w);
 
     free(k_pos);
 }
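The frees mirror the allocations level for level. A hypothetical counterpart to the alloc_4d_float helper sketched above (again, not code from this commit) would make that symmetry explicit:

    #include <stdlib.h>

    /* Hypothetical counterpart to alloc_4d_float: release each level
       in the reverse order of allocation. */
    void free_4d_float(float**** t, int dim1, int dim2, int dim3) {
        for (int i=0; i < dim1; i++) {
            for (int j=0; j < dim2; j++) {
                for (int k=0; k < dim3; k++) {
                    free(t[i][j][k]);
                }
                free(t[i][j]);
            }
            free(t[i]);
        }
        free(t);
    }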
@@ -65,12 +72,15 @@ void free_dense(Network* network, int pos) {
     for (int i=0; i < dim; i++) {
         free(k_pos->weights[i]);
         free(k_pos->d_weights[i]);
+        free(k_pos->last_d_weights[i]);
     }
     free(k_pos->weights);
     free(k_pos->d_weights);
+    free(k_pos->last_d_weights);
 
     free(k_pos->bias);
     free(k_pos->d_bias);
+    free(k_pos->last_d_bias);
 
     free(k_pos);
 }
@@ -82,12 +92,15 @@ void free_dense_linearisation(Network* network, int pos) {
     for (int i=0; i < dim; i++) {
         free(k_pos->weights[i]);
         free(k_pos->d_weights[i]);
+        free(k_pos->last_d_weights[i]);
     }
     free(k_pos->weights);
     free(k_pos->d_weights);
+    free(k_pos->last_d_weights);
 
     free(k_pos->bias);
     free(k_pos->d_bias);
+    free(k_pos->last_d_bias);
 
     free(k_pos);
 }
@@ -7,12 +7,12 @@
 /*
 * Returns whether or not (1 or 0) the neuron will be dropped
 */
-int will_be_drop(int dropout_prob); //CHECKED
+int will_be_drop(int dropout_prob);
 
 /*
 * Writes a 28*28 image at the center of a 32*32 array and sets the rest to 0
 */
-void write_image_in_network_32(int** image, int height, int width, float** input); //CHECKED
+void write_image_in_network_32(int** image, int height, int width, float** input);
 
 /*
 * Forward-propagates the cnn
@@ -22,10 +22,15 @@ void forward_propagation(Network* network);
 /*
 * Backward-propagates the cnn
 */
-void backward_propagation(Network* network, float wanted_number); // TODO
+void backward_propagation(Network* network, float wanted_number);
 
 /*
-* Returns the neural network's error for an output
+* Returns the neural network's error for an output (MSE)
 */
 float compute_mean_squared_error(float* output, float* wanted_output, int len);
 
+/*
+* Returns the neural network's error for an output (CEL)
+*/
+float compute_cross_entropy_loss(float* output, float* wanted_output, int len);
 
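A minimal caller for these two prototypes, assuming the implementations shown earlier are linked in (the main function and the sample values are illustrative only):

    #include <stdio.h>

    float compute_mean_squared_error(float* output, float* wanted_output, int len);
    float compute_cross_entropy_loss(float* output, float* wanted_output, int len);

    int main(void) {
        float output[3] = {0.8f, 0.1f, 0.1f};
        float wanted[3] = {1.0f, 0.0f, 0.0f};
        // MSE: ((0.8-1)^2 + 0.1^2 + 0.1^2) / 3 = 0.02
        printf("MSE: %f\n", compute_mean_squared_error(output, wanted, 3));
        printf("CEL: %f\n", compute_cross_entropy_loss(output, wanted, 3));
        return 0;
    }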
@@ -7,8 +8,10 @@ typedef struct Kernel_cnn {
     int columns; // Depth of the output
     float*** bias; // bias[columns][k_size][k_size]
     float*** d_bias; // d_bias[columns][k_size][k_size]
+    float*** last_d_bias; // last_d_bias[columns][k_size][k_size]
     float**** w; // w[rows][columns][k_size][k_size]
-    float**** d_w; // dw[rows][columns][k_size][k_size]
+    float**** d_w; // d_w[rows][columns][k_size][k_size]
+    float**** last_d_w; // last_d_w[rows][columns][k_size][k_size]
 } Kernel_cnn;
 
 typedef struct Kernel_nn {
@@ -16,8 +18,10 @@ typedef struct Kernel_nn {
     int output_units; // Number of output elements
     float* bias; // bias[output_units]
     float* d_bias; // d_bias[output_units]
+    float* last_d_bias; // last_d_bias[output_units]
     float** weights; // weight[input_units][output_units]
     float** d_weights; // d_weights[input_units][output_units]
+    float** last_d_weights; // last_d_weights[input_units][output_units]
 } Kernel_nn;
 
 typedef struct Kernel {
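The new last_d_* fields hold the previous weight and bias updates; nothing in this diff uses them yet, but they are the natural storage for a momentum term. A sketch of such an update for a Kernel_nn, where lr and momentum are assumed hyperparameters and none of this is code from the commit:

    /* Hypothetical momentum update, assuming gradients have already been
       accumulated into d_weights/d_bias by backpropagation. */
    void apply_momentum_update(Kernel_nn* nn, int input_units, int output_units,
                               float lr, float momentum) {
        for (int i=0; i < input_units; i++) {
            for (int j=0; j < output_units; j++) {
                // Blend the current gradient with the previous update...
                float update = lr * nn->d_weights[i][j] + momentum * nn->last_d_weights[i][j];
                nn->weights[i][j] -= update;
                nn->last_d_weights[i][j] = update; // ...and remember it for the next step
            }
        }
        for (int j=0; j < output_units; j++) {
            float update = lr * nn->d_bias[j] + momentum * nn->last_d_bias[j];
            nn->bias[j] -= update;
            nn->last_d_bias[j] = update;
        }
    }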