From e280d3e9da6424dd6ddbec4f8de89812357acfbc Mon Sep 17 00:00:00 2001
From: Julien Chemillier
Date: Fri, 9 Sep 2022 17:39:07 +0200
Subject: [PATCH] Update mnist_cnn: improve code readability

---
 src/mnist_cnn/cnn.c            | 52 ++++++++++++-------
 src/mnist_cnn/cnn.h            |  4 +-
 src/mnist_cnn/creation.c       | 93 ++++++++++++++++------------------
 src/mnist_cnn/free.c           | 23 ++++-----
 src/mnist_cnn/function.c       | 28 +++++-----
 src/mnist_cnn/initialisation.c | 24 ++++-----
 src/mnist_cnn/make.c           | 44 ++++++++--------
 7 files changed, 136 insertions(+), 132 deletions(-)

diff --git a/src/mnist_cnn/cnn.c b/src/mnist_cnn/cnn.c
index ee5ca40..a81879f 100644
--- a/src/mnist_cnn/cnn.c
+++ b/src/mnist_cnn/cnn.c
@@ -2,21 +2,24 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <math.h>
-#include "function.h"
-#include "make.h"
+#include "initialisation.c"
+#include "function.c"
+#include "creation.c"
+#include "make.c"
+
 #include "cnn.h"
 
 // Augmente les dimensions de l'image d'entrée
-#define PADING_INPUT 2
+#define PADDING_INPUT 2
 
 int will_be_drop(int dropout_prob) {
-    return (rand() % 100)<dropout_prob;
+    return (rand() % 100) < dropout_prob;
 }
 
-void write_image_in_newtork_32(int** image, int height, int width, float** input) {
-    for (int i=0; i<height+2*PADING_INPUT; i++) {
-        for (int j=PADING_INPUT; j<width+2*PADING_INPUT; j++) {
-            if (i<PADING_INPUT || i>height+PADING_INPUT || j<PADING_INPUT || j>width+PADING_INPUT) {
+void write_image_in_network_32(int** image, int height, int width, float** input) {
+    for (int i=0; i < height+2*PADDING_INPUT; i++) {
+        for (int j=PADDING_INPUT; j < width+2*PADDING_INPUT; j++) {
+            if (i < PADDING_INPUT || i > height+PADDING_INPUT || j < PADDING_INPUT || j > width+PADDING_INPUT) {
                 input[i][j] = 0.;
             }
             else {
@@ -27,16 +30,21 @@ void write_image_in_newtork_32(int** image, int height, int width, float** input
 }
 
 void forward_propagation(Network* network) {
+    int output_dim, output_depth;
+    float*** output;
     for (int i=0; i < network->size-1; i++) {
-        if (network->kernel[i].nn==NULL && network->kernel[i].cnn!=NULL) {
-            make_convolution(network->input[i], network->kernel[i].cnn, network->input[i+1], network->dim[i+1][0]);
-            choose_apply_function_input(network->kernel[i].activation, network->input[i+1], network->dim[i+1][1], network->dim[i+1][0], network->dim[i+1][0]);
+        if (network->kernel[i].nn==NULL && network->kernel[i].cnn!=NULL) { //CNN
+            output = network->input[i+1];
+            output_dim = network->dim[i+1][0];
+            output_depth = network->dim[i+1][1];
+            make_convolution(network->input[i], network->kernel[i].cnn, output, output_dim);
+            choose_apply_function_input(network->kernel[i].activation, output, output_depth, output_dim, output_dim);
         }
-        else if (network->kernel[i].nn!=NULL && network->kernel[i].cnn==NULL) {
+        else if (network->kernel[i].nn!=NULL && network->kernel[i].cnn==NULL) { //NN
            make_fully_connected(network->input[i][0][0], network->kernel[i].nn, network->input[i+1][0][0], network->dim[i][0], network->dim[i+1][0]);
            choose_apply_function_input(network->kernel[i].activation, network->input[i+1], 1, 1, network->dim[i+1][0]);
         }
-        else {
+        else { //Pooling
            if (network->size-2==i) {
                printf("Le réseau ne peut pas finir par une pooling layer");
                return;
@@ -61,13 +69,12 @@ void backward_propagation(Network* network, float wanted_number) {
     float* wanted_output = generate_wanted_output(wanted_number);
     int n = network->size-1;
     float loss = compute_cross_entropy_loss(network->input[n][0][0], wanted_output, network->dim[n][0]);
-    int i, j;
-    for (i=n; i>=0; i--) {
+    for (int i=n; i >= 0; i--) {
         if (i==n) {
             if (network->kernel[i].activation == SOFTMAX) {
                 int l2 = network->dim[i][0]; // Taille de la dernière couche
                 int l1 = network->dim[i-1][0];
-                for (j=0; j<l2; j++) {
+                for (int j=0; j < l2; j++) {
[...]
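A note on the padding hunk above: with PADDING_INPUT = 2, a 28x28 MNIST digit becomes the 32x32 input plane that the LeNet-5 layout expects. As written, though, the column loop starts at j=PADDING_INPUT, so the first PADDING_INPUT columns of input are never initialised, and the interior tests use strict > rather than >=, so i-PADDING_INPUT can reach the image height. Below is a minimal corrected sketch of the intended zero-padding; the helper name pad_image and the /255 normalisation of the interior pixels are assumptions, since the body of the else branch is not visible in the hunk.

    #define PADDING_INPUT 2

    /* Zero-pad a height x width integer image into a
     * (height+2*PADDING_INPUT) x (width+2*PADDING_INPUT) float plane.
     * Every cell of the padded plane is written exactly once. */
    void pad_image(int** image, int height, int width, float** input) {
        for (int i=0; i < height+2*PADDING_INPUT; i++) {
            for (int j=0; j < width+2*PADDING_INPUT; j++) {
                if (i < PADDING_INPUT || i >= height+PADDING_INPUT
                    || j < PADDING_INPUT || j >= width+PADDING_INPUT) {
                    input[i][j] = 0.;  /* border added around the image */
                } else {
                    /* interior: copy the pixel, scaled to [0,1] (assumption) */
                    input[i][j] = (float)image[i-PADDING_INPUT][j-PADDING_INPUT] / 255;
                }
            }
        }
    }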
diff --git a/src/mnist_cnn/creation.c b/src/mnist_cnn/creation.c
--- a/src/mnist_cnn/creation.c
+++ b/src/mnist_cnn/creation.c
[...]
-    if (dropout<0 || dropout>100) {
+    if (dropout < 0 || dropout > 100) {
         printf("Erreur, la probabilité de dropout n'est pas respecté, elle doit être comprise entre 0 et 100\n");
     }
-    Network* network = malloc(sizeof(Network));
+    Network* network = (Network*)malloc(sizeof(Network));
     network->max_size = max_size;
     network->dropout = dropout;
     network->initialisation = initialisation;
     network->size = 1;
-    network->input = malloc(sizeof(float***)*max_size);
-    network->kernel = malloc(sizeof(Kernel)*(max_size-1));
-    create_a_cube_input_layer(network, 0, input_depth, input_dim);
-    int i, j;
-    network->dim = malloc(sizeof(int*)*max_size);
-    for (i=0; i<max_size; i++) {
-        network->dim[i] = malloc(sizeof(int)*2);
+    network->input = (float****)malloc(sizeof(float***)*max_size);
+    network->kernel = (Kernel*)malloc(sizeof(Kernel)*(max_size-1));
+    network->dim = (int**)malloc(sizeof(int*)*max_size);
+    for (int i=0; i < max_size; i++) {
+        network->dim[i] = (int*)malloc(sizeof(int)*2);
     }
     network->dim[0][0] = input_dim;
     network->dim[0][1] = input_depth;
+    create_a_cube_input_layer(network, 0, input_depth, input_dim);
     return network;
 }
 
@@ -40,12 +39,11 @@ Network* create_network_lenet5(int dropout, int activation, int initialisation)
 }
 
 void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
-    int i, j;
-    network->input[pos] = malloc(sizeof(float**)*depth);
-    for (i=0; i<depth; i++) {
-        network->input[pos][i] = malloc(sizeof(float*)*dim);
-        for (j=0; j<dim; j++) {
-            network->input[pos][i][j] = malloc(sizeof(float)*dim);
+    network->input[pos] = (float***)malloc(sizeof(float**)*depth);
+    for (int i=0; i < depth; i++) {
+        network->input[pos][i] = (float**)malloc(sizeof(float*)*dim);
+        for (int j=0; j < dim; j++) {
+            network->input[pos][i][j] = (float*)malloc(sizeof(float)*dim);
         }
     }
     network->dim[pos][0] = dim;
@@ -53,10 +51,9 @@ void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
 }
 
 void create_a_line_input_layer(Network* network, int pos, int dim) {
-    int i;
-    network->input[pos] = malloc(sizeof(float**));
-    network->input[pos][0] = malloc(sizeof(float*));
-    network->input[pos][0][0] = malloc(sizeof(float)*dim);
+    network->input[pos] = (float***)malloc(sizeof(float**));
+    network->input[pos][0] = (float**)malloc(sizeof(float*));
+    network->input[pos][0][0] = (float*)malloc(sizeof(float)*dim);
 }
 
 void add_average_pooling(Network* network, int kernel_size, int activation) {
@@ -87,7 +84,7 @@ void add_average_pooling_flatten(Network* network, int kernel_size, int activati
 }
 
 void add_convolution(Network* network, int nb_filter, int kernel_size, int activation) {
-    int n = network->size, i, j, k;
+    int n = network->size;
     if (network->max_size == n) {
         printf("Impossible de rajouter une couche de convolution, le réseau est déjà plein\n");
         return;
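The explicit casts in these creation.c hunks also document the shape of each buffer: network->input[pos] is a depth x dim x dim cube of floats, allocated level by level. Here is a minimal self-contained sketch of that layout, using the hypothetical names alloc_cube and free_cube (illustrative, not part of the patch); deallocation walks the same levels in reverse order, which is exactly what free.c does below for the network's own cubes.

    #include <stdlib.h>

    /* cube[channel][row][col], as in create_a_cube_input_layer. */
    float*** alloc_cube(int depth, int dim) {
        float*** cube = (float***)malloc(sizeof(float**)*depth);
        for (int i=0; i < depth; i++) {
            cube[i] = (float**)malloc(sizeof(float*)*dim);
            for (int j=0; j < dim; j++) {
                cube[i][j] = (float*)malloc(sizeof(float)*dim);
            }
        }
        return cube;
    }

    /* Free the rows first, then each channel's row table, then the
     * top-level table: the reverse of the allocation order. */
    void free_cube(float*** cube, int depth, int dim) {
        for (int i=0; i < depth; i++) {
            for (int j=0; j < dim; j++) {
                free(cube[i][j]);
            }
            free(cube[i]);
        }
        free(cube);
    }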
@@ -95,33 +92,33 @@ void add_convolution(Network* network, int nb_filter, int kernel_size, int activ
     int r = network->dim[n-1][1];
     int c = nb_filter;
     network->kernel[n].nn = NULL;
-    network->kernel[n].cnn = malloc(sizeof(Kernel_cnn));
+    network->kernel[n].cnn = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
     network->kernel[n].activation = activation;
     network->kernel[n].cnn->k_size = kernel_size;
     network->kernel[n].cnn->rows = r;
     network->kernel[n].cnn->columns = c;
-    network->kernel[n].cnn->w = malloc(sizeof(float***)*r);
-    network->kernel[n].cnn->d_w = malloc(sizeof(float***)*r);
-    for (i=0; i<r; i++) {
-        network->kernel[n].cnn->w[i] = malloc(sizeof(float**)*c);
-        network->kernel[n].cnn->d_w[i] = malloc(sizeof(float**)*c);
-        for (j=0; j<c; j++) {
-            network->kernel[n].cnn->w[i][j] = malloc(sizeof(float*)*kernel_size);
-            network->kernel[n].cnn->d_w[i][j] = malloc(sizeof(float*)*kernel_size);
-            for (k=0; k<kernel_size; k++) {
-                network->kernel[n].cnn->w[i][j][k] = malloc(sizeof(float)*kernel_size);
-                network->kernel[n].cnn->d_w[i][j][k] = malloc(sizeof(float)*kernel_size);
+    network->kernel[n].cnn->w = (float****)malloc(sizeof(float***)*r);
+    network->kernel[n].cnn->d_w = (float****)malloc(sizeof(float***)*r);
+    for (int i=0; i < r; i++) {
+        network->kernel[n].cnn->w[i] = (float***)malloc(sizeof(float**)*c);
+        network->kernel[n].cnn->d_w[i] = (float***)malloc(sizeof(float**)*c);
+        for (int j=0; j < c; j++) {
+            network->kernel[n].cnn->w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
+            network->kernel[n].cnn->d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
+            for (int k=0; k < kernel_size; k++) {
+                network->kernel[n].cnn->w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
+                network->kernel[n].cnn->d_w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
             }
         }
     }
-    network->kernel[n].cnn->bias = malloc(sizeof(float**)*c);
-    network->kernel[n].cnn->d_bias = malloc(sizeof(float**)*c);
-    for (i=0; i<c; i++) {
-        network->kernel[n].cnn->bias[i] = malloc(sizeof(float*)*kernel_size);
-        network->kernel[n].cnn->d_bias[i] = malloc(sizeof(float*)*kernel_size);
-        for (j=0; j<kernel_size; j++) {
-            network->kernel[n].cnn->bias[i][j] = malloc(sizeof(float)*kernel_size);
-            network->kernel[n].cnn->d_bias[i][j] = malloc(sizeof(float)*kernel_size);
+    network->kernel[n].cnn->bias = (float***)malloc(sizeof(float**)*c);
+    network->kernel[n].cnn->d_bias = (float***)malloc(sizeof(float**)*c);
+    for (int i=0; i < c; i++) {
+        network->kernel[n].cnn->bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
+        network->kernel[n].cnn->d_bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
+        for (int j=0; j < kernel_size; j++) {
+            network->kernel[n].cnn->bias[i][j] = (float*)malloc(sizeof(float)*kernel_size);
+            network->kernel[n].cnn->d_bias[i][j] = (float*)malloc(sizeof(float)*kernel_size);
         }
     }
     create_a_cube_input_layer(network, n, c, network->dim[n-1][0] - 2*(kernel_size/2));
@@ -141,17 +138,17 @@ void add_dense(Network* network, int input_units, int output_units, int activati
         return;
     }
     network->kernel[n].cnn = NULL;
-    network->kernel[n].nn = malloc(sizeof(Kernel_nn));
+    network->kernel[n].nn = (Kernel_nn*)malloc(sizeof(Kernel_nn));
     network->kernel[n].activation = activation;
     network->kernel[n].nn->input_units = input_units;
     network->kernel[n].nn->output_units = output_units;
-    network->kernel[n].nn->bias = malloc(sizeof(float)*output_units);
-    network->kernel[n].nn->d_bias = malloc(sizeof(float)*output_units);
-    network->kernel[n].nn->weights = malloc(sizeof(float*)*input_units);
-    network->kernel[n].nn->d_weights = malloc(sizeof(float*)*input_units);
-    for (int i=0; i<input_units; i++) {
-        network->kernel[n].nn->weights[i] = malloc(sizeof(float)*output_units);
-        network->kernel[n].nn->d_weights[i] = malloc(sizeof(float)*output_units);
+    network->kernel[n].nn->bias = (float*)malloc(sizeof(float)*output_units);
+    network->kernel[n].nn->d_bias = (float*)malloc(sizeof(float)*output_units);
+    network->kernel[n].nn->weights = (float**)malloc(sizeof(float*)*input_units);
+    network->kernel[n].nn->d_weights = (float**)malloc(sizeof(float*)*input_units);
+    for (int i=0; i < input_units; i++) {
+        network->kernel[n].nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
+        network->kernel[n].nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
     initialisation_1d_matrix(network->initialisation, network->kernel[n].nn->bias, output_units, output_units+input_units);
     initialisation_1d_matrix(ZERO, network->kernel[n].nn->d_bias, output_units, output_units+input_units);
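Two conventions in the hunks above are worth spelling out: the convolution weights are indexed as w[input_channel][filter][row][col] (rows is the input depth, columns the number of filters), and the layer's output side is dim[n-1][0] - 2*(kernel_size/2), the size of an unpadded stride-1 convolution. A quick sanity check of that arithmetic, assuming odd kernel sizes as in LeNet-5 (conv_output_dim is a hypothetical helper, not a function from the patch):

    #include <assert.h>
    #include <stdio.h>

    /* Output side of a valid (unpadded, stride-1) convolution; for odd
     * kernel_size, dim - 2*(kernel_size/2) equals the usual dim - kernel_size + 1. */
    int conv_output_dim(int dim, int kernel_size) {
        return dim - 2*(kernel_size/2);
    }

    int main(void) {
        assert(conv_output_dim(32, 5) == 28);  /* LeNet-5: 32x32 -> 28x28 */
        assert(conv_output_dim(14, 5) == 10);  /* second conv: 14x14 -> 10x10 */
        printf("output dims ok\n");
        return 0;
    }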
#include "free.h" void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) { - int i, j, k; - for (i=0; iinput[pos][i][j]); } free(network->input[pos][i]); @@ -28,12 +27,12 @@ void free_average_pooling_flatten(Network* network, int pos) { } void free_convolution(Network* network, int pos) { - int i, j, k, c = network->kernel[pos].cnn->columns; + int c = network->kernel[pos].cnn->columns; int k_size = network->kernel[pos].cnn->k_size; int r = network->kernel[pos].cnn->rows; free_a_cube_input_layer(network, pos, c, network->dim[pos-1][0] - 2*(k_size/2)); - for (i=0; ikernel[pos].cnn->bias[i][j]); free(network->kernel[pos].cnn->d_bias[i][j]); } @@ -43,9 +42,9 @@ void free_convolution(Network* network, int pos) { free(network->kernel[pos].cnn->bias); free(network->kernel[pos].cnn->d_bias); - for (i=0; ikernel[pos].cnn->w[i][j][k]); free(network->kernel[pos].cnn->d_w[i][j][k]); } @@ -63,8 +62,8 @@ void free_convolution(Network* network, int pos) { void free_dense(Network* network, int pos) { free_a_line_input_layer(network, pos); - int i, dim = network->kernel[pos].nn->output_units; - for (int i=0; ikernel[pos].nn->output_units; + for (int i=0; i < dim; i++) { free(network->kernel[pos].nn->weights[i]); free(network->kernel[pos].nn->d_weights[i]); } @@ -80,7 +79,7 @@ void free_dense(Network* network, int pos) { void free_network_creation(Network* network) { free_a_cube_input_layer(network, 0, network->dim[0][1], network->dim[0][0]); - for (int i=0; imax_size; i++) { + for (int i=0; i < network->max_size; i++) { free(network->dim[i]); } free(network->dim); diff --git a/src/mnist_cnn/function.c b/src/mnist_cnn/function.c index 5f2bb82..360af32 100644 --- a/src/mnist_cnn/function.c +++ b/src/mnist_cnn/function.c @@ -4,7 +4,7 @@ #include "function.h" float max(float a, float b) { - return ak_size; - for (i=0; icolumns; i++) { - for (j=0; jk_size; + for (int i=0; i < kernel->columns; i++) { + for (int j=0; j < output_dim; j++) { + for (int k=0; k < output_dim; k++) { f = kernel->bias[i][j][k]; - for (a=0; arows; a++) { - for (b=0; brows; a++) { + for (int b=0; b < n; b++) { + for (int c=0; c < n; c++) { f += kernel->w[a][i][b][c]*input[a][j+a][k+b]; } } @@ -25,13 +25,13 @@ void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int o void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim) { //NOT FINISHED, MISS CONDITIONS ON THE POOLING float average; - int i, j, k, a, b, n=size*size; - for (i=0; ibias[i]; - for (j=0; jweights[i][j]*input[j]; } output[i] = f;