From c67d2bf69719fe6d32bee92e5b3b9e6869c329cb Mon Sep 17 00:00:00 2001
From: julienChemillier
Date: Sun, 19 Feb 2023 12:50:27 +0100
Subject: [PATCH] Change 'input_units' to 'size_input'

---
 doc/cnn/neuron_io.md     |  4 ++--
 src/cnn/creation.c       | 30 +++++++++++++++---------------
 src/cnn/free.c           |  4 ++--
 src/cnn/include/struct.h |  6 +++---
 src/cnn/neuron_io.c      | 12 ++++++------
 src/cnn/utils.c          | 28 ++++++++++++++--------------
 test/cnn_structure.c     |  2 +-
 7 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/doc/cnn/neuron_io.md b/doc/cnn/neuron_io.md
index 256e826..b89ef76 100644
--- a/doc/cnn/neuron_io.md
+++ b/doc/cnn/neuron_io.md
@@ -44,7 +44,7 @@ type | nom de la variable | commentaire
 :---:|:---:|:---:
 uint32_t|activation|
 uint32_t|linearisation|
-uint32_t|input_units|
+uint32_t|size_input|
 uint32_t|output_units|
 
 #### Si la couche est de type pooling:
@@ -79,4 +79,4 @@ float|...|
 float|bias[nn->output_units-1]|biais
 float|weights[0][0]|poids
 float|...|
-float|weights[nn->input_units-1][nn->output_units-1]|
+float|weights[nn->size_input-1][nn->output_units-1]|
diff --git a/src/cnn/creation.c b/src/cnn/creation.c
index 4cdb41a..e8eca23 100644
--- a/src/cnn/creation.c
+++ b/src/cnn/creation.c
@@ -205,7 +205,7 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
 void add_dense(Network* network, int output_units, int activation) {
     int n = network->size;
     int k_pos = n-1;
-    int input_units = network->width[k_pos];
+    int size_input = network->width[k_pos];
     if (network->max_size == n) {
         printf("Impossible de rajouter une couche dense, le réseau est déjà plein\n");
         return;
@@ -216,7 +216,7 @@ void add_dense(Network* network, int output_units, int activation) {
     network->kernel[k_pos]->activation = activation;
     network->kernel[k_pos]->linearisation = 0;
     network->kernel[k_pos]->pooling = 0;
-    nn->input_units = input_units;
+    nn->size_input = size_input;
     nn->output_units = output_units;
     nn->bias = (float*)nalloc(sizeof(float)*output_units);
     nn->d_bias = (float*)nalloc(sizeof(float)*output_units);
@@ -224,9 +224,9 @@ void add_dense(Network* network, int output_units, int activation) {
         nn->d_bias[i] = 0.;
     }
 
-    nn->weights = (float**)nalloc(sizeof(float*)*input_units);
-    nn->d_weights = (float**)nalloc(sizeof(float*)*input_units);
-    for (int i=0; i < input_units; i++) {
+    nn->weights = (float**)nalloc(sizeof(float*)*size_input);
+    nn->d_weights = (float**)nalloc(sizeof(float*)*size_input);
+    for (int i=0; i < size_input; i++) {
         nn->weights[i] = (float*)nalloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)nalloc(sizeof(float)*output_units);
         for (int j=0; j < output_units; j++) {
@@ -234,19 +234,19 @@ void add_dense(Network* network, int output_units, int activation) {
         }
     }
 
-    initialisation_1d_matrix(network->initialisation, nn->bias, output_units, input_units);
-    initialisation_2d_matrix(network->initialisation, nn->weights, input_units, output_units, input_units, output_units);
+    initialisation_1d_matrix(network->initialisation, nn->bias, output_units, size_input);
+    initialisation_2d_matrix(network->initialisation, nn->weights, size_input, output_units, size_input, output_units);
     create_a_line_input_layer(network, n, output_units);
     create_a_line_input_z_layer(network, n, output_units);
     network->size++;
 }
 
 
 void add_dense_linearisation(Network* network, int output_units, int activation) {
-    // Can replace input_units by a research of this dim
+    // Can replace size_input by a research of this dim
     int n = network->size;
     int k_pos = n-1;
-    int input_units = network->depth[k_pos]*network->width[k_pos]*network->width[k_pos];
+    int size_input = network->depth[k_pos]*network->width[k_pos]*network->width[k_pos];
     if (network->max_size == n) {
         printf("Impossible de rajouter une couche dense, le réseau est déjà plein\n");
         return;
@@ -257,7 +257,7 @@ void add_dense_linearisation(Network* network, int output_units, int activation)
     network->kernel[k_pos]->activation = activation;
     network->kernel[k_pos]->linearisation = 1;
     network->kernel[k_pos]->pooling = 0;
-    nn->input_units = input_units;
+    nn->size_input = size_input;
     nn->output_units = output_units;
 
     nn->bias = (float*)nalloc(sizeof(float)*output_units);
@@ -265,17 +265,17 @@ void add_dense_linearisation(Network* network, int output_units, int activation)
     for (int i=0; i < output_units; i++) {
         nn->d_bias[i] = 0.;
     }
-    nn->weights = (float**)nalloc(sizeof(float*)*input_units);
-    nn->d_weights = (float**)nalloc(sizeof(float*)*input_units);
-    for (int i=0; i < input_units; i++) {
+    nn->weights = (float**)nalloc(sizeof(float*)*size_input);
+    nn->d_weights = (float**)nalloc(sizeof(float*)*size_input);
+    for (int i=0; i < size_input; i++) {
         nn->weights[i] = (float*)nalloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)nalloc(sizeof(float)*output_units);
         for (int j=0; j < output_units; j++) {
             nn->d_weights[i][j] = 0.;
         }
     }
-    initialisation_1d_matrix(network->initialisation, nn->bias, output_units, input_units);
-    initialisation_2d_matrix(network->initialisation, nn->weights, input_units, output_units, input_units, output_units);
+    initialisation_1d_matrix(network->initialisation, nn->bias, output_units, size_input);
+    initialisation_2d_matrix(network->initialisation, nn->weights, size_input, output_units, size_input, output_units);
     create_a_line_input_layer(network, n, output_units);
     create_a_line_input_z_layer(network, n, output_units);
     network->size++;
diff --git a/src/cnn/free.c b/src/cnn/free.c
index e0ce7bf..bf9e8c9 100644
--- a/src/cnn/free.c
+++ b/src/cnn/free.c
@@ -70,7 +70,7 @@ void free_convolution(Network* network, int pos) {
 void free_dense(Network* network, int pos) {
     free_a_line_input_layer(network, pos+1);
     Kernel_nn* k_pos = network->kernel[pos]->nn;
-    int dim = k_pos->input_units;
+    int dim = k_pos->size_input;
     for (int i=0; i < dim; i++) {
         gree(k_pos->weights[i]);
         gree(k_pos->d_weights[i]);
@@ -87,7 +87,7 @@ void free_dense(Network* network, int pos) {
 void free_dense_linearisation(Network* network, int pos) {
     free_a_line_input_layer(network, pos+1);
     Kernel_nn* k_pos = network->kernel[pos]->nn;
-    int dim = k_pos->input_units;
+    int dim = k_pos->size_input;
     for (int i=0; i < dim; i++) {
         gree(k_pos->weights[i]);
         gree(k_pos->d_weights[i]);
diff --git a/src/cnn/include/struct.h b/src/cnn/include/struct.h
index 1f314d8..161cd13 100644
--- a/src/cnn/include/struct.h
+++ b/src/cnn/include/struct.h
@@ -12,12 +12,12 @@ typedef struct Kernel_cnn {
 } Kernel_cnn;
 
 typedef struct Kernel_nn {
-    int input_units; // Nombre d'éléments en entrée
+    int size_input; // Nombre d'éléments en entrée
     int output_units; // Nombre d'éléments en sortie
     float* bias; // bias[output_units]
     float* d_bias; // d_bias[output_units]
-    float** weights; // weight[input_units][output_units]
-    float** d_weights; // d_weights[input_units][output_units]
+    float** weights; // weight[size_input][output_units]
+    float** d_weights; // d_weights[size_input][output_units]
 } Kernel_nn;
 
 typedef struct Kernel {
diff --git a/src/cnn/neuron_io.c b/src/cnn/neuron_io.c
index 987e19b..71ab9af 100644
--- a/src/cnn/neuron_io.c
+++ b/src/cnn/neuron_io.c
@@ -111,7 +111,7 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
         uint32_t pre_buffer[4];
         pre_buffer[0] = kernel->activation;
         pre_buffer[1] = kernel->linearisation;
-        pre_buffer[2] = nn->input_units;
+        pre_buffer[2] = nn->size_input;
         pre_buffer[3] = nn->output_units;
         fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
 
@@ -122,7 +122,7 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
         }
         fwrite(buffer, sizeof(buffer), 1, ptr);
 
-        for (int i=0; i < nn->input_units; i++) {
+        for (int i=0; i < nn->size_input; i++) {
             indice_buffer = 0;
             float buffer[nn->output_units];
             for (int j=0; j < nn->output_units; j++) {
@@ -287,7 +287,7 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
 
         kernel->activation = buffer[0];
         kernel->linearisation = buffer[1];
-        kernel->nn->input_units = buffer[2];
+        kernel->nn->size_input = buffer[2];
         kernel->nn->output_units = buffer[3];
 
         // Lecture du corps
@@ -302,9 +302,9 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
             nn->d_bias[i] = 0.;
         }
 
-        nn->weights = (float**)nalloc(sizeof(float*)*nn->input_units);
-        nn->d_weights = (float**)nalloc(sizeof(float*)*nn->input_units);
-        for (int i=0; i < nn->input_units; i++) {
+        nn->weights = (float**)nalloc(sizeof(float*)*nn->size_input);
+        nn->d_weights = (float**)nalloc(sizeof(float*)*nn->size_input);
+        for (int i=0; i < nn->size_input; i++) {
             nn->weights[i] = (float*)nalloc(sizeof(float)*nn->output_units);
             nn->d_weights[i] = (float*)nalloc(sizeof(float)*nn->output_units);
             for (int j=0; j < nn->output_units; j++) {
diff --git a/src/cnn/utils.c b/src/cnn/utils.c
index 37241ac..ca8a203 100644
--- a/src/cnn/utils.c
+++ b/src/cnn/utils.c
@@ -56,12 +56,12 @@ bool equals_networks(Network* network1, Network* network2) {
             checkEquals(kernel[i]->pooling, "kernel[i]->pooling pour un pooling", i);
         } else if (!network1->kernel[i]->cnn) { // Type NN
 
-            checkEquals(kernel[i]->nn->input_units, "kernel[i]->nn->input_units", i);
+            checkEquals(kernel[i]->nn->size_input, "kernel[i]->nn->size_input", i);
             checkEquals(kernel[i]->nn->output_units, "kernel[i]->nn->output_units", i);
             for (int j=0; j < network1->kernel[i]->nn->output_units; j++) {
                 checkEquals(kernel[i]->nn->bias[j], "kernel[i]->nn->bias[j]", j);
             }
-            for (int j=0; j < network1->kernel[i]->nn->input_units; j++) {
+            for (int j=0; j < network1->kernel[i]->nn->size_input; j++) {
                 for (int k=0; k < network1->kernel[i]->nn->output_units; k++) {
                     checkEquals(kernel[i]->nn->weights[j][k], "kernel[i]->nn->weights[j][k]", k);
                 }
@@ -100,7 +100,7 @@ Network* copy_network(Network* network) {
     // Paramètre du réseau
     int size = network->size;
     // Paramètres des couches NN
-    int input_units;
+    int size_input;
     int output_units;
     // Paramètres des couches CNN
     int rows;
@@ -137,13 +137,13 @@ Network* copy_network(Network* network) {
             copyVar(kernel[i]->activation);
             copyVar(kernel[i]->linearisation); // 0
 
-            input_units = network->kernel[i]->nn->input_units;
+            size_input = network->kernel[i]->nn->size_input;
             output_units = network->kernel[i]->nn->output_units;
 
             network_cp->kernel[i]->cnn = NULL;
             network_cp->kernel[i]->nn = (Kernel_nn*)nalloc(sizeof(Kernel_nn));
 
-            copyVar(kernel[i]->nn->input_units);
+            copyVar(kernel[i]->nn->size_input);
             copyVar(kernel[i]->nn->output_units);
 
             network_cp->kernel[i]->nn->bias = (float*)nalloc(sizeof(float)*output_units);
@@ -153,9 +153,9 @@ Network* copy_network(Network* network) {
                 network_cp->kernel[i]->nn->d_bias[j] = 0.;
             }
 
-            network_cp->kernel[i]->nn->weights = (float**)nalloc(sizeof(float*)*input_units);
-            network_cp->kernel[i]->nn->d_weights = (float**)nalloc(sizeof(float*)*input_units);
-            for (int j=0; j < input_units; j++) {
+            network_cp->kernel[i]->nn->weights = (float**)nalloc(sizeof(float*)*size_input);
+            network_cp->kernel[i]->nn->d_weights = (float**)nalloc(sizeof(float*)*size_input);
+            for (int j=0; j < size_input; j++) {
                 network_cp->kernel[i]->nn->weights[j] = (float*)nalloc(sizeof(float)*output_units);
                 network_cp->kernel[i]->nn->d_weights[j] = (float*)nalloc(sizeof(float)*output_units);
                 for (int k=0; k < output_units; k++) {
@@ -254,7 +254,7 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
     // Paramètre du réseau
     int size = network_src->size;
    // Paramètres des couches NN
-    int input_units;
+    int size_input;
     int output_units;
     // Paramètres des couches CNN
     int rows;
@@ -267,13 +267,13 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
     for (int i=0; i < size-1; i++) {
         if (!network_src->kernel[i]->cnn && network_src->kernel[i]->nn) { // Cas du NN
 
-            input_units = network_src->kernel[i]->nn->input_units;
+            size_input = network_src->kernel[i]->nn->size_input;
             output_units = network_src->kernel[i]->nn->output_units;
 
             for (int j=0; j < output_units; j++) {
                 copyVarParams(kernel[i]->nn->bias[j]);
             }
-            for (int j=0; j < input_units; j++) {
+            for (int j=0; j < size_input; j++) {
                 for (int k=0; k < output_units; k++) {
                     copyVarParams(kernel[i]->nn->weights[j][k]);
                 }
@@ -315,7 +315,7 @@ int count_null_weights(Network* network) {
     int size = network->size;
 
     // Paramètres des couches NN
-    int input_units;
+    int size_input;
     int output_units;
     // Paramètres des couches CNN
     int rows;
@@ -326,13 +326,13 @@ int count_null_weights(Network* network) {
     for (int i=0; i < size-1; i++) {
         if (!network->kernel[i]->cnn && network->kernel[i]->nn) { // Cas du NN
 
-            input_units = network->kernel[i]->nn->input_units;
+            size_input = network->kernel[i]->nn->size_input;
             output_units = network->kernel[i]->nn->output_units;
 
             for (int j=0; j < output_units; j++) {
                 null_bias += fabs(network->kernel[i]->nn->bias[j]) <= epsilon;
             }
-            for (int j=0; j < input_units; j++) {
+            for (int j=0; j < size_input; j++) {
                 for (int k=0; k < output_units; k++) {
                     null_weights += fabs(network->kernel[i]->nn->weights[j][k]) <= epsilon;
                 }
diff --git a/test/cnn_structure.c b/test/cnn_structure.c
index 2375dd0..dec4ad1 100644
--- a/test/cnn_structure.c
+++ b/test/cnn_structure.c
@@ -27,7 +27,7 @@ int main() {
             }
         } else if (!kernel->cnn) {
            printf("\n==== Couche %d de type "GREEN"NN"RESET" ====\n", i);
-            printf("input: %d\n", kernel->nn->input_units);
+            printf("input: %d\n", kernel->nn->size_input);
             printf("output: %d\n", kernel->nn->output_units);
         } else {
             printf("\n==== Couche %d de type "BLUE"CNN"RESET" ====\n", i);