From 7f88acf17f5a5aad01bc55134890c4f9fda3cc6a Mon Sep 17 00:00:00 2001
From: augustin64
Date: Thu, 2 Mar 2023 10:35:25 +0100
Subject: [PATCH] Add `float clip(float)` to update.c

---
 src/cnn/include/update.h |   6 ++
 src/cnn/update.c         | 174 ++++++++++++++++++++-------------
 2 files changed, 94 insertions(+), 86 deletions(-)

diff --git a/src/cnn/include/update.h b/src/cnn/include/update.h
index e370130..d1a72a2 100644
--- a/src/cnn/include/update.h
+++ b/src/cnn/include/update.h
@@ -11,6 +11,12 @@
 */
 #define CLIP_VALUE 300
 
+/*
+* Reduces the value of a if abs(a) > CLIP_VALUE
+* Returns the modified value, i.e. `sign(a)*min(abs(a), CLIP_VALUE)`
+*/
+float clip(float a);
+
 /*
 * Updates the weights from data obtained after several backpropagations
 * Then sets all d_weights to 0
diff --git a/src/cnn/update.c b/src/cnn/update.c
index 504bebd..5a29755 100644
--- a/src/cnn/update.c
+++ b/src/cnn/update.c
@@ -3,34 +3,41 @@
 #include "include/update.h"
 #include "include/struct.h"
 
+float clip(float a) {
+    if (a > CLIP_VALUE) {
+        return CLIP_VALUE;
+    }
+    if (a < -CLIP_VALUE) {
+        return -CLIP_VALUE;
+    }
+    return a;
+}
+
 void update_weights(Network* network, Network* d_network) {
     int n = network->size;
-    int input_depth, input_width, output_depth, output_width, k_size;
-    Kernel* k_i;
-    Kernel* dk_i;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        dk_i = d_network->kernel[i];
-        input_depth = network->depth[i];
-        input_width = network->width[i];
-        output_depth = network->depth[i+1];
-        output_width = network->width[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* dk_i = d_network->kernel[i];
+
+        int input_depth = network->depth[i];
+        int input_width = network->width[i];
+
+        int output_depth = network->depth[i+1];
+        int output_width = network->width[i+1];
 
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i->cnn;
             Kernel_cnn* d_cnn = dk_i->cnn;
-            k_size = cnn->k_size;
-            for (int a=0; a<input_depth; a++) {
-                for (int b=0; b<output_depth; b++) {
-                    for (int c=0; c<k_size; c++) {
-                        for (int d=0; d<k_size; d++) {
+
+            int k_size = cnn->k_size;
+            for (int a=0; a < input_depth; a++) {
+                for (int b=0; b < output_depth; b++) {
+                    for (int c=0; c < k_size; c++) {
+                        for (int d=0; d < k_size; d++) {
                             cnn->weights[a][b][c][d] -= network->learning_rate * d_cnn->d_weights[a][b][c][d];
                             d_cnn->d_weights[a][b][c][d] = 0;
-                            if (cnn->weights[a][b][c][d] > CLIP_VALUE)
-                                cnn->weights[a][b][c][d] = CLIP_VALUE;
-                            else if (cnn->weights[a][b][c][d] < -CLIP_VALUE)
-                                cnn->weights[a][b][c][d] = -CLIP_VALUE;
+                            cnn->weights[a][b][c][d] = clip(cnn->weights[a][b][c][d]);
                         }
                     }
                 }
@@ -39,8 +46,9 @@ void update_weights(Network* network, Network* d_network) {
             if (k_i->linearisation == 0) { // Vector -> Vector
                 Kernel_nn* nn = k_i->nn;
                 Kernel_nn* d_nn = dk_i->nn;
-                for (int a=0; a<input_width; a++) {
-                    for (int b=0; b<output_width; b++) {
+
+                for (int a=0; a < input_width; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->weights[a][b] -= network->learning_rate * d_nn->d_weights[a][b];
                         d_nn->d_weights[a][b] = 0;
                     }
@@ -48,91 +56,83 @@ void update_weights(Network* network, Network* d_network) {
             } else { // Matrix -> vector
                 Kernel_nn* nn = k_i->nn;
                 Kernel_nn* d_nn = dk_i->nn;
+
                 int size_input = input_width*input_width*input_depth;
-                for (int a=0; a<size_input; a++) {
-                    for (int b=0; b<output_width; b++) {
+                for (int a=0; a < size_input; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->weights[a][b] -= network->learning_rate * d_nn->d_weights[a][b];
                         d_nn->d_weights[a][b] = 0;
-                        if (nn->weights[a][b] > CLIP_VALUE)
-                            nn->weights[a][b] = CLIP_VALUE;
-                        else if (nn->weights[a][b] < -CLIP_VALUE)
-                            nn->weights[a][b] = -CLIP_VALUE;
+                        nn->weights[a][b] = clip(nn->weights[a][b]);
                     }
                 }
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
 
 void update_bias(Network* network, Network* d_network) {
     int n = network->size;
-    int output_width, output_depth;
-    Kernel* k_i;
-    Kernel* dk_i;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        dk_i = d_network->kernel[i];
-        output_width = network->width[i+1];
-        output_depth = network->depth[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* dk_i = d_network->kernel[i];
+        int output_width = network->width[i+1];
+        int output_depth = network->depth[i+1];
 
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i->cnn;
             Kernel_cnn* d_cnn = dk_i->cnn;
-            for (int a=0; a<output_depth; a++) {
-                for (int b=0; b<output_width; b++) {
-                    for (int c=0; c<output_width; c++) {
+            for (int a=0; a < output_depth; a++) {
+                for (int b=0; b < output_width; b++) {
+                    for (int c=0; c < output_width; c++) {
                         cnn->bias[a][b][c] -= network->learning_rate * d_cnn->d_bias[a][b][c];
                         d_cnn->d_bias[a][b][c] = 0;
-                        if (cnn->bias[a][b][c] > CLIP_VALUE)
-                            cnn->bias[a][b][c] = CLIP_VALUE;
-                        else if (cnn->bias[a][b][c] < -CLIP_VALUE)
-                            cnn->bias[a][b][c] = -CLIP_VALUE;
+                        cnn->bias[a][b][c] = clip(cnn->bias[a][b][c]);
                    }
                 }
             }
         } else if (k_i->nn) { // Full connection
             Kernel_nn* nn = k_i->nn;
             Kernel_nn* d_nn = dk_i->nn;
-            for (int a=0; a<output_width; a++) {
+            for (int a=0; a < output_width; a++) {
                 nn->bias[a] -= network->learning_rate * d_nn->d_bias[a];
                 d_nn->d_bias[a] = 0;
-                if (nn->bias[a] > CLIP_VALUE)
-                    nn->bias[a] = CLIP_VALUE;
-                else if (nn->bias[a] < -CLIP_VALUE)
-                    nn->bias[a] = -CLIP_VALUE;
+                nn->bias[a] = clip(nn->bias[a]);
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
 
 void reset_d_weights(Network* network) {
     int n = network->size;
-    int input_depth, input_width, output_depth, output_width;
-    Kernel* k_i;
-    Kernel* k_i_1;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        k_i_1 = network->kernel[i+1];
-        input_depth = network->depth[i];
-        input_width = network->width[i];
-        output_depth = network->depth[i+1];
-        output_width = network->width[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* k_i_1 = network->kernel[i+1];
+
+        int input_depth = network->depth[i];
+        int input_width = network->width[i];
+
+        int output_depth = network->depth[i+1];
+        int output_width = network->width[i+1];
 
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i_1->cnn;
+
            int k_size = cnn->k_size;
-            for (int a=0; a<input_depth; a++) {
-                for (int b=0; b<output_depth; b++) {
-                    for (int c=0; c<k_size; c++) {
-                        for (int d=0; d<k_size; d++) {
+            for (int a=0; a < input_depth; a++) {
+                for (int b=0; b < output_depth; b++) {
+                    for (int c=0; c < k_size; c++) {
+                        for (int d=0; d < k_size; d++) {
                             cnn->d_weights[a][b][c][d] = 0;
                         }
                     }
@@ -141,53 +141,55 @@ void reset_d_weights(Network* network) {
         } else if (k_i->nn) { // Full connection
             if (k_i->linearisation == 0) { // Vector -> Vector
                 Kernel_nn* nn = k_i_1->nn;
-                for (int a=0; a<input_width; a++) {
-                    for (int b=0; b<output_width; b++) {
+                for (int a=0; a < input_width; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->d_weights[a][b] = 0;
                     }
                 }
             } else { // Matrix -> vector
                 Kernel_nn* nn = k_i_1->nn;
+
                 int size_input = input_width*input_width*input_depth;
-                for (int a=0; a<size_input; a++) {
-                    for (int b=0; b<output_width; b++) {
+                for (int a=0; a < size_input; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->d_weights[a][b] = 0;
                     }
                 }
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
 
 void reset_d_bias(Network* network) {
     int n = network->size;
-    int output_width, output_depth;
-    Kernel* k_i;
-    Kernel* k_i_1;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        k_i_1 = network->kernel[i+1];
-        output_width = network->width[i+1];
-        output_depth = network->depth[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* k_i_1 = network->kernel[i+1];
+
+        int output_width = network->width[i+1];
+        int output_depth = network->depth[i+1];
 
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i_1->cnn;
-            for (int a=0; a<output_depth; a++) {
-                for (int b=0; b<output_width; b++) {
-                    for (int c=0; c<output_width; c++) {
+            for (int a=0; a < output_depth; a++) {
+                for (int b=0; b < output_width; b++) {
+                    for (int c=0; c < output_width; c++) {
                         cnn->d_bias[a][b][c] = 0;
                     }
                 }
             }
         } else if (k_i->nn) { // Full connection
             Kernel_nn* nn = k_i_1->nn;
-            for (int a=0; a<output_width; a++) {
+            for (int a=0; a < output_width; a++) {
                 nn->d_bias[a] = 0;
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
    }
 }
\ No newline at end of file
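
As a sanity check, the clipping rule introduced by this patch can be exercised on its own. The sketch below copies CLIP_VALUE and clip() from the patch; the main() driver and its sample values are illustrative only and are not part of the change.

#include <stdio.h>

/* Copied from the patch: src/cnn/include/update.h / src/cnn/update.c */
#define CLIP_VALUE 300

/* Returns sign(a)*min(abs(a), CLIP_VALUE) */
float clip(float a) {
    if (a > CLIP_VALUE) {
        return CLIP_VALUE;
    }
    if (a < -CLIP_VALUE) {
        return -CLIP_VALUE;
    }
    return a;
}

/* Illustrative driver, not part of the patch */
int main(void) {
    printf("%.1f\n", clip(12.5f));   /* 12.5   : inside [-300, 300], unchanged */
    printf("%.1f\n", clip(512.0f));  /* 300.0  : clamped to the upper bound */
    printf("%.1f\n", clip(-512.0f)); /* -300.0 : clamped to the lower bound */
    return 0;
}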