mirror of https://github.com/augustin64/projet-tipe
Add float clip(float) to update.c

parent c45b21e322
commit 7f88acf17f
src/cnn/include/update.h
@@ -11,6 +11,12 @@
 */
 
+#define CLIP_VALUE 300
+
+/*
+* Reduces the value of a if abs(a) > CLIP_VALUE
+* Returns the modified value, i.e. `sign(a)*min(abs(a), CLIP_VALUE)`
+*/
+float clip(float a);
 
 /*
 * Updates the weights from the data accumulated over several backpropagations
 * Then resets all the d_weights to 0
 */
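Note: the documented contract `sign(a)*min(abs(a), CLIP_VALUE)` can also be written as a one-liner over math.h. A minimal sketch for comparison only (clip_oneliner is an illustrative name, not code from this commit):

#include <math.h>

/* Same contract as the declared clip(): magnitude capped at CLIP_VALUE,
 * sign preserved. Assumes CLIP_VALUE = 300 as defined above. */
static float clip_oneliner(float a) {
    return copysignf(fminf(fabsf(a), 300.0f), a);
}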
src/cnn/update.c
@@ -3,34 +3,41 @@
 #include "include/update.h"
 #include "include/struct.h"
 
+float clip(float a) {
+    if (a > CLIP_VALUE) {
+        return CLIP_VALUE;
+    }
+    if (a < -CLIP_VALUE) {
+        return -CLIP_VALUE;
+    }
+    return a;
+}
+
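A quick way to check the new function against its documented contract is a tiny assert-based harness; a hypothetical sketch, assuming it is compiled and linked together with update.c (the sample values are illustrative):

#include <assert.h>
#include "include/update.h"

int main(void) {
    assert(clip(350.0f) == 300.0f);    // above CLIP_VALUE: saturates to 300
    assert(clip(-350.0f) == -300.0f);  // below -CLIP_VALUE: saturates to -300
    assert(clip(42.0f) == 42.0f);      // within range: returned unchanged
    return 0;
}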
 void update_weights(Network* network, Network* d_network) {
     int n = network->size;
-    int input_depth, input_width, output_depth, output_width, k_size;
-    Kernel* k_i;
-    Kernel* dk_i;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        dk_i = d_network->kernel[i];
-        input_depth = network->depth[i];
-        input_width = network->width[i];
-        output_depth = network->depth[i+1];
-        output_width = network->width[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* dk_i = d_network->kernel[i];
+
+        int input_depth = network->depth[i];
+        int input_width = network->width[i];
+
+        int output_depth = network->depth[i+1];
+        int output_width = network->width[i+1];
+
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i->cnn;
             Kernel_cnn* d_cnn = dk_i->cnn;
-            k_size = cnn->k_size;
-            for (int a=0; a<input_depth; a++) {
-                for (int b=0; b<output_depth; b++) {
-                    for (int c=0; c<k_size; c++) {
-                        for (int d=0; d<k_size; d++) {
+
+            int k_size = cnn->k_size;
+            for (int a=0; a < input_depth; a++) {
+                for (int b=0; b < output_depth; b++) {
+                    for (int c=0; c < k_size; c++) {
+                        for (int d=0; d < k_size; d++) {
                             cnn->weights[a][b][c][d] -= network->learning_rate * d_cnn->d_weights[a][b][c][d];
                             d_cnn->d_weights[a][b][c][d] = 0;
-
-                            if (cnn->weights[a][b][c][d] > CLIP_VALUE)
-                                cnn->weights[a][b][c][d] = CLIP_VALUE;
-                            else if (cnn->weights[a][b][c][d] < -CLIP_VALUE)
-                                cnn->weights[a][b][c][d] = -CLIP_VALUE;
+                            cnn->weights[a][b][c][d] = clip(cnn->weights[a][b][c][d]);
                         }
                     }
                 }
@@ -39,8 +46,9 @@ void update_weights(Network* network, Network* d_network) {
             if (k_i->linearisation == 0) { // Vector -> Vector
                 Kernel_nn* nn = k_i->nn;
                 Kernel_nn* d_nn = dk_i->nn;
-                for (int a=0; a<input_width; a++) {
-                    for (int b=0; b<output_width; b++) {
+
+                for (int a=0; a < input_width; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->weights[a][b] -= network->learning_rate * d_nn->d_weights[a][b];
                         d_nn->d_weights[a][b] = 0;
                     }
@@ -48,91 +56,83 @@ void update_weights(Network* network, Network* d_network) {
             } else { // Matrix -> vector
                 Kernel_nn* nn = k_i->nn;
                 Kernel_nn* d_nn = dk_i->nn;
 
                 int size_input = input_width*input_width*input_depth;
-                for (int a=0; a<size_input; a++) {
-                    for (int b=0; b<output_width; b++) {
+
+                for (int a=0; a < size_input; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->weights[a][b] -= network->learning_rate * d_nn->d_weights[a][b];
                         d_nn->d_weights[a][b] = 0;
-
-                        if (nn->weights[a][b] > CLIP_VALUE)
-                            nn->weights[a][b] = CLIP_VALUE;
-                        else if (nn->weights[a][b] < -CLIP_VALUE)
-                            nn->weights[a][b] = -CLIP_VALUE;
+                        nn->weights[a][b] = clip(nn->weights[a][b]);
                     }
                 }
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
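The per-parameter pattern above is a plain SGD step followed by clipping, repeated over every trainable tensor. A minimal sketch of the same three operations on a flat array (sgd_step and n_params are illustrative names, not part of the repository):

// One SGD step with gradient-accumulator reset and weight clipping.
void sgd_step(float* weights, float* d_weights, int n_params, float learning_rate) {
    for (int i = 0; i < n_params; i++) {
        weights[i] -= learning_rate * d_weights[i];  // gradient descent step
        d_weights[i] = 0;                            // clear the accumulator for the next batch
        weights[i] = clip(weights[i]);               // keep the weight in [-CLIP_VALUE, CLIP_VALUE]
    }
}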
 
 void update_bias(Network* network, Network* d_network) {
     int n = network->size;
-    int output_width, output_depth;
-    Kernel* k_i;
-    Kernel* dk_i;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        dk_i = d_network->kernel[i];
-        output_width = network->width[i+1];
-        output_depth = network->depth[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* dk_i = d_network->kernel[i];
+        int output_width = network->width[i+1];
+        int output_depth = network->depth[i+1];
+
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i->cnn;
             Kernel_cnn* d_cnn = dk_i->cnn;
-            for (int a=0; a<output_depth; a++) {
-                for (int b=0; b<output_width; b++) {
-                    for (int c=0; c<output_width; c++) {
+
+            for (int a=0; a < output_depth; a++) {
+                for (int b=0; b < output_width; b++) {
+                    for (int c=0; c < output_width; c++) {
                         cnn->bias[a][b][c] -= network->learning_rate * d_cnn->d_bias[a][b][c];
                         d_cnn->d_bias[a][b][c] = 0;
-
-                        if (cnn->bias[a][b][c] > CLIP_VALUE)
-                            cnn->bias[a][b][c] = CLIP_VALUE;
-                        else if (cnn->bias[a][b][c] < -CLIP_VALUE)
-                            cnn->bias[a][b][c] = -CLIP_VALUE;
+                        cnn->bias[a][b][c] = clip(cnn->bias[a][b][c]);
                     }
                 }
             }
         } else if (k_i->nn) { // Full connection
             Kernel_nn* nn = k_i->nn;
             Kernel_nn* d_nn = dk_i->nn;
-            for (int a=0; a<output_width; a++) {
+
+            for (int a=0; a < output_width; a++) {
                 nn->bias[a] -= network->learning_rate * d_nn->d_bias[a];
                 d_nn->d_bias[a] = 0;
-
-                if (nn->bias[a] > CLIP_VALUE)
-                    nn->bias[a] = CLIP_VALUE;
-                else if (nn->bias[a] < -CLIP_VALUE)
-                    nn->bias[a] = -CLIP_VALUE;
+                nn->bias[a] = clip(nn->bias[a]);
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
 
 void reset_d_weights(Network* network) {
     int n = network->size;
-    int input_depth, input_width, output_depth, output_width;
-    Kernel* k_i;
-    Kernel* k_i_1;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        k_i_1 = network->kernel[i+1];
-        input_depth = network->depth[i];
-        input_width = network->width[i];
-        output_depth = network->depth[i+1];
-        output_width = network->width[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* k_i_1 = network->kernel[i+1];
+
+        int input_depth = network->depth[i];
+        int input_width = network->width[i];
+
+        int output_depth = network->depth[i+1];
+        int output_width = network->width[i+1];
+
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i_1->cnn;
 
             int k_size = cnn->k_size;
-            for (int a=0; a<input_depth; a++) {
-                for (int b=0; b<output_depth; b++) {
-                    for (int c=0; c<k_size; c++) {
-                        for (int d=0; d<k_size; d++) {
+
+            for (int a=0; a < input_depth; a++) {
+                for (int b=0; b < output_depth; b++) {
+                    for (int c=0; c < k_size; c++) {
+                        for (int d=0; d < k_size; d++) {
                             cnn->d_weights[a][b][c][d] = 0;
                         }
                     }
@@ -141,53 +141,55 @@ void reset_d_weights(Network* network) {
         } else if (k_i->nn) { // Full connection
             if (k_i->linearisation == 0) { // Vector -> Vector
                 Kernel_nn* nn = k_i_1->nn;
-                for (int a=0; a<input_width; a++) {
-                    for (int b=0; b<output_width; b++) {
+
+                for (int a=0; a < input_width; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->d_weights[a][b] = 0;
                     }
                }
            } else { // Matrix -> vector
                Kernel_nn* nn = k_i_1->nn;
 
                int size_input = input_width*input_width*input_depth;
-                for (int a=0; a<size_input; a++) {
-                    for (int b=0; b<output_width; b++) {
+
+                for (int a=0; a < size_input; a++) {
+                    for (int b=0; b < output_width; b++) {
                         nn->d_weights[a][b] = 0;
                     }
                }
            }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
 
 void reset_d_bias(Network* network) {
     int n = network->size;
-    int output_width, output_depth;
-    Kernel* k_i;
-    Kernel* k_i_1;
-    for (int i=0; i<(n-1); i++) {
-        k_i = network->kernel[i];
-        k_i_1 = network->kernel[i+1];
-        output_width = network->width[i+1];
-        output_depth = network->depth[i+1];
+
+    for (int i=0; i < (n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        Kernel* k_i_1 = network->kernel[i+1];
+
+        int output_width = network->width[i+1];
+        int output_depth = network->depth[i+1];
+
         if (k_i->cnn) { // Convolution
             Kernel_cnn* cnn = k_i_1->cnn;
-            for (int a=0; a<output_depth; a++) {
-                for (int b=0; b<output_width; b++) {
-                    for (int c=0; c<output_width; c++) {
+
+            for (int a=0; a < output_depth; a++) {
+                for (int b=0; b < output_width; b++) {
+                    for (int c=0; c < output_width; c++) {
                         cnn->d_bias[a][b][c] = 0;
                     }
                }
            }
         } else if (k_i->nn) { // Full connection
             Kernel_nn* nn = k_i_1->nn;
-            for (int a=0; a<output_width; a++) {
+
+            for (int a=0; a < output_width; a++) {
                 nn->d_bias[a] = 0;
             }
-        } else { // Pooling
-            (void)0; // Do nothing for the pooling layer
         }
+        // A pooling layer requires no processing
     }
 }
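Taken together, the four functions cover one optimizer step. A hypothetical driver, assuming network and d_network are built and filled by the repository's own allocation and backpropagation code, which is not part of this diff (train_batch is an illustrative name):

void train_batch(Network* network, Network* d_network) {
    reset_d_weights(d_network);  // start the batch with clean gradient accumulators
    reset_d_bias(d_network);
    // ... forward pass and backpropagation accumulate gradients into d_network ...
    update_weights(network, d_network);  // apply the step, clip, and re-zero d_weights
    update_bias(network, d_network);     // same for the biases
}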