tipe/src/cnn/update.c

#include <stdio.h>
#include "include/update.h"
#include "include/struct.h"
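
// Clamp a value to the interval [-CLIP_VALUE, CLIP_VALUE]; used after each
// update step to keep weights and biases bounded.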
float clip(float a) {
    if (a > CLIP_VALUE) {
        return CLIP_VALUE;
    }
    if (a < -CLIP_VALUE) {
        return -CLIP_VALUE;
    }
    return a;
}
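
// Perform one gradient-descent step on every weight of the network,
// weight -= learning_rate * d_weight, using the gradients stored in d_network,
// then reset each gradient to zero (convolution and matrix -> vector weights
// are also clamped with clip()).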
void update_weights(Network* network, Network* d_network) {
    int n = network->size;

    for (int i=0; i < (n-1); i++) {
        Kernel* k_i = network->kernel[i];
        Kernel* dk_i = d_network->kernel[i];
        int input_depth = network->depth[i];
        int input_width = network->width[i];
        int output_depth = network->depth[i+1];
        int output_width = network->width[i+1];

        if (k_i->cnn) { // Convolution
            Kernel_cnn* cnn = k_i->cnn;
            Kernel_cnn* d_cnn = dk_i->cnn;

            int k_size = cnn->k_size;

            for (int a=0; a < input_depth; a++) {
                for (int b=0; b < output_depth; b++) {
                    for (int c=0; c < k_size; c++) {
                        for (int d=0; d < k_size; d++) {
                            cnn->weights[a][b][c][d] -= network->learning_rate * d_cnn->d_weights[a][b][c][d];
                            d_cnn->d_weights[a][b][c][d] = 0;

                            cnn->weights[a][b][c][d] = clip(cnn->weights[a][b][c][d]);
                        }
                    }
                }
            }
        } else if (k_i->nn) { // Fully connected
            if (k_i->linearisation == 0) { // Vector -> Vector
                Kernel_nn* nn = k_i->nn;
                Kernel_nn* d_nn = dk_i->nn;

                for (int a=0; a < input_width; a++) {
                    for (int b=0; b < output_width; b++) {
                        nn->weights[a][b] -= network->learning_rate * d_nn->d_weights[a][b];
                        d_nn->d_weights[a][b] = 0;
                    }
                }
            } else { // Matrix -> vector
                Kernel_nn* nn = k_i->nn;
                Kernel_nn* d_nn = dk_i->nn;

                int size_input = input_width*input_width*input_depth;

                for (int a=0; a < size_input; a++) {
                    for (int b=0; b < output_width; b++) {
                        nn->weights[a][b] -= network->learning_rate * d_nn->d_weights[a][b];
                        d_nn->d_weights[a][b] = 0;

                        nn->weights[a][b] = clip(nn->weights[a][b]);
                    }
                }
            }
        }
        // A pooling layer requires no processing
    }
}
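
// Perform one gradient-descent step on every bias of the network,
// bias -= learning_rate * d_bias, using the gradients stored in d_network,
// then reset each gradient to zero and clamp the bias with clip().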
void update_bias(Network* network, Network* d_network) {
    int n = network->size;

    for (int i=0; i < (n-1); i++) {
        Kernel* k_i = network->kernel[i];
        Kernel* dk_i = d_network->kernel[i];
        int output_width = network->width[i+1];
        int output_depth = network->depth[i+1];

        if (k_i->cnn) { // Convolution
            Kernel_cnn* cnn = k_i->cnn;
            Kernel_cnn* d_cnn = dk_i->cnn;

            for (int a=0; a < output_depth; a++) {
                for (int b=0; b < output_width; b++) {
                    for (int c=0; c < output_width; c++) {
                        cnn->bias[a][b][c] -= network->learning_rate * d_cnn->d_bias[a][b][c];
                        d_cnn->d_bias[a][b][c] = 0;

                        cnn->bias[a][b][c] = clip(cnn->bias[a][b][c]);
                    }
                }
            }
        } else if (k_i->nn) { // Fully connected
            Kernel_nn* nn = k_i->nn;
            Kernel_nn* d_nn = dk_i->nn;

            for (int a=0; a < output_width; a++) {
                nn->bias[a] -= network->learning_rate * d_nn->d_bias[a];
                d_nn->d_bias[a] = 0;

                nn->bias[a] = clip(nn->bias[a]);
            }
        }
        // A pooling layer requires no processing
    }
}
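
// Reset the accumulated weight gradients (d_weights) of the network to zero.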
void reset_d_weights(Network* network) {
    int n = network->size;

    for (int i=0; i < (n-1); i++) {
        Kernel* k_i = network->kernel[i];
        Kernel* k_i_1 = network->kernel[i+1];
        int input_depth = network->depth[i];
        int input_width = network->width[i];
        int output_depth = network->depth[i+1];
        int output_width = network->width[i+1];

        if (k_i->cnn) { // Convolution
            Kernel_cnn* cnn = k_i_1->cnn;

            int k_size = cnn->k_size;

            for (int a=0; a < input_depth; a++) {
                for (int b=0; b < output_depth; b++) {
                    for (int c=0; c < k_size; c++) {
                        for (int d=0; d < k_size; d++) {
                            cnn->d_weights[a][b][c][d] = 0;
                        }
                    }
                }
            }
        } else if (k_i->nn) { // Fully connected
            if (k_i->linearisation == 0) { // Vector -> Vector
                Kernel_nn* nn = k_i_1->nn;

                for (int a=0; a < input_width; a++) {
                    for (int b=0; b < output_width; b++) {
                        nn->d_weights[a][b] = 0;
                    }
                }
            } else { // Matrix -> vector
                Kernel_nn* nn = k_i_1->nn;

                int size_input = input_width*input_width*input_depth;

                for (int a=0; a < size_input; a++) {
                    for (int b=0; b < output_width; b++) {
                        nn->d_weights[a][b] = 0;
                    }
                }
            }
        }
        // A pooling layer requires no processing
    }
}
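
// Reset the accumulated bias gradients (d_bias) of the network to zero.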
void reset_d_bias(Network* network) {
    int n = network->size;

    for (int i=0; i < (n-1); i++) {
        Kernel* k_i = network->kernel[i];
        Kernel* k_i_1 = network->kernel[i+1];
        int output_width = network->width[i+1];
        int output_depth = network->depth[i+1];

        if (k_i->cnn) { // Convolution
            Kernel_cnn* cnn = k_i_1->cnn;

            for (int a=0; a < output_depth; a++) {
                for (int b=0; b < output_width; b++) {
                    for (int c=0; c < output_width; c++) {
                        cnn->d_bias[a][b][c] = 0;
                    }
                }
            }
        } else if (k_i->nn) { // Fully connected
            Kernel_nn* nn = k_i_1->nn;

            for (int a=0; a < output_width; a++) {
                nn->d_bias[a] = 0;
            }
        }
        // A pooling layer requires no processing
    }
}
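
/*
 * Hypothetical usage sketch (not part of the original file): once backpropagation
 * has accumulated the gradients of a batch into d_network, one optimisation step
 * could apply and clear them as below. The helper name apply_gradient_step is an
 * assumption used for illustration only.
 */
static void apply_gradient_step(Network* network, Network* d_network) {
    update_weights(network, d_network); // weights -= learning_rate * d_weights, then d_weights set to 0
    update_bias(network, d_network);    // biases  -= learning_rate * d_bias,    then d_bias set to 0
}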