Mirror of https://github.com/augustin64/projet-tipe (synced 2025-01-23 15:16:26 +01:00)

Commit fade0aa28d: Add 'finetuning' variable to the Network class
Parent commit: 771bfcaf70
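
This commit threads a new finetuning field through network creation, freeing, serialization and weight updates, so that gradient buffers (d_*, plus the Adam moment buffers s_d_* and v_d_*) are only allocated for the layers that will actually be trained. The constants EVERYTHING, NN_AND_LINEARISATION and NN_ONLY used below come from include/cnn.h, which this diff does not show; judging from the comment added to the Network struct ("0 on everything; 1 on dense and linearisation; 2 on dense") and from the <= comparisons in the code, they are presumably defined along these lines:

    /* Hypothetical sketch of include/cnn.h (not shown in this diff):
       how deep backpropagation reaches into the network. */
    #define EVERYTHING           0  // train convolutions, linearisation and dense layers
    #define NN_AND_LINEARISATION 1  // freeze the convolutions
    #define NN_ONLY              2  // train the dense layers only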
@@ -6,10 +6,11 @@
 #include "../common/include/utils.h"
 #include "include/initialisation.h"
 #include "include/function.h"
+#include "include/cnn.h"
 
 #include "include/creation.h"
 
-Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth) {
+Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth, int finetuning) {
     if (dropout < 0 || dropout > 100) {
         printf_error("The dropout probability is out of range: it must be between 0 and 100\n");
     }
@@ -19,6 +20,7 @@ Network* create_network(int max_size, float learning_rate, int dropout, int init
     network->dropout = dropout;
     network->initialisation = initialisation;
     network->size = 1;
+    network->finetuning = finetuning;
     network->input = (float****)nalloc(max_size, sizeof(float***));
     network->input_z = (float****)nalloc(max_size, sizeof(float***));
     network->kernel = (Kernel**)nalloc(max_size-1, sizeof(Kernel*));
@@ -148,70 +150,87 @@ void add_convolution(Network* network, int kernel_size, int number_of_kernels, i
     cnn->rows = input_depth;
     cnn->columns = output_depth;
 
+    // Always-initialized part
     cnn->weights = (float****)nalloc(input_depth, sizeof(float***));
-    cnn->d_weights = (float****)nalloc(input_depth, sizeof(float***));
-    #ifdef ADAM_CNN_WEIGHTS
-    cnn->s_d_weights = (float****)nalloc(input_depth, sizeof(float***));
-    cnn->v_d_weights = (float****)nalloc(input_depth, sizeof(float***));
-    #endif
     for (int i=0; i < input_depth; i++) {
         cnn->weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        cnn->d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        #ifdef ADAM_CNN_WEIGHTS
-        cnn->s_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        cnn->v_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        #endif
         for (int j=0; j < output_depth; j++) {
             cnn->weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            cnn->d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            #ifdef ADAM_CNN_WEIGHTS
-            cnn->s_d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            cnn->v_d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            #endif
             for (int k=0; k < kernel_size; k++) {
                 cnn->weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                cnn->d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                #ifdef ADAM_CNN_WEIGHTS
-                cnn->s_d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                cnn->v_d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                #endif
-                for (int l=0; l < kernel_size; l++) {
-                    cnn->d_weights[i][j][k][l] = 0.;
-                    #ifdef ADAM_CNN_WEIGHTS
-                    cnn->s_d_weights[i][j][k][l] = 0.;
-                    cnn->v_d_weights[i][j][k][l] = 0.;
-                    #endif
-                }
             }
         }
     }
 
     cnn->bias = (float***)nalloc(output_depth, sizeof(float**));
-    cnn->d_bias = (float***)nalloc(output_depth, sizeof(float**));
-    #ifdef ADAM_CNN_BIAS
-    cnn->s_d_bias = (float***)nalloc(output_depth, sizeof(float**));
-    cnn->v_d_bias = (float***)nalloc(output_depth, sizeof(float**));
-    #endif
     for (int i=0; i < output_depth; i++) {
         cnn->bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        cnn->d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        #ifdef ADAM_CNN_BIAS
-        cnn->s_d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        cnn->v_d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        #endif
         for (int j=0; j < bias_size; j++) {
             cnn->bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
-            cnn->d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
-            #ifdef ADAM_CNN_BIAS
-            cnn->s_d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
-            cnn->v_d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
-            #endif
-            for (int k=0; k < bias_size; k++) {
-                cnn->d_bias[i][j][k] = 0.;
-                #ifdef ADAM_CNN_BIAS
-                cnn->s_d_bias[i][j][k] = 0.;
-                cnn->v_d_bias[i][j][k] = 0.;
-                #endif
-            }
         }
     }
+
+    // Part initialized only under certain conditions
+    if (network->finetuning == EVERYTHING) {
+        cnn->d_weights = (float****)nalloc(input_depth, sizeof(float***));
+        #ifdef ADAM_CNN_WEIGHTS
+        cnn->s_d_weights = (float****)nalloc(input_depth, sizeof(float***));
+        cnn->v_d_weights = (float****)nalloc(input_depth, sizeof(float***));
+        #endif
+        for (int i=0; i < input_depth; i++) {
+            cnn->d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
+            #ifdef ADAM_CNN_WEIGHTS
+            cnn->s_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
+            cnn->v_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
+            #endif
+            for (int j=0; j < output_depth; j++) {
+                cnn->d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
+                #ifdef ADAM_CNN_WEIGHTS
+                cnn->s_d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
+                cnn->v_d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
+                #endif
+                for (int k=0; k < kernel_size; k++) {
+                    cnn->d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
+                    #ifdef ADAM_CNN_WEIGHTS
+                    cnn->s_d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
+                    cnn->v_d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
+                    #endif
+                    for (int l=0; l < kernel_size; l++) {
+                        cnn->d_weights[i][j][k][l] = 0.;
+                        #ifdef ADAM_CNN_WEIGHTS
+                        cnn->s_d_weights[i][j][k][l] = 0.;
+                        cnn->v_d_weights[i][j][k][l] = 0.;
+                        #endif
+                    }
+                }
+            }
+        }
+
+        cnn->d_bias = (float***)nalloc(output_depth, sizeof(float**));
+        #ifdef ADAM_CNN_BIAS
+        cnn->s_d_bias = (float***)nalloc(output_depth, sizeof(float**));
+        cnn->v_d_bias = (float***)nalloc(output_depth, sizeof(float**));
+        #endif
+        for (int i=0; i < output_depth; i++) {
+            cnn->d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
+            #ifdef ADAM_CNN_BIAS
+            cnn->s_d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
+            cnn->v_d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
+            #endif
+            for (int j=0; j < bias_size; j++) {
+                cnn->d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
+                #ifdef ADAM_CNN_BIAS
+                cnn->s_d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
+                cnn->v_d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
+                #endif
+                for (int k=0; k < bias_size; k++) {
+                    cnn->d_bias[i][j][k] = 0.;
+                    #ifdef ADAM_CNN_BIAS
+                    cnn->s_d_bias[i][j][k] = 0.;
+                    cnn->v_d_bias[i][j][k] = 0.;
+                    #endif
+                }
+            }
+        }
+    }
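
Note that the conditional block above covers both the raw gradients (d_weights, d_bias) and the Adam moment buffers (s_d_*, v_d_*): when ADAM_CNN_WEIGHTS or ADAM_CNN_BIAS is not defined, the corresponding moment buffers are skipped in either case, exactly as before the change.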
@@ -245,28 +264,19 @@ void add_dense(Network* network, int size_output, int activation) {
     nn->size_input = size_input;
     nn->size_output = size_output;
 
-    nn->bias = (float*)nalloc(size_output, sizeof(float));
-    nn->d_bias = (float*)nalloc(size_output, sizeof(float));
-    #ifdef ADAM_DENSE_BIAS
-    nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
-    nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
-    #endif
-    for (int i=0; i < size_output; i++) {
-        nn->d_bias[i] = 0.;
-        #ifdef ADAM_DENSE_BIAS
-        nn->s_d_bias[i] = 0.;
-        nn->v_d_bias[i] = 0.;
-        #endif
+    nn->weights = (float**)nalloc(size_input, sizeof(float*));
+    for (int i=0; i < size_input; i++) {
+        nn->weights[i] = (float*)nalloc(size_output, sizeof(float));
     }
 
-    nn->weights = (float**)nalloc(size_input, sizeof(float*));
+    nn->bias = (float*)nalloc(size_output, sizeof(float));
 
     nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
     #ifdef ADAM_DENSE_WEIGHTS
     nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
     nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
     #endif
     for (int i=0; i < size_input; i++) {
-        nn->weights[i] = (float*)nalloc(size_output, sizeof(float));
         nn->d_weights[i] = (float*)nalloc(size_output, sizeof(float));
         #ifdef ADAM_DENSE_WEIGHTS
         nn->s_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
@@ -281,6 +291,20 @@ void add_dense(Network* network, int size_output, int activation) {
         }
     }
 
+    nn->d_bias = (float*)nalloc(size_output, sizeof(float));
+    #ifdef ADAM_DENSE_BIAS
+    nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
+    nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
+    #endif
+    for (int i=0; i < size_output; i++) {
+        nn->d_bias[i] = 0.;
+        #ifdef ADAM_DENSE_BIAS
+        nn->s_d_bias[i] = 0.;
+        nn->v_d_bias[i] = 0.;
+        #endif
+    }
+
     initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input, size_output);
     initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
     create_a_line_input_layer(network, n, size_output);
@@ -310,38 +334,46 @@ void add_dense_linearisation(Network* network, int size_output, int activation)
     nn->size_input = size_input;
     nn->size_output = size_output;
 
-    nn->bias = (float*)nalloc(size_output, sizeof(float));
-    nn->d_bias = (float*)nalloc(size_output, sizeof(float));
-    #ifdef ADAM_DENSE_BIAS
-    nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
-    nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
-    #endif
-    for (int i=0; i < size_output; i++) {
-        nn->d_bias[i] = 0.;
-        #ifdef ADAM_DENSE_BIAS
-        nn->s_d_bias[i] = 0.;
-        nn->v_d_bias[i] = 0.;
-        #endif
-    }
-
+    // Always-initialized part
     nn->weights = (float**)nalloc(size_input, sizeof(float*));
-    nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
-    #ifdef ADAM_DENSE_WEIGHTS
-    nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
-    nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
-    #endif
     for (int i=0; i < size_input; i++) {
         nn->weights[i] = (float*)nalloc(size_output, sizeof(float));
-        nn->d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        #ifdef ADAM_DENSE_WEIGHTS
-        nn->s_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        nn->v_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        #endif
-        for (int j=0; j < size_output; j++) {
-            nn->d_weights[i][j] = 0.;
-            #ifdef ADAM_DENSE_WEIGHTS
-            nn->s_d_weights[i][j] = 0.;
-            nn->v_d_weights[i][j] = 0.;
+    }
+
+    nn->bias = (float*)nalloc(size_output, sizeof(float));
+
+    // Part initialized only under certain conditions
+    if (network->finetuning <= NN_AND_LINEARISATION) {
+        nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
+        #ifdef ADAM_DENSE_WEIGHTS
+        nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
+        nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
+        #endif
+        for (int i=0; i < size_input; i++) {
+            nn->d_weights[i] = (float*)nalloc(size_output, sizeof(float));
+            #ifdef ADAM_DENSE_WEIGHTS
+            nn->s_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
+            nn->v_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
+            #endif
+            for (int j=0; j < size_output; j++) {
+                nn->d_weights[i][j] = 0.;
+                #ifdef ADAM_DENSE_WEIGHTS
+                nn->s_d_weights[i][j] = 0.;
+                nn->v_d_weights[i][j] = 0.;
+                #endif
+            }
+        }
+
+        nn->d_bias = (float*)nalloc(size_output, sizeof(float));
+        #ifdef ADAM_DENSE_BIAS
+        nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
+        nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
+        #endif
+        for (int i=0; i < size_output; i++) {
+            nn->d_bias[i] = 0.;
+            #ifdef ADAM_DENSE_BIAS
+            nn->s_d_bias[i] = 0.;
+            nn->v_d_bias[i] = 0.;
             #endif
         }
     }
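
Assuming the ordering sketched above (EVERYTHING = 0 < NN_AND_LINEARISATION = 1 < NN_ONLY = 2), the test network->finetuning <= NN_AND_LINEARISATION reads as "the fine-tuning level is at most 1", i.e. the linearisation layer is still trained. A hypothetical helper, not part of the commit, makes the two spellings used throughout this diff explicit:

    static int linearisation_is_trained(const Network* network) {
        return network->finetuning <= NN_AND_LINEARISATION;
        // equivalently, under the presumed values: network->finetuning != NN_ONLY
    }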
src/cnn/free.c (136 changed lines)
@@ -3,6 +3,7 @@
 #include <stdio.h>
 
 #include "../common/include/memory_management.h"
+#include "include/cnn.h"
 
 #include "include/free.h"
 
@@ -42,59 +43,76 @@ void free_convolution(Network* network, int pos) {
     int r = k_pos->rows;
     int bias_size = network->width[pos+1];
     free_a_cube_input_layer(network, pos+1, network->depth[pos+1], network->width[pos+1]);
 
+    // Always-initialized part (hence: to free)
     for (int i=0; i < c; i++) {
         for (int j=0; j < bias_size; j++) {
             gree(k_pos->bias[i][j], true);
-            gree(k_pos->d_bias[i][j], true);
-            #ifdef ADAM_CNN_BIAS
-            gree(k_pos->s_d_bias[i][j], true);
-            gree(k_pos->v_d_bias[i][j], true);
-            #endif
         }
         gree(k_pos->bias[i], true);
-        gree(k_pos->d_bias[i], true);
-        #ifdef ADAM_CNN_BIAS
-        gree(k_pos->s_d_bias[i], true);
-        gree(k_pos->v_d_bias[i], true);
-        #endif
     }
     gree(k_pos->bias, true);
-    gree(k_pos->d_bias, true);
-    #ifdef ADAM_CNN_BIAS
-    gree(k_pos->s_d_bias, true);
-    gree(k_pos->v_d_bias, true);
-    #endif
 
     for (int i=0; i < r; i++) {
         for (int j=0; j < c; j++) {
             for (int k=0; k < k_size; k++) {
                 gree(k_pos->weights[i][j][k], true);
-                gree(k_pos->d_weights[i][j][k], true);
-                #ifdef ADAM_CNN_WEIGHTS
-                gree(k_pos->s_d_weights[i][j][k], true);
-                gree(k_pos->v_d_weights[i][j][k], true);
-                #endif
             }
             gree(k_pos->weights[i][j], true);
-            gree(k_pos->d_weights[i][j], true);
-            #ifdef ADAM_CNN_WEIGHTS
-            gree(k_pos->s_d_weights[i][j], true);
-            gree(k_pos->v_d_weights[i][j], true);
-            #endif
         }
         gree(k_pos->weights[i], true);
-        gree(k_pos->d_weights[i], true);
-        #ifdef ADAM_CNN_WEIGHTS
-        gree(k_pos->s_d_weights[i], true);
-        gree(k_pos->v_d_weights[i], true);
-        #endif
     }
     gree(k_pos->weights, true);
-    gree(k_pos->d_weights, true);
-    #ifdef ADAM_CNN_WEIGHTS
-    gree(k_pos->s_d_weights, true);
-    gree(k_pos->v_d_weights, true);
-    #endif
 
+    // Part initialized only under certain conditions (hence: not always freed)
+    if (network->finetuning == EVERYTHING) {
+        for (int i=0; i < c; i++) {
+            for (int j=0; j < bias_size; j++) {
+                gree(k_pos->d_bias[i][j], true);
+                #ifdef ADAM_CNN_BIAS
+                gree(k_pos->s_d_bias[i][j], true);
+                gree(k_pos->v_d_bias[i][j], true);
+                #endif
+            }
+            gree(k_pos->d_bias[i], true);
+            #ifdef ADAM_CNN_BIAS
+            gree(k_pos->s_d_bias[i], true);
+            gree(k_pos->v_d_bias[i], true);
+            #endif
+        }
+        gree(k_pos->d_bias, true);
+        #ifdef ADAM_CNN_BIAS
+        gree(k_pos->s_d_bias, true);
+        gree(k_pos->v_d_bias, true);
+        #endif
+
+        for (int i=0; i < r; i++) {
+            for (int j=0; j < c; j++) {
+                for (int k=0; k < k_size; k++) {
+                    gree(k_pos->d_weights[i][j][k], true);
+                    #ifdef ADAM_CNN_WEIGHTS
+                    gree(k_pos->s_d_weights[i][j][k], true);
+                    gree(k_pos->v_d_weights[i][j][k], true);
+                    #endif
+                }
+                gree(k_pos->d_weights[i][j], true);
+                #ifdef ADAM_CNN_WEIGHTS
+                gree(k_pos->s_d_weights[i][j], true);
+                gree(k_pos->v_d_weights[i][j], true);
+                #endif
+            }
+            gree(k_pos->d_weights[i], true);
+            #ifdef ADAM_CNN_WEIGHTS
+            gree(k_pos->s_d_weights[i], true);
+            gree(k_pos->v_d_weights[i], true);
+            #endif
+        }
+        gree(k_pos->d_weights, true);
+        #ifdef ADAM_CNN_WEIGHTS
+        gree(k_pos->s_d_weights, true);
+        gree(k_pos->v_d_weights, true);
+        #endif
+    }
 
     gree(k_pos, true);
 }
@@ -103,22 +121,26 @@ void free_dense(Network* network, int pos) {
     free_a_line_input_layer(network, pos+1);
     Kernel_nn* k_pos = network->kernel[pos]->nn;
     int dim = k_pos->size_input;
 
     for (int i=0; i < dim; i++) {
         gree(k_pos->weights[i], true);
+    }
+    gree(k_pos->weights, true);
+    gree(k_pos->bias, true);
+
+    for (int i=0; i < dim; i++) {
         gree(k_pos->d_weights[i], true);
         #ifdef ADAM_DENSE_WEIGHTS
         gree(k_pos->s_d_weights[i], true);
         gree(k_pos->v_d_weights[i], true);
         #endif
     }
-    gree(k_pos->weights, true);
     gree(k_pos->d_weights, true);
     #ifdef ADAM_DENSE_WEIGHTS
     gree(k_pos->s_d_weights, true);
     gree(k_pos->v_d_weights, true);
     #endif
 
-    gree(k_pos->bias, true);
     gree(k_pos->d_bias, true);
     #ifdef ADAM_DENSE_BIAS
     gree(k_pos->s_d_bias, true);
@@ -132,27 +154,35 @@ void free_dense_linearisation(Network* network, int pos) {
     free_a_line_input_layer(network, pos+1);
     Kernel_nn* k_pos = network->kernel[pos]->nn;
     int dim = k_pos->size_input;
 
+    // Always-initialized part (hence: to free)
     for (int i=0; i < dim; i++) {
         gree(k_pos->weights[i], true);
-        gree(k_pos->d_weights[i], true);
-        #ifdef ADAM_DENSE_WEIGHTS
-        gree(k_pos->s_d_weights[i], true);
-        gree(k_pos->v_d_weights[i], true);
-        #endif
     }
     gree(k_pos->weights, true);
-    gree(k_pos->d_weights, true);
-    #ifdef ADAM_DENSE_WEIGHTS
-    gree(k_pos->s_d_weights, true);
-    gree(k_pos->v_d_weights, true);
-    #endif
 
     gree(k_pos->bias, true);
-    gree(k_pos->d_bias, true);
-    #ifdef ADAM_DENSE_BIAS
-    gree(k_pos->s_d_bias, true);
-    gree(k_pos->v_d_bias, true);
-    #endif
 
+    // Part initialized only under certain conditions (hence: not always freed)
+    if (network->finetuning <= NN_AND_LINEARISATION) {
+        for (int i=0; i < dim; i++) {
+            gree(k_pos->d_weights[i], true);
+            #ifdef ADAM_DENSE_WEIGHTS
+            gree(k_pos->s_d_weights[i], true);
+            gree(k_pos->v_d_weights[i], true);
+            #endif
+        }
+        gree(k_pos->d_weights, true);
+        #ifdef ADAM_DENSE_WEIGHTS
+        gree(k_pos->s_d_weights, true);
+        gree(k_pos->v_d_weights, true);
+        #endif
+
+        gree(k_pos->d_bias, true);
+        #ifdef ADAM_DENSE_BIAS
+        gree(k_pos->s_d_bias, true);
+        gree(k_pos->v_d_bias, true);
+        #endif
+    }
 
     gree(k_pos, true);
 }
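
Since free_convolution and free_dense_linearisation now release the gradient buffers only when network->finetuning says they were allocated, the field must presumably hold the same value at creation and destruction time; otherwise the gree() calls would either leak the buffers or free pointers that were never allocated.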
@@ -18,10 +18,10 @@
 
 //* ADAM optimizer options
 //* Enable or disable Adam on the dense layers
-//#define ADAM_DENSE_WEIGHTS
+#define ADAM_DENSE_WEIGHTS
 //#define ADAM_DENSE_BIAS
 //* Enable or disable Adam on the convolutional layers
-//#define ADAM_CNN_WEIGHTS
+#define ADAM_CNN_WEIGHTS
 //#define ADAM_CNN_BIAS
 
 
@@ -7,7 +7,7 @@
 /*
 * Creates a network that can contain max_size layers (including the input and output ones)
 */
-Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth);
+Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth, int finetuning);
 
 /*
 * Creates and allocates memory for an input-cube layer
@@ -8,29 +8,29 @@
 /*
 * Returns a network following the LeNet5 architecture
 */
-Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
+Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning);
 
 /*
 * Returns a network following the AlexNet architecture,
 * i.e. with a 3x227x227 input and an output of size 'size_output'
 */
-Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output);
+Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning);
 
 /*
 * Returns a network following the VGG16 architecture, modified to take a 3x256x256 input
 * and an output of size 'size_output'
 */
-Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
+Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning);
 
 
 /*
 * Returns a network following the original VGG16 architecture, taking a 3x227x227 input
 * and an output of size 1,000
 */
-Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation);
+Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation, int finetuning);
 
 /*
 * Returns a network without convolutions, similar to the one used in src/dense
 */
-Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
+Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning);
 #endif
@@ -67,6 +67,7 @@ typedef struct Network{
     int dropout; // Probability of dropping a neuron, integer in [0, 100]
     float learning_rate; // Learning rate of the network
     int initialisation; // Id of the initialisation type
+    int finetuning; // backpropagation: 0 on everything; 1 on dense and linearisation; 2 on dense
 
     int max_size; // Size of the array containing the network
     int size; // Current size of the network (size ≤ max_size)
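
Hypothetical usage combining the new signatures with the constants assumed above (the learning rate, activation and initialisation arguments are the ones that appear in the train.c hunk below):

    Network* full    = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, 32, 1, EVERYTHING);           // train everything
    Network* partial = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, 32, 1, NN_AND_LINEARISATION); // freeze the convolutions
    Network* head    = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, 32, 1, NN_ONLY);              // train the dense layers only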
@@ -7,8 +7,8 @@
 
 #include "include/models.h"
 
-Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
-    Network* network = create_network(8, learning_rate, dropout, initialisation, input_width, input_depth);
+Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning) {
+    Network* network = create_network(8, learning_rate, dropout, initialisation, input_width, input_depth, finetuning);
     add_convolution(network, 5, 6, 1, 0, activation);
     add_average_pooling(network, 2, 2, 0);
     add_convolution(network, 5, 16, 1, 0, activation);
@@ -19,8 +19,8 @@ Network* create_network_lenet5(float learning_rate, int dropout, int activation,
     return network;
 }
 
-Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
-    Network* network = create_network(12, learning_rate, dropout, initialisation, 227, 3);
+Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning) {
+    Network* network = create_network(12, learning_rate, dropout, initialisation, 227, 3, finetuning);
     add_convolution(network, 11, 96, 4, 0, activation);
     add_average_pooling(network, 3, 2, 0);
     add_convolution(network, 5, 256, 1, 2, activation);
@@ -35,8 +35,8 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
     return network;
 }
 
-Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
-    Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3);
+Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning) {
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3, finetuning);
     add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_average_pooling(network, 2, 2, 0); // Max Pool
@@ -66,8 +66,8 @@ Network* create_network_VGG16(float learning_rate, int dropout, int activation,
     return network;
 }
 
-Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation) {
-    Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3);
+Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation, int finetuning) {
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3, finetuning);
     add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_average_pooling(network, 2, 2, 0); // Max Pool
@@ -97,8 +97,8 @@ Network* create_network_VGG16_227(float learning_rate, int dropout, int activati
     return network;
 }
 
-Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
-    Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
+Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning) {
+    Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth, finetuning);
     add_dense_linearisation(network, 80, activation);
     add_dense(network, 10, SOFTMAX);
     return network;
@@ -187,6 +187,7 @@ Network* read_network(char* filename) {
     network->initialisation = initialisation;
     (void) !fread(&dropout, sizeof(uint32_t), 1, ptr);
     network->dropout = dropout;
+    network->finetuning = 0;
 
     // Read the input size of the different matrices
     network->width = (int*)nalloc(size, sizeof(int));
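
The hard-coded network->finetuning = 0 means a deserialized network defaults to full backpropagation (0 being EVERYTHING under the assumption above), which is consistent with read_kernel below still allocating every gradient buffer unconditionally.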
@@ -268,28 +269,35 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
         float tmp;
 
         cnn->bias = (float***)nalloc(cnn->columns, sizeof(float**));
+        for (int i=0; i < cnn->columns; i++) {
+            cnn->bias[i] = (float**)nalloc(output_width, sizeof(float*));
+            for (int j=0; j < output_width; j++) {
+                cnn->bias[i][j] = (float*)nalloc(output_width, sizeof(float));
+                for (int k=0; k < output_width; k++) {
+                    (void) !fread(&tmp, sizeof(tmp), 1, ptr);
+                    cnn->bias[i][j][k] = tmp;
+                }
+            }
+        }
+
         cnn->d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
         #ifdef ADAM_CNN_BIAS
         cnn->s_d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
         cnn->v_d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
         #endif
         for (int i=0; i < cnn->columns; i++) {
-            cnn->bias[i] = (float**)nalloc(output_width, sizeof(float*));
             cnn->d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
             #ifdef ADAM_CNN_BIAS
             cnn->s_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
             cnn->v_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
             #endif
             for (int j=0; j < output_width; j++) {
-                cnn->bias[i][j] = (float*)nalloc(output_width, sizeof(float));
                 cnn->d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
                 #ifdef ADAM_CNN_BIAS
                 cnn->s_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
                 cnn->v_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
                 #endif
                 for (int k=0; k < output_width; k++) {
-                    (void) !fread(&tmp, sizeof(tmp), 1, ptr);
-                    cnn->bias[i][j][k] = tmp;
                     cnn->d_bias[i][j][k] = 0.;
                     #ifdef ADAM_CNN_BIAS
                     cnn->s_d_bias[i][j][k] = 0.;
@@ -299,36 +307,46 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
             }
         }
 
         cnn->weights = (float****)nalloc(cnn->rows, sizeof(float***));
+        for (int i=0; i < cnn->rows; i++) {
+            cnn->weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
+            for (int j=0; j < cnn->columns; j++) {
+                cnn->weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
+                for (int k=0; k < cnn->k_size; k++) {
+                    cnn->weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
+                    for (int l=0; l < cnn->k_size; l++) {
+                        (void) !fread(&tmp, sizeof(tmp), 1, ptr);
+                        cnn->weights[i][j][k][l] = tmp;
+                    }
+                }
+            }
+        }
+
         cnn->d_weights = (float****)nalloc(cnn->rows, sizeof(float***));
         #ifdef ADAM_CNN_WEIGHTS
         cnn->s_d_weights = (float****)nalloc(cnn->rows, sizeof(float***));
         cnn->v_d_weights = (float****)nalloc(cnn->rows, sizeof(float***));
         #endif
         for (int i=0; i < cnn->rows; i++) {
-            cnn->weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
             cnn->d_weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
             #ifdef ADAM_CNN_WEIGHTS
             cnn->s_d_weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
             cnn->v_d_weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
             #endif
             for (int j=0; j < cnn->columns; j++) {
-                cnn->weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
                 cnn->d_weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
                 #ifdef ADAM_CNN_WEIGHTS
                 cnn->s_d_weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
                 cnn->v_d_weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
                 #endif
                 for (int k=0; k < cnn->k_size; k++) {
-                    cnn->weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
                     cnn->d_weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
                     #ifdef ADAM_CNN_WEIGHTS
                     cnn->s_d_weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
                     cnn->v_d_weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
                     #endif
                     for (int l=0; l < cnn->k_size; l++) {
-                        (void) !fread(&tmp, sizeof(tmp), 1, ptr);
-                        cnn->weights[i][j][k][l] = tmp;
                         cnn->d_weights[i][j][k][l] = 0.;
                         #ifdef ADAM_CNN_WEIGHTS
                         cnn->s_d_weights[i][j][k][l] = 0.;
|
|||||||
float tmp;
|
float tmp;
|
||||||
|
|
||||||
nn->bias = (float*)nalloc(nn->size_output, sizeof(float));
|
nn->bias = (float*)nalloc(nn->size_output, sizeof(float));
|
||||||
|
for (int i=0; i < nn->size_output; i++) {
|
||||||
|
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
|
||||||
|
nn->bias[i] = tmp;
|
||||||
|
}
|
||||||
|
|
||||||
nn->d_bias = (float*)nalloc(nn->size_output, sizeof(float));
|
nn->d_bias = (float*)nalloc(nn->size_output, sizeof(float));
|
||||||
#ifdef ADAM_DENSE_BIAS
|
#ifdef ADAM_DENSE_BIAS
|
||||||
nn->s_d_bias = (float*)nalloc(nn->size_output, sizeof(float));
|
nn->s_d_bias = (float*)nalloc(nn->size_output, sizeof(float));
|
||||||
nn->v_d_bias = (float*)nalloc(nn->size_output, sizeof(float));
|
nn->v_d_bias = (float*)nalloc(nn->size_output, sizeof(float));
|
||||||
#endif
|
#endif
|
||||||
for (int i=0; i < nn->size_output; i++) {
|
for (int i=0; i < nn->size_output; i++) {
|
||||||
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
|
|
||||||
nn->bias[i] = tmp;
|
|
||||||
nn->d_bias[i] = 0.;
|
nn->d_bias[i] = 0.;
|
||||||
#ifdef ADAM_DENSE_BIAS
|
#ifdef ADAM_DENSE_BIAS
|
||||||
nn->s_d_bias[i] = 0.;
|
nn->s_d_bias[i] = 0.;
|
||||||
@@ -373,21 +394,26 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
         }
 
         nn->weights = (float**)nalloc(nn->size_input, sizeof(float*));
+        for (int i=0; i < nn->size_input; i++) {
+            nn->weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
+            for (int j=0; j < nn->size_output; j++) {
+                (void) !fread(&tmp, sizeof(tmp), 1, ptr);
+                nn->weights[i][j] = tmp;
+            }
+        }
+
         nn->d_weights = (float**)nalloc(nn->size_input, sizeof(float*));
         #ifdef ADAM_DENSE_WEIGHTS
         nn->s_d_weights = (float**)nalloc(nn->size_input, sizeof(float*));
         nn->v_d_weights = (float**)nalloc(nn->size_input, sizeof(float*));
         #endif
         for (int i=0; i < nn->size_input; i++) {
-            nn->weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
             nn->d_weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
             #ifdef ADAM_DENSE_WEIGHTS
             nn->s_d_weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
             nn->v_d_weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
             #endif
             for (int j=0; j < nn->size_output; j++) {
-                (void) !fread(&tmp, sizeof(tmp), 1, ptr);
-                nn->weights[i][j] = tmp;
                 nn->d_weights[i][j] = 0.;
                 #ifdef ADAM_DENSE_WEIGHTS
                 nn->s_d_weights[i][j] = 0.;
@@ -395,6 +421,7 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
                 #endif
             }
         }
+
     } else if (type_couche == POOLING) { // Pooling layer case
         uint32_t pooling, linearisation, stride, padding;
         (void) !fread(&linearisation, sizeof(linearisation), 1, ptr);
@@ -232,10 +232,10 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
     Network* network;
     if (!recover) {
         if (dataset_type == 0) {
-            network = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, input_width, input_depth);
+            network = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, input_width, input_depth, finetuning);
             //network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_width, input_depth);
         } else {
-            network = create_network_VGG16(LEARNING_RATE, 0, RELU, HE, dataset->numCategories);
+            network = create_network_VGG16(LEARNING_RATE, 0, RELU, HE, dataset->numCategories, finetuning);
 
             #ifdef USE_MULTITHREADING
             printf_warning("Using VGG16 with multithreading. The amount of RAM used may become excessive\n");
@@ -4,6 +4,7 @@
 
 #include "include/update.h"
 #include "include/struct.h"
+#include "include/cnn.h"
 
 #include "include/config.h"
 
@@ -31,6 +32,9 @@ void update_weights(Network* network, Network* d_network) {
         int output_width = network->width[i+1];
 
         if (k_i->cnn) { // Convolution
+            if (network->finetuning != EVERYTHING) {
+                return; // Then backpropagation is finished
+            }
             Kernel_cnn* cnn = k_i->cnn;
             Kernel_cnn* d_cnn = dk_i->cnn;
             int k_size = cnn->k_size;
@@ -70,6 +74,9 @@ void update_weights(Network* network, Network* d_network) {
                 }
             }
         } else { // Matrix -> vector
+            if (network->finetuning == NN_ONLY) {
+                return; // Then backpropagation is finished
+            }
             Kernel_nn* nn = k_i->nn;
             Kernel_nn* d_nn = dk_i->nn;
 
@@ -105,6 +112,9 @@ void update_bias(Network* network, Network* d_network) {
         int output_depth = network->depth[i+1];
 
         if (k_i->cnn) { // Convolution
+            if (network->finetuning != EVERYTHING) {
+                return; // Then backpropagation is finished
+            }
             Kernel_cnn* cnn = k_i->cnn;
             Kernel_cnn* d_cnn = dk_i->cnn;
 
@@ -124,6 +134,11 @@ void update_bias(Network* network, Network* d_network) {
                 }
             }
         } else if (k_i->nn) { // Full connection
+            if (k_i->linearisation == DO_LINEARISE) { // Matrix -> vector
+                if (network->finetuning == NN_ONLY) {
+                    return; // Then backpropagation is finished
+                }
+            }
             Kernel_nn* nn = k_i->nn;
             Kernel_nn* d_nn = dk_i->nn;
 
@@ -157,6 +172,9 @@ void reset_d_weights(Network* network) {
         int output_width = network->width[i+1];
 
        if (k_i->cnn) { // Convolution
+            if (network->finetuning != EVERYTHING) {
+                continue; // Not initialized, so there is nothing to reset
+            }
             Kernel_cnn* cnn = k_i_1->cnn;
 
             int k_size = cnn->k_size;
@@ -180,6 +198,9 @@ void reset_d_weights(Network* network) {
                 }
             }
         } else { // Matrix -> vector
+            if (network->finetuning == NN_ONLY) {
+                continue; // Not initialized, so there is nothing to reset
+            }
             Kernel_nn* nn = k_i_1->nn;
 
             int size_input = input_width*input_width*input_depth;
@@ -206,6 +227,9 @@ void reset_d_bias(Network* network) {
         int output_depth = network->depth[i+1];
 
         if (k_i->cnn) { // Convolution
+            if (network->finetuning != EVERYTHING) {
+                continue; // Not initialized, so there is nothing to reset
+            }
             Kernel_cnn* cnn = k_i_1->cnn;
 
             for (int a=0; a < output_depth; a++) {
@@ -216,6 +240,11 @@ void reset_d_bias(Network* network) {
                 }
             }
         } else if (k_i->nn) { // Full connection
+            if (k_i->linearisation == DO_LINEARISE) {
+                if (network->finetuning == NN_ONLY) {
+                    continue; // Not initialized, so there is nothing to reset
+                }
+            }
             Kernel_nn* nn = k_i_1->nn;
 
             for (int a=0; a < output_width; a++) {
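
One detail worth noting: the update functions bail out with return (the comment reads "then backpropagation is finished", which presumably relies on the kernels being visited in an order where every remaining one is frozen as well), whereas the reset functions use continue, since they merely skip buffers that were never allocated.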
@@ -13,7 +13,7 @@
 
 int main() {
     printf("Creating the network\n");
-    Network* network = create_network_lenet5(0, 0, 3, GLOROT, 32, 1);
+    Network* network = create_network_lenet5(0, 0, 3, GLOROT, 32, 1, 2); // No need to initialise the whole backprop
     printf(GREEN "OK\n" RESET);
 
     printf("Writing the network\n");
@@ -8,12 +8,13 @@
 #include "../src/cnn/include/models.h"
 #include "../src/cnn/include/utils.h"
 #include "../src/cnn/include/free.h"
+#include "../src/cnn/include/cnn.h"
 
 
 int main() {
     Kernel* kernel;
     printf("Creating the network\n");
-    Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1);
+    Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1, NN_ONLY); // No need to initialise the whole backprop
     printf(GREEN "OK\n" RESET);
 
     printf("LeNet5 architecture:\n");
@@ -9,8 +9,8 @@
 
 int main() {
     printf("Creating the network\n");
-    Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1);
-    Network* network2 = create_network_lenet5(0, 0, 3, 2, 32, 1);
+    Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1, 0);
+    Network* network2 = create_network_lenet5(0, 0, 3, 2, 32, 1, 0);
     printf(GREEN "OK\n" RESET);
 
     printf("Copying the network via copy_network\n");
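
The three test files pass the new argument in three different spellings: the named constant NN_ONLY, the bare literal 2 (presumably the same value, given the struct comment "2 on dense"), and 0 to keep the full-backpropagation behaviour these tests relied on before.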