Changes in forward

Julien Chemillier 2022-09-30 15:50:29 +02:00
parent b7eda807fc
commit 66022a948e
7 changed files with 86 additions and 106 deletions

View File

@@ -31,11 +31,11 @@ Network* create_network(int max_size, int dropout, int initialisation, int input
 Network* create_network_lenet5(int dropout, int activation, int initialisation) {
     Network* network = create_network(8, dropout, initialisation, 32, 1);
     network->kernel[0]->activation = activation;
     network->kernel[0]->linearisation = 0;
-    add_convolution(network, 6, 5, activation);
-    add_2d_average_pooling(network, 2);
-    add_convolution(network, 16, 5, activation);
-    add_2d_average_pooling(network, 2);
+    add_convolution(network, 1, 32, 6, 28, activation);
+    add_2d_average_pooling(network, 28, 14);
+    add_convolution(network, 6, 14, 16, 10, activation);
+    add_2d_average_pooling(network, 10, 5);
     add_dense_linearisation(network, 160, 120, activation);
     add_dense(network, 120, 84, activation);
     add_dense(network, 84, 10, SOFTMAX);
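
With the new signatures every call spells out both the input and the output geometry of the layer, so the LeNet-5 shapes can be checked by hand: a valid (unpadded) convolution gives dim_output = dim_input - kernel_size + 1, and a non-overlapping average pooling gives dim_output = dim_input / kernel_size. A minimal standalone sketch of that arithmetic (the helper names are illustrative, not part of the repository):

    #include <stdio.h>

    // Valid convolution: 32 -> 28 implies a 5x5 kernel.
    int conv_output_dim(int dim_input, int kernel_size) {
        return dim_input - kernel_size + 1;
    }

    // Non-overlapping average pooling: 28 -> 14 implies a 2x2 window.
    int pooling_output_dim(int dim_input, int kernel_size) {
        return dim_input / kernel_size;
    }

    int main() {
        printf("%d\n", conv_output_dim(32, 5));    // 28, first convolution
        printf("%d\n", pooling_output_dim(28, 2)); // 14, first pooling
        printf("%d\n", conv_output_dim(14, 5));    // 10, second convolution
        printf("%d\n", pooling_output_dim(10, 2)); // 5, second pooling
        return 0;
    }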
@@ -62,45 +62,38 @@ void create_a_line_input_layer(Network* network, int pos, int dim)
     network->depth[pos] = 1;
 }
 
-void add_2d_average_pooling(Network* network, int kernel_size) {
+void add_2d_average_pooling(Network* network, int dim_input, int dim_output) {
     int n = network->size;
+    int k_pos = n-1;
     if (network->max_size == n) {
         printf("Cannot add an average pooling layer, the network is already full\n");
         return;
     }
-    network->kernel[n]->cnn = NULL;
-    network->kernel[n]->nn = NULL;
-    network->kernel[n]->activation = 100*kernel_size; // Does not contain an activation function
+    int kernel_size = dim_input/dim_output;
+    if (dim_input%dim_output != 0) {
+        printf("Dimension error in the average pooling layer\n");
+        return;
+    }
+    network->kernel[k_pos]->cnn = NULL;
+    network->kernel[k_pos]->nn = NULL;
+    network->kernel[k_pos]->activation = 100*kernel_size; // Does not contain an activation function
     create_a_cube_input_layer(network, n, network->depth[n-1], network->width[n-1]/2);
     network->size++;
 }
 
-void add_average_pooling_flatten(Network* network, int kernel_size) { // NEED TO BE VERIFIED
-    int n = network->size;
-    if (network->max_size == n) {
-        printf("Cannot add an average pooling layer, the network is already full\n");
-        return;
-    }
-    network->kernel[n]->cnn = NULL;
-    network->kernel[n]->nn = NULL;
-    network->kernel[n]->activation = 100*kernel_size; // Does not contain an activation function
-    int dim = (network->width[n-1]*network->width[n-1]*network->depth[n-1])/(kernel_size*kernel_size);
-    create_a_line_input_layer(network, n, dim);
-    network->size++;
-}
-
-void add_convolution(Network* network, int depth_output, int kernel_size, int activation) {
+void add_convolution(Network* network, int depth_input, int dim_input, int depth_output, int dim_output, int activation) {
     int n = network->size;
+    int k_pos = n-1;
     if (network->max_size == n) {
         printf("Cannot add a convolution layer, the network is already full\n");
         return;
     }
-    int bias_size = network->width[n-1] - 2*(kernel_size/2);
-    int depth_input = network->depth[n-1];
-    network->kernel[n]->nn = NULL;
-    network->kernel[n]->activation = activation;
-    network->kernel[n]->cnn = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
-    Kernel_cnn* cnn = network->kernel[n]->cnn;
+    int bias_size = dim_output;
+    int kernel_size = dim_input - dim_output + 1;
+    network->kernel[k_pos]->nn = NULL;
+    network->kernel[k_pos]->activation = activation;
+    network->kernel[k_pos]->cnn = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
+    Kernel_cnn* cnn = network->kernel[k_pos]->cnn;
     cnn->k_size = kernel_size;
     cnn->rows = depth_input;
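
The new k_pos = n-1 index encodes the convention that a layer's parameters live one slot behind its output: the output tensor is created at input[n] while the parameters go to kernel[n-1], so kernel[i] always describes the transition from input[i] to input[i+1], which is exactly how the reworked forward_propagation reads it below. A minimal sketch of that dispatch, using stand-in types rather than the repository's headers:

    #include <stddef.h>

    // Stand-ins for the repository's structs, for illustration only.
    typedef struct Kernel_cnn Kernel_cnn;
    typedef struct Kernel_nn Kernel_nn;
    typedef struct {
        Kernel_cnn* cnn;  // non-NULL for a convolution layer
        Kernel_nn* nn;    // non-NULL for a dense layer
        int activation;   // for pooling, 100 * window size
    } Kernel;

    typedef enum { CONVOLUTION, FULLY_CONNECTED, POOLING } LayerType;

    // kernel[i] describes the transition input[i] -> input[i+1].
    LayerType layer_type(const Kernel* k) {
        if (k->cnn != NULL) return CONVOLUTION;
        if (k->nn != NULL) return FULLY_CONNECTED;
        return POOLING;  // window size recovered as k->activation / 100
    }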
@@ -132,23 +125,26 @@ void add_convolution(Network* network, int depth_output, int kernel_size, int activation)
     create_a_cube_input_layer(network, n, depth_output, bias_size);
     int n_int = network->width[n-1]*network->width[n-1]*network->depth[n-1];
     int n_out = network->width[n]*network->width[n]*network->depth[n];
+    /* Not currently used
     initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, kernel_size, kernel_size, n_int+n_out);
     initialisation_3d_matrix(ZERO, cnn->d_bias, depth_output, kernel_size, kernel_size, n_int+n_out);
     initialisation_4d_matrix(network->initialisation, cnn->w, depth_input, depth_output, kernel_size, kernel_size, n_int+n_out);
     initialisation_4d_matrix(ZERO, cnn->d_w, depth_input, depth_output, kernel_size, kernel_size, n_int+n_out);
+    */
     network->size++;
 }
 
 void add_dense(Network* network, int input_units, int output_units, int activation) {
     int n = network->size;
+    int k_pos = n-1;
     if (network->max_size == n) {
         printf("Cannot add a dense layer, the network is already full\n");
         return;
     }
-    network->kernel[n]->cnn = NULL;
-    network->kernel[n]->nn = (Kernel_nn*)malloc(sizeof(Kernel_nn));
-    Kernel_nn* nn = network->kernel[n]->nn;
-    network->kernel[n]->activation = activation;
+    network->kernel[k_pos]->cnn = NULL;
+    network->kernel[k_pos]->nn = (Kernel_nn*)malloc(sizeof(Kernel_nn));
+    Kernel_nn* nn = network->kernel[k_pos]->nn;
+    network->kernel[k_pos]->activation = activation;
     nn->input_units = input_units;
     nn->output_units = output_units;
     nn->bias = (float*)malloc(sizeof(float)*output_units);
@@ -159,11 +155,12 @@ void add_dense(Network* network, int input_units, int output_units, int activation)
         nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
+    /* Not currently used
     initialisation_1d_matrix(network->initialisation, nn->bias, output_units, output_units+input_units);
     initialisation_1d_matrix(ZERO, nn->d_bias, output_units, output_units+input_units);
     initialisation_2d_matrix(network->initialisation, nn->weights, input_units, output_units, output_units+input_units);
     initialisation_2d_matrix(ZERO, nn->d_weights, input_units, output_units, output_units+input_units);
-    create_a_line_input_layer(network, n, output_units);
+    create_a_line_input_layer(network, n, output_units); */
     network->size++;
 }
@@ -171,14 +168,15 @@ void add_dense_linearisation(Network* network, int input_units, int output_units, int activation)
     // input_units could instead be derived from the previous layer's dimensions
     int n = network->size;
+    int k_pos = n-1;
     if (network->max_size == n) {
         printf("Cannot add a dense layer, the network is already full\n");
         return;
     }
-    network->kernel[n]->cnn = NULL;
-    network->kernel[n]->nn = (Kernel_nn*)malloc(sizeof(Kernel_nn));
-    Kernel_nn* nn = network->kernel[n]->nn;
-    network->kernel[n]->activation = activation;
+    network->kernel[k_pos]->cnn = NULL;
+    network->kernel[k_pos]->nn = (Kernel_nn*)malloc(sizeof(Kernel_nn));
+    Kernel_nn* nn = network->kernel[k_pos]->nn;
+    network->kernel[k_pos]->activation = activation;
     nn->input_units = input_units;
     nn->output_units = output_units;
@@ -190,10 +188,11 @@ void add_dense_linearisation(Network* network, int input_units, int output_units, int activation)
         nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
+    /* Not currently used
     initialisation_1d_matrix(network->initialisation, nn->bias, output_units, output_units+input_units);
     initialisation_1d_matrix(ZERO, nn->d_bias, output_units, output_units+input_units);
     initialisation_2d_matrix(network->initialisation, nn->weights, input_units, output_units, output_units+input_units);
-    initialisation_2d_matrix(ZERO, nn->d_weights, input_units, output_units, output_units+input_units);
+    initialisation_2d_matrix(ZERO, nn->d_weights, input_units, output_units, output_units+input_units); */
     create_a_line_input_layer(network, n, output_units);
     network->size++;
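
Both dense constructors, like the convolution above, now fence their initialisation_* calls inside /* Not currently used */, so after this commit the freshly malloc'd weights and biases stay uninitialised until those calls are restored. For orientation only, a Xavier-style uniform fill is one conventional thing such an initialiser could do with the n_int+n_out argument; this sketch is an assumption about intent, not the repository's implementation:

    #include <stdlib.h>
    #include <math.h>

    // Hypothetical stand-in for a 1D initialiser: fills `table` with
    // uniform values in [-1/sqrt(n), 1/sqrt(n)], where n is the
    // fan-in plus fan-out passed by the caller.
    void xavier_fill_1d(float* table, int size, int n) {
        float bound = 1.0f / sqrtf((float)n);
        for (int i = 0; i < size; i++) {
            float u = (float)rand() / (float)RAND_MAX;  // in [0, 1]
            table[i] = (2.0f * u - 1.0f) * bound;       // in [-bound, bound]
        }
    }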

View File

@@ -72,20 +72,38 @@ void apply_function_input(float (*f)(float), float*** input, int depth, int rows, int columns)
     }
 }
 
-void choose_apply_function_input(int activation, float*** input, int depth, int rows, int columns) {
+void choose_apply_function_matrix(int activation, float*** input, int depth, int dim) {
     if (activation == RELU) {
-        apply_function_input(relu, input, depth, rows, columns);
+        apply_function_input(relu, input, depth, dim, dim);
     }
     else if (activation == SIGMOID) {
-        apply_function_input(sigmoid, input, depth, rows, columns);
+        apply_function_input(sigmoid, input, depth, dim, dim);
     }
     else if (activation == SOFTMAX) {
-        apply_softmax_input(input, depth, rows, columns);
+        apply_softmax_input(input, depth, dim, dim);
     }
     else if (activation == TANH) {
-        apply_function_input(tanh_, input, depth, rows, columns);
+        apply_function_input(tanh_, input, depth, dim, dim);
     }
     else {
-        printf("Error, unknown activation function: %d\n", activation);
+        printf("Error, unknown activation function (choose_apply_function_matrix): %d\n", activation);
+    }
+}
+
+void choose_apply_function_vector(int activation, float*** input, int dim) {
+    if (activation == RELU) {
+        apply_function_input(relu, input, 1, 1, dim);
+    }
+    else if (activation == SIGMOID) {
+        apply_function_input(sigmoid, input, 1, 1, dim);
+    }
+    else if (activation == SOFTMAX) {
+        apply_softmax_input(input, 1, 1, dim);
+    }
+    else if (activation == TANH) {
+        apply_function_input(tanh_, input, 1, 1, dim);
+    }
+    else {
+        printf("Error, unknown activation function (choose_apply_function_vector): %d\n", activation);
     }
 }
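
The single choose_apply_function_input entry point is now split by shape: the matrix variant applies the activation to a depth x dim x dim block, while the vector variant reuses the same float*** machinery by treating the data as a 1 x 1 x dim tensor, which matches how dense outputs are stored in network->input. A usage sketch (conv_output, dense_output, and the dimension variables are placeholder names):

    // After a convolution: activate a depth x dim x dim block.
    choose_apply_function_matrix(RELU, conv_output, output_depth, output_width);

    // After a dense layer: the vector lives at input[i+1][0][0],
    // so the wrapper forwards depth = rows = 1 to apply_function_input.
    choose_apply_function_vector(RELU, dense_output, output_units);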

View File

@@ -27,17 +27,12 @@ void create_a_line_input_layer(Network* network, int pos, int dim);
 /*
  * Adds a valid average pooling layer of dimension dim*dim to the network
  */
-void add_2d_average_pooling(Network* network, int kernel_size);
-
-/*
- * Adds a valid average pooling layer of dimension dim*dim that flattens its output
- */
-void add_average_pooling_flatten(Network* network, int kernel_size);
+void add_2d_average_pooling(Network* network, int dim_input, int dim_output);
 
 /*
  * Adds a dim*dim convolution layer to the network and initialises the kernels
  */
-void add_convolution(Network* network, int nb_filter, int kernel_size, int activation);
+void add_convolution(Network* network, int depth_input, int dim_input, int depth_output, int dim_output, int activation);
 
 /*
  * Adds a dense layer to the network and initialises the weights and biases

View File

@@ -35,9 +35,14 @@ void apply_softmax_input(float ***input, int depth, int rows, int columns);
 void apply_function_input(float (*f)(float), float*** input, int depth, int rows, int columns);
 
 /*
- * Redirects to the function to apply on ????
+ * Redirects to the function to apply on a matrix
  */
-void choose_apply_function_input(int activation, float*** input, int depth, int rows, int columns);
+void choose_apply_function_matrix(int activation, float*** input, int depth, int dim);
+
+/*
+ * Redirects to the function to apply on a vector
+ */
+void choose_apply_function_vector(int activation, float*** input, int dim);
 
 #endif

View File

@@ -13,11 +13,6 @@ void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim);
  */
 void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim);
 
-/*
- * Performs an average pooling with stride=size and flattening
- */
-void make_average_pooling_flattened(float*** input, float* output, int size, int input_depth, int input_dim);
-
 /*
  * Performs a full connection
  */

View File

@@ -36,44 +36,40 @@ void forward_propagation(Network* network)
     int n = network->size;
     float*** input;
     float*** output;
-    Kernel* k_i_1;
     Kernel* k_i;
     for (int i=0; i < n-1; i++) {
-        k_i_1 = network->kernel[i+1];
         k_i = network->kernel[i];
-        printf("\n i -> %d :: %d %d \n", i, k_i->cnn==NULL, k_i->nn==NULL);
         input_width = network->width[i];
         input_depth = network->depth[i];
         output_width = network->width[i+1];
         output_depth = network->depth[i+1];
-        activation = network->kernel[i]->activation;
+        activation = k_i->activation;
         input = network->input[i];
         output = network->input[i+1];
-        if (k_i_1->nn==NULL && k_i_1->cnn!=NULL) { //CNN
-            printf("Convolution of cnn: %dx%d -> %dx%d\n", input_depth, input_width, output_depth, output_width);
-            make_convolution(input, k_i_1->cnn, output, output_width);
-            choose_apply_function_input(activation, output, output_depth, output_width, output_width);
+        if (k_i->cnn!=NULL) { //CNN
+            printf("Convolution of cnn: %dx%dx%d -> %dx%dx%d\n", input_depth, input_width, input_width, output_depth, output_width, output_width);
+            make_convolution(input, k_i->cnn, output, output_width);
+            choose_apply_function_matrix(activation, output, output_depth, output_width);
         }
-        else if (k_i_1->nn!=NULL && k_i_1->cnn==NULL) { //NN
-            printf("Densification of nn\n");
+        else if (k_i->nn!=NULL) { //NN
+            printf("Densification of nn: %dx%dx%d -> %dx%dx%d\n", input_depth, input_width, input_width, output_depth, output_width, output_width);
             // Check whether this is an nn that linearises
             make_fully_connected(network->input[i][0][0], network->kernel[i]->nn, network->input[i+1][0][0], input_width, output_width);
-            choose_apply_function_input(activation, output, 1, 1, output_width);
+            choose_apply_function_vector(activation, output, output_width);
         }
-        else { //Pooling (check inside) ??
+        else { //Pooling
             if (n-2==i) {
                 printf("The network cannot end with a pooling layer\n");
                 return;
             }
             if (1==1) { // Pooling on a matrix
-                printf("Average pooling\n");
+                printf("Average pooling: %dx%dx%d -> %dx%dx%d\n", input_depth, input_width, input_width, output_depth, output_width, output_width);
                 make_average_pooling(input, output, activation/100, output_depth, output_width);
             }
-            else if (1==0) { // Pooling on a vector
-                printf("Error: Not implemented: forward: %d\n", i);
-            }
-            else {
-                printf("Error: forward_propagation: %d -> %d %d\n", i, k_i_1->nn==NULL, k_i_1->cnn);
+            else { // Pooling on a vector
+                printf("Error: pooling can only be done on a matrix\n");
                 return;
             }
         }
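
In the pooling branch the window size travels through the layer as activation/100, the encoding set up by add_2d_average_pooling. For reference, a standalone sketch of an average pooling with stride equal to the window size, consistent with the call make_average_pooling(input, output, activation/100, output_depth, output_width); dividing by size*size is my reading of the intended semantics, not code from the repository:

    // Average pooling with window `size` and stride `size` (illustrative).
    void average_pooling_sketch(float*** input, float*** output,
                                int size, int output_depth, int output_dim) {
        for (int d = 0; d < output_depth; d++) {
            for (int j = 0; j < output_dim; j++) {
                for (int k = 0; k < output_dim; k++) {
                    float sum = 0.f;
                    for (int a = 0; a < size; a++) {
                        for (int b = 0; b < size; b++) {
                            sum += input[d][size*j + a][size*k + b];
                        }
                    }
                    output[d][j][k] = sum / (float)(size * size);
                }
            }
        }
    }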
@@ -89,7 +85,7 @@ void backward_propagation(Network* network, float wanted_number) { // TODO
     if (i==n) {
         if (network->kernel[i]->activation == SOFTMAX) {
             int l2 = network->width[i]; // Size of the last layer
-            int l1 = network->width[i-1];
+            //int l1 = network->width[i-1];
             for (int j=0; j < l2; j++) {
             }

View File

@@ -4,12 +4,9 @@
 #include "include/make.h"
 
 void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim) {
-    // TODO, MISS CONDITIONS ON THE CONVOLUTION
     printf_warning("Call to make_convolution, incomplete\n");
     float f;
     int n = kernel->k_size;
-    printf("Convolution output: %dx%dx%d, %dx%dx%d\n", kernel->columns, output_dim, output_dim, kernel->rows, n, n);
-    printf("BIS %d %d \n", kernel->columns, kernel->k_size);
     for (int i=0; i < kernel->columns; i++) {
         for (int j=0; j < output_dim; j++) {
             for (int k=0; k < output_dim; k++) {
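
make_convolution still announces itself as incomplete via printf_warning. For orientation, this is the usual valid-convolution recurrence such a loop nest computes, written against plain arrays; the w[ic][oc][a][b] layout mirrors the initialisation_4d_matrix(..., cnn->w, depth_input, depth_output, kernel_size, kernel_size, ...) call above, but the scalar per-channel bias is a simplifying assumption:

    // Valid convolution (illustrative): in[ic][y][x] -> out[oc][j][k].
    void convolution_sketch(float*** in, float**** w, float* bias, float*** out,
                            int input_depth, int output_depth,
                            int output_dim, int k_size) {
        for (int oc = 0; oc < output_depth; oc++) {
            for (int j = 0; j < output_dim; j++) {
                for (int k = 0; k < output_dim; k++) {
                    float f = bias[oc];
                    for (int ic = 0; ic < input_depth; ic++) {
                        for (int a = 0; a < k_size; a++) {
                            for (int b = 0; b < k_size; b++) {
                                f += in[ic][j + a][k + b] * w[ic][oc][a][b];
                            }
                        }
                    }
                    out[oc][j][k] = f;
                }
            }
        }
    }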
@@ -47,31 +44,6 @@ void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim)
     }
 }
 
-void make_average_pooling_flattened(float*** input, float* output, int size, int input_depth, int input_dim) {
-    if ((input_depth*input_dim*input_dim) % (size*size) != 0) {
-        printf_error("Two layers incompatible with a flattened average pooling");
-        return;
-    }
-    float average;
-    int n = size*size;
-    int cpt = 0;
-    int output_dim = input_dim - 2*(size/2);
-    for (int i=0; i < input_depth; i++) {
-        for (int j=0; j < output_dim; j++) {
-            for (int k=0; k < output_dim; k++) {
-                average = 0.;
-                for (int a=0; a < size; a++) {
-                    for (int b=0; b < size; b++) {
-                        average += input[i][2*j +a][2*k +b];
-                    }
-                }
-                output[cpt] = average;
-                cpt++;
-            }
-        }
-    }
-}
-
 void make_fully_connected(float* input, Kernel_nn* kernel, float* output, int size_input, int size_output) {
     float f;
     for (int i=0; i < size_output; i++) {
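
The hunk cuts off inside make_fully_connected; for completeness, the loop it opens conventionally computes output[i] = bias[i] + sum_j input[j] * weights[j][i], with the weights[input][output] layout allocated in add_dense. A sketch of that body (my reconstruction, not the file's exact code):

    // Fully connected forward pass (illustrative).
    void fully_connected_sketch(const float* input, const float* bias,
                                float** weights, float* output,
                                int size_input, int size_output) {
        for (int i = 0; i < size_output; i++) {
            float f = bias[i];
            for (int j = 0; j < size_input; j++) {
                f += input[j] * weights[j][i];
            }
            output[i] = f;
        }
    }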