Creation of the folder 'include'

Julien Chemillier 2022-09-16 14:53:35 +02:00
parent 93e1555136
commit d236055a6d
13 changed files with 48 additions and 49 deletions

View File

@@ -1,8 +1,8 @@
 #include <stdio.h>
 #include <stdlib.h>
-#include "creation.h"
-#include "function.h"
-#include "initialisation.h"
+#include "include/creation.h"
+#include "include/function.h"
+#include "include/initialisation.h"
 
 Network* create_network(int max_size, int dropout, int initialisation, int input_dim, int input_depth) {
     if (dropout < 0 || dropout > 100) {
@@ -86,28 +86,27 @@ void add_average_pooling_flatten(Network* network, int kernel_size, int activati
     network->size++;
 }
 
-void add_convolution(Network* network, int nb_filter, int kernel_size, int activation) {
+void add_convolution(Network* network, int depth_output, int kernel_size, int activation) {
     int n = network->size;
     if (network->max_size == n) {
-        printf("Impossible de rajouter une couche de convolution, le réseau est déjà plein\n");
+        printf("Impossible de rajouter une couche de convolution, le réseau est déjà plein \n");
         return;
     }
-    int r = network->depth[n-1];
-    int c = nb_filter;
+    int depth_input = network->depth[n-1];
     network->kernel[n]->nn = NULL;
     network->kernel[n]->activation = activation;
     network->kernel[n]->cnn = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
     Kernel_cnn* cnn = network->kernel[n]->cnn;
     cnn->k_size = kernel_size;
-    cnn->rows = r;
-    cnn->columns = c;
-    cnn->w = (float****)malloc(sizeof(float***)*r);
-    cnn->d_w = (float****)malloc(sizeof(float***)*r);
-    for (int i=0; i < r; i++) {
-        cnn->w[i] = (float***)malloc(sizeof(float**)*c);
-        cnn->d_w[i] = (float***)malloc(sizeof(float**)*c);
-        for (int j=0; j < c; j++) {
+    cnn->rows = depth_input;
+    cnn->columns = depth_output;
+    cnn->w = (float****)malloc(sizeof(float***)*depth_input);
+    cnn->d_w = (float****)malloc(sizeof(float***)*depth_input);
+    for (int i=0; i < depth_input; i++) {
+        cnn->w[i] = (float***)malloc(sizeof(float**)*depth_output);
+        cnn->d_w[i] = (float***)malloc(sizeof(float**)*depth_output);
+        for (int j=0; j < depth_output; j++) {
             cnn->w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             cnn->d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             for (int k=0; k < kernel_size; k++) {
@@ -116,9 +115,9 @@ void add_convolution(Network* network, int nb_filter, int kernel_size, int activ
             }
         }
     }
-    cnn->bias = (float***)malloc(sizeof(float**)*c);
-    cnn->d_bias = (float***)malloc(sizeof(float**)*c);
-    for (int i=0; i < c; i++) {
+    cnn->bias = (float***)malloc(sizeof(float**)*depth_output);
+    cnn->d_bias = (float***)malloc(sizeof(float**)*depth_output);
+    for (int i=0; i < depth_output; i++) {
         cnn->bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
         cnn->d_bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
         for (int j=0; j < kernel_size; j++) {
@@ -126,13 +125,13 @@ void add_convolution(Network* network, int nb_filter, int kernel_size, int activ
             cnn->d_bias[i][j] = (float*)malloc(sizeof(float)*kernel_size);
         }
     }
-    create_a_cube_input_layer(network, n, c, network->width[n-1] - 2*(kernel_size/2));
+    create_a_cube_input_layer(network, n, depth_output, network->width[n-1] - 2*(kernel_size/2));
     int n_int = network->width[n-1]*network->width[n-1]*network->depth[n-1];
     int n_out = network->width[n]*network->width[n]*network->depth[n];
-    initialisation_3d_matrix(network->initialisation, cnn->bias, c, kernel_size, kernel_size, n_int+n_out);
-    initialisation_3d_matrix(ZERO, cnn->d_bias, c, kernel_size, kernel_size, n_int+n_out);
-    initialisation_4d_matrix(network->initialisation, cnn->w, r, c, kernel_size, kernel_size, n_int+n_out);
-    initialisation_4d_matrix(ZERO, cnn->d_w, r, c, kernel_size, kernel_size, n_int+n_out);
+    initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, kernel_size, kernel_size, n_int+n_out);
+    initialisation_3d_matrix(ZERO, cnn->d_bias, depth_output, kernel_size, kernel_size, n_int+n_out);
+    initialisation_4d_matrix(network->initialisation, cnn->w, depth_input, depth_output, kernel_size, kernel_size, n_int+n_out);
+    initialisation_4d_matrix(ZERO, cnn->d_w, depth_input, depth_output, kernel_size, kernel_size, n_int+n_out);
     network->size++;
 }
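
Note (not part of the commit): the rename makes the convolution layer's dimensions explicit. Weights are indexed w[depth_input][depth_output][row][col], and the spatial output width is width[n-1] - 2*(kernel_size/2). A minimal standalone sketch of that layout, with hypothetical names:

// Minimal standalone sketch, not the project's code; names are hypothetical.
#include <stdio.h>
#include <stdlib.h>

// Mirrors the allocation pattern of add_convolution after the rename:
// weights are indexed w[depth_input][depth_output][row][col].
float**** alloc_conv_weights(int depth_input, int depth_output, int kernel_size) {
    float**** w = (float****)malloc(sizeof(float***)*depth_input);
    for (int i=0; i < depth_input; i++) {
        w[i] = (float***)malloc(sizeof(float**)*depth_output);
        for (int j=0; j < depth_output; j++) {
            w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
            for (int k=0; k < kernel_size; k++) {
                w[i][j][k] = (float*)calloc(kernel_size, sizeof(float));
            }
        }
    }
    return w;
}

int main(void) {
    // Example: 6 input channels, 16 filters, 5x5 kernels (LeNet-5-like sizes).
    float**** w = alloc_conv_weights(6, 16, 5);
    printf("w[5][15][4][4] = %f\n", w[5][15][4][4]);
    // Spatial shrink rule used by the diff: width[n] = width[n-1] - 2*(kernel_size/2).
    printf("output width for a 28x28 input: %d\n", 28 - 2*(5/2));
    // Frees omitted for brevity.
    return 0;
}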

View File

@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <stdio.h>
-#include "free.h"
+#include "include/free.h"
 
 void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
     for (int i=0; i < depth; i++) {

View File

@@ -1,7 +1,7 @@
 #include <stdio.h>
 #include <math.h>
 #include <float.h>
-#include "function.h"
+#include "include/function.h"
 
 float max(float a, float b) {
     return a < b ? b:a;

View File

@@ -16,7 +16,7 @@ Network* create_network_lenet5(int dropout, int activation, int initialisation);
 /*
 * Créé et alloue de la mémoire à une couche de type input cube
 */
-void create_a_cube_input_layer(Network* network, int pos, int depth, int dim);
+void create_a_cube_input_layer(Network* network, int pos, int depth, int dim); // CHECKED
 /*
 * Créé et alloue de la mémoire à une couche de type ligne

View File

@@ -7,12 +7,12 @@
 /*
 * Renvoie si oui ou non (1 ou 0) le neurone va être abandonné
 */
-int will_be_drop(int dropout_prob);
+int will_be_drop(int dropout_prob); //CHECKED
 /*
 * Écrit une image 28*28 au centre d'un tableau 32*32 et met à 0 le reste
 */
-void write_image_in_network_32(int** image, int height, int width, float** input);
+void write_image_in_network_32(int** image, int height, int width, float** input); //CHECKED
 /*
 * Propage en avant le cnn

View File

@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <math.h>
-#include "initialisation.h"
+#include "include/initialisation.h"
 
 void initialisation_1d_matrix(int initialisation, float* matrix, int rows, int n) { //NOT FINISHED

View File

@@ -7,7 +7,7 @@
 #include "creation.c"
 #include "make.c"
-#include "main.h"
+#include "include/main.h"
 
 // Augmente les dimensions de l'image d'entrée
 #define PADDING_INPUT 2
@@ -18,8 +18,8 @@ int will_be_drop(int dropout_prob) {
 
 void write_image_in_network_32(int** image, int height, int width, float** input) {
     for (int i=0; i < height+2*PADDING_INPUT; i++) {
-        for (int j=PADDING_INPUT; j < width+2*PADDING_INPUT; j++) {
-            if (i < PADDING_INPUT || i > height+PADDING_INPUT || j < PADDING_INPUT || j > width+PADDING_INPUT) {
+        for (int j=0; j < width+2*PADDING_INPUT; j++) {
+            if (i < PADDING_INPUT || i >= height+PADDING_INPUT || j < PADDING_INPUT || j >= width+PADDING_INPUT) {
                 input[i][j] = 0.;
             }
             else {
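
Note (not part of the commit): the corrected bounds above are what keep the image centred with a zero border; with height = width = 28 and PADDING_INPUT = 2, exactly 784 cells of the 32x32 input receive pixel data and 240 stay zero. A small standalone check of the corrected condition, with assumed sizes:

// Standalone check of the corrected boundary test; not the project's code.
#include <stdio.h>

#define PADDING_INPUT 2

int main(void) {
    int height = 28, width = 28;
    int border = 0, interior = 0;
    for (int i=0; i < height+2*PADDING_INPUT; i++) {
        for (int j=0; j < width+2*PADDING_INPUT; j++) {
            if (i < PADDING_INPUT || i >= height+PADDING_INPUT || j < PADDING_INPUT || j >= width+PADDING_INPUT) {
                border++;    // zero-filled padding
            } else {
                interior++;  // copied from the 28x28 image
            }
        }
    }
    printf("interior = %d (expected 784), border = %d (expected 240)\n", interior, border);
    return 0;
}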

View File

@@ -1,5 +1,5 @@
 #include <stdio.h>
-#include "make.h"
+#include "include/make.h"
 
 void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim) {
     //NOT FINISHED, MISS CONDITIONS ON THE CONVOLUTION
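
Note (not part of the commit): make_convolution is still flagged as unfinished. Purely as a reference sketch, not the project's intended implementation, a naive "valid" convolution over the weight layout set up in add_convolution (w[input_channel][output_channel][row][col]) could look like the following; every name outside that layout is hypothetical and bias handling is left out.

// Hypothetical naive "valid" convolution; independent of the project's Kernel_cnn struct.
#include <stdio.h>
#include <stdlib.h>

// For each output channel and output position, sum input[i][x+a][y+b] * w[i][c][a][b]
// over input channels i and kernel offsets (a, b).
void naive_convolution(float*** input, int depth_input, int input_dim,
                       float**** w, int depth_output, int k_size,
                       float*** output) {
    int output_dim = input_dim - 2*(k_size/2); // same shrink rule as the diff
    for (int c=0; c < depth_output; c++) {
        for (int x=0; x < output_dim; x++) {
            for (int y=0; y < output_dim; y++) {
                float sum = 0.;
                for (int i=0; i < depth_input; i++) {
                    for (int a=0; a < k_size; a++) {
                        for (int b=0; b < k_size; b++) {
                            sum += w[i][c][a][b]*input[i][x+a][y+b];
                        }
                    }
                }
                output[c][x][y] = sum;
            }
        }
    }
}

// Helper for the demo below: a depth x dim x dim array filled with ones.
float*** alloc_3d_ones(int depth, int dim) {
    float*** m = (float***)malloc(sizeof(float**)*depth);
    for (int i=0; i < depth; i++) {
        m[i] = (float**)malloc(sizeof(float*)*dim);
        for (int j=0; j < dim; j++) {
            m[i][j] = (float*)malloc(sizeof(float)*dim);
            for (int k=0; k < dim; k++) {
                m[i][j][k] = 1.;
            }
        }
    }
    return m;
}

int main(void) {
    // Tiny demo: 1 input channel, 1 filter of ones, 3x3 kernel, 4x4 input of ones.
    int k_size = 3, input_dim = 4, output_dim = input_dim - 2*(k_size/2);
    float*** input = alloc_3d_ones(1, input_dim);
    float*** output = alloc_3d_ones(1, output_dim);
    float**** w = (float****)malloc(sizeof(float***)*1);
    w[0] = alloc_3d_ones(1, k_size); // 1 output channel, k_size x k_size weights
    naive_convolution(input, 1, input_dim, w, 1, k_size, output);
    printf("output[0][0][0] = %f (expected 9)\n", output[0][0][0]);
    // Frees omitted for brevity.
    return 0;
}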