Mirror of https://github.com/augustin64/projet-tipe (synced 2025-01-24 07:36:24 +01:00)

Commit: Creation of the folder 'include'
commit d236055a6d
parent 93e1555136
@@ -1,36 +1,36 @@
 #include <stdio.h>
 #include <stdlib.h>
-#include "creation.h"
-#include "function.h"
-#include "initialisation.h"
+#include "include/creation.h"
+#include "include/function.h"
+#include "include/initialisation.h"
 
 Network* create_network(int max_size, int dropout, int initialisation, int input_dim, int input_depth) {
     if (dropout < 0 || dropout > 100) {
         printf("Erreur, la probabilité de dropout n'est pas respecté, elle doit être comprise entre 0 et 100\n");
     }
     Network* network = (Network*)malloc(sizeof(Network));
     network->max_size = max_size;
     network->dropout = dropout;
     network->initialisation = initialisation;
     network->size = 1;
     network->input = (float****)malloc(sizeof(float***)*max_size);
     network->kernel = (Kernel**)malloc(sizeof(Kernel*)*(max_size-1));
     network->width = (int*)malloc(sizeof(int*)*max_size);
     network->depth = (int*)malloc(sizeof(int*)*max_size);
     for (int i=0; i < max_size; i++) {
         network->kernel[i] = (Kernel*)malloc(sizeof(Kernel));
     }
     network->width[0] = input_dim;
     network->depth[0] = input_depth;
     network->kernel[0]->nn = NULL;
     network->kernel[0]->cnn = NULL;
     create_a_cube_input_layer(network, 0, input_depth, input_dim);
     return network;
 }
 
 Network* create_network_lenet5(int dropout, int activation, int initialisation) {
     Network* network = create_network(8, dropout, initialisation, 32, 1);
     network->kernel[0]->activation = activation;
     add_convolution(network, 6, 5, activation);
     add_average_pooling(network, 2, activation);
     add_convolution(network, 16, 5, activation);
@@ -86,28 +86,27 @@ void add_average_pooling_flatten(Network* network, int kernel_size, int activation) {
     network->size++;
 }
 
-void add_convolution(Network* network, int nb_filter, int kernel_size, int activation) {
+void add_convolution(Network* network, int depth_output, int kernel_size, int activation) {
     int n = network->size;
     if (network->max_size == n) {
-        printf("Impossible de rajouter une couche de convolution, le réseau est déjà plein\n");
+        printf("Impossible de rajouter une couche de convolution, le réseau est déjà plein \n");
         return;
     }
-    int r = network->depth[n-1];
-    int c = nb_filter;
+    int depth_input = network->depth[n-1];
     network->kernel[n]->nn = NULL;
     network->kernel[n]->activation = activation;
     network->kernel[n]->cnn = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
     Kernel_cnn* cnn = network->kernel[n]->cnn;
 
     cnn->k_size = kernel_size;
-    cnn->rows = r;
-    cnn->columns = c;
-    cnn->w = (float****)malloc(sizeof(float***)*r);
-    cnn->d_w = (float****)malloc(sizeof(float***)*r);
-    for (int i=0; i < r; i++) {
-        cnn->w[i] = (float***)malloc(sizeof(float**)*c);
-        cnn->d_w[i] = (float***)malloc(sizeof(float**)*c);
-        for (int j=0; j < c; j++) {
+    cnn->rows = depth_input;
+    cnn->columns = depth_output;
+    cnn->w = (float****)malloc(sizeof(float***)*depth_input);
+    cnn->d_w = (float****)malloc(sizeof(float***)*depth_input);
+    for (int i=0; i < depth_input; i++) {
+        cnn->w[i] = (float***)malloc(sizeof(float**)*depth_output);
+        cnn->d_w[i] = (float***)malloc(sizeof(float**)*depth_output);
+        for (int j=0; j < depth_output; j++) {
             cnn->w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             cnn->d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             for (int k=0; k < kernel_size; k++) {
@@ -116,9 +115,9 @@ void add_convolution(Network* network, int nb_filter, int kernel_size, int activation) {
             }
         }
     }
-    cnn->bias = (float***)malloc(sizeof(float**)*c);
-    cnn->d_bias = (float***)malloc(sizeof(float**)*c);
-    for (int i=0; i < c; i++) {
+    cnn->bias = (float***)malloc(sizeof(float**)*depth_output);
+    cnn->d_bias = (float***)malloc(sizeof(float**)*depth_output);
+    for (int i=0; i < depth_output; i++) {
         cnn->bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
         cnn->d_bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
         for (int j=0; j < kernel_size; j++) {
@@ -126,13 +125,13 @@ void add_convolution(Network* network, int nb_filter, int kernel_size, int activation) {
             cnn->d_bias[i][j] = (float*)malloc(sizeof(float)*kernel_size);
         }
     }
-    create_a_cube_input_layer(network, n, c, network->width[n-1] - 2*(kernel_size/2));
+    create_a_cube_input_layer(network, n, depth_output, network->width[n-1] - 2*(kernel_size/2));
     int n_int = network->width[n-1]*network->width[n-1]*network->depth[n-1];
     int n_out = network->width[n]*network->width[n]*network->depth[n];
-    initialisation_3d_matrix(network->initialisation, cnn->bias, c, kernel_size, kernel_size, n_int+n_out);
-    initialisation_3d_matrix(ZERO, cnn->d_bias, c, kernel_size, kernel_size, n_int+n_out);
-    initialisation_4d_matrix(network->initialisation, cnn->w, r, c, kernel_size, kernel_size, n_int+n_out);
-    initialisation_4d_matrix(ZERO, cnn->d_w, r, c, kernel_size, kernel_size, n_int+n_out);
+    initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, kernel_size, kernel_size, n_int+n_out);
+    initialisation_3d_matrix(ZERO, cnn->d_bias, depth_output, kernel_size, kernel_size, n_int+n_out);
+    initialisation_4d_matrix(network->initialisation, cnn->w, depth_input, depth_output, kernel_size, kernel_size, n_int+n_out);
+    initialisation_4d_matrix(ZERO, cnn->d_w, depth_input, depth_output, kernel_size, kernel_size, n_int+n_out);
     network->size++;
 }
 
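Note (illustration only, not part of the diff): the renaming above makes add_convolution's layout explicit, with weights stored as w[depth_input][depth_output][kernel_size][kernel_size] and the output width computed as width[n-1] - 2*(kernel_size/2). A minimal standalone sketch of the sizes this implies for the first LeNet-5 convolution, using the numbers from add_convolution(network, 6, 5, activation) applied to the 32x32x1 input layer created above:

```c
#include <stdio.h>

/* Illustration of the sizes implied by add_convolution(network, 6, 5, activation)
 * on the 32x32x1 LeNet-5 input layer, using the same layout and width formula
 * as the code in the diff above. */
int main(void) {
    int depth_input = 1;    // depth of the input layer
    int depth_output = 6;   // number of filters requested
    int kernel_size = 5;
    int input_dim = 32;     // width of the input layer

    // w[depth_input][depth_output][kernel_size][kernel_size] holds 1*6*5*5 = 150 weights
    printf("weights: %d\n", depth_input * depth_output * kernel_size * kernel_size);
    // width[n] = width[n-1] - 2*(kernel_size/2) = 32 - 4 = 28
    printf("output width: %d\n", input_dim - 2*(kernel_size/2));
    return 0;
}
```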
@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <stdio.h>
-#include "free.h"
+#include "include/free.h"
 
 void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
     for (int i=0; i < depth; i++) {
@@ -1,7 +1,7 @@
 #include <stdio.h>
 #include <math.h>
 #include <float.h>
-#include "function.h"
+#include "include/function.h"
 
 float max(float a, float b) {
     return a < b ? b:a;
@@ -16,7 +16,7 @@ Network* create_network_lenet5(int dropout, int activation, int initialisation);
 /*
 * Créé et alloue de la mémoire à une couche de type input cube
 */
-void create_a_cube_input_layer(Network* network, int pos, int depth, int dim);
+void create_a_cube_input_layer(Network* network, int pos, int depth, int dim); // CHECKED
 
 /*
 * Créé et alloue de la mémoire à une couche de type ligne
@@ -7,12 +7,12 @@
 /*
 * Renvoie si oui ou non (1 ou 0) le neurone va être abandonné
 */
-int will_be_drop(int dropout_prob);
+int will_be_drop(int dropout_prob); //CHECKED
 
 /*
 * Écrit une image 28*28 au centre d'un tableau 32*32 et met à 0 le reste
 */
-void write_image_in_network_32(int** image, int height, int width, float** input);
+void write_image_in_network_32(int** image, int height, int width, float** input); //CHECKED
 
 /*
 * Propage en avant le cnn
@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <math.h>
-#include "initialisation.h"
+#include "include/initialisation.h"
 
 
 void initialisation_1d_matrix(int initialisation, float* matrix, int rows, int n) { //NOT FINISHED
@@ -7,7 +7,7 @@
 #include "creation.c"
 #include "make.c"
 
-#include "main.h"
+#include "include/main.h"
 
 // Augmente les dimensions de l'image d'entrée
 #define PADDING_INPUT 2
@@ -18,8 +18,8 @@ int will_be_drop(int dropout_prob) {
 
 void write_image_in_network_32(int** image, int height, int width, float** input) {
     for (int i=0; i < height+2*PADDING_INPUT; i++) {
-        for (int j=PADDING_INPUT; j < width+2*PADDING_INPUT; j++) {
-            if (i < PADDING_INPUT || i > height+PADDING_INPUT || j < PADDING_INPUT || j > width+PADDING_INPUT) {
+        for (int j=0; j < width+2*PADDING_INPUT; j++) {
+            if (i < PADDING_INPUT || i >= height+PADDING_INPUT || j < PADDING_INPUT || j >= width+PADDING_INPUT) {
                 input[i][j] = 0.;
             }
             else {
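Note (illustration only, not part of the diff): with the corrected loop start and the switch from > to >=, the zero border written by write_image_in_network_32 is exactly PADDING_INPUT cells wide on every side, and the interior is left for the image. A minimal standalone sketch of the same boundary test, assuming PADDING_INPUT is 2 as defined above:

```c
#include <stdio.h>

#define PADDING_INPUT 2  // same value as in the file above

/* A cell (i, j) of the (height+2*PADDING_INPUT) x (width+2*PADDING_INPUT) buffer
 * belongs to the zero border exactly when it lies outside the image region
 * [PADDING_INPUT, PADDING_INPUT+height) x [PADDING_INPUT, PADDING_INPUT+width),
 * which is the corrected condition in the hunk above. */
int is_border(int i, int j, int height, int width) {
    return i < PADDING_INPUT || i >= height + PADDING_INPUT
        || j < PADDING_INPUT || j >= width + PADDING_INPUT;
}

int main(void) {
    // For a 28x28 image in a 32x32 buffer, rows/columns 0, 1, 30, 31 are border.
    printf("%d %d %d %d\n",
           is_border(0, 5, 28, 28),    // 1: top border row
           is_border(2, 2, 28, 28),    // 0: first image pixel
           is_border(29, 29, 28, 28),  // 0: last image pixel
           is_border(30, 5, 28, 28));  // 1: bottom border row
    return 0;
}
```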
@@ -1,5 +1,5 @@
 #include <stdio.h>
-#include "make.h"
+#include "include/make.h"
 
 void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim) {
     //NOT FINISHED, MISS CONDITIONS ON THE CONVOLUTION
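Note (hypothetical usage, not part of the repository): after this commit, sources include the headers through the new include/ folder. A minimal sketch of constructing the LeNet-5 network with the functions changed above; the integer activation and initialisation codes are placeholders, since their constants are not shown in this diff:

```c
// Hypothetical usage sketch: header path follows this commit's new layout;
// the activation and initialisation codes are placeholder values.
#include <stdio.h>
#include "include/creation.h"

int main(void) {
    int dropout = 0;         // percentage, must stay within [0, 100]
    int activation = 1;      // placeholder activation code
    int initialisation = 1;  // placeholder initialisation code
    Network* network = create_network_lenet5(dropout, activation, initialisation);
    printf("layers: %d, input: %dx%dx%d\n",
           network->size, network->width[0], network->width[0], network->depth[0]);
    return 0;
}
```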