mirror of https://github.com/augustin64/projet-tipe (synced 2025-01-24 07:36:24 +01:00)

Commit e280d3e9da (parent 19efa5f7d6): Update mnist_cnn: improve code readability
[mnist_cnn main source file]

@@ -2,21 +2,24 @@
 #include <stdio.h>
 #include <math.h>
 #include <float.h>
-#include "function.h"
-#include "make.h"
+#include "initialisation.c"
+#include "function.c"
+#include "creation.c"
+#include "make.c"
+
 #include "cnn.h"

 // Augmente les dimensions de l'image d'entrée
-#define PADING_INPUT 2
+#define PADDING_INPUT 2

 int will_be_drop(int dropout_prob) {
-    return (rand() % 100)<dropout_prob;
+    return (rand() % 100) < dropout_prob;
 }

-void write_image_in_newtork_32(int** image, int height, int width, float** input) {
-    for (int i=0; i < height+2*PADING_INPUT; i++) {
-        for (int j=PADING_INPUT; j < width+2*PADING_INPUT; j++) {
-            if (i<PADING_INPUT || i>height+PADING_INPUT || j<PADING_INPUT || j>width+PADING_INPUT) {
+void write_image_in_network_32(int** image, int height, int width, float** input) {
+    for (int i=0; i < height+2*PADDING_INPUT; i++) {
+        for (int j=PADDING_INPUT; j < width+2*PADDING_INPUT; j++) {
+            if (i < PADDING_INPUT || i > height+PADDING_INPUT || j < PADDING_INPUT || j > width+PADDING_INPUT) {
                 input[i][j] = 0.;
             }
             else {
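The function above centres a 28×28 MNIST digit in the 32×32 input plane that LeNet-5 expects, zeroing a PADDING_INPUT-wide border. A minimal standalone sketch of the same boundary test, with fixed sizes for clarity (pad_image_32 and the plain copy in the else branch are illustrative; the repository's own else branch is elided from this hunk):

#define PAD 2  /* plays the role of PADDING_INPUT */

/* Hypothetical helper: centre a 28x28 integer image in a 32x32 float plane. */
void pad_image_32(int image[28][28], float input[32][32]) {
    for (int i = 0; i < 28 + 2*PAD; i++) {
        for (int j = 0; j < 28 + 2*PAD; j++) {
            if (i < PAD || i >= 28 + PAD || j < PAD || j >= 28 + PAD)
                input[i][j] = 0.;                         /* zero the border */
            else
                input[i][j] = (float)image[i-PAD][j-PAD]; /* copy the interior */
        }
    }
}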
@@ -27,16 +30,21 @@ void write_image_in_newtork_32(int** image, int height, int width, float** input
 }

 void forward_propagation(Network* network) {
+    int output_dim, output_depth;
+    float*** output;
     for (int i=0; i < network->size-1; i++) {
-        if (network->kernel[i].nn==NULL && network->kernel[i].cnn!=NULL) {
-            make_convolution(network->input[i], network->kernel[i].cnn, network->input[i+1], network->dim[i+1][0]);
-            choose_apply_function_input(network->kernel[i].activation, network->input[i+1], network->dim[i+1][1], network->dim[i+1][0], network->dim[i+1][0]);
+        if (network->kernel[i].nn==NULL && network->kernel[i].cnn!=NULL) { //CNN
+            output = network->input[i+1];
+            output_dim = network->dim[i+1][0];
+            output_depth = network->dim[i+1][1];
+            make_convolution(network->input[i], network->kernel[i].cnn, output, output_dim);
+            choose_apply_function_input(network->kernel[i].activation, output, output_depth, output_dim, output_dim);
         }
-        else if (network->kernel[i].nn!=NULL && network->kernel[i].cnn==NULL) {
+        else if (network->kernel[i].nn!=NULL && network->kernel[i].cnn==NULL) { //NN
             make_fully_connected(network->input[i][0][0], network->kernel[i].nn, network->input[i+1][0][0], network->dim[i][0], network->dim[i+1][0]);
             choose_apply_function_input(network->kernel[i].activation, network->input[i+1], 1, 1, network->dim[i+1][0]);
         }
-        else {
+        else { //Pooling
             if (network->size-2==i) {
                 printf("Le réseau ne peut pas finir par une pooling layer");
                 return;
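The new //CNN, //NN and //Pooling comments make the dispatch convention explicit: a Kernel carries only .cnn for a convolution layer, only .nn for a fully-connected layer, and neither for pooling. The same convention restated as a sketch (the enum and helper are illustrative, not repository code):

typedef enum { LAYER_CNN, LAYER_NN, LAYER_POOLING } LayerType;

/* Classify a layer by which kernel pointer is set. */
LayerType layer_type(Kernel* k) {
    if (k->nn == NULL && k->cnn != NULL) return LAYER_CNN; /* convolution */
    if (k->nn != NULL && k->cnn == NULL) return LAYER_NN;  /* fully connected */
    return LAYER_POOLING;                                  /* neither pointer set */
}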
@@ -61,13 +69,12 @@ void backward_propagation(Network* network, float wanted_number) {
     float* wanted_output = generate_wanted_output(wanted_number);
     int n = network->size-1;
     float loss = compute_cross_entropy_loss(network->input[n][0][0], wanted_output, network->dim[n][0]);
-    int i, j;
-    for (i=n; i>=0; i--) {
+    for (int i=n; i >= 0; i--) {
         if (i==n) {
             if (network->kernel[i].activation == SOFTMAX) {
                 int l2 = network->dim[i][0]; // Taille de la dernière couche
                 int l1 = network->dim[i-1][0];
-                for (j=0; j<l2; j++) {
+                for (int j=0; j < l2; j++) {

                 }
             }
@@ -93,7 +100,7 @@ void backward_propagation(Network* network, float wanted_number) {

 float compute_cross_entropy_loss(float* output, float* wanted_output, int len) {
     float loss=0.;
-    for (int i=0; i<len ; i++) {
+    for (int i=0; i < len ; i++) {
         if (wanted_output[i]==1) {
             if (output[i]==0.) {
                 loss -= log(FLT_EPSILON);
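With a one-hot wanted_output, this loss reduces to the categorical cross-entropy -log(p_c) for the correct class c, clamped to FLT_EPSILON so that log(0) is never taken; e.g. p_c = 0.25 gives -log(0.25) ≈ 1.386. An equivalent condensed form (sketch only; assumes output values lie in [0, 1] and the <math.h>/<float.h> headers included above):

/* Hypothetical helper, not repository code. */
float one_hot_cross_entropy(const float* output, int correct_class) {
    float p = output[correct_class];
    return -logf(p == 0.f ? FLT_EPSILON : p);  /* clamp avoids log(0) */
}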
@@ -107,8 +114,8 @@ float compute_cross_entropy_loss(float* output, float* wanted_output, int len) {
 }

 float* generate_wanted_output(float wanted_number) {
-    float* wanted_output = malloc(sizeof(float)*10);
-    for (int i=0; i<10; i++) {
+    float* wanted_output = (float*)malloc(sizeof(float)*10);
+    for (int i=0; i < 10; i++) {
         if (i==wanted_number) {
             wanted_output[i]=1;
         }
@@ -118,3 +125,10 @@ float* generate_wanted_output(float wanted_number) {
     }
     return wanted_output;
 }
+
+int main() {
+    Network* network;
+    network = create_network_lenet5(0, TANH, GLOROT_NORMAL);
+    forward_propagation(network);
+    return 0;
+}
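The new main() builds the LeNet-5 network (no dropout, tanh activation, Glorot-normal initialisation) and runs one forward pass. Because this translation unit #includes the other .c files directly, a single compiler invocation should be enough; a plausible build line, assuming the file is named cnn.c (the diff does not show file names):

    gcc cnn.c -o cnn -lm    # -lm links the math library for exp/log/sqrt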
[cnn.h]

@@ -10,9 +10,9 @@
 int will_be_drop(int dropout_prob);

 /*
-* Ecrit une image 28*28 au centre d'un tableau 32*32 et met à 0 le reste
+* Écrit une image 28*28 au centre d'un tableau 32*32 et met à 0 le reste
 */
-void write_image_in_newtork_32(int** image, int height, int width, float** input);
+void write_image_in_network_32(int** image, int height, int width, float** input);

 /*
 * Propage en avant le cnn
[creation.c]

@@ -5,24 +5,23 @@
 #include "initialisation.h"

 Network* create_network(int max_size, int dropout, int initialisation, int input_dim, int input_depth) {
-    if (dropout<0 || dropout>100) {
+    if (dropout < 0 || dropout > 100) {
         printf("Erreur, la probabilité de dropout n'est pas respecté, elle doit être comprise entre 0 et 100\n");
     }
-    Network* network = malloc(sizeof(Network));
+    Network* network = (Network*)malloc(sizeof(Network));
     network->max_size = max_size;
     network->dropout = dropout;
     network->initialisation = initialisation;
     network->size = 1;
-    network->input = malloc(sizeof(float***)*max_size);
-    network->kernel = malloc(sizeof(Kernel)*(max_size-1));
-    create_a_cube_input_layer(network, 0, input_depth, input_dim);
-    int i, j;
-    network->dim = malloc(sizeof(int*)*max_size);
-    for (i=0; i<max_size; i++) {
-        network->dim[i] = malloc(sizeof(int)*2);
+    network->input = (float****)malloc(sizeof(float***)*max_size);
+    network->kernel = (Kernel*)malloc(sizeof(Kernel)*(max_size-1));
+    network->dim = (int**)malloc(sizeof(int*)*max_size);
+    for (int i=0; i < max_size; i++) {
+        network->dim[i] = (int*)malloc(sizeof(int)*2);
     }
     network->dim[0][0] = input_dim;
     network->dim[0][1] = input_depth;
+    create_a_cube_input_layer(network, 0, input_depth, input_dim);
     return network;
 }

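Note the reordering here: create_a_cube_input_layer writes network->dim[pos][0] (visible in its hunk below), so calling it before network->dim is allocated, as the old code did, dereferenced an unallocated pointer. Moving the call after the dim allocation fixes that use-before-allocation.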
@@ -40,12 +39,11 @@ Network* create_network_lenet5(int dropout, int activation, int initialisation)
 }

 void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
-    int i, j;
-    network->input[pos] = malloc(sizeof(float**)*depth);
-    for (i=0; i<depth; i++) {
-        network->input[pos][i] = malloc(sizeof(float*)*dim);
-        for (j=0; j<dim; j++) {
-            network->input[pos][i][j] = malloc(sizeof(float)*dim);
+    network->input[pos] = (float***)malloc(sizeof(float**)*depth);
+    for (int i=0; i < depth; i++) {
+        network->input[pos][i] = (float**)malloc(sizeof(float*)*dim);
+        for (int j=0; j < dim; j++) {
+            network->input[pos][i][j] = (float*)malloc(sizeof(float)*dim);
         }
     }
     network->dim[pos][0] = dim;
@@ -53,10 +51,9 @@ void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
 }

 void create_a_line_input_layer(Network* network, int pos, int dim) {
-    int i;
-    network->input[pos] = malloc(sizeof(float**));
-    network->input[pos][0] = malloc(sizeof(float*));
-    network->input[pos][0][0] = malloc(sizeof(float)*dim);
+    network->input[pos] = (float***)malloc(sizeof(float**));
+    network->input[pos][0] = (float**)malloc(sizeof(float*));
+    network->input[pos][0][0] = (float*)malloc(sizeof(float)*dim);
 }

 void add_average_pooling(Network* network, int kernel_size, int activation) {
@@ -87,7 +84,7 @@ void add_average_pooling_flatten(Network* network, int kernel_size, int activati
 }

 void add_convolution(Network* network, int nb_filter, int kernel_size, int activation) {
-    int n = network->size, i, j, k;
+    int n = network->size;
     if (network->max_size == n) {
         printf("Impossible de rajouter une couche de convolution, le réseau est déjà plein\n");
         return;
@@ -95,33 +92,33 @@ void add_convolution(Network* network, int nb_filter, int kernel_size, int activ
     int r = network->dim[n-1][1];
     int c = nb_filter;
     network->kernel[n].nn = NULL;
-    network->kernel[n].cnn = malloc(sizeof(Kernel_cnn));
+    network->kernel[n].cnn = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
     network->kernel[n].activation = activation;
     network->kernel[n].cnn->k_size = kernel_size;
     network->kernel[n].cnn->rows = r;
     network->kernel[n].cnn->columns = c;
-    network->kernel[n].cnn->w = malloc(sizeof(float***)*r);
-    network->kernel[n].cnn->d_w = malloc(sizeof(float***)*r);
-    for (i=0; i<r; i++) {
-        network->kernel[n].cnn->w[i] = malloc(sizeof(float**)*c);
-        network->kernel[n].cnn->d_w[i] = malloc(sizeof(float**)*c);
-        for (j=0; j<c; j++) {
-            network->kernel[n].cnn->w[i][j] = malloc(sizeof(float*)*kernel_size);
-            network->kernel[n].cnn->d_w[i][j] = malloc(sizeof(float*)*kernel_size);
-            for (k=0; k<kernel_size; k++) {
-                network->kernel[n].cnn->w[i][j][k] = malloc(sizeof(float)*kernel_size);
-                network->kernel[n].cnn->d_w[i][j][k] = malloc(sizeof(float)*kernel_size);
+    network->kernel[n].cnn->w = (float****)malloc(sizeof(float***)*r);
+    network->kernel[n].cnn->d_w = (float****)malloc(sizeof(float***)*r);
+    for (int i=0; i < r; i++) {
+        network->kernel[n].cnn->w[i] = (float***)malloc(sizeof(float**)*c);
+        network->kernel[n].cnn->d_w[i] = (float***)malloc(sizeof(float**)*c);
+        for (int j=0; j < c; j++) {
+            network->kernel[n].cnn->w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
+            network->kernel[n].cnn->d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
+            for (int k=0; k < kernel_size; k++) {
+                network->kernel[n].cnn->w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
+                network->kernel[n].cnn->d_w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
             }
         }
     }
-    network->kernel[n].cnn->bias = malloc(sizeof(float**)*c);
-    network->kernel[n].cnn->d_bias = malloc(sizeof(float**)*c);
-    for (i=0; i<c; i++) {
-        network->kernel[n].cnn->bias[i] = malloc(sizeof(float*)*kernel_size);
-        network->kernel[n].cnn->d_bias[i] = malloc(sizeof(float*)*kernel_size);
-        for (j=0; j<kernel_size; j++) {
-            network->kernel[n].cnn->bias[i][j] = malloc(sizeof(float)*kernel_size);
-            network->kernel[n].cnn->d_bias[i][j] = malloc(sizeof(float)*kernel_size);
+    network->kernel[n].cnn->bias = (float***)malloc(sizeof(float**)*c);
+    network->kernel[n].cnn->d_bias = (float***)malloc(sizeof(float**)*c);
+    for (int i=0; i < c; i++) {
+        network->kernel[n].cnn->bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
+        network->kernel[n].cnn->d_bias[i] = (float**)malloc(sizeof(float*)*kernel_size);
+        for (int j=0; j < kernel_size; j++) {
+            network->kernel[n].cnn->bias[i][j] = (float*)malloc(sizeof(float)*kernel_size);
+            network->kernel[n].cnn->d_bias[i][j] = (float*)malloc(sizeof(float)*kernel_size);
         }
     }
     create_a_cube_input_layer(network, n, c, network->dim[n-1][0] - 2*(kernel_size/2));
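The new layer's spatial size follows the valid-convolution rule visible in the last line: network->dim[n-1][0] - 2*(kernel_size/2), which for an odd kernel equals input_dim - kernel_size + 1. For example, a 5×5 kernel on a 32×32 input gives 32 - 2*(5/2) = 28, the familiar LeNet-5 progression.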
@@ -141,17 +138,17 @@ void add_dense(Network* network, int input_units, int output_units, int activati
         return;
     }
     network->kernel[n].cnn = NULL;
-    network->kernel[n].nn = malloc(sizeof(Kernel_nn));
+    network->kernel[n].nn = (Kernel_nn*)malloc(sizeof(Kernel_nn));
     network->kernel[n].activation = activation;
     network->kernel[n].nn->input_units = input_units;
     network->kernel[n].nn->output_units = output_units;
-    network->kernel[n].nn->bias = malloc(sizeof(float)*output_units);
-    network->kernel[n].nn->d_bias = malloc(sizeof(float)*output_units);
-    network->kernel[n].nn->weights = malloc(sizeof(float*)*input_units);
-    network->kernel[n].nn->d_weights = malloc(sizeof(float*)*input_units);
-    for (int i=0; i<input_units; i++) {
-        network->kernel[n].nn->weights[i] = malloc(sizeof(float)*output_units);
-        network->kernel[n].nn->d_weights[i] = malloc(sizeof(float)*output_units);
+    network->kernel[n].nn->bias = (float*)malloc(sizeof(float)*output_units);
+    network->kernel[n].nn->d_bias = (float*)malloc(sizeof(float)*output_units);
+    network->kernel[n].nn->weights = (float**)malloc(sizeof(float*)*input_units);
+    network->kernel[n].nn->d_weights = (float**)malloc(sizeof(float*)*input_units);
+    for (int i=0; i < input_units; i++) {
+        network->kernel[n].nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
+        network->kernel[n].nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
     initialisation_1d_matrix(network->initialisation, network->kernel[n].nn->bias, output_units, output_units+input_units);
     initialisation_1d_matrix(ZERO, network->kernel[n].nn->d_bias, output_units, output_units+input_units);
[free.c]

@@ -3,9 +3,8 @@
 #include "free.h"

 void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
-    int i, j, k;
-    for (i=0; i<depth; i++) {
-        for (j=0; j<dim; j++) {
+    for (int i=0; i < depth; i++) {
+        for (int j=0; j < dim; j++) {
             free(network->input[pos][i][j]);
         }
         free(network->input[pos][i]);
@@ -28,12 +27,12 @@ void free_average_pooling_flatten(Network* network, int pos) {
 }

 void free_convolution(Network* network, int pos) {
-    int i, j, k, c = network->kernel[pos].cnn->columns;
+    int c = network->kernel[pos].cnn->columns;
     int k_size = network->kernel[pos].cnn->k_size;
     int r = network->kernel[pos].cnn->rows;
     free_a_cube_input_layer(network, pos, c, network->dim[pos-1][0] - 2*(k_size/2));
-    for (i=0; i<c; i++) {
-        for (j=0; j<k_size; j++) {
+    for (int i=0; i < c; i++) {
+        for (int j=0; j < k_size; j++) {
             free(network->kernel[pos].cnn->bias[i][j]);
             free(network->kernel[pos].cnn->d_bias[i][j]);
         }
@@ -43,9 +42,9 @@ void free_convolution(Network* network, int pos) {
     free(network->kernel[pos].cnn->bias);
     free(network->kernel[pos].cnn->d_bias);

-    for (i=0; i<r; i++) {
-        for (j=0; j<c; j++) {
-            for (k=0; k<k_size; k++) {
+    for (int i=0; i < r; i++) {
+        for (int j=0; j < c; j++) {
+            for (int k=0; k < k_size; k++) {
                 free(network->kernel[pos].cnn->w[i][j][k]);
                 free(network->kernel[pos].cnn->d_w[i][j][k]);
             }
@@ -63,8 +62,8 @@ void free_convolution(Network* network, int pos) {

 void free_dense(Network* network, int pos) {
     free_a_line_input_layer(network, pos);
-    int i, dim = network->kernel[pos].nn->output_units;
-    for (int i=0; i<dim; i++) {
+    int dim = network->kernel[pos].nn->output_units;
+    for (int i=0; i < dim; i++) {
         free(network->kernel[pos].nn->weights[i]);
         free(network->kernel[pos].nn->d_weights[i]);
     }
@@ -80,7 +79,7 @@ void free_dense(Network* network, int pos) {
 void free_network_creation(Network* network) {
     free_a_cube_input_layer(network, 0, network->dim[0][1], network->dim[0][0]);

-    for (int i=0; i<network->max_size; i++) {
+    for (int i=0; i < network->max_size; i++) {
         free(network->dim[i]);
     }
     free(network->dim);
|
@ -4,7 +4,7 @@
|
|||||||
#include "function.h"
|
#include "function.h"
|
||||||
|
|
||||||
float max(float a, float b) {
|
float max(float a, float b) {
|
||||||
return a<b?b:a;
|
return a < b ? b:a;
|
||||||
}
|
}
|
||||||
|
|
||||||
float sigmoid(float x) {
|
float sigmoid(float x) {
|
||||||
@@ -36,27 +36,26 @@ float tanh_derivative(float x) {
 }

 void apply_softmax_input(float ***input, int depth, int rows, int columns) {
-    int i, j, k;
     float m = FLT_MIN;
     float sum=0;
-    for (i=0; i<depth; i++) {
-        for (j=0; j<rows; j++) {
-            for (k=0; k<columns; k++) {
+    for (int i=0; i < depth; i++) {
+        for (int j=0; j < rows; j++) {
+            for (int k=0; k < columns; k++) {
                 m = max(m, input[i][j][k]);
             }
         }
     }
-    for (i=0; i<depth; i++) {
-        for (j=0; j<rows; j++) {
-            for (k=0; k<columns; k++) {
+    for (int i=0; i < depth; i++) {
+        for (int j=0; j < rows; j++) {
+            for (int k=0; k < columns; k++) {
                 input[i][j][k] = exp(m-input[i][j][k]);
                 sum += input[i][j][k];
             }
         }
     }
-    for (i=0; i<depth; i++) {
-        for (j=0; j<rows; j++) {
-            for (k=0; k<columns; k++) {
+    for (int i=0; i < depth; i++) {
+        for (int j=0; j < rows; j++) {
+            for (int k=0; k < columns; k++) {
                 input[i][j][k] = input[i][j][k]/sum;
             }
         }
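For comparison, the textbook numerically stable softmax shifts by the maximum as exp(x - m), whereas the code above computes exp(m - x), a decreasing function of x that inverts the ranking of the outputs; it also seeds the running maximum with FLT_MIN, which is the smallest positive float rather than the most negative one. A self-contained reference sketch over a flat buffer (softmax_1d is an illustrative name, not repository code):

#include <math.h>

/* Stable softmax, in place, over a flat buffer of length len. */
void softmax_1d(float* x, int len) {
    float m = x[0];
    for (int i = 1; i < len; i++)
        if (x[i] > m) m = x[i];      /* true maximum of the data */
    float sum = 0.f;
    for (int i = 0; i < len; i++) {
        x[i] = expf(x[i] - m);       /* max-shift keeps expf() in range */
        sum += x[i];
    }
    for (int i = 0; i < len; i++)
        x[i] /= sum;                 /* normalise to probabilities */
}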
@@ -64,10 +63,9 @@ void apply_softmax_input(float ***input, int depth, int rows, int columns) {
 }

 void apply_function_input(float (*f)(float), float*** input, int depth, int rows, int columns) {
-    int i, j ,k;
-    for (i=0; i<depth; i++) {
-        for (j=0; j<rows; j++) {
-            for (k=0; k<columns; k++) {
+    for (int i=0; i < depth; i++) {
+        for (int j=0; j < rows; j++) {
+            for (int k=0; k < columns; k++) {
                 input[i][j][k] = (*f)(input[i][j][k]);
             }
         }
[initialisation.c]

@@ -4,32 +4,29 @@


 void initialisation_1d_matrix(int initialisation, float* matrix, int rows, int n) { //NOT FINISHED
-    int i;
     float lower_bound = -6/sqrt((double)n);
     float distance = -lower_bound-lower_bound;
-    for (i=0; i<rows; i++) {
+    for (int i=0; i < rows; i++) {
         matrix[i] = lower_bound + RAND_FLT()*distance;
     }
 }

 void initialisation_2d_matrix(int initialisation, float** matrix, int rows, int columns, int n) { //NOT FINISHED
-    int i, j;
     float lower_bound = -6/sqrt((double)n);
     float distance = -lower_bound-lower_bound;
-    for (i=0; i<rows; i++) {
-        for (j=0; j<columns; j++) {
+    for (int i=0; i < rows; i++) {
+        for (int j=0; j < columns; j++) {
             matrix[i][j] = lower_bound + RAND_FLT()*distance;
         }
     }
 }

 void initialisation_3d_matrix(int initialisation, float*** matrix, int depth, int rows, int columns, int n) { //NOT FINISHED
-    int i, j, k;
     float lower_bound = -6/sqrt((double)n);
     float distance = -lower_bound-lower_bound;
-    for (i=0; i<depth; i++) {
-        for (j=0; j<rows; j++) {
-            for (k=0; k<columns; k++) {
+    for (int i=0; i < depth; i++) {
+        for (int j=0; j < rows; j++) {
+            for (int k=0; k < columns; k++) {
                 matrix[i][j][k] = lower_bound + RAND_FLT()*distance;
             }
         }
@@ -37,13 +34,12 @@ void initialisation_3d_matrix(int initialisation, float*** matrix, int depth, in
 }

 void initialisation_4d_matrix(int initialisation, float**** matrix, int rows, int columns, int rows1, int columns1, int n) { //NOT FINISHED
-    int i, j, k, l;
     float lower_bound = -6/sqrt((double)n);
     float distance = -lower_bound-lower_bound;
-    for (i=0; i<rows; i++) {
-        for (j=0; j<columns; j++) {
-            for (k=0; k<rows1; k++) {
-                for (l=0; l<columns1; l++) {
+    for (int i=0; i < rows; i++) {
+        for (int j=0; j < columns; j++) {
+            for (int k=0; k < rows1; k++) {
+                for (int l=0; l < columns1; l++) {
                     matrix[i][j][k][l] = lower_bound + RAND_FLT()*distance;
                 }
             }
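In each of these initialisers, distance = -lower_bound-lower_bound is simply -2*lower_bound, the width of the symmetric interval [-6/√n, 6/√n]; lower_bound + RAND_FLT()*distance then draws uniformly over that interval, assuming RAND_FLT() yields a uniform value in [0, 1]. The classic Glorot/Xavier-uniform bound is √6/√(n_in + n_out) rather than 6/√n, so the constant here reads as provisional, consistent with the //NOT FINISHED markers.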
[make.c]

@@ -4,14 +4,14 @@
 void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim) {
     //NOT FINISHED, MISS CONDITIONS ON THE CONVOLUTION
     float f;
-    int i, j, k, a, b, c, n=kernel->k_size;
-    for (i=0; i<kernel->columns; i++) {
-        for (j=0; j<output_dim; j++) {
-            for (k=0; k<output_dim; k++) {
+    int n = kernel->k_size;
+    for (int i=0; i < kernel->columns; i++) {
+        for (int j=0; j < output_dim; j++) {
+            for (int k=0; k < output_dim; k++) {
                 f = kernel->bias[i][j][k];
-                for (a=0; a<kernel->rows; a++) {
-                    for (b=0; b<n; b++) {
-                        for (c=0; c<n; c++) {
+                for (int a=0; a < kernel->rows; a++) {
+                    for (int b=0; b < n; b++) {
+                        for (int c=0; c < n; c++) {
                             f += kernel->w[a][i][b][c]*input[a][j+a][k+b];
                         }
                     }
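For reference, a standard multi-channel valid cross-correlation with this weight layout (w[input_channel][output_channel][ky][kx]) offsets the input window by the kernel indices, input[a][j+b][k+c], where the loop above uses input[a][j+a][k+b]; the //NOT FINISHED marker presumably covers this. A sketch under that reading, reusing the repository's Kernel_cnn (conv_valid is an illustrative name):

void conv_valid(float*** input, Kernel_cnn* kernel, float*** output, int output_dim) {
    int n = kernel->k_size;
    for (int i=0; i < kernel->columns; i++) {        /* output channel */
        for (int j=0; j < output_dim; j++) {         /* output row */
            for (int k=0; k < output_dim; k++) {     /* output column */
                float f = kernel->bias[i][j][k];
                for (int a=0; a < kernel->rows; a++)     /* input channel */
                    for (int b=0; b < n; b++)            /* kernel row */
                        for (int c=0; c < n; c++)        /* kernel column */
                            f += kernel->w[a][i][b][c]*input[a][j+b][k+c];
                output[i][j][k] = f;
            }
        }
    }
}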
@@ -25,13 +25,13 @@ void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int o
 void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim) {
     //NOT FINISHED, MISS CONDITIONS ON THE POOLING
     float average;
-    int i, j, k, a, b, n=size*size;
-    for (i=0; i<output_depth; i++) {
-        for (j=0; j<output_dim; j++) {
-            for (k=0; k<output_dim; k++) {
+    int n = size*size;
+    for (int i=0; i < output_depth; i++) {
+        for (int j=0; j < output_dim; j++) {
+            for (int k=0; k < output_dim; k++) {
                 average = 0.;
-                for (a=0; a<size; a++) {
-                    for (b=0; b<size; b++) {
+                for (int a=0; a < size; a++) {
+                    for (int b=0; b < size; b++) {
                         average += input[i][2*j +a][2*k +b];
                     }
                 }
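The index expression input[i][2*j +a][2*k +b] hard-codes a stride of 2 whatever the window size, so this is stride-2 size×size average pooling; n = size*size is the window area, presumably the divisor applied to average in the lines elided from this hunk.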
@@ -47,14 +47,15 @@ void make_average_pooling_flattened(float*** input, float* output, int size, int
         return;
     }
     float average;
-    int i, j, k, a, b, n=size*size, cpt=0;
+    int n = size*size;
+    int cpt = 0;
     int output_dim = input_dim - 2*(size/2);
-    for (i=0; i<input_depth; i++) {
-        for (j=0; j<output_dim; j++) {
-            for (k=0; k<output_dim; k++) {
+    for (int i=0; i < input_depth; i++) {
+        for (int j=0; j < output_dim; j++) {
+            for (int k=0; k < output_dim; k++) {
                 average = 0.;
-                for (a=0; a<size; a++) {
-                    for (b=0; b<size; b++) {
+                for (int a=0; a < size; a++) {
+                    for (int b=0; b < size; b++) {
                         average += input[i][2*j +a][2*k +b];
                     }
                 }
@@ -66,11 +67,10 @@ void make_average_pooling_flattened(float*** input, float* output, int size, int
 }

 void make_fully_connected(float* input, Kernel_nn* kernel, float* output, int size_input, int size_output) {
-    int i, j, k;
     float f;
-    for (i=0; i<size_output; i++) {
+    for (int i=0; i < size_output; i++) {
         f = kernel->bias[i];
-        for (j=0; j<size_input; j++) {
+        for (int j=0; j < size_input; j++) {
             f += kernel->weights[i][j]*input[j];
         }
         output[i] = f;
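make_fully_connected computes the affine map output[i] = bias[i] + Σ_j weights[i][j]*input[j], i.e. output = W·x + b; as in forward_propagation above, the activation is applied separately by choose_apply_function_input.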