Completion of the forward propagation

This commit is contained in:
Julien Chemillier 2022-10-02 20:31:20 +02:00
parent 71e7aaac34
commit 9f44e4a189
5 changed files with 77 additions and 70 deletions

View File

@ -1,7 +1,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <float.h> // Is it used ?
#include "../colors.h"
#include "include/initialisation.h"
@ -32,80 +32,75 @@ void write_image_in_network_32(int** image, int height, int width, float** input
}
void forward_propagation(Network* network) {
int activation, input_width, input_depth, output_width, output_depth;
int activation, input_depth, input_width, output_depth, output_width;
int n = network->size;
float*** input;
float*** output;
Kernel* k_i;
for (int i=0; i < n-1; i++) {
// Transférer les informations de 'input' à 'output'
k_i = network->kernel[i];
printf("\n i -> %d :: %d %d \n", i, k_i->cnn==NULL, k_i->nn==NULL);
input_width = network->width[i];
input_depth = network->depth[i];
output_width = network->width[i+1];
output_depth = network->depth[i+1];
activation = k_i->activation;
input = network->input[i];
input_depth = network->depth[i];
input_width = network->width[i];
output = network->input[i+1];
output_depth = network->depth[i+1];
output_width = network->width[i+1];
activation = k_i->activation;
if (k_i->cnn!=NULL) { //CNN
printf("Convolution of cnn: %dx%dx%d -> %dx%dx%d\n", input_depth, input_width, input_width, output_depth, output_width, output_width);
make_convolution(input, k_i->cnn, output, output_width);
if (k_i->cnn!=NULL) { // Convolution
printf("\n(%d)-Convolution of cnn: %dx%dx%d -> %dx%dx%d\n", i, input_depth, input_width, input_width, output_depth, output_width, output_width);
make_convolution(k_i->cnn, input, output, output_width);
choose_apply_function_matrix(activation, output, output_depth, output_width);
}
else if (k_i->nn!=NULL) { //NN
printf("Densification of nn: %dx%dx%d -> %dx%dx%d\n", input_depth, input_width, input_width, output_depth, output_width, output_width);
// Checked if it is a nn which linearise
make_fully_connected(network->input[i][0][0], network->kernel[i]->nn, network->input[i+1][0][0], input_width, output_width);
else if (k_i->nn!=NULL) { // Full connection
if (input_depth==1) { // Vecteur -> Vecteur
printf("\n(%d)-Densification of nn: %dx%dx%d -> %dx%dx%d\n", i, 1, 1, input_width, 1, 1, output_width);
make_dense(k_i->nn, input[0][0], output[0][0], input_width, output_width);
} else { // Matrice -> vecteur
printf("\n(%d)-Densification linearised of nn: %dx%dx%d -> %dx%dx%d\n", i, input_depth, input_width, input_width, 1, 1, output_width);
make_dense_linearised(k_i->nn, input, output[0][0], input_depth, input_width, output_width);
}
choose_apply_function_vector(activation, output, output_width);
}
else { //Pooling
else { // Pooling
if (n-2==i) {
printf("Le réseau ne peut pas finir par une pooling layer");
printf("Le réseau ne peut pas finir par une pooling layer\n");
return;
}
if (1==1) { // Pooling sur une matrice
printf("Average pooling: %dx%dx%d -> %dx%dx%d\n", input_depth, input_width, input_width, output_depth, output_width, output_width);
} else { // Pooling sur une matrice
printf("\n(%d)-Average pooling: %dx%dx%d -> %dx%dx%d\n", i, input_depth, input_width, input_width, output_depth, output_width, output_width);
make_average_pooling(input, output, activation/100, output_depth, output_width);
}
else { // Pooling sur un vecteur
printf("Erreur: le pooling ne se fait que sur une matrice \n");
return;
}
}
}
}
void backward_propagation(Network* network, float wanted_number) { // TODO
void backward_propagation(Network* network, float wanted_number) {
printf_warning("Appel de backward_propagation, incomplet\n");
float* wanted_output = generate_wanted_output(wanted_number);
int n = network->size-1;
int n = network->size;
float loss = compute_cross_entropy_loss(network->input[n][0][0], wanted_output, network->width[n]);
for (int i=n; i >= 0; i--) {
if (i==n) {
if (network->kernel[i]->activation == SOFTMAX) {
int l2 = network->width[i]; // Taille de la dernière couche
//int l1 = network->width[i-1];
for (int j=0; j < l2; j++) {
int activation, input_depth, input_width, output_depth, output_width;
float*** input;
float*** output;
Kernel* k_i;
Kernel* k_i_1;
}
}
else {
printf("Erreur, seule la fonction SOFTMAX est implémentée pour la dernière couche");
return;
}
}
else {
if (network->kernel[i]->activation == SIGMOID) {
for (int i=n-3; i >= 0; i--) {
// Modifie 'k_i' à partir d'une comparaison d'informations entre 'input' et 'output'
k_i = network->kernel[i];
k_i_1 = network->kernel[i+1];
input = network->input[i];
input_depth = network->depth[i];
input_width = network->width[i];
output = network->input[i+1];
output_depth = network->depth[i+1];
output_width = network->width[i+1];
activation = k_i->activation;
}
else if (network->kernel[i]->activation == TANH) {
}
else if (network->kernel[i]->activation == RELU) {
}
}
//if convolution
// else if dense (linearised or not)
// else pooling
}
free(wanted_output);
}

View File

@ -36,7 +36,7 @@ Network* create_network_lenet5(int dropout, int activation, int initialisation,
add_2d_average_pooling(network, 28, 14);
add_convolution(network, 6, 14, 16, 10, activation);
add_2d_average_pooling(network, 10, 5);
add_dense_linearisation(network, 160, 120, activation);
add_dense_linearisation(network, 400, 120, activation);
add_dense(network, 120, 84, activation);
add_dense(network, 84, 10, SOFTMAX);
return network;
@ -155,13 +155,13 @@ void add_dense(Network* network, int input_units, int output_units, int activati
nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
}
create_a_line_input_layer(network, n, output_units);
/* Not currently used
initialisation_1d_matrix(network->initialisation, nn->bias, output_units, output_units+input_units);
initialisation_1d_matrix(ZERO, nn->d_bias, output_units, output_units+input_units);
initialisation_2d_matrix(network->initialisation, nn->weights, input_units, output_units, output_units+input_units);
initialisation_2d_matrix(ZERO, nn->d_weights, input_units, output_units, output_units+input_units);
*/
create_a_line_input_layer(network, n, output_units);
network->size++;
}

View File

@ -6,7 +6,7 @@
/*
* Effectue une convolution sans stride
*/
void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim);
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim);
/*
* Effecute un average pooling avec stride=size
@ -16,6 +16,11 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
/*
* Effecute une full connection
*/
void make_fully_connected(float* input, Kernel_nn* kernel, float* output, int size_input, int size_output);
void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output);
/*
* Effecute une full connection qui passe d'une matrice à un vecteur
*/
void make_dense_linearised(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output);
#endif

View File

@ -28,15 +28,6 @@ void help(char* call) {
void dev_conv() {
Network* network = create_network_lenet5(0, TANH, GLOROT_NORMAL, 32, 1);
for (int i=0; i < 8; i++) {
printf("%d %d \n", network->depth[i], network->width[i]);
}
printf("Kernel:\n");
for (int i=0; i<7; i++) {
if (network->kernel[i]->cnn!=NULL) {
printf("%d -> %d %d\n", i, network->kernel[i]->cnn->rows, network->kernel[i]->cnn->k_size);
}
}
forward_propagation(network);
}

View File

@ -3,10 +3,10 @@
#include "../colors.h"
#include "include/make.h"
void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int output_dim) {
printf_warning("Appel de make_convolution, incomplet\n");
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim) {
float f;
int n = kernel->k_size;
printf("max_input %dx%dx%d: %d \n", kernel->rows, n+output_dim -1, output_dim+n -1, n);
for (int i=0; i < kernel->columns; i++) {
for (int j=0; j < output_dim; j++) {
for (int k=0; k < output_dim; k++) {
@ -14,19 +14,18 @@ void make_convolution(float*** input, Kernel_cnn* kernel, float*** output, int o
for (int a=0; a < kernel->rows; a++) {
for (int b=0; b < n; b++) {
for (int c=0; c < n; c++) {
f += kernel->w[a][i][b][c]*input[a][j+a][k+b];
f += kernel->w[a][i][b][c]*input[a][j+b][k+c];
}
}
}
output[i][j][k] = f;
output[i][j][k] = f/n; // Average
}
}
}
}
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim) {
// TODO, MISS CONDITIONS ON THE POOLING
printf_warning("Appel de make_average_pooling, incomplet\n");
printf("%d -> %d \n", output_dim*size, output_dim);
float average;
int n = size*size;
for (int i=0; i < output_depth; i++) {
@ -35,7 +34,7 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
average = 0.;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
average += input[i][2*j +a][2*k +b];
average += input[i][size*j +a][size*k +b];
}
}
output[i][j][k] = average/n;
@ -44,7 +43,7 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
}
}
void make_fully_connected(float* input, Kernel_nn* kernel, float* output, int size_input, int size_output) {
void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output) {
float f;
for (int i=0; i < size_output; i++) {
f = kernel->bias[i];
@ -54,3 +53,20 @@ void make_fully_connected(float* input, Kernel_nn* kernel, float* output, int si
output[i] = f;
}
}
/*
 * Fully-connected layer that flattens a depth_input x dim_input x dim_input
 * input volume into a vector of n = depth_input*dim_input*dim_input values
 * and multiplies it by kernel->weights to produce size_output activations.
 *
 * kernel      - weights indexed as weights[flattened_input_index][output_index]
 *               NOTE(review): unlike make_dense, no kernel->bias[l] is added
 *               here — confirm the omission is intentional
 * input       - input volume input[depth_input][dim_input][dim_input]
 * output      - output vector of length size_output (written in place)
 */
void make_dense_linearised(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
    int n = depth_input*dim_input*dim_input;
    printf("%dx%dx%d (%d) -> %d\n",depth_input, dim_input, dim_input, n, size_output);
    float f;
    for (int l=0; l<size_output; l++) {
        f = 0;
        for (int i=0; i<depth_input; i++) {
            for (int j=0; j<dim_input; j++) {
                for (int k=0; k<dim_input; k++) {
                    // Flattened index of input[i][j][k]: each depth slice spans
                    // dim_input*dim_input entries. The previous stride of
                    // i*depth_input made indices collide and never reached the
                    // last weight rows (e.g. max 240 of 400 for 16x5x5 input).
                    f += input[i][j][k]*kernel->weights[k + j*dim_input + i*dim_input*dim_input][l];
                }
            }
        }
        // NOTE(review): dividing by size_output mirrors make_convolution's
        // f/n "Average" in this commit — confirm this scaling is intended.
        output[l] = f/size_output;
    }
}