#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>

#include "function.h"
#include "make.h"
#include "cnn.h" // Assumed to declare the prototypes used before their definition below

// Increases the dimensions of the input image (zero-padding on each side)
#define PADING_INPUT 2

// Returns 1 if the neuron should be dropped, for a dropout probability
// given as a percentage
int will_be_drop(int dropout_prob) {
    return (rand() % 100) < dropout_prob;
}

// Writes a raw greyscale image into the input layer of the network,
// normalised to [0, 1] and surrounded by a border of PADING_INPUT zeroed
// pixels. (The original name and signature of this function were lost;
// both are reconstructed here from the body.)
void write_image_in_network(int** image, int height, int width, float** input) {
    for (int i=0; i < height + 2*PADING_INPUT; i++) {
        for (int j=0; j < width + 2*PADING_INPUT; j++) {
            if (i < PADING_INPUT || i >= height + PADING_INPUT
                || j < PADING_INPUT || j >= width + PADING_INPUT) {
                input[i][j] = 0.;
            } else {
                // Shift the indices by the padding so the image stays centred
                input[i][j] = (float)image[i-PADING_INPUT][j-PADING_INPUT] / 255.0f;
            }
        }
    }
}

void forward_propagation(Network* network) {
    for (int i=0; i < network->size-1; i++) {
        if (network->kernel[i].nn==NULL && network->kernel[i].cnn!=NULL) {
            // Convolutional layer
            make_convolution(network->input[i], network->kernel[i].cnn, network->input[i+1], network->dim[i+1][0]);
            choose_apply_function_input(network->kernel[i].activation, network->input[i+1], network->dim[i+1][1], network->dim[i+1][0], network->dim[i+1][0]);
        }
        else if (network->kernel[i].nn!=NULL && network->kernel[i].cnn==NULL) {
            // Fully connected layer
            make_fully_connected(network->input[i][0][0], network->kernel[i].nn, network->input[i+1][0][0], network->dim[i][0], network->dim[i+1][0]);
            choose_apply_function_input(network->kernel[i].activation, network->input[i+1], 1, 1, network->dim[i+1][0]);
        }
        else {
            // Pooling layer: 'activation' encodes both the pooling size
            // (activation/100) and the activation function (activation%100)
            if (network->size-2==i) {
                printf("The network cannot end with a pooling layer\n");
                return;
            }
            if (network->kernel[i+1].nn!=NULL && network->kernel[i+1].cnn==NULL) {
                // The next layer is fully connected: pool and flatten at once
                make_average_pooling_flattened(network->input[i], network->input[i+1][0][0], network->kernel[i].activation/100, network->dim[i][1], network->dim[i][0]);
                choose_apply_function_input(network->kernel[i].activation%100, network->input[i+1], 1, 1, network->dim[i+1][0]);
            }
            else if (network->kernel[i+1].nn==NULL && network->kernel[i+1].cnn!=NULL) {
                // The next layer is convolutional: keep the 3D layout
                make_average_pooling(network->input[i], network->input[i+1], network->kernel[i].activation/100, network->dim[i+1][1], network->dim[i+1][0]);
                choose_apply_function_input(network->kernel[i].activation%100, network->input[i+1], network->dim[i+1][1], network->dim[i+1][0], network->dim[i+1][0]);
            }
            else {
                printf("The network cannot contain two consecutive pooling layers\n");
                return;
            }
        }
    }
}

void backward_propagation(Network* network, float wanted_number) {
    float* wanted_output = generate_wanted_output(wanted_number);
    int n = network->size-1;
    float loss = compute_cross_entropy_loss(network->input[n][0][0], wanted_output, network->dim[n][0]);
    (void)loss; // For now the loss is only computed, not yet reported

    for (int i=n; i >= 0; i--) {
        if (i==n) {
            if (network->kernel[i].activation == SOFTMAX) {
                int l2 = network->dim[i][0]; // Size of the last layer
                int l1 = network->dim[i-1][0]; // Size of the previous layer (not used yet)
                (void)l1;
                for (int j=0; j < l2; j++) {
                    // Softmax followed by cross-entropy: the gradient with
                    // respect to the pre-activation reduces to output - target
                    // (loop body reconstructed from that standard identity)
                    network->input[i][0][0][j] -= wanted_output[j];
                }
            }
        }
        else {
            if (network->kernel[i].activation == SIGMOID) {
                // TODO: backward pass for sigmoid layers
            }
            else if (network->kernel[i].activation == TANH) {
                // TODO: backward pass for tanh layers
            }
            else if (network->kernel[i].activation == RELU) {
                // TODO: backward pass for ReLU layers
            }
        }
    }
    free(wanted_output);
}

// Cross-entropy against a one-hot target: only the wanted class contributes
float compute_cross_entropy_loss(float* output, float* wanted_output, int len) {
    float loss=0.;
    for (int i=0; i < len; i++) {
        if (wanted_output[i] == 1) {
            // Clamp to FLT_EPSILON to avoid log(0)
            loss -= log(output[i] == 0. ? FLT_EPSILON : output[i]);
        }
    }
    return loss;
}

// Reconstructed helper (assumed 10 classes, one per digit): builds the
// one-hot target vector for the wanted digit
float* generate_wanted_output(float wanted_number) {
    float* wanted_output = (float*)malloc(sizeof(float)*10);
    for (int i=0; i < 10; i++) {
        wanted_output[i] = (i == (int)wanted_number) ? 1. : 0.;
    }
    return wanted_output;
}
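
#ifdef SOFTMAX_GRAD_CHECK
/*
 * Illustrative addition, not part of the original project: a minimal sketch
 * that checks numerically the identity used in backward_propagation(), i.e.
 * for softmax followed by cross-entropy, d(loss)/d(z_j) = softmax(z)_j - target_j.
 * The guard macro and helper names are assumptions; to run it, add
 * -DSOFTMAX_GRAD_CHECK to the usual build of this file (assuming no other
 * main is linked in).
 */
static void softmax_check(const float* z, float* out, int len) {
    // Numerically stable softmax: subtract the max before exponentiating
    float max = z[0];
    for (int i=1; i < len; i++) if (z[i] > max) max = z[i];
    float sum = 0.f;
    for (int i=0; i < len; i++) { out[i] = expf(z[i] - max); sum += out[i]; }
    for (int i=0; i < len; i++) out[i] /= sum;
}

static float ce_loss_check(const float* z, const float* target, int len) {
    // Same convention as compute_cross_entropy_loss: one-hot target
    float out[10];
    softmax_check(z, out, len);
    float loss = 0.f;
    for (int i=0; i < len; i++)
        if (target[i] == 1.f) loss -= logf(out[i]);
    return loss;
}

int main(void) {
    float z[10] = {0.3f, -1.2f, 0.8f, 2.0f, -0.5f, 0.1f, 1.1f, -0.7f, 0.4f, 0.f};
    float target[10] = {0.f};
    target[3] = 1.f;
    float out[10];
    softmax_check(z, out, 10);
    const float eps = 1e-3f;
    for (int j=0; j < 10; j++) {
        // Central finite difference of the loss with respect to z[j]
        float saved = z[j];
        z[j] = saved + eps; float lp = ce_loss_check(z, target, 10);
        z[j] = saved - eps; float lm = ce_loss_check(z, target, 10);
        z[j] = saved;
        // The two columns should agree to roughly 1e-4
        printf("j=%d numeric=% .6f analytic=% .6f\n", j,
               (lp - lm) / (2.f * eps), out[j] - target[j]);
    }
    return 0;
}
#endif // SOFTMAX_GRAD_CHECK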
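
#ifdef PADDING_DEMO
/*
 * Illustrative addition, not part of the original project: a minimal sketch
 * of write_image_in_network() (name reconstructed above) on a 2x2 image.
 * It prints a (2+2*PADING_INPUT)-wide grid whose border is zero and whose
 * centre holds the pixels normalised to [0, 1]. Build with -DPADDING_DEMO
 * (and without -DSOFTMAX_GRAD_CHECK, so only one main is defined).
 */
int main(void) {
    int h = 2, w = 2;
    int ph = h + 2*PADING_INPUT, pw = w + 2*PADING_INPUT;

    // A 2x2 test image and the padded destination buffer
    int raw[2][2] = {{0, 255}, {128, 64}};
    int* image[2] = {raw[0], raw[1]};
    float** input = (float**)malloc(sizeof(float*) * ph);
    for (int i=0; i < ph; i++)
        input[i] = (float*)malloc(sizeof(float) * pw);

    write_image_in_network(image, h, w, input);

    for (int i=0; i < ph; i++) {
        for (int j=0; j < pw; j++)
            printf("%.2f ", input[i][j]);
        printf("\n");
    }

    for (int i=0; i < ph; i++) free(input[i]);
    free(input);
    return 0;
}
#endif // PADDING_DEMO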