mirror of
https://github.com/augustin64/projet-tipe
synced 2025-01-24 07:36:24 +01:00
Add knuth shuffle
This commit is contained in:
parent
e584dfc791
commit
ffc0c6ea9f
@ -1,3 +1,5 @@
|
|||||||
|
#include "neural_network.h"
|
||||||
|
|
||||||
#ifndef DEF_MAIN_H
|
#ifndef DEF_MAIN_H
|
||||||
#define DEF_MAIN_H
|
#define DEF_MAIN_H
|
||||||
|
|
||||||
@ -49,6 +51,16 @@ void* train_thread(void* parameters);
|
|||||||
*/
|
*/
|
||||||
void train(int epochs, int layers, int neurons, char* recovery, char* image_file, char* label_file, char* out, char* delta, int nb_images_to_process, int start);
|
void train(int epochs, int layers, int neurons, char* recovery, char* image_file, char* label_file, char* out, char* delta, int nb_images_to_process, int start);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Échange deux éléments d'un tableau
|
||||||
|
*/
|
||||||
|
void swap(int* tab, int i, int j);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Mélange un tableau avec le mélange de Knuth
|
||||||
|
*/
|
||||||
|
void knuth_shuffle(int* tab, int n);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Reconnaissance d'un set d'images, renvoie un tableau de float contentant les prédictions
|
* Reconnaissance d'un set d'images, renvoie un tableau de float contentant les prédictions
|
||||||
* modele: nom du fichier contenant le réseau neuronal
|
* modele: nom du fichier contenant le réseau neuronal
|
||||||
|
@ -6,6 +6,7 @@
|
|||||||
#include <pthread.h>
|
#include <pthread.h>
|
||||||
#include <sys/sysinfo.h>
|
#include <sys/sysinfo.h>
|
||||||
|
|
||||||
|
#include "include/main.h"
|
||||||
#include "include/mnist.h"
|
#include "include/mnist.h"
|
||||||
#include "include/neuron_io.h"
|
#include "include/neuron_io.h"
|
||||||
#include "include/neural_network.h"
|
#include "include/neural_network.h"
|
||||||
@ -20,6 +21,7 @@ typedef struct TrainParameters {
|
|||||||
Network* network;
|
Network* network;
|
||||||
int*** images;
|
int*** images;
|
||||||
int* labels;
|
int* labels;
|
||||||
|
int* shuffle_indices;
|
||||||
int start;
|
int start;
|
||||||
int nb_images;
|
int nb_images;
|
||||||
int height;
|
int height;
|
||||||
@ -97,6 +99,7 @@ void* train_thread(void* parameters) {
|
|||||||
|
|
||||||
int*** images = param->images;
|
int*** images = param->images;
|
||||||
int* labels = param->labels;
|
int* labels = param->labels;
|
||||||
|
int* shuffle = param->shuffle_indices;
|
||||||
|
|
||||||
int start = param->start;
|
int start = param->start;
|
||||||
int nb_images = param->nb_images;
|
int nb_images = param->nb_images;
|
||||||
@ -107,15 +110,15 @@ void* train_thread(void* parameters) {
|
|||||||
int* desired_output;
|
int* desired_output;
|
||||||
|
|
||||||
for (int i=start; i < start+nb_images; i++) {
|
for (int i=start; i < start+nb_images; i++) {
|
||||||
write_image_in_network(images[i], network, height, width);
|
write_image_in_network(images[shuffle[i]], network, height, width);
|
||||||
desired_output = desired_output_creation(network, labels[i]);
|
desired_output = desired_output_creation(network, labels[shuffle[i]]);
|
||||||
forward_propagation(network);
|
forward_propagation(network);
|
||||||
backward_propagation(network, desired_output);
|
backward_propagation(network, desired_output);
|
||||||
|
|
||||||
for (int k=0; k < nb_neurons_last_layer; k++) {
|
for (int k=0; k < nb_neurons_last_layer; k++) {
|
||||||
sortie[k] = last_layer->neurons[k]->z;
|
sortie[k] = last_layer->neurons[k]->z;
|
||||||
}
|
}
|
||||||
if (indice_max(sortie, nb_neurons_last_layer) == labels[i]) {
|
if (indice_max(sortie, nb_neurons_last_layer) == labels[shuffle[i]]) {
|
||||||
accuracy += 1.;
|
accuracy += 1.;
|
||||||
}
|
}
|
||||||
free(desired_output);
|
free(desired_output);
|
||||||
@ -134,7 +137,7 @@ void train(int epochs, int layers, int neurons, char* recovery, char* image_file
|
|||||||
|
|
||||||
//int* repartition = malloc(sizeof(int)*layers);
|
//int* repartition = malloc(sizeof(int)*layers);
|
||||||
int nb_neurons_last_layer = 10;
|
int nb_neurons_last_layer = 10;
|
||||||
int repartition[2] = {neurons, nb_neurons_last_layer};
|
int repartition[3] = {neurons, 42, nb_neurons_last_layer};
|
||||||
|
|
||||||
float accuracy;
|
float accuracy;
|
||||||
|
|
||||||
@ -179,6 +182,11 @@ void train(int epochs, int layers, int neurons, char* recovery, char* image_file
|
|||||||
int*** images = read_mnist_images(image_file);
|
int*** images = read_mnist_images(image_file);
|
||||||
unsigned int* labels = read_mnist_labels(label_file);
|
unsigned int* labels = read_mnist_labels(label_file);
|
||||||
|
|
||||||
|
int* shuffle_indices = (int*)malloc(sizeof(int)*nb_images_total);
|
||||||
|
for (int i=0; i < nb_images_total; i++) {
|
||||||
|
shuffle_indices[i] = i;
|
||||||
|
}
|
||||||
|
|
||||||
if (nb_images_to_process != -1) {
|
if (nb_images_to_process != -1) {
|
||||||
nb_images_total = nb_images_to_process;
|
nb_images_total = nb_images_to_process;
|
||||||
}
|
}
|
||||||
@ -191,9 +199,11 @@ void train(int epochs, int layers, int neurons, char* recovery, char* image_file
|
|||||||
train_parameters[j]->height = height;
|
train_parameters[j]->height = height;
|
||||||
train_parameters[j]->width = width;
|
train_parameters[j]->width = width;
|
||||||
train_parameters[j]->nb_images = BATCHES / nb_threads;
|
train_parameters[j]->nb_images = BATCHES / nb_threads;
|
||||||
|
train_parameters[j]->shuffle_indices = shuffle_indices;
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int i=0; i < epochs; i++) {
|
for (int i=0; i < epochs; i++) {
|
||||||
|
knuth_shuffle(shuffle_indices, nb_images_total); // Shuffle images between each epoch
|
||||||
accuracy = 0.;
|
accuracy = 0.;
|
||||||
for (int k=0; k < nb_images_total / BATCHES; k++) {
|
for (int k=0; k < nb_images_total / BATCHES; k++) {
|
||||||
nb_remaining_images = BATCHES;
|
nb_remaining_images = BATCHES;
|
||||||
@ -220,6 +230,7 @@ void train(int epochs, int layers, int neurons, char* recovery, char* image_file
|
|||||||
deletion_of_network(train_parameters[j]->network);
|
deletion_of_network(train_parameters[j]->network);
|
||||||
}
|
}
|
||||||
printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: %0.1f%%", nb_threads, i, epochs, BATCHES*(k+1), nb_images_total, accuracy*100);
|
printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: %0.1f%%", nb_threads, i, epochs, BATCHES*(k+1), nb_images_total, accuracy*100);
|
||||||
|
fflush(stdout);
|
||||||
}
|
}
|
||||||
printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: %0.1f%%\n", nb_threads, i, epochs, nb_images_total, nb_images_total, accuracy*100);
|
printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: %0.1f%%\n", nb_threads, i, epochs, nb_images_total, nb_images_total, accuracy*100);
|
||||||
write_network(out, network);
|
write_network(out, network);
|
||||||
@ -239,6 +250,18 @@ void train(int epochs, int layers, int neurons, char* recovery, char* image_file
|
|||||||
free(tid);
|
free(tid);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
 * Exchanges the elements at positions i and j of tab.
 * No bounds checking is performed; i and j must be valid indices.
 */
void swap(int* tab, int i, int j) {
    int held = tab[j];
    tab[j] = tab[i];
    tab[i] = held;
}
|
||||||
|
|
||||||
|
/*
 * Shuffles the first n elements of tab in place using the Knuth
 * (Fisher-Yates) shuffle, producing a uniformly random permutation
 * (assuming rand() is uniform).
 *
 * BUGFIX: the previous code drew j = rand() % i, which excludes j == i
 * and therefore implements Sattolo's algorithm: it can only produce
 * cyclic permutations, a biased (non-uniform) shuffle. The correct
 * Knuth shuffle draws j = rand() % (i + 1) so each element may also
 * stay in place.
 *
 * Note: rand() % (i + 1) retains a slight modulo bias when i + 1 does
 * not divide RAND_MAX + 1; acceptable here for shuffling training data.
 */
void knuth_shuffle(int* tab, int n) {
    for (int i = 1; i < n; i++) {
        int j = rand() % (i + 1);
        // Exchange inlined so the function is self-contained.
        int tmp = tab[i];
        tab[i] = tab[j];
        tab[j] = tmp;
    }
}
|
||||||
|
|
||||||
float** recognize(char* modele, char* entree) {
|
float** recognize(char* modele, char* entree) {
|
||||||
Network* network = read_network(modele);
|
Network* network = read_network(modele);
|
||||||
Layer* last_layer = network->layers[network->nb_layers-1];
|
Layer* last_layer = network->layers[network->nb_layers-1];
|
||||||
@ -351,7 +374,7 @@ int main(int argc, char* argv[]) {
|
|||||||
}
|
}
|
||||||
if (! strcmp(argv[1], "train")) {
|
if (! strcmp(argv[1], "train")) {
|
||||||
int epochs = EPOCHS;
|
int epochs = EPOCHS;
|
||||||
int layers = 2;
|
int layers = 3;
|
||||||
int neurons = 784;
|
int neurons = 784;
|
||||||
int nb_images = -1;
|
int nb_images = -1;
|
||||||
int start = 0;
|
int start = 0;
|
||||||
|
@ -10,7 +10,7 @@
|
|||||||
|
|
||||||
// Définit le taux d'apprentissage du réseau neuronal, donc la rapidité d'adaptation du modèle (compris entre 0 et 1)
|
// Définit le taux d'apprentissage du réseau neuronal, donc la rapidité d'adaptation du modèle (compris entre 0 et 1)
|
||||||
// Cette valeur peut évoluer au fur et à mesure des époques (linéaire c'est mieux)
|
// Cette valeur peut évoluer au fur et à mesure des époques (linéaire c'est mieux)
|
||||||
#define LEARNING_RATE 0.5
|
#define LEARNING_RATE 0.1
|
||||||
// Retourne un nombre aléatoire entre 0 et 1
|
// Retourne un nombre aléatoire entre 0 et 1
|
||||||
#define RAND_DOUBLE() ((double)rand())/((double)RAND_MAX)
|
#define RAND_DOUBLE() ((double)rand())/((double)RAND_MAX)
|
||||||
//Coefficient leaking ReLU
|
//Coefficient leaking ReLU
|
||||||
|
Loading…
Reference in New Issue
Block a user