From 7deef7c5c534141261de6aa7a8d7bc0e40b6cf6f Mon Sep 17 00:00:00 2001
From: augustin64
Date: Sat, 21 Jan 2023 18:59:59 +0100
Subject: [PATCH] Add simple_one

---
 src/cnn/creation.c         |  9 +++++++++
 src/cnn/include/creation.h |  5 +++++
 src/cnn/include/train.h    |  2 +-
 src/cnn/train.c            | 18 ++++++++++++------
 4 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/src/cnn/creation.c b/src/cnn/creation.c
index 28af73f..862ffb5 100644
--- a/src/cnn/creation.c
+++ b/src/cnn/creation.c
@@ -47,6 +47,15 @@ Network* create_network_lenet5(float learning_rate, int dropout, int activation,
     return network;
 }
 
+Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth) {
+    Network* network = create_network(3, learning_rate, dropout, initialisation, input_dim, input_depth);
+    network->kernel[0]->activation = activation;
+    network->kernel[0]->linearisation = 0;
+    add_dense_linearisation(network, 80, activation);
+    add_dense(network, 10, SOFTMAX);
+    return network;
+}
+
 void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
     network->input[pos] = (float***)malloc(sizeof(float**)*depth);
     for (int i=0; i < depth; i++) {
diff --git a/src/cnn/include/creation.h b/src/cnn/include/creation.h
index 31d69bd..dda58b5 100644
--- a/src/cnn/include/creation.h
+++ b/src/cnn/include/creation.h
@@ -14,6 +14,11 @@ Network* create_network(int max_size, float learning_rate, int dropout, int init
 */
 Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth);
 
+/*
+* Renvoie un réseau sans convolution, similaire à celui utilisé dans src/mnist
+*/
+Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth);
+
 /*
 * Créé et alloue de la mémoire à une couche de type input cube
 */
diff --git a/src/cnn/include/train.h b/src/cnn/include/train.h
index 37ca0fe..3388403 100644
--- a/src/cnn/include/train.h
+++ b/src/cnn/include/train.h
@@ -7,7 +7,7 @@
 #define EPOCHS 10
 #define BATCHES 500
 #define USE_MULTITHREADING
-#define LEARNING_RATE 0.01
+#define LEARNING_RATE 0.5
 
 
 /*
diff --git a/src/cnn/train.c b/src/cnn/train.c
index ccc65c1..6a8e460 100644
--- a/src/cnn/train.c
+++ b/src/cnn/train.c
@@ -50,6 +50,11 @@ void* train_thread(void* parameters) {
         write_image_in_network_32(images[index[i]], height, width, network->input[0][0]);
         forward_propagation(network);
         maxi = indice_max(network->input[network->size-1][0][0], 10);
+        if (maxi == -1) {
+            printf("\n");
+            printf_error("Le réseau sature.\n");
+            exit(1);
+        }
         wanted_output = generate_wanted_output(labels[index[i]], 10);
 
         loss += compute_mean_squared_error(network->input[network->size-1][0][0], wanted_output, 10);
@@ -134,7 +139,8 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
 
     // Initialisation du réseau
     if (!recover) {
-        network = create_network_lenet5(LEARNING_RATE, 0, TANH, GLOROT, input_dim, input_depth);
+        network = create_network_lenet5(LEARNING_RATE, 0, RELU, GLOROT, input_dim, input_depth);
+        //network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_dim, input_depth);
     } else {
         network = read_network(recover);
         network->learning_rate = LEARNING_RATE;
@@ -272,7 +278,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
                 }
             }
             current_accuracy = accuracy * nb_images_total/((j+1)*BATCHES);
-            printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "YELLOW"%0.2f%%"RESET" ", nb_threads, i, epochs, BATCHES*(j+1), nb_images_total, current_accuracy*100);
+            printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "YELLOW"%0.2f%%"RESET" \tRéussies: %d", nb_threads, i, epochs, BATCHES*(j+1), nb_images_total, current_accuracy*100, (int)(accuracy*nb_images_total));
             fflush(stdout);
         #else
         (void)nb_images_total_remaining; // Juste pour enlever un warning
@@ -294,19 +300,19 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
             update_weights(network, network);
             update_bias(network, network);
 
-            printf("\rÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "YELLOW"%0.4f%%"RESET" ", i, epochs, BATCHES*(j+1), nb_images_total, current_accuracy*100);
+            printf("\rÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "YELLOW"%0.4f%%"RESET" \tRéussies: %d", i, epochs, BATCHES*(j+1), nb_images_total, current_accuracy*100, (int)(accuracy*nb_images_total));
             fflush(stdout);
         #endif
             // Il serait intéressant d'utiliser la perte calculée pour
             // savoir l'avancement dans l'apprentissage et donc comment adapter le taux d'apprentissage
-            //network->learning_rate = 0.01*batch_loss;
+            network->learning_rate = 10*LEARNING_RATE*batch_loss;
         }
         end_time = omp_get_wtime();
         elapsed_time = end_time - start_time;
         #ifdef USE_MULTITHREADING
-        printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "GREEN"%0.4f%%"RESET"\tTemps: %0.2f s\n", nb_threads, i, epochs, nb_images_total, nb_images_total, accuracy*100, elapsed_time);
+        printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "GREEN"%0.4f%%"RESET" \tRéussies: %d\tTemps: %0.2f s\n", nb_threads, i, epochs, nb_images_total, nb_images_total, accuracy*100, (int)(accuracy*nb_images_total), elapsed_time);
         #else
-        printf("\rÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "GREEN"%0.4f%%"RESET"\tTemps: %0.2f s\n", i, epochs, nb_images_total, nb_images_total, accuracy*100, elapsed_time);
+        printf("\rÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: "GREEN"%0.4f%%"RESET" \tRéussies: %d\tTemps: %0.2f s\n", i, epochs, nb_images_total, nb_images_total, accuracy*100, (int)(accuracy*nb_images_total), elapsed_time);
         #endif
 
         write_network(out, network);
     }
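
Usage note: a minimal sketch, not applied by this patch, of how the new create_simple_one constructor could replace create_network_lenet5 in the training setup. It mirrors the commented-out call added in src/cnn/train.c; the include paths, the wrapper name build_training_network, and the availability of LEARNING_RATE, RELU and GLOROT through these headers are assumptions, not verified against the full sources.

/* Hypothetical wrapper: builds the convolution-free network described by
 * create_simple_one instead of the LeNet-5 variant. Argument order follows
 * the signature added in src/cnn/creation.c: learning rate, dropout,
 * activation, initialisation, input dimension, input depth. */
#include "include/creation.h"   // assumed path for create_simple_one
#include "include/train.h"      // assumed path for LEARNING_RATE

Network* build_training_network(int input_dim, int input_depth) {
    // No dropout (0), ReLU activation, Glorot initialisation, as in the
    // commented-out line of src/cnn/train.c.
    return create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_dim, input_depth);
}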