Mirror of https://github.com/augustin64/projet-tipe

Commit e4ec06705b (parent 7fff1652c5): Update train.c
@@ -19,14 +19,19 @@ int will_be_drop(int dropout_prob) {
 }
 
 void write_image_in_network_32(int** image, int height, int width, float** input) {
-    for (int i=0; i < height+2*PADDING_INPUT; i++) {
-        for (int j=0; j < width+2*PADDING_INPUT; j++) {
-            if (i < PADDING_INPUT || i >= height+PADDING_INPUT || j < PADDING_INPUT || j >= width+PADDING_INPUT) {
-                input[i][j] = 0.;
-            }
-            else {
-                input[i][j] = (float)image[i][j] / 255.0f;
-            }
+    int padding = (32 - height)/2;
+    for (int i=0; i < padding; i++) {
+        for (int j=0; j < 32; j++) {
+            input[i][j] = 0.;
+            input[31-i][j] = 0.;
+            input[j][i] = 0.;
+            input[j][31-i] = 0.;
+        }
+    }
+
+    for (int i=0; i < width; i++) {
+        for (int j=0; j < height; j++) {
+            input[i+2][j+2] = (float)image[i][j] / 255.0f;
         }
     }
 }
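Note on the new version: it assumes a fixed 32x32 network input, zeroes a border of width padding = (32 - height)/2 on all four sides, then copies the image scaled to [0, 1] into the centre (for 28x28 MNIST digits, padding is 2, which matches the hard-coded +2 offset). A minimal usage sketch follows; the allocation helper is hypothetical and not part of the commit.

    #include <stdlib.h>

    /* Prototype of the function modified above. */
    void write_image_in_network_32(int** image, int height, int width, float** input);

    /* Hypothetical helper: allocate the 32x32 input plane and fill it from a
     * 28x28 MNIST digit.  Pixel (i, j) of the image lands at input[i+2][j+2],
     * scaled by 1/255; the 2-pixel border is set to 0 by the call. */
    float** image_to_network_input(int** image) {
        float** input = malloc(32 * sizeof(float*));
        for (int i = 0; i < 32; i++) {
            input[i] = calloc(32, sizeof(float));
        }
        write_image_in_network_32(image, 28, 28, input);
        return input;
    }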
@@ -49,16 +54,13 @@ void forward_propagation(Network* network) {
         activation = k_i->activation;
 
         if (k_i->cnn!=NULL) { // Convolution
-            printf("\n(%d)-Convolution of cnn: %dx%dx%d -> %dx%dx%d\n", i, input_depth, input_width, input_width, output_depth, output_width, output_width);
             make_convolution(k_i->cnn, input, output, output_width);
             choose_apply_function_matrix(activation, output, output_depth, output_width);
         }
         else if (k_i->nn!=NULL) { // Full connection
             if (input_depth==1) { // Vecteur -> Vecteur
-                printf("\n(%d)-Densification of nn: %dx%dx%d -> %dx%dx%d\n", i, 1, 1, input_width, 1, 1, output_width);
                 make_dense(k_i->nn, input[0][0], output[0][0], input_width, output_width);
             } else { // Matrice -> vecteur
-                printf("\n(%d)-Densification linearised of nn: %dx%dx%d -> %dx%dx%d\n", i, input_depth, input_width, input_width, 1, 1, output_width);
                 make_dense_linearised(k_i->nn, input, output[0][0], input_depth, input_width, output_width);
             }
             choose_apply_function_vector(activation, output, output_width);
@@ -68,7 +70,6 @@ void forward_propagation(Network* network) {
                 printf("Le réseau ne peut pas finir par une pooling layer\n");
                 return;
             } else { // Pooling sur une matrice
-                printf("\n(%d)-Average pooling: %dx%dx%d -> %dx%dx%d\n", i, input_depth, input_width, input_width, output_depth, output_width, output_width);
                 make_average_pooling(input, output, activation/100, output_depth, output_width);
             }
         }
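For reference, the dispatch that survives these hunks: a layer whose cnn pointer is set runs a convolution, one whose nn pointer is set runs a dense layer (flattened first when the input has depth > 1), and anything else is average pooling, whose window size is passed as activation/100. A minimal sketch of that decision, using a simplified stand-in struct rather than the real Kernel type:

    #include <stddef.h>

    /* Simplified stand-in for the layer descriptor used by forward_propagation;
     * only the fields the dispatch relies on are kept. */
    typedef struct {
        void* cnn;       /* non-NULL: convolution layer                  */
        void* nn;        /* non-NULL: fully connected layer              */
        int activation;  /* for pooling, window size is activation / 100 */
    } KernelSketch;

    const char* layer_kind(const KernelSketch* k) {
        if (k->cnn != NULL) return "convolution";
        if (k->nn != NULL)  return "fully connected";
        return "average pooling";
    }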
@@ -6,7 +6,6 @@
 void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim) {
     float f;
     int n = kernel->k_size;
-    printf("max_input %dx%dx%d: %d \n", kernel->rows, n+output_dim -1, output_dim+n -1, n);
     for (int i=0; i < kernel->columns; i++) {
         for (int j=0; j < output_dim; j++) {
             for (int k=0; k < output_dim; k++) {
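The removed printf documented the shapes involved: kernel->rows input channels of side n + output_dim - 1, reduced to output_dim output points per side by an n x n kernel, i.e. a "valid" convolution. The loop body is not part of this hunk, so the following is only a sketch of the usual computation for one output point, with hypothetical parameter names:

    /* Sketch only (hypothetical names, not the actual body of make_convolution):
     * one output value of a valid convolution over `rows` input channels. */
    float convolve_point(int n, int rows, float bias,
                         float*** w, float*** input, int j, int k) {
        float f = bias;
        for (int a = 0; a < rows; a++) {        // input channel
            for (int b = 0; b < n; b++) {       // kernel row
                for (int c = 0; c < n; c++) {   // kernel column
                    f += w[a][b][c] * input[a][j + b][k + c];
                }
            }
        }
        return f;
    }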
@@ -25,7 +24,6 @@ void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int o
 }
 
 void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim) {
-    printf("%d -> %d \n", output_dim*size, output_dim);
     float average;
     int n = size*size;
     for (int i=0; i < output_depth; i++) {
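The removed printf stated the shape reduction: each side of the input, output_dim*size, shrinks by a factor of size, and n = size*size is the number of pixels averaged per output cell. A minimal sketch of one pooled value under that reading:

    /* Sketch only: average of one size x size window of a single channel.
     * Output cell (j, k) averages the input block starting at (size*j, size*k). */
    float pool_point(float** channel, int size, int j, int k) {
        float average = 0;
        int n = size * size;
        for (int a = 0; a < size; a++) {
            for (int b = 0; b < size; b++) {
                average += channel[size*j + a][size*k + b];
            }
        }
        return average / n;
    }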
@@ -56,7 +54,6 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
 
 void make_dense_linearised(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
     int n = depth_input*dim_input*dim_input;
-    printf("%dx%dx%d (%d) -> %d\n",depth_input, dim_input, dim_input, n, size_output);
     float f;
     for (int l=0; l<size_output; l++) {
         f = 0;
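make_dense_linearised flattens a depth_input x dim_input x dim_input block into a vector of n = depth_input*dim_input*dim_input values before the usual dense product; the accumulator f is reset for each of the size_output neurons. The weights are not shown in this hunk, so the sketch below uses hypothetical names for them:

    /* Sketch only: one output neuron of a dense layer over a flattened
     * depth x dim x dim input (n = depth*dim*dim values in total).
     * bias and weights are hypothetical names for the kernel parameters. */
    float dense_linearised_point(float*** input, int depth_input, int dim_input,
                                 float bias, float* weights) {
        float f = bias;
        int idx = 0;
        for (int a = 0; a < depth_input; a++) {
            for (int b = 0; b < dim_input; b++) {
                for (int c = 0; c < dim_input; c++) {
                    f += weights[idx++] * input[a][b][c];
                }
            }
        }
        return f;
    }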
@@ -7,6 +7,8 @@
 #include "../mnist/mnist.c"
 #include "../colors.h"
 #include "neuron_io.c"
+#include "utils.c"
+#include "free.c"
 #include "cnn.c"
 
 #include "include/train.h"
@@ -28,8 +30,8 @@ void* train_thread(void* parameters) {
 
     for (int i=start; i < start+nb_images; i++) {
         if (dataset_type == 0) {
-            // TODO write_image_in_network_32(images[i], height, width, network_input);
-            //forward_propagation(network);
+            write_image_in_network_32(images[i], height, width, network->input[0][0]);
+            forward_propagation(network);
             //backward_propagation(network, labels[i]);
 
             // TODO get_indice_max(network last layer)
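With this hunk a worker thread runs the real forward pipeline for every image of its slice: the digit is written into the network input plane, then propagated forward; the backward pass and the prediction (get_indice_max on the last layer) remain commented out. A sketch of the intended per-image step; get_indice_max is only a planned helper here, as in the TODO:

    /* Sketch of the intended per-image step.  The backward pass and the
     * prediction are still TODO in this commit; get_indice_max is a
     * hypothetical helper returning the argmax of the last layer. */
    void train_one_image(Network* network, int** image, int label,
                         int height, int width, int* correct) {
        (void)label; (void)correct;  // used once the TODOs below land
        write_image_in_network_32(image, height, width, network->input[0][0]);
        forward_propagation(network);
        // backward_propagation(network, label);
        // if (get_indice_max(network) == label) (*correct)++;
    }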
@@ -118,10 +120,14 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
     if (dataset_type == 0) {
         train_params->images = images;
         train_params->labels = labels;
+        train_params->width = 28;
+        train_params->height = 28;
         train_params->data_dir = NULL;
     } else {
         train_params->data_dir = data_dir;
         train_params->images = NULL;
+        train_params->width = 0;
+        train_params->height = 0;
         train_params->labels = NULL;
     }
     train_params->nb_images = BATCHES;
@@ -145,17 +151,16 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
             } else {
                 nb_remaining_images -= BATCHES / nb_threads;
             }
-            // TODO train_parameters[k]->network = copy_network(network);
+            train_parameters[k]->network = copy_network(network);
             train_parameters[k]->start = BATCHES*j + (nb_images_total/BATCHES)*k;
             pthread_create( &tid[j], NULL, train_thread, (void*) train_parameters[k]);
         }
         for (int k=0; k < nb_threads; k++) {
-            // TODO joindre les threads et afficher la progression
             // On attend la terminaison de chaque thread un à un
             pthread_join( tid[j], NULL );
             accuracy += train_parameters[k]->accuracy / (float) nb_images_total;
             // TODO patch_network(network, train_parameters[k]->network, train_parameters[k]->nb_images);
-            // TODO free_network(train_parameters[k]->network);
+            free_network(train_parameters[k]->network);
         }
         printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: %0.1f%%", nb_threads, i, epochs, BATCHES*(j+1), nb_images_total, accuracy*100);
 #else
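Each worker now trains on its own copy of the network (copy_network) and that copy is freed after the join; merging the copies back into the shared network (patch_network) is still a TODO. The accuracy accumulation reads as if the per-thread accuracy field were a count of correctly classified images, since each thread's value is divided by nb_images_total before being summed. A worked example with hypothetical numbers:

    #include <stdio.h>

    /* Hypothetical numbers: two threads report 48 and 52 correct images out of
     * nb_images_total = 200, so the accumulated accuracy is 0.5 (printed 50.0%). */
    int main(void) {
        int thread_correct[2] = {48, 52};
        int nb_images_total = 200;
        float accuracy = 0;
        for (int k = 0; k < 2; k++) {
            accuracy += thread_correct[k] / (float) nb_images_total;
        }
        printf("Accuracy: %0.1f%%\n", accuracy * 100);
        return 0;
    }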
@@ -172,7 +177,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
 #endif
         write_network(out, network);
     }
-    // TODO free_network(network)
+    free_network(network);
 #ifdef USE_MULTITHREADING
     free(tid);
 #else