diff --git a/Makefile b/Makefile
index ce8f7b7..a9f8714 100644
--- a/Makefile
+++ b/Makefile
@@ -65,10 +65,10 @@ $(BUILDDIR)/mnist_%.o: $(MNIST_SRCDIR)/%.c $(MNIST_SRCDIR)/include/%.h
 #
 cnn: $(BUILDDIR)/cnn-main;
 
-$(BUILDDIR)/cnn-main: $(CNN_SRCDIR)/main.c $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_convolution.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
+$(BUILDDIR)/cnn-main: $(CNN_SRCDIR)/main.c $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
 	$(CC) $(CFLAGS) $^ -o $@
 
-$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.o $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_cuda_convolution.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
+$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.o $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_cuda_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
 	$(NVCC) $(NVCCFLAGS) $^ -o $@
 
 $(BUILDDIR)/cnn_%.o: $(CNN_SRCDIR)/%.c $(CNN_SRCDIR)/include/%.h
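Note: both link lines gain $(BUILDDIR)/cnn_backpropagation.o, which the existing pattern rule $(BUILDDIR)/cnn_%.o already builds from src/cnn/backpropagation.c; without it, the indice_max/backward calls wired up in src/cnn/train.c below would fail at link time. For reference, these are the entry points that object exports, collected from the hunks that follow. The backward_fully_connected parameter list is truncated in its hunk header ("floa"), so its tail here is inferred from the names used in the function body and may not match the real header exactly:

    #include "include/struct.h"    // Kernel_nn, Kernel_cnn
    #include "include/function.h"  // ptr: pointer to an activation-derivative function

    void rms_backward(float* input, float* input_z, float* output, int size);
    // Tail of this parameter list inferred from the body, not visible in the diff:
    void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, ptr d_function, int is_first);
    void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function);
    void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first);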
diff --git a/src/cnn/backpropagation.c b/src/cnn/backpropagation.c
index 85afc84..de0b4b5 100644
--- a/src/cnn/backpropagation.c
+++ b/src/cnn/backpropagation.c
@@ -1,5 +1,7 @@
 #include <stdio.h>
-#include "backpropagation.h"
+
+#include "include/backpropagation.h"
+#include "include/struct.h"
 
 int min(int a, int b) {
@@ -7,7 +9,7 @@ int min(int a, int b) {
 }
 
 int max(int a, int b) {
-    return a>b?a:b;
+    return a > b ? a : b;
 }
 
 // Uh..... all of this may be wrong because of the source
@@ -15,10 +17,10 @@ void rms_backward(float* input, float* input_z, float* output, int size) {
     /* Input and output have the same size
    We assume the last layer used softmax */
     float sum=0;
-    for (int i=0; i<size; i++) {
+    for (int i=0; i < size; i++) {
@@ -45,22 +47,23 @@ void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, floa
     // Bias
-    for (int i=0; i<size_output; i++) {
-        ker->d_bias[j] = ouput[j];
+    for (int j=0; j < size_output; j++) {
+        ker->d_bias[j] = output[j];
     }
 
     // Weights
-    for (int i=0; i<size_input; i++) {
-        for (int j=0; j<size_output; j++) {
+    for (int i=0; i < size_input; i++) {
+        for (int j=0; j < size_output; j++) {
             ker->d_weights[i][j] = input[i]*output[j];
         }
     }
 
     // Input
-    if (is_first==1) // No need to backpropagate into the input
+    if (is_first==1) {// No need to backpropagate into the input
         return;
+    }
 
-    for (int i=0; i<size_input; i++) {
+    for (int i=0; i < size_input; i++) {
         float tmp=0;
-        for (int j=0; j<size_output; j++) {
+        for (int j=0; j < size_output; j++) {
             tmp += output[j]*ker->weights[i][j];
         }
         input[i] = tmp*derivative_function(input_z[i]);
@@ -77,16 +80,16 @@ void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, floa
 }
 
 void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, ptr d_function) {
     // Bias
-    for (int j=0; j<size_output; j++) {
+    for (int j=0; j < size_output; j++) {
         ker->d_bias[j] += output[j];
     }
 
     // Weights
     int cpt = 0;
-    for (int i=0; i<depth_input; i++) {
-        for (int k=0; k<dim_input; k++) {
-            for (int l=0; l<dim_input; l++) {
-                for (int j=0; j<size_output; j++) {
+    for (int i=0; i < depth_input; i++) {
+        for (int k=0; k < dim_input; k++) {
+            for (int l=0; l < dim_input; l++) {
+                for (int j=0; j < size_output; j++) {
                     ker->d_weights[cpt][j] += input[i][k][l]*output[j];
                     cpt++;
                 }
             }
@@ -96,11 +99,11 @@ void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, fl
 
     // Input
     cpt = 0;
-    for (int i=0; i<depth_input; i++) {
-        for (int k=0; k<dim_input; k++) {
-            for (int l=0; l<dim_input; l++) {
+    for (int i=0; i < depth_input; i++) {
+        for (int k=0; k < dim_input; k++) {
+            for (int l=0; l < dim_input; l++) {
                 float tmp=0;
-                for (int j=0; j<size_output; j++) {
+                for (int j=0; j < size_output; j++) {
                     tmp += output[j]*ker->weights[cpt][j];
                 }
                 input[i][k][l] = tmp*derivative_function(input_z[i][k][l]);
@@ -112,9 +115,9 @@ void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, fl
 
 void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, ptr d_function, int is_first) {
     // Bias
-    for (int i=0; i<depth_output; i++) {
-        for (int j=0; j<dim_output; j++) {
-            for (int k=0; k<dim_output; k++) {
+    for (int i=0; i < depth_output; i++) {
+        for (int j=0; j < dim_output; j++) {
+            for (int k=0; k < dim_output; k++) {
                 ker->d_bias[i][j][k] += output[i][j][k];
             }
         }
@@ -122,18 +125,18 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
 
     // Weights
     int k_size = dim_input - dim_output +1;
-    int var = dim_input - k_size +1
-    for (int h=0; h<depth_output; h++) {
-        for (int i=0; i<depth_input; i++) {
-            for (int j=0; j<k_size; j++) {
-                for (int k=0; k<k_size; k++) {
+    for (int h=0; h < depth_output; h++) {
+        for (int i=0; i < depth_input; i++) {
+            for (int j=0; j < k_size; j++) {
+                for (int k=0; k < k_size; k++) {
                     float tmp = 0;
-                    for (int l=0; l<dim_output; l++) {
-                        for (int m=0; m<dim_output; m++) {
+                    for (int l=0; l < dim_output; l++) {
+                        for (int m=0; m < dim_output; m++) {
                             tmp += input[i][j+l][k+m]*output[h][l][m];
                         }
                     }
-                    ker->d_weights[h][i][j][k] += tmp;
+                    ker->d_w[h][i][j][k] += tmp;
                 }
             }
         }
@@ -143,18 +146,18 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
 
     // Input
     if (is_first==1) // No need to backpropagate into the input
         return;
 
-    for (int i=0; i<dim_input; i++) {
-        for (int j=0; j<dim_input; j++) {
+    for (int i=0; i < dim_input; i++) {
+        for (int j=0; j < dim_input; j++) {
             float tmp = 0;
-            for (int l=0; l<depth_output; l++) {
-                for (int m=0; m<k_size; m++) {
-                    for (int n=0; n<k_size; n++) {
-                        tmp += output[l][i-m][j-n]*ker->weights[i][l][m][n];
+            for (int l=0; l < depth_output; l++) {
+                for (int m=0; m < k_size; m++) {
+                    for (int n=0; n < k_size; n++) {
+                        tmp += output[l][i-m][j-n]*ker->w[i][l][m][n];
                     }
                 }
             }
@@ -168,4 +171,4 @@
 
 // Only last_... have been done, we have to deal with the d_... part
 // It's EASY but it needs to be done
-// The first layer needs to be a convolution or a fully conneted one
\ No newline at end of file
+// The first layer needs to be a convolution or a fully connected one
\ No newline at end of file
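The backward_fully_connected hunks implement the standard chain rule for a dense layer: the bias gradient is the output delta, the weight gradient is the layer input times the delta, and the delta propagated to the input is the weighted sum of output deltas scaled by the activation derivative. A minimal self-contained sketch of that computation, using flat arrays and illustrative names rather than the project's Kernel_nn API:

    // Sketch of a dense-layer backward pass (illustrative, not the project's API).
    // x: layer input, z: pre-activations of the input layer,
    // delta[j] = dL/d(output[j]), weights stored flat as w[i*n_out + j].
    void dense_backward_sketch(int n_in, int n_out,
                               const float* x, const float* z, const float* delta,
                               const float* w, float* d_bias, float* d_w,
                               float* delta_in, float (*fprime)(float)) {
        for (int j = 0; j < n_out; j++) {
            d_bias[j] = delta[j];                   // dL/db_j = delta_j
        }
        for (int i = 0; i < n_in; i++) {
            for (int j = 0; j < n_out; j++) {
                d_w[i*n_out + j] = x[i] * delta[j]; // dL/dW_ij = x_i * delta_j
            }
        }
        for (int i = 0; i < n_in; i++) {
            float tmp = 0;
            for (int j = 0; j < n_out; j++) {
                tmp += delta[j] * w[i*n_out + j];   // (W * delta)_i
            }
            delta_in[i] = tmp * fprime(z[i]);       // chain rule through the activation
        }
    }

This mirrors the three sections of backward_fully_connected above (bias, weights, input), which is also why the first layer can skip the third section via is_first: there is nothing upstream to receive the propagated delta.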
diff --git a/src/cnn/function.c b/src/cnn/function.c
index edd3695..b6ec88b 100644
--- a/src/cnn/function.c
+++ b/src/cnn/function.c
@@ -5,7 +5,7 @@
 
 #include "include/function.h"
 
-float max(float a, float b) {
+float max_float(float a, float b) {
     return a < b ? b:a;
 }
 
@@ -19,7 +19,7 @@ float sigmoid_derivative(float x) {
 }
 
 float relu(float x) {
-    return max(0, x);
+    return max_float(0, x);
 }
 
 float relu_derivative(float x) {
@@ -43,7 +43,7 @@ void apply_softmax_input(float ***input, int depth, int rows, int columns) {
     for (int i=0; i < depth; i++) {
         for (int j=0; j < rows; j++) {
             for (int k=0; k < columns; k++) {
-                m = max(m, input[i][j][k]);
+                m = max_float(m, input[i][j][k]);
             }
         }
     }
diff --git a/src/cnn/include/backpropagation.h b/src/cnn/include/backpropagation.h
index 6deec24..29e7671 100644
--- a/src/cnn/include/backpropagation.h
+++ b/src/cnn/include/backpropagation.h
@@ -1,4 +1,6 @@
 #include "function.h"
+#include "struct.h"
+
 #ifndef DEF_BACKPROPAGATION_H
 #define DEF_BACKPROPAGATION_H
diff --git a/src/cnn/include/function.h b/src/cnn/include/function.h
index 344edb2..86da475 100644
--- a/src/cnn/include/function.h
+++ b/src/cnn/include/function.h
@@ -14,7 +14,7 @@ typedef ptr (*pm)();
 /*
 * Max function for floats
 */
-float max(float a, float b);
+float max_float(float a, float b);
 
 float sigmoid(float x);
diff --git a/src/cnn/include/train.h b/src/cnn/include/train.h
index eba592a..58f7c3a 100644
--- a/src/cnn/include/train.h
+++ b/src/cnn/include/train.h
@@ -24,6 +24,12 @@ typedef struct TrainParameters {
     float accuracy;
 } TrainParameters;
 
+
+/*
+* Returns the index of the maximum element of an array tab of size n
+*/
+int indice_max(float* tab, int n);
+
 /*
 * Training helper function, meant to be run on several threads at once
 */
diff --git a/src/cnn/make.c b/src/cnn/make.c
index 04d624a..855cac7 100644
--- a/src/cnn/make.c
+++ b/src/cnn/make.c
@@ -10,6 +10,7 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
     // output[output_depth][output_dim][output_dim]
     float average;
     int n = size*size;
+
     for (int i=0; i < output_depth; i++) {
         for (int j=0; j < output_dim; j++) {
             for (int k=0; k < output_dim; k++) {
@@ -29,6 +30,7 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
     // input[size_input]
     // output[size_output]
     float f;
+
     for (int i=0; i < size_output; i++) {
         f = kernel->bias[i];
         for (int j=0; j < size_input; j++) {
@@ -41,13 +43,13 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
 void make_dense_linearised(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
     // input[depth_input][dim_input][dim_input]
     // output[size_output]
-    int n = depth_input*dim_input*dim_input;
     float f;
+
-    for (int l=0; l<size_output; l++) {
-        for (int i=0; i<depth_input; i++) {
-            for (int j=0; j<dim_input; j++) {
-                for (int k=0; k<dim_input; k++) {
+    for (int l=0; l < size_output; l++) {
+        for (int i=0; i < depth_input; i++) {
+            for (int j=0; j < dim_input; j++) {
+                for (int k=0; k < dim_input; k++) {
                     f += input[i][j][k]*kernel->weights[k + j*dim_input + i*depth_input][l];
                 }
             }
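make_dense_linearised flattens an input[depth_input][dim_input][dim_input] volume into a vector before the weight lookup. Worth a second look: the flat index in the hunk is k + j*dim_input + i*depth_input, whereas a conventional row-major flattening uses dim_input*dim_input as the channel stride; the weight-creation code and this lookup must agree on a single convention or different (i, j, k) cells will collide on the same weight row. A sketch using the row-major index, with illustrative names:

    // Linearised dense forward pass with a conventional row-major flat
    // index (illustrative sketch, not the project's API; the hunk above
    // uses i*depth_input as the channel stride instead).
    void dense_linearised_sketch(int depth, int dim, int n_out,
                                 float*** in, float** weights,
                                 const float* bias, float* out) {
        for (int l = 0; l < n_out; l++) {
            float f = bias[l];
            for (int i = 0; i < depth; i++) {
                for (int j = 0; j < dim; j++) {
                    for (int k = 0; k < dim; k++) {
                        int flat = k + j*dim + i*dim*dim;  // unique row per cell
                        f += in[i][j][k] * weights[flat][l];
                    }
                }
            }
            out[l] = f;
        }
    }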
diff --git a/src/cnn/train.c b/src/cnn/train.c
index 382486f..a3b0219 100644
--- a/src/cnn/train.c
+++ b/src/cnn/train.c
@@ -17,9 +17,24 @@
 
 #include "include/train.h"
 
+int indice_max(float* tab, int n) {
+    int indice = -1;
+    float maxi = FLT_MIN;
+
+    for (int i=0; i < n; i++) {
+        if (tab[i] > maxi) {
+            maxi = tab[i];
+            indice = i;
+        }
+    }
+    return indice;
+}
+
+
 void* train_thread(void* parameters) {
     TrainParameters* param = (TrainParameters*)parameters;
     Network* network = param->network;
+    int maxi;
 
     int*** images = param->images;
     int* labels = (int*)param->labels;
@@ -37,8 +52,10 @@ void* train_thread(void* parameters) {
             forward_propagation(network);
             backward_propagation(network, labels[i]);
 
-            // TODO get_indice_max(network last layer)
-            // TODO if indice_max == labels[i] then accuracy += 1.
+            maxi = indice_max(network->input[network->size-1][0][0], network->width[network->size-1]);
+            if (maxi == labels[i]) {
+                accuracy += 1.;
+            }
         } else {
             printf_error("Dataset de type JPG non implémenté\n");
             exit(1);
diff --git a/src/cnn/update.c b/src/cnn/update.c
index aab9c8f..0392799 100644
--- a/src/cnn/update.c
+++ b/src/cnn/update.c
@@ -1,5 +1,6 @@
-#include "update.h"
+#include "include/update.h"
+#include "include/struct.h"
 
 void update_weights(Network* network) {
     int n = network->size;
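One caveat on the new indice_max in train.c: FLT_MIN from <float.h> (an include not visible in these hunks, so check that train.c pulls it in) is the smallest positive normalized float, not the most negative representable value. If every entry of tab is negative, for instance raw pre-softmax scores, no element compares greater than FLT_MIN and the function returns -1, so the prediction never matches the label. Seeding from the first element sidesteps both the sentinel and the -1 case; a sketch, assuming n >= 1:

    #include <float.h>  // FLT_MIN is the smallest POSITIVE normalized float

    // Argmax that is safe for all-negative arrays: seed with tab[0]
    // instead of FLT_MIN. Requires n >= 1.
    int argmax_sketch(const float* tab, int n) {
        int indice = 0;
        float maxi = tab[0];

        for (int i = 1; i < n; i++) {
            if (tab[i] > maxi) {
                maxi = tab[i];
                indice = i;
            }
        }
        return indice;
    }

With the network's last layer passed as tab (network->input[network->size-1][0][0], of width network->width[network->size-1], as in the hunk above), this returns the predicted class directly.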