Update backpropagation.c

augustin64 2022-11-03 18:13:01 +01:00
parent 9d03611744
commit 4637d62e73
9 changed files with 86 additions and 55 deletions

Makefile

@@ -65,10 +65,10 @@ $(BUILDDIR)/mnist_%.o: $(MNIST_SRCDIR)/%.c $(MNIST_SRCDIR)/include/%.h
#
cnn: $(BUILDDIR)/cnn-main;
-$(BUILDDIR)/cnn-main: $(CNN_SRCDIR)/main.c $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_convolution.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
+$(BUILDDIR)/cnn-main: $(CNN_SRCDIR)/main.c $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
$(CC) $(CFLAGS) $^ -o $@
-$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.o $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_cuda_convolution.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
+$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.o $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_cuda_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
$(NVCC) $(NVCCFLAGS) $^ -o $@
$(BUILDDIR)/cnn_%.o: $(CNN_SRCDIR)/%.c $(CNN_SRCDIR)/include/%.h

backpropagation.c

@@ -1,5 +1,7 @@
#include <math.h>
-#include "backpropagation.h"
+#include "include/backpropagation.h"
+#include "include/struct.h"
int min(int a, int b) {
@@ -52,7 +54,7 @@ void backward_2d_pooling(float*** input, float*** output, int input_width, int o
void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, ptr d_function, int is_first) {
// Bias
for (int j=0; j < size_output; j++) {
-ker->d_bias[j] = ouput[j];
+ker->d_bias[j] = output[j];
}
// Weights
@@ -63,8 +65,9 @@ void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z, floa
}
// Input
-if (is_first==1) // No need to backpropagate into the input
+if (is_first==1) { // No need to backpropagate into the input
return;
+}
for (int i=0; i < size_input; i++) {
float tmp=0;
@@ -86,7 +89,7 @@ void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, fl
for (int i=0; i < depth_input; i++) {
for (int k=0; k < dim_input; k++) {
for (int l=0; l < dim_input; l++) {
-for (int j=0; j<size_output) {
+for (int j=0; j < size_output; j++) {
ker->d_weights[cpt][j] += input[i][k][l]*output[j];
cpt++;
}
@@ -122,7 +125,7 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
// Weights
int k_size = dim_input - dim_output +1;
-int var = dim_input - k_size +1
+int var = dim_input - k_size +1;
for (int h=0; h < depth_input; h++) {
for (int i=0; i < depth_output; i++) {
for (int j=0; j < k_size; j++) {
@@ -133,7 +136,7 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
tmp += input[h][l+j][m+k]*output[i][l][m];
}
}
-ker->d_weights[h][i][j][k] += tmp;
+ker->d_w[h][i][j][k] += tmp;
}
}
}
@@ -150,11 +153,11 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
for (int l=0; l < depth_output; l++) {
int min_m = k_size - max(k_size, dim_input-i);
int max_m = min(k_size, i+1);
-int min_n = k_size - max(k_size, dim_input-j;
+int min_n = k_size - max(k_size, dim_input-j);
int max_n = min(k_size, j+1);
for (int m=min_m; m < max_m; m++) {
for (int n=min_n; n < max_n; n++) {
-tmp += output[l][i-m][j-n]*ker->weights[i][l][m][n];
+tmp += output[l][i-m][j-n]*ker->w[i][l][m][n];
}
}
}
@@ -168,4 +171,4 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
// Only last_... have been done, we have to deal with the d_... part
// It's EASY but it needs to be done
-// The first layer needs to be a convolution or a fully conneted one
+// The first layer needs to be a convolution or a fully connected one
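For context on the hunks above: the weight gradient of a valid cross-correlation accumulates input times output-gradient products over the whole output window. A minimal standalone sketch under the same size relation (k_size = dim_input - dim_output + 1); the flat arrays and the function name are illustrative, not the project's Kernel_cnn API:

    /* Gradient of a valid cross-correlation with respect to its weights:
     * d_w[j][k] accumulates input[l+j][m+k] * d_output[l][m] over the
     * output window, mirroring the loop fixed in the hunk above. */
    void backward_conv_weights(const float *input, const float *d_output,
                               float *d_w, int dim_input, int k_size) {
        int dim_output = dim_input - k_size + 1; /* valid-mode output size */
        for (int j = 0; j < k_size; j++) {
            for (int k = 0; k < k_size; k++) {
                float tmp = 0;
                for (int l = 0; l < dim_output; l++) {
                    for (int m = 0; m < dim_output; m++) {
                        tmp += input[(l + j) * dim_input + (m + k)]
                             * d_output[l * dim_output + m];
                    }
                }
                d_w[j * k_size + k] += tmp; /* += : gradients accumulate */
            }
        }
    }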

function.c

@@ -5,7 +5,7 @@
#include "include/function.h"
-float max(float a, float b) {
+float max_float(float a, float b) {
return a < b ? b:a;
}
@@ -19,7 +19,7 @@ float sigmoid_derivative(float x) {
}
float relu(float x) {
-return max(0, x);
+return max_float(0, x);
}
float relu_derivative(float x) {
@@ -43,7 +43,7 @@ void apply_softmax_input(float ***input, int depth, int rows, int columns) {
for (int i=0; i < depth; i++) {
for (int j=0; j < rows; j++) {
for (int k=0; k < columns; k++) {
-m = max(m, input[i][j][k]);
+m = max_float(m, input[i][j][k]);
}
}
}
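The max -> max_float rename is presumably needed because C has no overloading: backpropagation.c, now linked into the same binaries, defines an integer min (and likely a matching max), and a second external symbol named max with a different signature would collide at link time. A contrived standalone sketch of the naming scheme; max_int is hypothetical, only min and max_float appear in the diff:

    #include <stdio.h>

    /* Two distinctly named helpers coexist fine; two external functions
     * both named `max` (one int, one float) would be a duplicate-symbol
     * error at link time, since C has no function overloading. */
    int max_int(int a, int b) { return a > b ? a : b; }
    float max_float(float a, float b) { return a < b ? b : a; }

    int main(void) {
        printf("%d %.1f\n", max_int(2, 3), max_float(2.0f, 3.0f));
        return 0;
    }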

include/backpropagation.h

@@ -1,4 +1,6 @@
+#include "function.h"
+#include "struct.h"
#ifndef DEF_BACKPROPAGATION_H
#define DEF_BACKPROPAGATION_H
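The two added includes make the header self-contained, so a .c file can include it first without having to pull in function.h and struct.h itself. A sketch of the resulting shape; the declaration is copied from backpropagation.c above, and the comments about what each header provides are assumptions:

    #include "function.h" /* assumed to define the `ptr` function-pointer type */
    #include "struct.h"   /* assumed to define Kernel_nn and Kernel_cnn */

    #ifndef DEF_BACKPROPAGATION_H
    #define DEF_BACKPROPAGATION_H

    void backward_fully_connected(Kernel_nn* ker, float* input, float* input_z,
                                  float* output, int size_input, int size_output,
                                  ptr d_function, int is_first);

    #endif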

include/function.h

@@ -14,7 +14,7 @@ typedef ptr (*pm)();
/*
* Max function for floats
*/
-float max(float a, float b);
+float max_float(float a, float b);
float sigmoid(float x);

include/train.h

@@ -24,6 +24,12 @@ typedef struct TrainParameters {
float accuracy;
} TrainParameters;
+/*
+* Returns the index of the maximum element of an array tab of size n
+*/
+int indice_max(float* tab, int n);
/*
* Auxiliary training function intended to be run on several threads at once
*/

make.c

@@ -10,6 +10,7 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
// output[output_depth][output_dim][output_dim]
float average;
int n = size*size;
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_dim; j++) {
for (int k=0; k < output_dim; k++) {
@@ -29,6 +30,7 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
// input[size_input]
// output[size_output]
float f;
for (int i=0; i < size_output; i++) {
f = kernel->bias[i];
for (int j=0; j < size_input; j++) {
@@ -41,8 +43,8 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
void make_dense_linearised(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
// input[depth_input][dim_input][dim_input]
// output[size_output]
-int n = depth_input*dim_input*dim_input;
float f;
for (int l=0; l < size_output; l++) {
f = 0;
for (int i=0; i < depth_input; i++) {
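For reference, the computation make_dense performs, as a standalone sketch; plain arrays replace the Kernel_nn struct, and the weights[input][output] layout is an assumption inferred from the d_weights[cpt][j] indexing in backpropagation.c:

    /* Dense forward pass: output[i] = bias[i] + sum over j of
     * weights[j][i] * input[j]; the activation is applied elsewhere. */
    void dense_forward(const float *input, float *output,
                       const float *bias, float **weights,
                       int size_input, int size_output) {
        for (int i = 0; i < size_output; i++) {
            float f = bias[i];
            for (int j = 0; j < size_input; j++) {
                f += weights[j][i] * input[j];
            }
            output[i] = f;
        }
    }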

train.c

@@ -17,9 +17,24 @@
#include "include/train.h"
+int indice_max(float* tab, int n) {
+int indice = -1;
+float maxi = FLT_MIN;
+for (int i=0; i < n; i++) {
+if (tab[i] > maxi) {
+maxi = tab[i];
+indice = i;
+}
+}
+return indice;
+}
void* train_thread(void* parameters) {
TrainParameters* param = (TrainParameters*)parameters;
Network* network = param->network;
+int maxi;
int*** images = param->images;
int* labels = (int*)param->labels;
@@ -37,8 +52,10 @@ void* train_thread(void* parameters) {
forward_propagation(network);
backward_propagation(network, labels[i]);
-// TODO get_indice_max(network last layer)
-// TODO if indice_max == labels[i] then accuracy += 1.
+maxi = indice_max(network->input[network->size-1][0][0], network->width[network->size-1]);
+if (maxi == labels[i]) {
+accuracy += 1.;
+}
} else {
printf_error("JPG dataset type not implemented\n");
exit(1);
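One caveat on the new indice_max: FLT_MIN from <float.h> is the smallest positive normalized float, not the most negative value, so the seed works only when at least one entry exceeds it (true for softmax outputs, which sum to 1) and otherwise returns -1. A more defensive variant, as a sketch:

    /* Seeds the search from the first element instead of FLT_MIN, so
     * arrays whose values are all zero or negative are handled too. */
    int indice_max_safe(float* tab, int n) {
        if (n <= 0) return -1; /* empty array: no valid index */
        int indice = 0;
        for (int i = 1; i < n; i++) {
            if (tab[i] > tab[indice]) {
                indice = i;
            }
        }
        return indice;
    }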

update.c

@@ -1,5 +1,6 @@
-#include "update.h"
+#include "include/update.h"
+#include "include/struct.h"
void update_weights(Network* network) {
int n = network->size;
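The update.c hunk is cut off here, but given the d_bias and d_w accumulators filled in backpropagation.c, a plain gradient-descent step is the natural counterpart. A sketch under that assumption; the function name, learning_rate parameter, and array layout are all illustrative, not the project's API:

    /* Hypothetical dense-layer update: move each weight against its
     * accumulated gradient, then clear the accumulator for the next pass. */
    void update_dense_weights(float **weights, float **d_weights,
                              int size_input, int size_output,
                              float learning_rate) {
        for (int j = 0; j < size_input; j++) {
            for (int i = 0; i < size_output; i++) {
                weights[j][i] -= learning_rate * d_weights[j][i];
                d_weights[j][i] = 0;
            }
        }
    }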