Compare commits

...

18 Commits

Author SHA1 Message Date
588aec2fb8  Merge pull request #3 from augustin64/macos  2023-07-12 14:19:57 +02:00
02f6aad718  Merge branch 'julienChemillier:macos' into macos  2023-07-12 14:05:55 +02:00
881f81e0e9  Merge commits from 'origin/main'  2023-07-12 14:00:58 +02:00
julienChemillier  a6ed449e1c  Merge pull request #6 from julienChemillier/Finetuning_deepening  2023-07-12 13:50:38 +02:00
    Finetuning deepening
julienChemillier  e661a4178d  Fix some MacOS compatibility issues  2023-06-26 18:09:21 +02:00
6c7112b9b5  remove openmp dependency  2023-06-26 16:45:06 +02:00
    time may not be accurate (now using processor time instead of real time)
5c712c0120  Remove unnecessary includes  2023-06-26 16:39:25 +02:00
julienChemillier  6f0b360dc9  Remove Adam optimize options by default  2023-05-31 11:15:46 +02:00
julienChemillier  7e4f9167fa  Fix free issue  2023-05-31 08:52:14 +02:00
julienChemillier  3d0af7da2a  Merge branch 'Finetuning_deepening' of https://github.com/julienChemillier/TIPE into Finetuning_deepening  2023-05-30 16:02:50 +02:00
julienChemillier  ba05923c78  Remove an useless allocation of memory  2023-05-30 13:52:13 +02:00
7e90b15671  Merge remote-tracking branch 'upstream/main' into Finetuning_deepening  2023-05-28 09:30:52 +02:00
julienChemillier  d916c6c86b  Rectification of a d_network leak  2023-05-28 09:12:52 +02:00
julienChemillier  d7d5a7ae6e  Reduce arguments in functions in 'free.c'  2023-05-28 09:07:30 +02:00
julienChemillier  4ad116511e  Fix an argument error in 'backpropagation' file  2023-05-27 21:09:04 +02:00
julienChemillier  84e552105a  Modification in the structures  2023-05-27 20:22:29 +02:00
julienChemillier  208b121c73  Add a new classe: 'D_Network'  2023-05-26 22:16:26 +02:00
julienChemillier  fade0aa28d  Add 'finetuning' variable to the Network class  2023-05-26 20:46:58 +02:00
30 changed files with 662 additions and 586 deletions

View File

@@ -1,3 +1,4 @@
+OS := $(shell uname)
 BUILDDIR := ./build
 SRCDIR := ./src
 CACHE_DIR := ./.cache
@@ -27,8 +28,8 @@ TESTS_SRC_CU += $(wildcard $(TEST_SRCDIR)/*.cu)
 TESTS_OBJ = $(TESTS_SRC:$(TEST_SRCDIR)/%.c=$(BUILDDIR)/$(TEST_SRCDIR)-%) $(TESTS_SRC_CU:$(TEST_SRCDIR)/%.cu=$(BUILDDIR)/$(TEST_SRCDIR)-%)
 
 # Linker only flags
-LD_CFLAGS = -lm -lpthread -ljpeg -fopenmp
-LD_NVCCFLAGS = -ljpeg -Xcompiler -fopenmp
+LD_CFLAGS = -lm -lpthread -ljpeg
+LD_NVCCFLAGS = -ljpeg
 
 # Compilation flag
 CFLAGS = -Wall -Wextra -std=gnu99 -g -O3
@@ -41,6 +42,13 @@ NVCCFLAGS = -g
 # -fsanitize=address -lasan
 #! WARNING: test/cnn-neuron_io fails with this option enabled
 
+# Specify library path of libjpeg on MacOS
+ifeq ($(OS),Darwin)
+LD_CFLAGS += -I/opt/homebrew/Cellar/jpeg/9e/include/ -L/opt/homebrew/Cellar/jpeg/9e/lib/
+LD_NVCCFLAGS += -L/opt/homebrew/Cellar/jpeg/9e/lib/
+CFLAGS += -I/opt/homebrew/Cellar/jpeg/9e/include/
+endif
+
 all: dense cnn;
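Commit 6c7112b9b5 above removes `-fopenmp` and its message warns that "time may not be accurate (now using processor time instead of real time)". A minimal standalone C illustration of that distinction (not code from this repository): `clock()` counts CPU time summed over all threads, which multithreading inflates relative to elapsed wall-clock time measured by `CLOCK_MONOTONIC`.

```c
#include <stdio.h>
#include <time.h>

int main(void) {
    struct timespec t0, t1;
    clock_t c0 = clock();                     // processor time
    clock_gettime(CLOCK_MONOTONIC, &t0);      // wall-clock time

    volatile double x = 0.0;
    for (long i = 0; i < 100000000L; i++) x += 1e-9;  // busy work

    clock_gettime(CLOCK_MONOTONIC, &t1);
    clock_t c1 = clock();

    printf("cpu time:  %.3f s\n", (double)(c1 - c0) / CLOCKS_PER_SEC);
    printf("wall time: %.3f s\n",
           (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9);
    return 0;
}
```

Single-threaded, the two values roughly coincide; with OpenMP they diverge, which is the inaccuracy the commit message flags.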

View File

@@ -178,17 +178,24 @@ Results with VGG16, for images of 256x256 pixels (only a smaller
 On the cloud with Google Colab: good GPU but poor CPU: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1LfwSrQRaoC91yC9mx9BKHzuc7odev5r6?usp=sharing)
 
-The following distributions have been tried; the code will most likely need to be modified to run it under Windows/MacOS:
+## Dependencies
+- `cuda`: to use the graphics card (NVIDIA only)
+- `libjpeg-dev`: not necessarily installed by default
+- GNU `make`: installed by default on most Linux distributions and on MacOS
+- `gcc`: installed by default on most Linux distributions and on MacOS
+
+### Linux
+The following distributions have been tried; `libjpeg` will sometimes need to be installed
 - Arch
 - Fedora
 - Manjaro
-- Ubuntu
-
-## Dependencies
-- `cuda`: to use the graphics card (NVIDIA only)
-- `libjpeg-dev`: not installed by default, on Ubuntu in particular
-- GNU `make`: installed by default on most distributions
-- `gcc`: installed by default on most distributions
+- Ubuntu: `apt install libjpeg-dev`
+
+### MacOS
+With [Homebrew](https://brew.sh/):
+```bash
+brew install libjpeg
+```
 
 ## Compilation
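A quick way to confirm that libjpeg is visible with the flags the Makefile now sets is a tiny compile-and-link check. This is an illustration, not a file from the repository; the Homebrew paths are the ones added to the Makefile above.

```c
#include <stdio.h>    /* libjpeg's header expects FILE to be declared first */
#include <jpeglib.h>

/* Build it with the same flags the project uses, e.g.
 *   gcc check_jpeg.c -ljpeg                                   (Linux)
 *   gcc check_jpeg.c -I/opt/homebrew/Cellar/jpeg/9e/include/ \
 *       -L/opt/homebrew/Cellar/jpeg/9e/lib/ -ljpeg            (MacOS) */
int main(void) {
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    cinfo.err = jpeg_std_error(&jerr);   // default error handler
    jpeg_create_compress(&cinfo);        // fails to link if libjpeg is missing
    printf("libjpeg version: %d\n", JPEG_LIB_VERSION);
    jpeg_destroy_compress(&cinfo);
    return 0;
}
```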

View File

@@ -316,12 +316,12 @@ __global__ void backward_dense_kernel_2(float** weights, float* input, float* in
     input[idx] = tmp*( (*d_f)(input_z[idx]) );
 }
 
-void backward_dense_device(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
+void backward_dense_device(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
     // Make computation
     dim3 gridSize1(i_div_up(size_input, BLOCKSIZE_x), i_div_up(size_output, BLOCKSIZE_y));
     dim3 blockSize1(BLOCKSIZE_x, BLOCKSIZE_y);
-    backward_dense_kernel_1<<<gridSize1, blockSize1>>>(ker->d_weights, ker->d_bias, input, output, size_input, size_output);
+    backward_dense_kernel_1<<<gridSize1, blockSize1>>>(d_ker->d_weights, d_ker->d_bias, input, output, size_input, size_output);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -341,18 +341,18 @@ void backward_dense_device(Kernel_nn* ker, float* input, float* input_z, float*
 }
 #endif
 
-void backward_dense_cpu(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
+void backward_dense_cpu(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
     funcPtr d_function = get_activation_function(activation);
     // Bias
     for (int j=0; j < size_output; j++) {
-        ker->d_bias[j] += output[j];
+        d_ker->d_bias[j] += output[j];
     }
     // Weights
     for (int i=0; i < size_input; i++) {
         for (int j=0; j < size_output; j++) {
-            ker->d_weights[i][j] += input[i]*output[j];
+            d_ker->d_weights[i][j] += input[i]*output[j];
         }
     }
@@ -373,11 +373,11 @@ void backward_dense_cpu(Kernel_nn* ker, float* input, float* input_z, float* out
 #ifdef __CUDACC__
 extern "C"
 #endif
-void backward_dense(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
+void backward_dense(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
     #ifndef __CUDACC__
-    backward_dense_cpu(ker, input, input_z, output, size_input, size_output, activation, is_first);
+    backward_dense_cpu(ker, d_ker, input, input_z, output, size_input, size_output, activation, is_first);
     #else
-    backward_dense_device(ker, input, input_z, output, size_input, size_output, activation, is_first);
+    backward_dense_device(ker, d_ker, input, input_z, output, size_input, size_output, activation, is_first);
     #endif
 }
@@ -425,12 +425,12 @@ __global__ void backward_linearisation_kernel_2(float** weights, float*** input,
     input[idx][idy][idz] = tmp*( (*d_f)(input_z[idx][idy][idz]) );
 }
 
-void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
+void backward_linearisation_device(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
     // Make computation
     dim3 gridSize(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(input_width, BLOCKSIZE_y), i_div_up(input_width, BLOCKSIZE_y));
     dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
-    backward_linearisation_kernel_1<<<gridSize, blockSize>>>(ker->d_weights, ker->d_bias, input, output, input_depth, input_width, size_output);
+    backward_linearisation_kernel_1<<<gridSize, blockSize>>>(d_ker->d_weights, d_ker->d_bias, input, output, input_depth, input_width, size_output);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -445,13 +445,13 @@ void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** inpu
 }
 #endif
 
-void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
+void backward_linearisation_cpu(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
     funcPtr d_function = get_activation_function(activation);
     // Bias
     for (int j=0; j < size_output; j++) {
-        ker->d_bias[j] += output[j];
+        d_ker->d_bias[j] += output[j];
     }
     // Weights
@@ -460,7 +460,7 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
         for (int k=0; k < input_width; k++) {
             for (int l=0; l < input_width; l++) {
                 for (int j=0; j < size_output; j++) {
-                    ker->d_weights[cpt][j] += input[i][k][l]*output[j];
+                    d_ker->d_weights[cpt][j] += input[i][k][l]*output[j];
                 }
                 cpt++;
             }
@@ -486,11 +486,11 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
 #ifdef __CUDACC__
 extern "C"
 #endif
-void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
+void backward_linearisation(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
    #ifndef __CUDACC__
-    backward_linearisation_cpu(ker, input, input_z, output, input_depth, input_width, size_output, activation);
+    backward_linearisation_cpu(ker, d_ker, input, input_z, output, input_depth, input_width, size_output, activation);
    #else
-    backward_linearisation_device(ker, input, input_z, output, input_depth, input_width, size_output, activation);
+    backward_linearisation_device(ker, d_ker, input, input_z, output, input_depth, input_width, size_output, activation);
    #endif
 }
@@ -569,12 +569,12 @@ __global__ void backward_convolution_apply_propagate_kernel(float*** input, floa
     }
 }
 
-void backward_convolution_device(Kernel_cnn* kernel, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
+void backward_convolution_device(Kernel_cnn* kernel, D_Kernel_cnn* d_kernel, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
     // Bias Kernel
     dim3 gridSize1(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_y));
     dim3 blockSize1(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
-    backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(kernel->d_bias, output, output_depth, output_width);
+    backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(d_kernel->d_bias, output, output_depth, output_width);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -582,7 +582,7 @@ void backward_convolution_device(Kernel_cnn* kernel, float*** input, float*** in
     dim3 gridSize2(i_div_up(output_width, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_depth, BLOCKSIZE_y));
     dim3 blockSize2(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
-    backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(kernel->d_weights, input, output, input_depth, output_depth, input_width, output_width, kernel_size, stride, padding);
+    backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(d_kernel->d_weights, input, output, input_depth, output_depth, input_width, output_width, kernel_size, stride, padding);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -610,7 +610,7 @@ void backward_convolution_device(Kernel_cnn* kernel, float*** input, float*** in
 #endif
 
-void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
+void backward_convolution_cpu(Kernel_cnn* ker, D_Kernel_cnn* d_ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
     funcPtr d_function = get_activation_function(activation);
     int max_move = kernel_size - padding;
@@ -619,7 +619,7 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
     for (int i=0; i < output_depth; i++) {
         for (int j=0; j < output_width; j++) {
             for (int k=0; k < output_width; k++) {
-                ker->d_bias[i][j][k] += output[i][j][k];
+                d_ker->d_bias[i][j][k] += output[i][j][k];
             }
         }
     }
@@ -637,7 +637,7 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
                     }
                 }
             }
-            ker->d_weights[h][i][j+padding][k+padding] += tmp;
+            d_ker->d_weights[h][i][j+padding][k+padding] += tmp;
         }
     }
 }
@@ -680,10 +680,10 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
 #ifdef __CUDACC__
 extern "C"
 #endif
-void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
+void backward_convolution(Kernel_cnn* ker, D_Kernel_cnn* d_ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
    #ifndef __CUDACC__
-    backward_convolution_cpu(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
+    backward_convolution_cpu(ker, d_ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
    #else
-    backward_convolution_device(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
+    backward_convolution_device(ker, d_ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
    #endif
 }
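For reference, the accumulation that backward_dense_cpu performs above, written out with a_i = input[i], z_i = input_z[i], and δ_j = output[j] (the error arriving from the layer after this one):

```latex
\frac{\partial L}{\partial b_j} \;{+}{=}\; \delta_j, \qquad
\frac{\partial L}{\partial W_{ij}} \;{+}{=}\; a_i\,\delta_j, \qquad
\delta_i^{\mathrm{in}} = f'(z_i)\sum_{j} W_{ij}\,\delta_j
```

Only the first two write into the new D_Kernel_nn (d_bias, d_weights); the third, handled by backward_dense_kernel_1/2 or the remaining CPU loop, still reads the live weights from Kernel_nn. That is exactly why both structures are now passed side by side.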

View File

@@ -316,12 +316,12 @@ __global__ void backward_dense_kernel_2(float** weights, float* input, float* in
     input[idx] = tmp*( (*d_f)(input_z[idx]) );
 }
 
-void backward_dense_device(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
+void backward_dense_device(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
     // Make computation
     dim3 gridSize1(i_div_up(size_input, BLOCKSIZE_x), i_div_up(size_output, BLOCKSIZE_y));
     dim3 blockSize1(BLOCKSIZE_x, BLOCKSIZE_y);
-    backward_dense_kernel_1<<<gridSize1, blockSize1>>>(ker->d_weights, ker->d_bias, input, output, size_input, size_output);
+    backward_dense_kernel_1<<<gridSize1, blockSize1>>>(d_ker->d_weights, d_ker->d_bias, input, output, size_input, size_output);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -341,18 +341,18 @@ void backward_dense_device(Kernel_nn* ker, float* input, float* input_z, float*
 }
 #endif
 
-void backward_dense_cpu(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
+void backward_dense_cpu(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
     funcPtr d_function = get_activation_function(activation);
     // Bias
     for (int j=0; j < size_output; j++) {
-        ker->d_bias[j] += output[j];
+        d_ker->d_bias[j] += output[j];
     }
     // Weights
     for (int i=0; i < size_input; i++) {
         for (int j=0; j < size_output; j++) {
-            ker->d_weights[i][j] += input[i]*output[j];
+            d_ker->d_weights[i][j] += input[i]*output[j];
         }
     }
@@ -373,11 +373,11 @@ void backward_dense_cpu(Kernel_nn* ker, float* input, float* input_z, float* out
 #ifdef __CUDACC__
 extern "C"
 #endif
-void backward_dense(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
+void backward_dense(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first) {
    #ifndef __CUDACC__
-    backward_dense_cpu(ker, input, input_z, output, size_input, size_output, activation, is_first);
+    backward_dense_cpu(ker, d_ker, input, input_z, output, size_input, size_output, activation, is_first);
    #else
-    backward_dense_device(ker, input, input_z, output, size_input, size_output, activation, is_first);
+    backward_dense_device(ker, d_ker, input, input_z, output, size_input, size_output, activation, is_first);
    #endif
 }
@@ -425,12 +425,12 @@ __global__ void backward_linearisation_kernel_2(float** weights, float*** input,
     input[idx][idy][idz] = tmp*( (*d_f)(input_z[idx][idy][idz]) );
 }
 
-void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
+void backward_linearisation_device(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
     // Make computation
     dim3 gridSize(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(input_width, BLOCKSIZE_y), i_div_up(input_width, BLOCKSIZE_y));
     dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
-    backward_linearisation_kernel_1<<<gridSize, blockSize>>>(ker->d_weights, ker->d_bias, input, output, input_depth, input_width, size_output);
+    backward_linearisation_kernel_1<<<gridSize, blockSize>>>(d_ker->d_weights, d_ker->d_bias, input, output, input_depth, input_width, size_output);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -445,13 +445,13 @@ void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** inpu
 }
 #endif
 
-void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
+void backward_linearisation_cpu(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
     funcPtr d_function = get_activation_function(activation);
     // Bias
     for (int j=0; j < size_output; j++) {
-        ker->d_bias[j] += output[j];
+        d_ker->d_bias[j] += output[j];
     }
     // Weights
@@ -460,7 +460,7 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
         for (int k=0; k < input_width; k++) {
             for (int l=0; l < input_width; l++) {
                 for (int j=0; j < size_output; j++) {
-                    ker->d_weights[cpt][j] += input[i][k][l]*output[j];
+                    d_ker->d_weights[cpt][j] += input[i][k][l]*output[j];
                 }
                 cpt++;
             }
@@ -486,11 +486,11 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
 #ifdef __CUDACC__
 extern "C"
 #endif
-void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
+void backward_linearisation(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
    #ifndef __CUDACC__
-    backward_linearisation_cpu(ker, input, input_z, output, input_depth, input_width, size_output, activation);
+    backward_linearisation_cpu(ker, d_ker, input, input_z, output, input_depth, input_width, size_output, activation);
    #else
-    backward_linearisation_device(ker, input, input_z, output, input_depth, input_width, size_output, activation);
+    backward_linearisation_device(ker, d_ker, input, input_z, output, input_depth, input_width, size_output, activation);
    #endif
 }
@@ -569,12 +569,12 @@ __global__ void backward_convolution_apply_propagate_kernel(float*** input, floa
     }
 }
 
-void backward_convolution_device(Kernel_cnn* kernel, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
+void backward_convolution_device(Kernel_cnn* kernel, D_Kernel_cnn* d_kernel, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
     // Bias Kernel
     dim3 gridSize1(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_y));
     dim3 blockSize1(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
-    backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(kernel->d_bias, output, output_depth, output_width);
+    backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(d_kernel->d_bias, output, output_depth, output_width);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -582,7 +582,7 @@ void backward_convolution_device(Kernel_cnn* kernel, float*** input, float*** in
     dim3 gridSize2(i_div_up(output_width, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_depth, BLOCKSIZE_y));
     dim3 blockSize2(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
-    backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(kernel->d_weights, input, output, input_depth, output_depth, input_width, output_width, kernel_size, stride, padding);
+    backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(d_kernel->d_weights, input, output, input_depth, output_depth, input_width, output_width, kernel_size, stride, padding);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
@@ -610,7 +610,7 @@ void backward_convolution_device(Kernel_cnn* kernel, float*** input, float*** in
 #endif
 
-void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
+void backward_convolution_cpu(Kernel_cnn* ker, D_Kernel_cnn* d_ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
     funcPtr d_function = get_activation_function(activation);
     int max_move = kernel_size - padding;
@@ -619,7 +619,7 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
     for (int i=0; i < output_depth; i++) {
         for (int j=0; j < output_width; j++) {
             for (int k=0; k < output_width; k++) {
-                ker->d_bias[i][j][k] += output[i][j][k];
+                d_ker->d_bias[i][j][k] += output[i][j][k];
             }
         }
     }
@@ -637,13 +637,13 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
                     }
                 }
             }
-            ker->d_weights[h][i][j+padding][k+padding] += tmp;
+            d_ker->d_weights[h][i][j+padding][k+padding] += tmp;
                }
            }
        }
    }
-    // Input TODO
+    // Input
     if (is_first==1) // No need to backpropagate into the input
         return;
     for (int i=0; i < input_depth; i++) {
@@ -680,10 +680,10 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
 #ifdef __CUDACC__
 extern "C"
 #endif
-void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
+void backward_convolution(Kernel_cnn* ker, D_Kernel_cnn* d_ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride) {
    #ifndef __CUDACC__
-    backward_convolution_cpu(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
+    backward_convolution_cpu(ker, d_ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
    #else
-    backward_convolution_device(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
+    backward_convolution_device(ker, d_ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first, kernel_size, padding, stride);
    #endif
 }
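This block appears to be the CUDA twin of the previous file; the compare page does not show file names, so treating them as the .c and .cu copies of the same source is an inference. Both rely on the same compile-time dispatch idiom, sketched here in isolation with placeholder names (not repo code):

```c
/* One public entry point whose body is selected at compile time: gcc builds the
 * CPU path, nvcc (which defines __CUDACC__) builds the CUDA path. */

void do_work_cpu(float* data, int n);     /* plain C implementation */
void do_work_device(float* data, int n);  /* CUDA implementation wrapping kernel launches */

#ifdef __CUDACC__
extern "C"   /* nvcc compiles as C++; keep a C ABI so gcc-built objects can link */
#endif
void do_work(float* data, int n) {
#ifndef __CUDACC__
    do_work_cpu(data, n);
#else
    do_work_device(data, n);
#endif
}
```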

View File

@@ -9,11 +9,6 @@
 #include "../common/include/colors.h"
 #include "../common/include/utils.h"
 #include "include/backpropagation.h"
-#include "include/initialisation.h"
-#include "include/convolution.h"
-#include "include/function.h"
-#include "include/creation.h"
-#include "include/update.h"
 #include "include/make.h"
 #include "include/cnn.h"
@@ -253,6 +248,7 @@ void forward_propagation(Network* network) {
 void backward_propagation(Network* network, int wanted_number, int finetuning) {
     int n = network->size; // Number of layers in the network
+    D_Network* d_network = network->d_network;
     // Backward pass on the last layer, which always uses SOFTMAX
     float* wanted_output = generate_wanted_output(wanted_number, network->width[network->size -1]); // Desired output, used to initialise the error
@@ -269,6 +265,7 @@ void backward_propagation(Network* network, int wanted_number, int finetuning) {
     for (int i=n-2; i >= 0; i--) {
         // Updates 'k_i' from a comparison of the information in 'input' and 'output'
         Kernel* k_i = network->kernel[i];
+        D_Kernel* d_k_i = d_network->kernel[i];
         float*** input = network->input[i];
         float*** input_z = network->input_z[i];
@@ -290,15 +287,15 @@ void backward_propagation(Network* network, int wanted_number, int finetuning) {
                 return; // Stop the backpropagation
             }
             int kernel_size = k_i->cnn->k_size;
-            backward_convolution(k_i->cnn, input, input_z, output, input_depth, input_width, output_depth, output_width, -activation, is_last_layer, kernel_size, padding, stride);
+            backward_convolution(k_i->cnn, d_k_i->cnn, input, input_z, output, input_depth, input_width, output_depth, output_width, -activation, is_last_layer, kernel_size, padding, stride);
         } else if (k_i->nn) { // Full connection
             if (k_i->linearisation == DOESNT_LINEARISE) { // Vector -> Vector
-                backward_dense(k_i->nn, input[0][0], input_z[0][0], output[0][0], input_width, output_width, -activation, is_last_layer);
+                backward_dense(k_i->nn, d_k_i->nn, input[0][0], input_z[0][0], output[0][0], input_width, output_width, -activation, is_last_layer);
             } else { // Matrix -> vector
                 if (finetuning == NN_ONLY) {
                     return; // Stop the backpropagation
                 }
-                backward_linearisation(k_i->nn, input, input_z, output[0][0], input_depth, input_width, output_width, -activation);
+                backward_linearisation(k_i->nn, d_k_i->nn, input, input_z, output[0][0], input_depth, input_width, output_width, -activation);
             }
         } else { // Pooling
             int kernel_size = 2*padding + input_width + stride - output_width*stride;
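Taken together, the new signatures suggest the following call-site wiring. This is a sketch under assumptions: the actual training loop is not part of this compare, the layer-building and data-loading calls are elided, and the initialisation argument is left as a plain 0. The finetuning constants EVERYTHING, NN_AND_LINEARISATION and NN_ONLY all appear elsewhere in this diff.

```c
#include "include/creation.h"  /* create_network, create_d_network */
#include "include/free.h"      /* free_network, free_d_network */
#include "include/cnn.h"       /* forward_propagation, backward_propagation */

/* Hypothetical helper, not repo code. */
void train_one_example(int label) {
    Network* net = create_network(8, 0.01f, 0, 0 /*initialisation*/,
                                  32 /*input_width*/, 1 /*input_depth*/,
                                  EVERYTHING /*finetuning depth*/);
    net->d_network = create_d_network(net);   /* gradient twin of the architecture */

    /* ... add_convolution / add_dense calls and input loading elided ... */

    forward_propagation(net);
    backward_propagation(net, label, net->finetuning);  /* accumulates into d_network */

    free_d_network(net);
    free_network(net);
}
```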

View File

@@ -6,19 +6,22 @@
 #include "../common/include/utils.h"
 #include "include/initialisation.h"
 #include "include/function.h"
+#include "include/cnn.h"
 #include "include/creation.h"
 
-Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth) {
+Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth, int finetuning) {
     if (dropout < 0 || dropout > 100) {
         printf_error("La probabilité de dropout n'est pas respecté, elle doit être comprise entre 0 et 100\n");
     }
     Network* network = (Network*)nalloc(1, sizeof(Network));
+    network->d_network = NULL;
     network->learning_rate = learning_rate;
     network->max_size = max_size;
     network->dropout = dropout;
     network->initialisation = initialisation;
     network->size = 1;
+    network->finetuning = finetuning;
     network->input = (float****)nalloc(max_size, sizeof(float***));
     network->input_z = (float****)nalloc(max_size, sizeof(float***));
     network->kernel = (Kernel**)nalloc(max_size-1, sizeof(Kernel*));
@@ -30,10 +33,144 @@ Network* create_network(int max_size, float learning_rate, int dropout, int init
     network->width[0] = input_width;
     network->depth[0] = input_depth;
     create_a_cube_input_layer(network, 0, input_depth, input_width);
-    create_a_cube_input_z_layer(network, 0, input_depth, input_width);
     return network;
 }
 
+D_Network* create_d_network(Network* network) {
+    // Initialise the network
+    int max_size = network->max_size;
+    D_Network* d_network = (D_Network*)nalloc(1, sizeof(D_Network));
+    if (pthread_mutex_init(&(d_network->lock), NULL) != 0)
+    {
+        printf_error("Le mutex ne s'est pas initialisé correctement \n");
+    }
+    d_network->kernel = (D_Kernel**)nalloc(max_size-1, sizeof(D_Kernel*));
+    for (int i=0; i < max_size-1; i++) {
+        d_network->kernel[i] = (D_Kernel*)nalloc(1, sizeof(D_Kernel));
+    }
+    // Then all of its layers
+    int n = network->size;
+    for (int i=0; i<(n-1); i++) {
+        Kernel* k_i = network->kernel[i];
+        D_Kernel* d_k_i = d_network->kernel[i];
+        if (k_i->cnn) { // Convolution
+            int k_size = k_i->cnn->k_size;
+            int rows = k_i->cnn->rows;
+            int columns = k_i->cnn->columns;
+            int output_width = network->width[i+1];
+            d_k_i->cnn = (D_Kernel_cnn*)nalloc(1, sizeof(D_Kernel_cnn));
+            D_Kernel_cnn* cnn = d_k_i->cnn;
+            // Weights
+            cnn->d_weights = (float****)nalloc(rows, sizeof(float***));
+            #ifdef ADAM_CNN_WEIGHTS
+            cnn->s_d_weights = (float****)nalloc(rows, sizeof(float***));
+            cnn->v_d_weights = (float****)nalloc(rows, sizeof(float***));
+            #endif
+            for (int i=0; i < rows; i++) {
+                cnn->d_weights[i] = (float***)nalloc(columns, sizeof(float**));
+                #ifdef ADAM_CNN_WEIGHTS
+                cnn->s_d_weights[i] = (float***)nalloc(columns, sizeof(float**));
+                cnn->v_d_weights[i] = (float***)nalloc(columns, sizeof(float**));
+                #endif
+                for (int j=0; j < columns; j++) {
+                    cnn->d_weights[i][j] = (float**)nalloc(k_size, sizeof(float*));
+                    #ifdef ADAM_CNN_WEIGHTS
+                    cnn->s_d_weights[i][j] = (float**)nalloc(k_size, sizeof(float*));
+                    cnn->v_d_weights[i][j] = (float**)nalloc(k_size, sizeof(float*));
+                    #endif
+                    for (int k=0; k < k_size; k++) {
+                        cnn->d_weights[i][j][k] = (float*)nalloc(k_size, sizeof(float));
+                        #ifdef ADAM_CNN_WEIGHTS
+                        cnn->s_d_weights[i][j][k] = (float*)nalloc(k_size, sizeof(float));
+                        cnn->v_d_weights[i][j][k] = (float*)nalloc(k_size, sizeof(float));
+                        #endif
+                        for (int l=0; l < k_size; l++) {
+                            cnn->d_weights[i][j][k][l] = 0.;
+                            #ifdef ADAM_CNN_WEIGHTS
+                            cnn->s_d_weights[i][j][k][l] = 0.;
+                            cnn->v_d_weights[i][j][k][l] = 0.;
+                            #endif
+                        }
+                    }
+                }
+            }
+            // Bias
+            cnn->d_bias = (float***)nalloc(columns, sizeof(float**));
+            #ifdef ADAM_CNN_BIAS
+            cnn->s_d_bias = (float***)nalloc(columns, sizeof(float**));
+            cnn->v_d_bias = (float***)nalloc(columns, sizeof(float**));
+            #endif
+            for (int i=0; i < columns; i++) {
+                cnn->d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
+                #ifdef ADAM_CNN_BIAS
+                cnn->s_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
+                cnn->v_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
+                #endif
+                for (int j=0; j < output_width; j++) {
+                    cnn->d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
+                    #ifdef ADAM_CNN_BIAS
+                    cnn->s_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
+                    cnn->v_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
+                    #endif
+                    for (int k=0; k < output_width; k++) {
+                        cnn->d_bias[i][j][k] = 0.;
+                        #ifdef ADAM_CNN_BIAS
+                        cnn->s_d_bias[i][j][k] = 0.;
+                        cnn->v_d_bias[i][j][k] = 0.;
+                        #endif
+                    }
+                }
+            }
+        } else if (k_i->nn) {
+            d_k_i->nn = (D_Kernel_nn*)nalloc(1, sizeof(D_Kernel_nn));
+            D_Kernel_nn* nn = d_k_i->nn;
+            int size_input = k_i->nn->size_input;
+            int size_output = k_i->nn->size_output;
+            // Weights
+            nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
+            #ifdef ADAM_DENSE_WEIGHTS
+            nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
+            nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
+            #endif
+            for (int i=0; i < size_input; i++) {
+                nn->d_weights[i] = (float*)nalloc(size_output, sizeof(float));
+                #ifdef ADAM_DENSE_WEIGHTS
+                nn->s_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
+                nn->v_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
+                #endif
+                for (int j=0; j < size_output; j++) {
+                    nn->d_weights[i][j] = 0.;
+                    #ifdef ADAM_DENSE_WEIGHTS
+                    nn->s_d_weights[i][j] = 0.;
+                    nn->v_d_weights[i][j] = 0.;
+                    #endif
+                }
+            }
+            // Bias
+            nn->d_bias = (float*)nalloc(size_output, sizeof(float));
+            #ifdef ADAM_DENSE_BIAS
+            nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
+            nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
+            #endif
+            for (int i=0; i < size_output; i++) {
+                nn->d_bias[i] = 0.;
+                #ifdef ADAM_DENSE_BIAS
+                nn->s_d_bias[i] = 0.;
+                nn->v_d_bias[i] = 0.;
+                #endif
+            }
+        }
+        // Otherwise it is a pooling layer, so there is nothing to do
+    }
+    return d_network;
+}
+
 void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
     network->input[pos] = (float***)nalloc(depth, sizeof(float**));
     for (int i=0; i < depth; i++) {
@@ -149,70 +286,21 @@ void add_convolution(Network* network, int kernel_size, int number_of_kernels, i
     cnn->columns = output_depth;
     cnn->weights = (float****)nalloc(input_depth, sizeof(float***));
-    cnn->d_weights = (float****)nalloc(input_depth, sizeof(float***));
-    #ifdef ADAM_CNN_WEIGHTS
-    cnn->s_d_weights = (float****)nalloc(input_depth, sizeof(float***));
-    cnn->v_d_weights = (float****)nalloc(input_depth, sizeof(float***));
-    #endif
     for (int i=0; i < input_depth; i++) {
         cnn->weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        cnn->d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        #ifdef ADAM_CNN_WEIGHTS
-        cnn->s_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        cnn->v_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
-        #endif
         for (int j=0; j < output_depth; j++) {
             cnn->weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            cnn->d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            #ifdef ADAM_CNN_WEIGHTS
-            cnn->s_d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            cnn->v_d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
-            #endif
             for (int k=0; k < kernel_size; k++) {
                 cnn->weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                cnn->d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                #ifdef ADAM_CNN_WEIGHTS
-                cnn->s_d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                cnn->v_d_weights[i][j][k] = (float*)nalloc(kernel_size, sizeof(float));
-                #endif
-                for (int l=0; l < kernel_size; l++) {
-                    cnn->d_weights[i][j][k][l] = 0.;
-                    #ifdef ADAM_CNN_WEIGHTS
-                    cnn->s_d_weights[i][j][k][l] = 0.;
-                    cnn->v_d_weights[i][j][k][l] = 0.;
-                    #endif
-                }
             }
         }
     }
     cnn->bias = (float***)nalloc(output_depth, sizeof(float**));
-    cnn->d_bias = (float***)nalloc(output_depth, sizeof(float**));
-    #ifdef ADAM_CNN_BIAS
-    cnn->s_d_bias = (float***)nalloc(output_depth, sizeof(float**));
-    cnn->v_d_bias = (float***)nalloc(output_depth, sizeof(float**));
-    #endif
     for (int i=0; i < output_depth; i++) {
         cnn->bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        cnn->d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        #ifdef ADAM_CNN_BIAS
-        cnn->s_d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        cnn->v_d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
-        #endif
         for (int j=0; j < bias_size; j++) {
             cnn->bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
-            cnn->d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float));
-            #ifdef ADAM_CNN_BIAS
-            cnn->s_d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float*));
-            cnn->v_d_bias[i][j] = (float*)nalloc(bias_size, sizeof(float*));
-            #endif
-            for (int k=0; k < bias_size; k++) {
-                cnn->d_bias[i][j][k] = 0.;
-                #ifdef ADAM_CNN_BIAS
-                cnn->s_d_bias[i][j][k] = 0.;
-                cnn->v_d_bias[i][j][k] = 0.;
-                #endif
-            }
         }
     }
@@ -245,42 +333,14 @@ void add_dense(Network* network, int size_output, int activation) {
     nn->size_input = size_input;
     nn->size_output = size_output;
-    nn->bias = (float*)nalloc(size_output, sizeof(float));
-    nn->d_bias = (float*)nalloc(size_output, sizeof(float));
-    #ifdef ADAM_DENSE_BIAS
-    nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
-    nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
-    #endif
-    for (int i=0; i < size_output; i++) {
-        nn->d_bias[i] = 0.;
-        #ifdef ADAM_DENSE_BIAS
-        nn->s_d_bias[i] = 0.;
-        nn->v_d_bias[i] = 0.;
-        #endif
-    }
     nn->weights = (float**)nalloc(size_input, sizeof(float*));
-    nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
-    #ifdef ADAM_DENSE_WEIGHTS
-    nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
-    nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
-    #endif
     for (int i=0; i < size_input; i++) {
         nn->weights[i] = (float*)nalloc(size_output, sizeof(float));
-        nn->d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        #ifdef ADAM_DENSE_WEIGHTS
-        nn->s_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        nn->v_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        #endif
-        for (int j=0; j < size_output; j++) {
-            nn->d_weights[i][j] = 0.;
-            #ifdef ADAM_DENSE_WEIGHTS
-            nn->s_d_weights[i][j] = 0.;
-            nn->v_d_weights[i][j] = 0.;
-            #endif
-        }
     }
+    nn->bias = (float*)nalloc(size_output, sizeof(float));
     initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input, size_output);
     initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
     create_a_line_input_layer(network, n, size_output);
@@ -310,42 +370,13 @@ void add_dense_linearisation(Network* network, int size_output, int activation)
     nn->size_input = size_input;
     nn->size_output = size_output;
-    nn->bias = (float*)nalloc(size_output, sizeof(float));
-    nn->d_bias = (float*)nalloc(size_output, sizeof(float));
-    #ifdef ADAM_DENSE_BIAS
-    nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
-    nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
-    #endif
-    for (int i=0; i < size_output; i++) {
-        nn->d_bias[i] = 0.;
-        #ifdef ADAM_DENSE_BIAS
-        nn->s_d_bias[i] = 0.;
-        nn->v_d_bias[i] = 0.;
-        #endif
-    }
     nn->weights = (float**)nalloc(size_input, sizeof(float*));
-    nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
-    #ifdef ADAM_DENSE_WEIGHTS
-    nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
-    nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
-    #endif
     for (int i=0; i < size_input; i++) {
         nn->weights[i] = (float*)nalloc(size_output, sizeof(float));
-        nn->d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        #ifdef ADAM_DENSE_WEIGHTS
-        nn->s_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        nn->v_d_weights[i] = (float*)nalloc(size_output, sizeof(float));
-        #endif
-        for (int j=0; j < size_output; j++) {
-            nn->d_weights[i][j] = 0.;
-            #ifdef ADAM_DENSE_WEIGHTS
-            nn->s_d_weights[i][j] = 0.;
-            nn->v_d_weights[i][j] = 0.;
-            #endif
-        }
     }
+    nn->bias = (float*)nalloc(size_output, sizeof(float));
     initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input, size_output);
     initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
     create_a_line_input_layer(network, n, size_output);
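The allocations in create_d_network above pin down the shape of the new gradient structures. A sketch of what D_Kernel_cnn and D_Kernel_nn presumably look like, inferred from those allocations; the real definitions live in a header that is not part of this compare:

```c
typedef struct {
    float**** d_weights;   /* gradient accumulator, [rows][columns][k_size][k_size] */
#ifdef ADAM_CNN_WEIGHTS
    float**** s_d_weights; /* Adam first-moment buffer, same shape */
    float**** v_d_weights; /* Adam second-moment buffer, same shape */
#endif
    float*** d_bias;       /* gradient accumulator, [columns][output_width][output_width] */
#ifdef ADAM_CNN_BIAS
    float*** s_d_bias;
    float*** v_d_bias;
#endif
} D_Kernel_cnn;

typedef struct {
    float** d_weights;     /* gradient accumulator, [size_input][size_output] */
#ifdef ADAM_DENSE_WEIGHTS
    float** s_d_weights;
    float** v_d_weights;
#endif
    float* d_bias;         /* gradient accumulator, [size_output] */
#ifdef ADAM_DENSE_BIAS
    float* s_d_bias;
    float* v_d_bias;
#endif
} D_Kernel_nn;
```

The design choice this compare makes is to split the read-only parameters (Kernel_cnn/Kernel_nn) from the mutable training state: add_convolution and add_dense now allocate only weights and bias, while every gradient and Adam buffer lives in the D_Network twin created on demand.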

View File

@ -3,6 +3,7 @@
#include <stdio.h> #include <stdio.h>
#include "../common/include/memory_management.h" #include "../common/include/memory_management.h"
#include "include/cnn.h"
#include "include/free.h" #include "include/free.h"
@ -19,6 +20,16 @@ void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
gree(network->input_z[pos], true); gree(network->input_z[pos], true);
} }
void free_a_cube_input_layer_without_z(Network* network, int pos, int depth, int dim) {
for (int i=0; i < depth; i++) {
for (int j=0; j < dim; j++) {
gree(network->input[pos][i][j], true);
}
gree(network->input[pos][i], true);
}
gree(network->input[pos], true);
}
void free_a_line_input_layer(Network* network, int pos) { void free_a_line_input_layer(Network* network, int pos) {
// Libère l'espace mémoire de network->input[pos] et network->input_z[pos] // Libère l'espace mémoire de network->input[pos] et network->input_z[pos]
// lorsque ces couches sont denses (donc sont des matrice de dimension 1) // lorsque ces couches sont denses (donc sont des matrice de dimension 1)
@ -42,59 +53,26 @@ void free_convolution(Network* network, int pos) {
int r = k_pos->rows; int r = k_pos->rows;
int bias_size = network->width[pos+1]; int bias_size = network->width[pos+1];
free_a_cube_input_layer(network, pos+1, network->depth[pos+1], network->width[pos+1]); free_a_cube_input_layer(network, pos+1, network->depth[pos+1], network->width[pos+1]);
// Partie toujours initialisée (donc à libérer)
for (int i=0; i < c; i++) { for (int i=0; i < c; i++) {
for (int j=0; j < bias_size; j++) { for (int j=0; j < bias_size; j++) {
gree(k_pos->bias[i][j], true); gree(k_pos->bias[i][j], true);
gree(k_pos->d_bias[i][j], true);
#ifdef ADAM_CNN_BIAS
gree(k_pos->s_d_bias[i][j], true);
gree(k_pos->v_d_bias[i][j], true);
#endif
} }
gree(k_pos->bias[i], true); gree(k_pos->bias[i], true);
gree(k_pos->d_bias[i], true);
#ifdef ADAM_CNN_BIAS
gree(k_pos->s_d_bias[i], true);
gree(k_pos->v_d_bias[i], true);
#endif
} }
gree(k_pos->bias, true); gree(k_pos->bias, true);
gree(k_pos->d_bias, true);
#ifdef ADAM_CNN_BIAS
gree(k_pos->s_d_bias, true);
gree(k_pos->v_d_bias, true);
#endif
for (int i=0; i < r; i++) { for (int i=0; i < r; i++) {
for (int j=0; j < c; j++) { for (int j=0; j < c; j++) {
for (int k=0; k < k_size; k++) { for (int k=0; k < k_size; k++) {
gree(k_pos->weights[i][j][k], true); gree(k_pos->weights[i][j][k], true);
gree(k_pos->d_weights[i][j][k], true);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights[i][j][k], true);
gree(k_pos->v_d_weights[i][j][k], true);
#endif
} }
gree(k_pos->weights[i][j], true); gree(k_pos->weights[i][j], true);
gree(k_pos->d_weights[i][j], true);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights[i][j], true);
gree(k_pos->v_d_weights[i][j], true);
#endif
} }
gree(k_pos->weights[i], true); gree(k_pos->weights[i], true);
gree(k_pos->d_weights[i], true);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights[i], true);
gree(k_pos->v_d_weights[i], true);
#endif
} }
gree(k_pos->weights, true); gree(k_pos->weights, true);
gree(k_pos->d_weights, true);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights, true);
gree(k_pos->v_d_weights, true);
#endif
gree(k_pos, true); gree(k_pos, true);
} }
@ -103,27 +81,12 @@ void free_dense(Network* network, int pos) {
free_a_line_input_layer(network, pos+1); free_a_line_input_layer(network, pos+1);
Kernel_nn* k_pos = network->kernel[pos]->nn; Kernel_nn* k_pos = network->kernel[pos]->nn;
int dim = k_pos->size_input; int dim = k_pos->size_input;
for (int i=0; i < dim; i++) { for (int i=0; i < dim; i++) {
gree(k_pos->weights[i], true); gree(k_pos->weights[i], true);
gree(k_pos->d_weights[i], true);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights[i], true);
gree(k_pos->v_d_weights[i], true);
#endif
} }
gree(k_pos->weights, true); gree(k_pos->weights, true);
gree(k_pos->d_weights, true);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights, true);
gree(k_pos->v_d_weights, true);
#endif
gree(k_pos->bias, true); gree(k_pos->bias, true);
gree(k_pos->d_bias, true);
#ifdef ADAM_DENSE_BIAS
gree(k_pos->s_d_bias, true);
gree(k_pos->v_d_bias, true);
#endif
gree(k_pos, true); gree(k_pos, true);
} }
@ -132,34 +95,19 @@ void free_dense_linearisation(Network* network, int pos) {
free_a_line_input_layer(network, pos+1); free_a_line_input_layer(network, pos+1);
Kernel_nn* k_pos = network->kernel[pos]->nn; Kernel_nn* k_pos = network->kernel[pos]->nn;
int dim = k_pos->size_input; int dim = k_pos->size_input;
for (int i=0; i < dim; i++) { for (int i=0; i < dim; i++) {
gree(k_pos->weights[i], true); gree(k_pos->weights[i], true);
gree(k_pos->d_weights[i], true);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights[i], true);
gree(k_pos->v_d_weights[i], true);
#endif
} }
gree(k_pos->weights, true); gree(k_pos->weights, true);
gree(k_pos->d_weights, true);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights, true);
gree(k_pos->v_d_weights, true);
#endif
gree(k_pos->bias, true); gree(k_pos->bias, true);
gree(k_pos->d_bias, true);
#ifdef ADAM_DENSE_BIAS
gree(k_pos->s_d_bias, true);
gree(k_pos->v_d_bias, true);
#endif
gree(k_pos, true); gree(k_pos, true);
} }
void free_network_creation(Network* network) { void free_network_creation(Network* network) {
// On libère l'input correspondant à l'image: input[0] (car elle n'appartient à aucune couche) // On libère l'input correspondant à l'image: input[0] (car elle n'appartient à aucune couche)
free_a_cube_input_layer(network, 0, network->depth[0], network->width[0]); free_a_cube_input_layer_without_z(network, 0, network->depth[0], network->width[0]);
for (int i=0; i < network->max_size-1; i++) { for (int i=0; i < network->max_size-1; i++) {
gree(network->kernel[i], true); gree(network->kernel[i], true);
@ -173,6 +121,7 @@ void free_network_creation(Network* network) {
gree(network, true); gree(network, true);
} }
void free_network(Network* network) { void free_network(Network* network) {
#if (defined(USE_CUDA) || defined(TEST_MEMORY_MANAGEMENT)) && defined(FREE_ALL_OPT) #if (defined(USE_CUDA) || defined(TEST_MEMORY_MANAGEMENT)) && defined(FREE_ALL_OPT)
// Supprimer toute la mémoire allouée avec nalloc directement // Supprimer toute la mémoire allouée avec nalloc directement
@ -206,3 +155,137 @@ void free_network(Network* network) {
free_network_creation(network); free_network_creation(network);
#endif #endif
} }
// ----------------------- Pour le d_network -----------------------
void free_d_convolution(Network* network, int pos) {
Kernel_cnn* k_pos = network->kernel[pos]->cnn;
D_Network* d_network = network->d_network;
D_Kernel_cnn* d_k_pos = d_network->kernel[pos]->cnn;
int c = k_pos->columns;
int k_size = k_pos->k_size;
int r = k_pos->rows;
int bias_size = network->width[pos+1];
if (network->finetuning == EVERYTHING) {
for (int i=0; i < c; i++) {
for (int j=0; j < bias_size; j++) {
gree(d_k_pos->d_bias[i][j], true);
#ifdef ADAM_CNN_BIAS
gree(d_k_pos->s_d_bias[i][j], true);
gree(d_k_pos->v_d_bias[i][j], true);
#endif
}
gree(d_k_pos->d_bias[i], true);
#ifdef ADAM_CNN_BIAS
gree(d_k_pos->s_d_bias[i], true);
gree(d_k_pos->v_d_bias[i], true);
#endif
}
gree(d_k_pos->d_bias, true);
#ifdef ADAM_CNN_BIAS
gree(d_k_pos->s_d_bias, true);
gree(d_k_pos->v_d_bias, true);
#endif
for (int i=0; i < r; i++) {
for (int j=0; j < c; j++) {
for (int k=0; k < k_size; k++) {
gree(d_k_pos->d_weights[i][j][k], true);
#ifdef ADAM_CNN_WEIGHTS
gree(d_k_pos->s_d_weights[i][j][k], true);
gree(d_k_pos->v_d_weights[i][j][k], true);
#endif
}
gree(d_k_pos->d_weights[i][j], true);
#ifdef ADAM_CNN_WEIGHTS
gree(d_k_pos->s_d_weights[i][j], true);
gree(d_k_pos->v_d_weights[i][j], true);
#endif
}
gree(d_k_pos->d_weights[i], true);
#ifdef ADAM_CNN_WEIGHTS
gree(d_k_pos->s_d_weights[i], true);
gree(d_k_pos->v_d_weights[i], true);
#endif
}
gree(d_k_pos->d_weights, true);
#ifdef ADAM_CNN_WEIGHTS
gree(d_k_pos->s_d_weights, true);
gree(d_k_pos->v_d_weights, true);
#endif
}
}
void free_d_dense(Network* network, int pos) {
D_Network* d_network = network->d_network;
D_Kernel_nn* d_k_pos = d_network->kernel[pos]->nn;
int dim = network->kernel[pos]->nn->size_input;
for (int i=0; i < dim; i++) {
gree(d_k_pos->d_weights[i], true);
#ifdef ADAM_DENSE_WEIGHTS
gree(d_k_pos->s_d_weights[i], true);
gree(d_k_pos->v_d_weights[i], true);
#endif
}
gree(d_k_pos->d_weights, true);
#ifdef ADAM_DENSE_WEIGHTS
gree(d_k_pos->s_d_weights, true);
gree(d_k_pos->v_d_weights, true);
#endif
gree(d_k_pos->d_bias, true);
#ifdef ADAM_DENSE_BIAS
gree(d_k_pos->s_d_bias, true);
gree(d_k_pos->v_d_bias, true);
#endif
}
void free_d_dense_linearisation(Network* network, int pos) {
D_Network* d_network = network->d_network;
D_Kernel_nn* d_k_pos = d_network->kernel[pos]->nn;
int dim = network->kernel[pos]->nn->size_input;
if (network->finetuning <= NN_AND_LINEARISATION) {
for (int i=0; i < dim; i++) {
gree(d_k_pos->d_weights[i], true);
#ifdef ADAM_DENSE_WEIGHTS
gree(d_k_pos->s_d_weights[i], true);
gree(d_k_pos->v_d_weights[i], true);
#endif
}
gree(d_k_pos->d_weights, true);
#ifdef ADAM_DENSE_WEIGHTS
gree(d_k_pos->s_d_weights, true);
gree(d_k_pos->v_d_weights, true);
#endif
gree(d_k_pos->d_bias, true);
#ifdef ADAM_DENSE_BIAS
gree(d_k_pos->s_d_bias, true);
gree(d_k_pos->v_d_bias, true);
#endif
}
gree(d_k_pos, true);
}
void free_d_network(Network* network) {
D_Network* d_network = network->d_network;
for (int i=0; i < network->max_size-1; i++) {
D_Kernel* d_k_i = d_network->kernel[i];
if (d_k_i->cnn) { // Convolution
free_d_convolution(network, i);
} else if (d_k_i->nn) { // Dense
if (network->kernel[i]->linearisation == DOESNT_LINEARISE) { // Vecteur -> Vecteur
free_d_dense(network, i);
} else { // Matrice -> Vecteur
free_d_dense_linearisation(network, i);
}
}
gree(d_network->kernel[i], true);
}
gree(d_network->kernel, true);
pthread_mutex_destroy(&(d_network->lock));
gree(d_network, true);
}


@@ -50,7 +50,7 @@ extern "C"
/*
 * Transfers the error information through a fully connected layer
 */
-void backward_dense(Kernel_nn* ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first);
void backward_dense(Kernel_nn* ker, D_Kernel_nn* d_ker, float* input, float* input_z, float* output, int size_input, int size_output, int activation, int is_first);
#ifdef __CUDACC__
@@ -59,7 +59,7 @@ extern "C"
/*
 * Transfers the error information through a linearisation layer
 */
-void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation);
void backward_linearisation(Kernel_nn* ker, D_Kernel_nn* d_ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation);
#ifdef __CUDACC__
@@ -68,6 +68,6 @@ extern "C"
/*
 * Transfers the error information through a convolution layer
 */
-void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride);
void backward_convolution(Kernel_cnn* ker, D_Kernel_cnn* d_ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first, int kernel_size, int padding, int stride);
#endif
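Each backward_* function now receives the D_Kernel counterpart of its kernel, which is where the accumulated gradients live. A sketch of a dense-layer call site under the new signature (only the prototype above is taken from the header; the surrounding variables, the activation field and the is_first choice are assumptions):

int activation = network->kernel[i]->activation;  // assumed field on Kernel
Kernel_nn* ker = network->kernel[i]->nn;
D_Kernel_nn* d_ker = network->d_network->kernel[i]->nn;
// dense layers store their values as flat vectors: input[i][0][0]
backward_dense(ker, d_ker, network->input[i][0][0], network->input_z[i][0][0],
               network->input[i+1][0][0], network->width[i], network->width[i+1],
               activation, i == 0);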


@@ -7,7 +7,14 @@
/*
 * Creates a network that can hold max_size layers (including the input and output layers)
 */
-Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth);
Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth, int finetuning);
/*
 * Creates a network associated with 'network' for backpropagation, following
 * the same architecture as 'network'.
 * To do so, the function allocates the network and its layers
 */
D_Network* create_d_network(Network* network);
/*
 * Creates and allocates memory for an input-cube-type layer


@@ -10,6 +10,13 @@
 */
void free_a_cube_input_layer(Network* network, int pos, int depth, int dim);
/*
 * Frees the memory of network->input[pos]
 * when this layer is not dense (i.e. is a 3-dimensional matrix)
 * Thus frees the memory allocated in 'create_a_cube_input_layer' (creation.c)
 */
void free_a_cube_input_layer_without_z(Network* network, int pos, int depth, int dim);
/*
 * Frees the memory of network->input[pos] and network->input_z[pos]
 * when these layers are dense (i.e. are 1-dimensional matrices)
@@ -47,4 +54,24 @@ void free_network_creation(Network* network);
 */
void free_network(Network* network);
/*
 * Frees the memory allocated for a d_convolution
 */
void free_d_convolution(Network* network, int pos);
/*
 * Frees the memory allocated for a d_dense
 */
void free_d_dense(Network* network, int pos);
/*
 * Frees the memory allocated for a d_dense_linearisation
 */
void free_d_dense_linearisation(Network* network, int pos);
/*
 * Entirely frees the memory allocated in 'create_d_network' (creation.c)
 */
void free_d_network(Network* network);
#endif


@@ -8,29 +8,29 @@
/*
 * Returns a network following the LeNet5 architecture
 */
-Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning);
/*
 * Returns a network following the AlexNet architecture,
 * i.e. with a 3x227x227 input and an output of size 'size_output'
 */
-Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output);
Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning);
/*
 * Returns a network following the VGG16 architecture, modified to take a 3x256x256 input
 * and produce an output of size 'size_output'
 */
-Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning);
/*
 * Returns a network following the original VGG16 architecture, taking a 3x227x227 input
 * and producing an output of size 1 000
 */
-Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation);
Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation, int finetuning);
/*
 * Returns a network without convolutions, similar to the one used in src/dense
 */
-Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning);
#endif


@@ -1,8 +1,11 @@
#ifndef DEF_STRUCT_H
#define DEF_STRUCT_H
#include <pthread.h>
#include "config.h"
#define NO_POOLING 0
#define AVG_POOLING 1
#define MAX_POOLING 2
@@ -10,6 +13,61 @@
#define DOESNT_LINEARISE 0
#define DO_LINEARISE 1
//------------------- Backpropagation network -------------------
/*
 * Here we define the D_Network class associated with the Network class.
 * It enables backpropagation for the networks it is associated with
 */
typedef struct D_Kernel_cnn {
// Kernel with a matrix layer as output
float*** d_bias; // d_bias[columns][output_width][output_width]
#ifdef ADAM_CNN_BIAS
float*** s_d_bias; // s_d_bias[columns][output_width][output_width]
float*** v_d_bias; // v_d_bias[columns][output_width][output_width]
#endif
float**** d_weights; // d_weights[rows][columns][k_size][k_size]
#ifdef ADAM_CNN_WEIGHTS
float**** s_d_weights; // s_d_weights[rows][columns][k_size][k_size]
float**** v_d_weights; // v_d_weights[rows][columns][k_size][k_size]
#endif
} D_Kernel_cnn;
typedef struct D_Kernel_nn {
// Kernel with a vector layer as output
float* d_bias; // d_bias[size_output]
#ifdef ADAM_DENSE_BIAS
float* s_d_bias; // s_d_bias[size_output]
float* v_d_bias; // v_d_bias[size_output]
#endif
float** d_weights; // d_weights[size_input][size_output]
#ifdef ADAM_DENSE_WEIGHTS
float** s_d_weights; // s_d_weights[size_input][size_output]
float** v_d_weights; // v_d_weights[size_input][size_output]
#endif
} D_Kernel_nn;
typedef struct D_Kernel {
D_Kernel_cnn* cnn; // NULL if it is not a cnn
D_Kernel_nn* nn; // NULL if it is not an nn
// TODO: add a mutex
} D_Kernel;
typedef struct D_Network{
D_Kernel** kernel; // kernel[size], contains all the kernels
pthread_mutex_t lock; // Restricts modifications of d_network to a single network at a time
} D_Network;
//-------------------------- Classic network --------------------------
typedef struct Kernel_cnn {
// Kernel with a matrix layer as output
int k_size; // k_size = 2*padding + input_width + stride - output_width*stride
@@ -17,18 +75,7 @@ typedef struct Kernel_cnn {
int columns; // Depth of the output
float*** bias; // bias[columns][output_width][output_width] <=> bias[output depth][output width][output width]
-float*** d_bias; // d_bias[columns][output_width][output_width]
-#ifdef ADAM_CNN_BIAS
-float*** s_d_bias; // s_d_bias[columns][output_width][output_width]
-float*** v_d_bias; // v_d_bias[columns][output_width][output_width]
-#endif
float**** weights; // weights[rows][columns][k_size][k_size] <=> weights[input depth][output depth][kernel size][kernel size]
-float**** d_weights; // d_weights[rows][columns][k_size][k_size]
-#ifdef ADAM_CNN_WEIGHTS
-float**** s_d_weights; // s_d_weights[rows][columns][k_size][k_size]
-float**** v_d_weights; // v_d_weights[rows][columns][k_size][k_size]
-#endif
} Kernel_cnn;
typedef struct Kernel_nn {
@@ -37,18 +84,7 @@ typedef struct Kernel_nn {
int size_output; // Number of output elements
float* bias; // bias[size_output]
-float* d_bias; // d_bias[size_output]
-#ifdef ADAM_DENSE_BIAS
-float* s_d_bias; // s_d_bias[size_output]
-float* v_d_bias; // v_d_bias[size_output]
-#endif
float** weights; // weight[size_input][size_output]
-float** d_weights; // d_weights[size_input][size_output]
-#ifdef ADAM_DENSE_WEIGHTS
-float** s_d_weights; // s_d_weights[size_input][size_output]
-float** v_d_weights; // v_d_weights[size_input][size_output]
-#endif
} Kernel_nn;
typedef struct Kernel {
@@ -63,10 +99,12 @@ typedef struct Kernel {
} Kernel;
typedef struct Network{
int dropout; // Probability in [0, 100] (integer) of dropping a neuron
float learning_rate; // Learning rate of the network
int initialisation; // Id of the initialisation type
int finetuning; // backpropagation: 0 on everything; 1 on dense and linearisation; 2 on dense only
int max_size; // Size of the array containing the network
int size; // Current size of the network (size ≤ max_size)
@@ -77,6 +115,9 @@ typedef struct Network{
Kernel** kernel; // kernel[size], contains all the kernels
float**** input_z; // Array of all the network layers: input_z[size][couche->depth][couche->width][couche->width]
float**** input; // input[i] = f(input_z[i]) where f is the activation function of layer i
D_Network* d_network; // Network used for backpropagation
// It may be shared by several 'Network' instances
} Network;
#endif
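The finetuning field is compared against named levels (EVERYTHING, NN_AND_LINEARISATION, NN_ONLY) throughout this changeset. Their definitions are not part of this diff; given the comment on the field, they presumably look like this (an assumption, probably in config.h):

#define EVERYTHING           0  // backpropagate through the whole network
#define NN_AND_LINEARISATION 1  // stop after the dense and linearisation layers
#define NN_ONLY              2  // stop after the dense (vector -> vector) layers

The single lock in D_Network matters because, as the changes to utils.c and update.c below show, several Network copies can share one D_Network: worker threads accumulate gradients into it and update_weights() takes the lock before applying them. A sketch of that sharing pattern (the worker logic is an assumption):

void* worker(void* arg) {
    Network* net = (Network*)arg;  // per-thread copy from copy_network();
                                   // net->d_network is shared between copies
    // ... forward pass and backpropagation accumulate into net->d_network ...
    update_weights(net);           // locks net->d_network->lock internally
    update_bias(net);
    return NULL;
}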


@@ -15,13 +15,13 @@ float clip(float a);
 * Updates the weights from the data obtained after several backpropagations
 * Then resets all the d_weights to 0
 */
-void update_weights(Network* network, Network* d_network);
void update_weights(Network* network);
/*
 * Updates the biases from the data obtained after several backpropagations
 * Then resets all the d_bias to 0
 */
-void update_bias(Network* network, Network* d_network);
void update_bias(Network* network);
/*
 * Resets to 0 all the weight backpropagation data
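When the ADAM_* options are enabled, each d_* gradient carries companion s_d_* and v_d_* buffers, whose names suggest the usual Adam moment estimates. If that reading is correct (an assumption based only on the field names, with bias correction omitted), each parameter with gradient g would be updated as:

s_t = \beta_1 s_{t-1} + (1 - \beta_1) g_t
v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2
\theta_t = \theta_{t-1} - \eta \, s_t / (\sqrt{v_t} + \epsilon)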


@@ -7,12 +7,8 @@
#include "../common/include/colors.h"
-#include "include/initialisation.h"
#include "include/test_network.h"
-#include "include/function.h"
-#include "include/creation.h"
#include "include/train.h"
-#include "include/cnn.h"
#include "include/main.h"


@@ -7,8 +7,8 @@
#include "include/models.h"
-Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning) {
-Network* network = create_network(8, learning_rate, dropout, initialisation, input_width, input_depth);
Network* network = create_network(8, learning_rate, dropout, initialisation, input_width, input_depth, finetuning);
add_convolution(network, 5, 6, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_convolution(network, 5, 16, 1, 0, activation);
@@ -19,8 +19,8 @@ Network* create_network_lenet5(float learning_rate, int dropout, int activation,
return network;
}
-Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning) {
-Network* network = create_network(12, learning_rate, dropout, initialisation, 227, 3);
Network* network = create_network(12, learning_rate, dropout, initialisation, 227, 3, finetuning);
add_convolution(network, 11, 96, 4, 0, activation);
add_average_pooling(network, 3, 2, 0);
add_convolution(network, 5, 256, 1, 2, activation);
@@ -35,8 +35,8 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
return network;
}
-Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output, int finetuning) {
-Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3);
Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3, finetuning);
add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
add_average_pooling(network, 2, 2, 0); // Max Pool
@@ -66,8 +66,8 @@ Network* create_network_VGG16(float learning_rate, int dropout, int activation,
return network;
}
-Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation) {
Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation, int finetuning) {
-Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3);
Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3, finetuning);
add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
add_average_pooling(network, 2, 2, 0); // Max Pool
@@ -97,8 +97,8 @@ Network* create_network_VGG16_227(float learning_rate, int dropout, int activati
return network;
}
-Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth, int finetuning) {
-Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth, finetuning);
add_dense_linearisation(network, 80, activation);
add_dense(network, 10, SOFTMAX);
return network;


@@ -187,6 +187,7 @@ Network* read_network(char* filename) {
network->initialisation = initialisation;
(void) !fread(&dropout, sizeof(uint32_t), 1, ptr);
network->dropout = dropout;
network->finetuning = 0;
// Read the input size of the different matrices
network->width = (int*)nalloc(size, sizeof(int));
@@ -268,72 +269,27 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
float tmp;
cnn->bias = (float***)nalloc(cnn->columns, sizeof(float**));
-cnn->d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
-#ifdef ADAM_CNN_BIAS
-cnn->s_d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
-cnn->v_d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
-#endif
for (int i=0; i < cnn->columns; i++) {
cnn->bias[i] = (float**)nalloc(output_width, sizeof(float*));
-cnn->d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
-#ifdef ADAM_CNN_BIAS
-cnn->s_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
-cnn->v_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
-#endif
for (int j=0; j < output_width; j++) {
cnn->bias[i][j] = (float*)nalloc(output_width, sizeof(float));
-cnn->d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
-#ifdef ADAM_CNN_BIAS
-cnn->s_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
-cnn->v_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
-#endif
for (int k=0; k < output_width; k++) {
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
cnn->bias[i][j][k] = tmp;
-cnn->d_bias[i][j][k] = 0.;
-#ifdef ADAM_CNN_BIAS
-cnn->s_d_bias[i][j][k] = 0.;
-cnn->v_d_bias[i][j][k] = 0.;
-#endif
}
}
}
cnn->weights = (float****)nalloc(cnn->rows, sizeof(float***));
-cnn->d_weights = (float****)nalloc(cnn->rows, sizeof(float***));
-#ifdef ADAM_CNN_WEIGHTS
-cnn->s_d_weights = (float****)nalloc(cnn->rows, sizeof(float***));
-cnn->v_d_weights = (float****)nalloc(cnn->rows, sizeof(float***));
-#endif
for (int i=0; i < cnn->rows; i++) {
cnn->weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
-cnn->d_weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
-#ifdef ADAM_CNN_WEIGHTS
-cnn->s_d_weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
-cnn->v_d_weights[i] = (float***)nalloc(cnn->columns, sizeof(float**));
-#endif
for (int j=0; j < cnn->columns; j++) {
cnn->weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
-cnn->d_weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
-#ifdef ADAM_CNN_WEIGHTS
-cnn->s_d_weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
-cnn->v_d_weights[i][j] = (float**)nalloc(cnn->k_size, sizeof(float*));
-#endif
for (int k=0; k < cnn->k_size; k++) {
cnn->weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
-cnn->d_weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
-#ifdef ADAM_CNN_WEIGHTS
-cnn->s_d_weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
-cnn->v_d_weights[i][j][k] = (float*)nalloc(cnn->k_size, sizeof(float));
-#endif
for (int l=0; l < cnn->k_size; l++) {
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
cnn->weights[i][j][k][l] = tmp;
-cnn->d_weights[i][j][k][l] = 0.;
-#ifdef ADAM_CNN_WEIGHTS
-cnn->s_d_weights[i][j][k][l] = 0.;
-cnn->v_d_weights[i][j][k][l] = 0.;
-#endif
}
}
}
@@ -357,42 +313,17 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
float tmp;
nn->bias = (float*)nalloc(nn->size_output, sizeof(float));
-nn->d_bias = (float*)nalloc(nn->size_output, sizeof(float));
-#ifdef ADAM_DENSE_BIAS
-nn->s_d_bias = (float*)nalloc(nn->size_output, sizeof(float));
-nn->v_d_bias = (float*)nalloc(nn->size_output, sizeof(float));
-#endif
for (int i=0; i < nn->size_output; i++) {
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
nn->bias[i] = tmp;
-nn->d_bias[i] = 0.;
-#ifdef ADAM_DENSE_BIAS
-nn->s_d_bias[i] = 0.;
-nn->v_d_bias[i] = 0.;
-#endif
}
nn->weights = (float**)nalloc(nn->size_input, sizeof(float*));
-nn->d_weights = (float**)nalloc(nn->size_input, sizeof(float*));
-#ifdef ADAM_DENSE_WEIGHTS
-nn->s_d_weights = (float**)nalloc(nn->size_input, sizeof(float*));
-nn->v_d_weights = (float**)nalloc(nn->size_input, sizeof(float*));
-#endif
for (int i=0; i < nn->size_input; i++) {
nn->weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
-nn->d_weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
-#ifdef ADAM_DENSE_WEIGHTS
-nn->s_d_weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
-nn->v_d_weights[i] = (float*)nalloc(nn->size_output, sizeof(float));
-#endif
for (int j=0; j < nn->size_output; j++) {
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
nn->weights[i][j] = tmp;
-nn->d_weights[i][j] = 0.;
-#ifdef ADAM_DENSE_WEIGHTS
-nn->s_d_weights[i][j] = 0.;
-nn->v_d_weights[i][j] = 0.;
-#endif
}
}
} else if (type_couche == POOLING) { // Pooling layer case


@@ -7,7 +7,6 @@
#include "../common/include/memory_management.h"
#include "../common/include/mnist.h"
#include "include/neuron_io.h"
-#include "include/struct.h"
#include "include/jpeg.h"
#include "include/free.h"
#include "include/cnn.h"


@@ -1,11 +1,18 @@
-#include <sys/sysinfo.h>
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
-#include <omp.h>
#ifdef __linux__
#include <sys/sysinfo.h>
#elif defined(__APPLE__)
#include <sys/sysctl.h>
#else
#error Unknown platform
#endif
#include "../common/include/memory_management.h"
#include "../common/include/colors.h"
@@ -14,6 +21,7 @@
#include "include/initialisation.h"
#include "include/test_network.h"
#include "include/neuron_io.h"
#include "include/creation.h"
#include "include/function.h"
#include "include/update.h"
#include "include/models.h"
@@ -64,7 +72,7 @@ void* train_thread(void* parameters) {
float loss = 0.;
#ifdef DETAILED_TRAIN_TIMINGS
-double start_time;
clock_t start_time;
#endif
pthread_t tid;
@@ -81,16 +89,16 @@ void* train_thread(void* parameters) {
write_image_in_network_32(images[index[i]], height, width, network->input[0][0], param->offset);
#ifdef DETAILED_TRAIN_TIMINGS
-start_time = omp_get_wtime();
start_time = clock();
#endif
forward_propagation(network);
#ifdef DETAILED_TRAIN_TIMINGS
printf("Temps de forward: ");
-printf_time(omp_get_wtime() - start_time);
printf_time(clock() - start_time);
printf("\n");
-start_time = omp_get_wtime();
start_time = clock();
#endif
maxi = indice_max(network->input[network->size-1][0][0], 10);
@@ -108,9 +116,9 @@ void* train_thread(void* parameters) {
#ifdef DETAILED_TRAIN_TIMINGS
printf("Temps de backward: ");
-printf_time(omp_get_wtime() - start_time);
printf_time(clock() - start_time);
printf("\n");
-start_time = omp_get_wtime();
start_time = clock();
#endif
if (maxi == labels[index[i]]) {
@@ -131,16 +139,16 @@ void* train_thread(void* parameters) {
write_256_image_in_network(param->dataset->images[index[i]], width, height, param->dataset->numComponents, network->width[0], network->input[0]);
#ifdef DETAILED_TRAIN_TIMINGS
-start_time = omp_get_wtime();
start_time = clock();
#endif
forward_propagation(network);
#ifdef DETAILED_TRAIN_TIMINGS
printf("Temps de forward: ");
-printf_time(omp_get_wtime() - start_time);
printf_time(clock() - start_time);
printf("\n");
-start_time = omp_get_wtime();
start_time = clock();
#endif
maxi = indice_max(network->input[network->size-1][0][0], param->dataset->numCategories);
@@ -148,9 +156,9 @@ void* train_thread(void* parameters) {
#ifdef DETAILED_TRAIN_TIMINGS
printf("Temps de backward: ");
-printf_time(omp_get_wtime() - start_time);
printf_time(clock() - start_time);
printf("\n");
-start_time = omp_get_wtime();
start_time = clock();
#endif
@@ -179,7 +187,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
exit(1);
}
#endif
-srand(time(NULL));
srand(clock());
float loss;
float batch_loss; // May be redundant with loss, but gives more information
float test_accuracy = 0.; // Used to decrease Learning rate
@@ -190,12 +198,12 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
//* Various timers to measure performance in terms of speed
-double start_time, end_time;
clock_t start_time, end_time;
-double elapsed_time;
clock_t elapsed_time;
-double algo_start = omp_get_wtime();
clock_t algo_start = clock();
-start_time = omp_get_wtime();
start_time = clock();
//* Loading the dataset
@@ -232,10 +240,10 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
Network* network;
if (!recover) {
if (dataset_type == 0) {
-network = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, input_width, input_depth);
network = create_network_lenet5(LEARNING_RATE, 0, LEAKY_RELU, HE, input_width, input_depth, finetuning);
//network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_width, input_depth);
} else {
-network = create_network_VGG16(LEARNING_RATE, 0, RELU, HE, dataset->numCategories);
network = create_network_VGG16(LEARNING_RATE, 0, RELU, HE, dataset->numCategories, finetuning);
#ifdef USE_MULTITHREADING
printf_warning("Utilisation de VGG16 avec multithreading. La quantité de RAM utilisée peut devenir excessive\n");
@@ -246,6 +254,10 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
network->learning_rate = LEARNING_RATE;
}
// Attach the backpropagation network to the network initialised above
D_Network* d_network = create_d_network(network);
network->d_network = d_network;
/*
shuffle_index[i] contains the new index of the element located at position i before shuffling
This makes it possible to reorder the training set to avoid certain biases
@@ -261,7 +273,17 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
#ifdef USE_MULTITHREADING
int nb_remaining_images; // Number of images still to be dispatched for a round of threads
// Get the number of available threads
-int nb_threads = get_nprocs();
#ifdef __linux__
int nb_threads = get_nprocs();
#elif defined(__APPLE__)
int nb_threads;
size_t len = sizeof(nb_threads);
if (sysctlbyname("hw.logicalcpu", &nb_threads, &len, NULL, 0) == -1) {
perror("sysctl");
exit(1);
}
#endif
pthread_t *tid = (pthread_t*)malloc(nb_threads * sizeof(pthread_t));
// Create the parameters given to each thread in the multi-threading case
@@ -320,7 +342,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
train_params->finetuning = finetuning;
#endif
-end_time = omp_get_wtime();
end_time = clock();
elapsed_time = end_time - start_time;
printf("Taux d'apprentissage initial: %0.2e\n", network->learning_rate);
@@ -331,7 +353,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
//* Training loop
for (int i=0; i < epochs; i++) {
-start_time = omp_get_wtime();
start_time = clock();
// The accuracy variable provides an ESTIMATE
// of the network's success rate and training progress,
// but is by no means an exact value in the case
@@ -392,8 +414,8 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
// Wait for all threads to finish before applying changes to the main network
for (int k=0; k < nb_threads; k++) {
if (train_parameters[k]->network) { // If this thread was used
-update_weights(network, train_parameters[k]->network);
update_weights(network); // , train_parameters[k]->network
-update_bias(network, train_parameters[k]->network);
update_bias(network); // , train_parameters[k]->network
}
}
current_accuracy = accuracy * nb_images_total/((j+1)*BATCHES);
@@ -416,14 +438,14 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
loss += train_params->loss/nb_images_total;
batch_loss += train_params->loss/BATCHES;
-update_weights(network, network);
update_weights(network);
-update_bias(network, network);
update_bias(network);
printf("\rÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: " YELLOW "%0.4f%%" RESET "\tBatch Accuracy: " YELLOW "%0.2f%%" RESET, i, epochs, BATCHES*(j+1), nb_images_total, current_accuracy*100, batch_accuracy*100);
#endif
}
//* End of an epoch: display the results and save the network
-end_time = omp_get_wtime();
end_time = clock();
elapsed_time = end_time - start_time;
#ifdef USE_MULTITHREADING
printf("\rThreads [%d]\tÉpoque [%d/%d]\tImage [%d/%d]\tAccuracy: " GREEN "%0.4f%%" RESET " \tLoss: %lf\tTemps: ", nb_threads, i, epochs, nb_images_total, nb_images_total, accuracy*100, loss);
@@ -458,6 +480,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
write_network(out, network);
}
free(shuffle_index);
free_d_network(network);
free_network(network);
#ifdef USE_MULTITHREADING
@@ -483,7 +506,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
free_dataset(dataset);
}
-end_time = omp_get_wtime();
end_time = clock();
elapsed_time = end_time - algo_start;
printf("\nTemps total: ");
printf_time(elapsed_time);
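The Linux/macOS thread-count detection above also appears in src/dense/neural_network.c. A possible refactor into a single helper, sketched here but not part of this commit:

#include <stdio.h>
#include <stdlib.h>
#ifdef __linux__
#include <sys/sysinfo.h>
#elif defined(__APPLE__)
#include <sys/sysctl.h>
#endif

static int get_thread_count(void) {
#ifdef __linux__
    return get_nprocs();
#elif defined(__APPLE__)
    int nb_threads;
    size_t len = sizeof(nb_threads);
    if (sysctlbyname("hw.logicalcpu", &nb_threads, &len, NULL, 0) == -1) {
        perror("sysctl");
        exit(1);
    }
    return nb_threads;
#else
    return 1;  // conservative fallback on unknown platforms
#endif
}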


@@ -4,6 +4,7 @@
#include "include/update.h"
#include "include/struct.h"
#include "include/cnn.h"
#include "include/config.h"
@@ -17,12 +18,14 @@ float clip(float a) {
return a;
}
-void update_weights(Network* network, Network* d_network) {
void update_weights(Network* network) {
int n = network->size;
D_Network* d_network = network->d_network;
pthread_mutex_lock(&(d_network->lock));
for (int i=0; i < (n-1); i++) {
Kernel* k_i = network->kernel[i];
-Kernel* dk_i = d_network->kernel[i];
D_Kernel* d_k_i = d_network->kernel[i];
int input_depth = network->depth[i];
int input_width = network->width[i];
@@ -31,8 +34,11 @@ void update_weights(Network* network, Network* d_network) {
int output_width = network->width[i+1];
if (k_i->cnn) { // Convolution
if (network->finetuning != EVERYTHING) {
return; // Then we are done backpropagating
}
Kernel_cnn* cnn = k_i->cnn;
-Kernel_cnn* d_cnn = dk_i->cnn;
D_Kernel_cnn* d_cnn = d_k_i->cnn;
int k_size = cnn->k_size;
for (int a=0; a < input_depth; a++) {
for (int b=0; b < output_depth; b++) {
@@ -55,7 +61,7 @@ void update_weights(Network* network, Network* d_network) {
} else if (k_i->nn) { // Full connection
if (k_i->linearisation == DOESNT_LINEARISE) { // Vector -> Vector
Kernel_nn* nn = k_i->nn;
-Kernel_nn* d_nn = dk_i->nn;
D_Kernel_nn* d_nn = d_k_i->nn;
for (int a=0; a < input_width; a++) {
for (int b=0; b < output_width; b++) {
@@ -70,8 +76,11 @@ void update_weights(Network* network, Network* d_network) {
}
}
} else { // Matrix -> vector
if (network->finetuning == NN_ONLY) {
return; // Then we are done backpropagating
}
Kernel_nn* nn = k_i->nn;
-Kernel_nn* d_nn = dk_i->nn;
D_Kernel_nn* d_nn = d_k_i->nn;
int size_input = input_width*input_width*input_depth;
@@ -93,20 +102,25 @@ void update_weights(Network* network, Network* d_network) {
}
// A pooling layer requires no processing
}
pthread_mutex_unlock(&(d_network->lock));
}
-void update_bias(Network* network, Network* d_network) {
void update_bias(Network* network) {
int n = network->size;
D_Network* d_network = network->d_network;
for (int i=0; i < (n-1); i++) {
Kernel* k_i = network->kernel[i];
-Kernel* dk_i = d_network->kernel[i];
D_Kernel* d_k_i = d_network->kernel[i];
int output_width = network->width[i+1];
int output_depth = network->depth[i+1];
if (k_i->cnn) { // Convolution
if (network->finetuning != EVERYTHING) {
return; // Then we are done backpropagating
}
Kernel_cnn* cnn = k_i->cnn;
-Kernel_cnn* d_cnn = dk_i->cnn;
D_Kernel_cnn* d_cnn = d_k_i->cnn;
for (int a=0; a < output_depth; a++) {
for (int b=0; b < output_width; b++) {
@@ -124,8 +138,13 @@ void update_bias(Network* network, Network* d_network) {
}
}
} else if (k_i->nn) { // Full connection
if (k_i->linearisation == DO_LINEARISE) { // Matrix -> vector
if (network->finetuning == NN_ONLY) {
return; // Then we are done backpropagating
}
}
Kernel_nn* nn = k_i->nn;
-Kernel_nn* d_nn = dk_i->nn;
D_Kernel_nn* d_nn = d_k_i->nn;
for (int a=0; a < output_width; a++) {
#ifdef ADAM_DENSE_BIAS
@@ -145,10 +164,12 @@ void update_bias(Network* network, Network* d_network) {
void reset_d_weights(Network* network) {
int n = network->size;
D_Network* d_network = network->d_network;
for (int i=0; i < (n-1); i++) {
Kernel* k_i = network->kernel[i];
Kernel* k_i_1 = network->kernel[i+1];
D_Kernel* d_k_i_1 = d_network->kernel[i+1];
int input_depth = network->depth[i];
int input_width = network->width[i];
@@ -157,36 +178,42 @@ void reset_d_weights(Network* network) {
int output_width = network->width[i+1];
if (k_i->cnn) { // Convolution
if (network->finetuning != EVERYTHING) {
continue; // Nothing was initialised here, so there is nothing to reset
}
-Kernel_cnn* cnn = k_i_1->cnn;
D_Kernel_cnn* d_cnn = d_k_i_1->cnn;
-int k_size = cnn->k_size;
int k_size = k_i_1->cnn->k_size;
for (int a=0; a < input_depth; a++) {
for (int b=0; b < output_depth; b++) {
for (int c=0; c < k_size; c++) {
for (int d=0; d < k_size; d++) {
-cnn->d_weights[a][b][c][d] = 0;
d_cnn->d_weights[a][b][c][d] = 0;
}
}
}
}
} else if (k_i->nn) { // Full connection
if (k_i->linearisation == DOESNT_LINEARISE) { // Vector -> Vector
-Kernel_nn* nn = k_i_1->nn;
D_Kernel_nn* d_nn = d_k_i_1->nn;
for (int a=0; a < input_width; a++) {
for (int b=0; b < output_width; b++) {
-nn->d_weights[a][b] = 0;
d_nn->d_weights[a][b] = 0;
}
}
} else { // Matrix -> vector
if (network->finetuning == NN_ONLY) {
continue; // Nothing was initialised here, so there is nothing to reset
}
-Kernel_nn* nn = k_i_1->nn;
D_Kernel_nn* d_nn = d_k_i_1->nn;
int size_input = input_width*input_width*input_depth;
for (int a=0; a < size_input; a++) {
for (int b=0; b < output_width; b++) {
-nn->d_weights[a][b] = 0;
d_nn->d_weights[a][b] = 0;
}
}
}
@@ -197,29 +224,38 @@ void reset_d_weights(Network* network) {
void reset_d_bias(Network* network) {
int n = network->size;
D_Network* d_network = network->d_network;
for (int i=0; i < (n-1); i++) {
Kernel* k_i = network->kernel[i];
-Kernel* k_i_1 = network->kernel[i+1];
D_Kernel* d_k_i_1 = d_network->kernel[i+1];
int output_width = network->width[i+1];
int output_depth = network->depth[i+1];
if (k_i->cnn) { // Convolution
if (network->finetuning != EVERYTHING) {
continue; // Nothing was initialised here, so there is nothing to reset
}
-Kernel_cnn* cnn = k_i_1->cnn;
D_Kernel_cnn* d_cnn = d_k_i_1->cnn;
for (int a=0; a < output_depth; a++) {
for (int b=0; b < output_width; b++) {
for (int c=0; c < output_width; c++) {
-cnn->d_bias[a][b][c] = 0;
d_cnn->d_bias[a][b][c] = 0;
}
}
}
} else if (k_i->nn) { // Full connection
if (k_i->linearisation == DO_LINEARISE) {
if (network->finetuning == NN_ONLY) {
continue; // Nothing was initialised here, so there is nothing to reset
}
}
-Kernel_nn* nn = k_i_1->nn;
D_Kernel_nn* d_nn = d_k_i_1->nn;
for (int a=0; a < output_width; a++) {
-nn->d_bias[a] = 0;
d_nn->d_bias[a] = 0;
}
}
// A pooling layer requires no processing
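All four functions above gate their work on the same rule: convolution kernels are touched only when finetuning == EVERYTHING, linearisation kernels unless finetuning == NN_ONLY, and plain dense kernels always. A compact restatement of that rule (a sketch, not in the commit; assumes the finetuning constants from config.h):

#include <stdbool.h>

static bool layer_is_trained(Network* network, Kernel* k) {
    if (k->cnn)                            // Convolution
        return network->finetuning == EVERYTHING;
    if (!k->nn)                            // Pooling: nothing to train
        return false;
    if (k->linearisation == DO_LINEARISE)  // Matrix -> vector
        return network->finetuning != NN_ONLY;
    return true;                           // Vector -> vector layers are always trained
}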


@@ -115,6 +115,7 @@ Network* copy_network(Network* network) {
copyVar(initialisation);
copyVar(max_size);
copyVar(size);
copyVar(d_network); // Both networks thus share the same backpropagation network
network_cp->width = (int*)nalloc(size, sizeof(int));
network_cp->depth = (int*)nalloc(size, sizeof(int));
@@ -153,40 +154,15 @@ Network* copy_network(Network* network) {
copyVar(kernel[i]->nn->size_output);
network_cp->kernel[i]->nn->bias = (float*)nalloc(size_output, sizeof(float));
-network_cp->kernel[i]->nn->d_bias = (float*)nalloc(size_output, sizeof(float));
-#ifdef ADAM_DENSE_BIAS
-network_cp->kernel[i]->nn->s_d_bias = (float*)nalloc(size_output, sizeof(float));
-network_cp->kernel[i]->nn->v_d_bias = (float*)nalloc(size_output, sizeof(float));
-#endif
for (int j=0; j < size_output; j++) {
copyVar(kernel[i]->nn->bias[j]);
-network_cp->kernel[i]->nn->d_bias[j] = 0.;
-#ifdef ADAM_DENSE_BIAS
-network_cp->kernel[i]->nn->s_d_bias[j] = 0.;
-network_cp->kernel[i]->nn->v_d_bias[j] = 0.;
-#endif
}
network_cp->kernel[i]->nn->weights = (float**)nalloc(size_input, sizeof(float*));
-network_cp->kernel[i]->nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
-#ifdef ADAM_DENSE_WEIGHTS
-network_cp->kernel[i]->nn->s_d_weights = (float**)nalloc(size_input, sizeof(float*));
-network_cp->kernel[i]->nn->v_d_weights = (float**)nalloc(size_input, sizeof(float*));
-#endif
for (int j=0; j < size_input; j++) {
network_cp->kernel[i]->nn->weights[j] = (float*)nalloc(size_output, sizeof(float));
-network_cp->kernel[i]->nn->d_weights[j] = (float*)nalloc(size_output, sizeof(float));
-#ifdef ADAM_DENSE_WEIGHTS
-network_cp->kernel[i]->nn->s_d_weights[j] = (float*)nalloc(size_output, sizeof(float));
-network_cp->kernel[i]->nn->v_d_weights[j] = (float*)nalloc(size_output, sizeof(float));
-#endif
for (int k=0; k < size_output; k++) {
copyVar(kernel[i]->nn->weights[j][k]);
-network_cp->kernel[i]->nn->d_weights[j][k] = 0.;
-#ifdef ADAM_DENSE_WEIGHTS
-network_cp->kernel[i]->nn->s_d_weights[j][k] = 0.;
-network_cp->kernel[i]->nn->v_d_weights[j][k] = 0.;
-#endif
}
}
}
@@ -211,70 +187,25 @@ Network* copy_network(Network* network) {
copyVar(kernel[i]->cnn->columns);
network_cp->kernel[i]->cnn->bias = (float***)nalloc(columns, sizeof(float**));
-network_cp->kernel[i]->cnn->d_bias = (float***)nalloc(columns, sizeof(float**));
-#ifdef ADAM_CNN_BIAS
-network_cp->kernel[i]->cnn->s_d_bias = (float***)nalloc(columns, sizeof(float**));
-network_cp->kernel[i]->cnn->v_d_bias = (float***)nalloc(columns, sizeof(float**));
-#endif
for (int j=0; j < columns; j++) {
network_cp->kernel[i]->cnn->bias[j] = (float**)nalloc(output_width, sizeof(float*));
-network_cp->kernel[i]->cnn->d_bias[j] = (float**)nalloc(output_width, sizeof(float*));
-#ifdef ADAM_CNN_BIAS
-network_cp->kernel[i]->cnn->s_d_bias[j] = (float**)nalloc(output_width, sizeof(float*));
-network_cp->kernel[i]->cnn->v_d_bias[j] = (float**)nalloc(output_width, sizeof(float*));
-#endif
for (int k=0; k < output_width; k++) {
network_cp->kernel[i]->cnn->bias[j][k] = (float*)nalloc(output_width, sizeof(float));
-network_cp->kernel[i]->cnn->d_bias[j][k] = (float*)nalloc(output_width, sizeof(float));
-#ifdef ADAM_CNN_BIAS
-network_cp->kernel[i]->cnn->s_d_bias[j][k] = (float*)nalloc(output_width, sizeof(float));
-network_cp->kernel[i]->cnn->v_d_bias[j][k] = (float*)nalloc(output_width, sizeof(float));
-#endif
for (int l=0; l < output_width; l++) {
copyVar(kernel[i]->cnn->bias[j][k][l]);
-network_cp->kernel[i]->cnn->d_bias[j][k][l] = 0.;
-#ifdef ADAM_CNN_BIAS
-network_cp->kernel[i]->cnn->s_d_bias[j][k][l] = 0.;
-network_cp->kernel[i]->cnn->v_d_bias[j][k][l] = 0.;
-#endif
}
}
}
network_cp->kernel[i]->cnn->weights = (float****)nalloc(rows, sizeof(float***));
-network_cp->kernel[i]->cnn->d_weights = (float****)nalloc(rows, sizeof(float***));
-#ifdef ADAM_CNN_WEIGHTS
-network_cp->kernel[i]->cnn->s_d_weights = (float****)nalloc(rows, sizeof(float***));
-network_cp->kernel[i]->cnn->v_d_weights = (float****)nalloc(rows, sizeof(float***));
-#endif
for (int j=0; j < rows; j++) {
network_cp->kernel[i]->cnn->weights[j] = (float***)nalloc(columns, sizeof(float**));
-network_cp->kernel[i]->cnn->d_weights[j] = (float***)nalloc(columns, sizeof(float**));
-#ifdef ADAM_CNN_WEIGHTS
-network_cp->kernel[i]->cnn->s_d_weights[j] = (float***)nalloc(columns, sizeof(float**));
-network_cp->kernel[i]->cnn->v_d_weights[j] = (float***)nalloc(columns, sizeof(float**));
-#endif
for (int k=0; k < columns; k++) {
network_cp->kernel[i]->cnn->weights[j][k] = (float**)nalloc(k_size, sizeof(float*));
-network_cp->kernel[i]->cnn->d_weights[j][k] = (float**)nalloc(k_size, sizeof(float*));
-#ifdef ADAM_CNN_WEIGHTS
-network_cp->kernel[i]->cnn->s_d_weights[j][k] = (float**)nalloc(k_size, sizeof(float*));
-network_cp->kernel[i]->cnn->v_d_weights[j][k] = (float**)nalloc(k_size, sizeof(float*));
-#endif
for (int l=0; l < k_size; l++) {
network_cp->kernel[i]->cnn->weights[j][k][l] = (float*)nalloc(k_size, sizeof(float));
-network_cp->kernel[i]->cnn->d_weights[j][k][l] = (float*)nalloc(k_size, sizeof(float));
-#ifdef ADAM_CNN_WEIGHTS
-network_cp->kernel[i]->cnn->s_d_weights[j][k][l] = (float*)nalloc(k_size, sizeof(float));
-network_cp->kernel[i]->cnn->v_d_weights[j][k][l] = (float*)nalloc(k_size, sizeof(float));
-#endif
for (int m=0; m < k_size; m++) {
copyVar(kernel[i]->cnn->weights[j][k][l][m]);
-network_cp->kernel[i]->cnn->d_weights[j][k][l][m] = 0.;
-#ifdef ADAM_CNN_WEIGHTS
-network_cp->kernel[i]->cnn->s_d_weights[j][k][l][m] = 0.;
-network_cp->kernel[i]->cnn->v_d_weights[j][k][l][m] = 0.;
-#endif
}
}
}


@@ -1,5 +1,6 @@
#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include "include/colors.h"
@@ -15,11 +16,13 @@ void printf_info(char* string) {
printf(BOLDBLUE "[ INFO ]" RESET " %s", string);
}
-void printf_time(float time) {
-int hours = time/3600;
-int minutes = ((int)time %3600)/60;
-int seconds = ((int)time) %60;
-int milliseconds = (time - (int)time)*1000;
void printf_time(clock_t time) {
double real_time = (double) time / CLOCKS_PER_SEC;
int hours = real_time/3600;
int minutes = ((int)real_time %3600)/60;
int seconds = ((int)real_time) %60;
int milliseconds = (real_time - (int)real_time)*1000;
if (hours != 0) {
printf("%dh %dmn", hours, minutes);


@@ -1,4 +1,5 @@
#include <stdio.h>
#include <time.h>
#ifndef DEF_COLORS_H
#define DEF_COLORS_H
@@ -51,7 +52,7 @@ extern "C"
/*
 * Prints a timing as hours, minutes, seconds, milliseconds, limiting the precision to the two most significant units
 */
-void printf_time(float time);
void printf_time(clock_t time);
#ifdef __CUDACC__
extern "C"


@@ -4,7 +4,14 @@
#include <float.h>
#include <stdbool.h>
#include <pthread.h>
-#include <sys/sysinfo.h>
#ifdef __linux__
#include <sys/sysinfo.h>
#elif defined(__APPLE__)
#include <sys/sysctl.h>
#else
#error Unknown platform
#endif
#include "include/neural_network.h"
#include "../common/include/colors.h"
@@ -201,7 +208,17 @@ void train(int epochs, char* recovery, char* image_file, char* label_file, char*
float accuracy;
float current_accuracy;
-int nb_threads = get_nprocs();
#ifdef __linux__
int nb_threads = get_nprocs();
#elif defined(__APPLE__)
int nb_threads;
size_t len = sizeof(nb_threads);
if (sysctlbyname("hw.logicalcpu", &nb_threads, &len, NULL, 0) == -1) {
perror("sysctl");
exit(1);
}
#endif
pthread_t *tid = (pthread_t *)malloc(nb_threads * sizeof(pthread_t));
/*


@ -115,34 +115,14 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
// bias[kernel->columns] // bias[kernel->columns]
kernel->bias = (float*)malloc(kernel->columns, sizeof(float)); kernel->bias = (float*)malloc(kernel->columns, sizeof(float));
kernel->d_bias = (float*)malloc(kernel->columns, sizeof(float));
#ifdef ADAM_CNN_BIAS
kernel->s_d_bias = (float*)malloc(kernel->columns, sizeof(float));
kernel->v_d_bias = (float*)malloc(kernel->columns, sizeof(float));
#endif
for (int i=0; i<kernel->columns; i++) { for (int i=0; i<kernel->columns; i++) {
kernel->bias[i] = random_float(0.0f, 15.0f); kernel->bias[i] = random_float(0.0f, 15.0f);
kernel->d_bias[i] = random_float(0.0f, 1.5f);
#ifdef ADAM_CNN_BIAS
kernel->s_d_bias[i] = random_float(0.0f, 1.5f);
kernel->v_d_bias[i] = random_float(0.0f, 1.5f);
#endif
} }
// weights[rows][columns][k_size][k_size] // weights[rows][columns][k_size][k_size]
kernel->weights = (float****)malloc(sizeof(float***)*kernel->rows); kernel->weights = (float****)malloc(sizeof(float***)*kernel->rows);
kernel->d_weights = (float****)malloc(sizeof(float***)*kernel->rows);
#ifdef ADAM_CNN_WEIGHTS
kernel->s_d_weights = (float****)malloc(sizeof(float***)*kernel->rows);
kernel->v_d_weights = (float****)malloc(sizeof(float***)*kernel->rows);
#endif
for (int i=0; i < kernel->rows; i++) { for (int i=0; i < kernel->rows; i++) {
kernel->weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f); kernel->weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f);
kernel->d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
#ifdef ADAM_CNN_WEIGHTS
kernel->s_d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
kernel->v_d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
#endif
} }
float*** input = create_matrix(kernel->rows, input_width, input_width, 5.0f); float*** input = create_matrix(kernel->rows, input_width, input_width, 5.0f);
@ -165,7 +145,7 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
start = clock(); start = clock();
make_convolution_cpu(kernel, input, output_cpu, output_width, 1); make_convolution_cpu(kernel, input, output_cpu, output_width, 1, 0);
end = clock(); end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
@ -179,26 +159,11 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
//printf(GREEN "OK\n" RESET); //printf(GREEN "OK\n" RESET);
free(kernel->bias); free(kernel->bias);
free(kernel->d_bias);
#ifdef ADAM_CNN_BIAS
free(kernel->s_d_bias);
free(kernel->v_d_bias);
#endif
for (int i=0; i < kernel->rows; i++) { for (int i=0; i < kernel->rows; i++) {
free_matrix(kernel->weights[i], kernel->columns, kernel->k_size); free_matrix(kernel->weights[i], kernel->columns, kernel->k_size);
free_matrix(kernel->d_weights[i], kernel->columns, kernel->k_size);
#ifdef ADAM_CNN_WEIGHTS
free_matrix(kernel->s_d_weights[i], kernel->columns, kernel->k_size);
free_matrix(kernel->v_d_weights[i], kernel->columns, kernel->k_size);
#endif
} }
free(kernel->weights); free(kernel->weights);
free(kernel->d_weights);
#ifdef ADAM_CNN_WEIGHTS
free(kernel->s_d_weights);
free(kernel->v_d_weights);
#endif
free_matrix(input, kernel->rows, input_width); free_matrix(input, kernel->rows, input_width);
free_matrix(output_cpu, kernel->columns, output_width); free_matrix(output_cpu, kernel->columns, output_width);
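
This test already converts ticks to seconds correctly with ((double)(end - start)) / CLOCKS_PER_SEC. Factored into a helper for reuse — a sketch under the assumption that the measured work takes no arguments:

#include <time.h>

// Sketch: measure processor time of a call, in seconds.
// clock() returns ticks; divide by CLOCKS_PER_SEC to get seconds.
double time_call(void (*fn)(void)) {
    clock_t start = clock();
    fn();
    return (double)(clock() - start) / CLOCKS_PER_SEC;
}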

View File

@ -4,7 +4,7 @@
#include <assert.h> #include <assert.h>
#include <math.h> #include <math.h>
#include <time.h> #include <time.h>
#include <omp.h> #include <time.h>
#include "../src/common/include/memory_management.h" #include "../src/common/include/memory_management.h"
#include "../src/cnn/include/convolution.h" #include "../src/cnn/include/convolution.h"
@ -106,26 +106,11 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
// bias[kernel->columns][output_width][output_width] // bias[kernel->columns][output_width][output_width]
kernel->bias = create_matrix(kernel->columns, output_width, output_width, 15.0f); kernel->bias = create_matrix(kernel->columns, output_width, output_width, 15.0f);
kernel->d_bias = create_matrix(kernel->columns, output_width, output_width, 1.5f);
#ifdef ADAM_CNN_BIAS
kernel->s_d_bias = create_matrix(kernel->columns, output_width, output_width, 1.5f);
kernel->v_d_bias = create_matrix(kernel->columns, output_width, output_width, 1.5f);
#endif
// weights[rows][columns][k_size][k_size] // weights[rows][columns][k_size][k_size]
kernel->weights = (float****)nalloc(kernel->rows, sizeof(float***)); kernel->weights = (float****)nalloc(kernel->rows, sizeof(float***));
kernel->d_weights = (float****)nalloc(kernel->rows, sizeof(float***));
#ifdef ADAM_CNN_WEIGHTS
kernel->s_d_weights = (float****)nalloc(kernel->rows, sizeof(float***));
kernel->v_d_weights = (float****)nalloc(kernel->rows, sizeof(float***));
#endif
for (int i=0; i < kernel->rows; i++) { for (int i=0; i < kernel->rows; i++) {
kernel->weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f); kernel->weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f);
kernel->d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
#ifdef ADAM_CNN_WEIGHTS
kernel->s_d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
kernel->v_d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
#endif
} }
float*** input = create_matrix(kernel->rows, input_width, input_width, 5.0f); float*** input = create_matrix(kernel->rows, input_width, input_width, 5.0f);
@ -136,21 +121,21 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
// Launch the computations // Launch the computations
double start_time, end_time; clock_t start_time, end_time;
double cpu_time_used, gpu_time_used; clock_t cpu_time_used, gpu_time_used;
start_time = omp_get_wtime(); start_time = clock();
make_convolution_device(kernel, input, output_gpu, output_width, 1, 0); make_convolution_device(kernel, input, output_gpu, output_width, 1, 0);
end_time = omp_get_wtime(); end_time = clock();
gpu_time_used = end_time - start_time; gpu_time_used = end_time - start_time;
printf("(%d, %d, %d, %d) Time used for GPU: %lf seconds\n", rows, columns, input_width, output_width, gpu_time_used); printf("(%d, %d, %d, %d) Time used for GPU: %lf seconds\n", rows, columns, input_width, output_width, gpu_time_used);
start_time = omp_get_wtime(); start_time = clock();
make_convolution_cpu(kernel, input, output_cpu, output_width, 1, 0); make_convolution_cpu(kernel, input, output_cpu, output_width, 1, 0);
end_time = omp_get_wtime(); end_time = clock();
cpu_time_used = end_time - start_time; cpu_time_used = end_time - start_time;
printf("(%d, %d, %d, %d) Time used for CPU: %lf seconds\n", rows, columns, input_width, output_width, cpu_time_used); printf("(%d, %d, %d, %d) Time used for CPU: %lf seconds\n", rows, columns, input_width, output_width, cpu_time_used);
@ -163,26 +148,11 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
printf(GREEN "OK\n" RESET); printf(GREEN "OK\n" RESET);
free_matrix(kernel->bias, kernel->columns, output_width); free_matrix(kernel->bias, kernel->columns, output_width);
free_matrix(kernel->d_bias, kernel->columns, output_width);
#ifdef ADAM_CNN_BIAS
free_matrix(kernel->s_d_bias, kernel->columns, output_width);
free_matrix(kernel->v_d_bias, kernel->columns, output_width);
#endif
for (int i=0; i < kernel->rows; i++) { for (int i=0; i < kernel->rows; i++) {
free_matrix(kernel->weights[i], kernel->columns, kernel->k_size); free_matrix(kernel->weights[i], kernel->columns, kernel->k_size);
free_matrix(kernel->d_weights[i], kernel->columns, kernel->k_size);
#ifdef ADAM_CNN_WEIGHTS
free_matrix(kernel->s_d_weights[i], kernel->columns, kernel->k_size);
free_matrix(kernel->v_d_weights[i], kernel->columns, kernel->k_size);
#endif
} }
gree(kernel->weights, false); gree(kernel->weights, false);
gree(kernel->d_weights, false);
#ifdef ADAM_CNN_WEIGHTS
gree(kernel->s_d_weights, false);
gree(kernel->v_d_weights, false);
#endif
free_matrix(input, kernel->rows, input_width); free_matrix(input, kernel->rows, input_width);
free_matrix(output_cpu, kernel->columns, output_width); free_matrix(output_cpu, kernel->columns, output_width);
@ -199,7 +169,7 @@ int main() {
} }
printf(GREEN "OK\n" RESET); printf(GREEN "OK\n" RESET);
srand(time(NULL)); srand(clock());
run_convolution_test(20, 15, 30, 40); run_convolution_test(20, 15, 30, 40);
run_convolution_test(30, 25, 40, 50); run_convolution_test(30, 25, 40, 50);
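
With clock_t operands, end_time - start_time above is a tick count, not a number of seconds — hence the (double)… / CLOCKS_PER_SEC conversion before each "%lf". Note also that clock() measures processor time rather than wall-clock time, so timings of GPU calls that mostly wait on the device can be misleading. The pattern in isolation:

#include <stdio.h>
#include <time.h>

static void do_work(void) { /* placeholder for the measured call */ }

int main(void) {
    clock_t start_time = clock();
    do_work();
    clock_t end_time = clock();
    // Convert ticks to seconds before printing; a clock_t must never be
    // passed directly to "%lf".
    double seconds = (double)(end_time - start_time) / CLOCKS_PER_SEC;
    printf("Time used: %lf seconds\n", seconds);
    return 0;
}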

View File

@ -1,6 +1,6 @@
#include <stdlib.h> #include <stdlib.h>
#include <stdio.h> #include <stdio.h>
#include <omp.h> #include <time.h>
#include "../src/common/include/colors.h" #include "../src/common/include/colors.h"
@ -21,10 +21,10 @@ int main(int argc, char* argv[]) {
printf("Taille des images: %dx%d\n", dataset->width, dataset->height); printf("Taille des images: %dx%d\n", dataset->width, dataset->height);
// Measure the loading time of the images one by one // Measure the loading time of the images one by one
double start_time, end_time; clock_t start_time, end_time;
int N = min(100000, dataset->numImages); int N = min(100000, dataset->numImages);
start_time = omp_get_wtime(); start_time = clock();
printf("Chargement de %d images\n", N); printf("Chargement de %d images\n", N);
for (int i=0; i < N; i++) { for (int i=0; i < N; i++) {
imgRawImage* image = loadJpegImageFile(dataset->fileNames[i]); imgRawImage* image = loadJpegImageFile(dataset->fileNames[i]);
@ -32,8 +32,10 @@ int main(int argc, char* argv[]) {
free(image); free(image);
} }
printf("OK\n"); printf("OK\n");
end_time = omp_get_wtime(); end_time = clock();
printf("Temps par image (calculé sur une moyenne de %d): %lf s\n", N, (end_time - start_time)/N); printf("Temps par image (calculé sur une moyenne de %d): ", N);
printf_time((end_time - start_time)/N);
printf("\n");
for (int i=0; i < (int)dataset->numImages; i++) { for (int i=0; i < (int)dataset->numImages; i++) {
if (!dataset->fileNames[i]) { if (!dataset->fileNames[i]) {
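
The average above is computed in ticks and handed to printf_time. The same pattern in a self-contained sketch (process_image stands in for the JPEG load; both names are hypothetical):

#include <stdio.h>
#include <time.h>

static void process_image(int i) { (void)i; }  // hypothetical per-image work

void average_load_time(int N) {
    clock_t start = clock();
    for (int i = 0; i < N; i++)
        process_image(i);
    // Integer division of ticks is fine for an average duration.
    clock_t avg_ticks = (clock() - start) / N;
    printf("Average per image: %lf s\n", (double)avg_ticks / CLOCKS_PER_SEC);
}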

View File

@ -3,7 +3,7 @@
#include <stdbool.h> #include <stdbool.h>
#include <math.h> #include <math.h>
#include <time.h> #include <time.h>
#include <omp.h> #include <time.h>
#include "../src/cnn/include/matrix_multiplication.h" #include "../src/cnn/include/matrix_multiplication.h"
#include "../src/common/include/memory_management.h" #include "../src/common/include/memory_management.h"
@ -72,8 +72,8 @@ bool check_matrices_equality(float** m1, float** m2, int n, int p, int acceptati
} }
void run_matrices_test(int n, int p, int q) { void run_matrices_test(int n, int p, int q) {
double start_time, end_time; clock_t start_time, end_time;
double cpu_time_used, gpu_time_used; clock_t cpu_time_used, gpu_time_used;
float** matrix1 = create_matrix(n, p); float** matrix1 = create_matrix(n, p);
float** matrix2 = create_matrix(p, q); float** matrix2 = create_matrix(p, q);
@ -81,16 +81,16 @@ void run_matrices_test(int n, int p, int q) {
float** result_cpu = create_empty_matrix(n, q); float** result_cpu = create_empty_matrix(n, q);
printf("(%d,%d)x(%d,%d) Data generation complete.\n", n, p, p, q); printf("(%d,%d)x(%d,%d) Data generation complete.\n", n, p, p, q);
start_time = omp_get_wtime(); start_time = clock();
matrix_multiplication_device(matrix1, matrix2, result_gpu, n, p, q); matrix_multiplication_device(matrix1, matrix2, result_gpu, n, p, q);
end_time = omp_get_wtime(); end_time = clock();
gpu_time_used = end_time - start_time; gpu_time_used = end_time - start_time;
printf("(%d,%d)x(%d,%d) Time used for GPU: %lf seconds\n", n, p, p, q, gpu_time_used); printf("(%d,%d)x(%d,%d) Time used for GPU: %lf seconds\n", n, p, p, q, (double)gpu_time_used / CLOCKS_PER_SEC);
start_time = omp_get_wtime(); start_time = clock();
matrix_multiplication_host(matrix1, matrix2, result_cpu, n, p, q); matrix_multiplication_host(matrix1, matrix2, result_cpu, n, p, q);
end_time = omp_get_wtime(); end_time = clock();
cpu_time_used = end_time - start_time; cpu_time_used = end_time - start_time;
printf("(%d,%d)x(%d,%d) Time used for CPU: %lf seconds\n", n, p, p, q, cpu_time_used); printf("(%d,%d)x(%d,%d) Time used for CPU: %lf seconds\n", n, p, p, q, (double)cpu_time_used / CLOCKS_PER_SEC);
@ -134,7 +134,7 @@ int main() {
} }
printf(GREEN "OK\n" RESET); printf(GREEN "OK\n" RESET);
srand(time(NULL)); srand(clock());
run_matrices_test(200, 1000, 200); run_matrices_test(200, 1000, 200);
run_matrices_test(200, 1000, 20); run_matrices_test(200, 1000, 20);
run_matrices_test(20, 1000, 200); run_matrices_test(20, 1000, 200);
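
Both test mains now seed with srand(clock()) instead of srand(time(NULL)). Since clock() measures processor time elapsed since program start, it is close to zero at that point on every run, so the seed barely varies between runs, whereas time(NULL) changes every second. A sketch contrasting the two:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void) {
    srand((unsigned)time(NULL));   // varies between runs
    // srand((unsigned)clock());   // near-constant at startup: almost identical sequences
    printf("%d\n", rand());
    return 0;
}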

View File

@ -13,7 +13,7 @@
int main() { int main() {
printf("Création du réseau\n"); printf("Création du réseau\n");
Network* network = create_network_lenet5(0, 0, 3, GLOROT, 32, 1); Network* network = create_network_lenet5(0, 0, 3, GLOROT, 32, 1, 2); // No need to initialise the whole backprop
printf(GREEN "OK\n" RESET); printf(GREEN "OK\n" RESET);
printf("Écriture du réseau\n"); printf("Écriture du réseau\n");

View File

@ -8,12 +8,13 @@
#include "../src/cnn/include/models.h" #include "../src/cnn/include/models.h"
#include "../src/cnn/include/utils.h" #include "../src/cnn/include/utils.h"
#include "../src/cnn/include/free.h" #include "../src/cnn/include/free.h"
#include "../src/cnn/include/cnn.h"
int main() { int main() {
Kernel* kernel; Kernel* kernel;
printf("Création du réseau\n"); printf("Création du réseau\n");
Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1); Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1, NN_ONLY); // No need to initialise the whole backprop
printf(GREEN "OK\n" RESET); printf(GREEN "OK\n" RESET);
printf("Architecture LeNet5:\n"); printf("Architecture LeNet5:\n");

View File

@ -9,8 +9,8 @@
int main() { int main() {
printf("Création du réseau\n"); printf("Création du réseau\n");
Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1); Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1, 0);
Network* network2 = create_network_lenet5(0, 0, 3, 2, 32, 1); Network* network2 = create_network_lenet5(0, 0, 3, 2, 32, 1, 0);
printf(GREEN "OK\n" RESET); printf(GREEN "OK\n" RESET);
printf("Copie du réseau via copy_network\n"); printf("Copie du réseau via copy_network\n");