Compare commits

4625ad29923dc7010eb8dac93c74684615f2b9f8..0fb23c9b159cabb041a949d9884bbcd7aebd2caf

No commits in common. "4625ad29923dc7010eb8dac93c74684615f2b9f8" and "0fb23c9b159cabb041a949d9884bbcd7aebd2caf" have entirely different histories.

13 changed files with 127 additions and 183 deletions

View File

@@ -41,12 +41,7 @@ NVCCFLAGS = -g
# -fsanitize=address -lasan
#! WARNING: test/cnn-neuron_io fails with this option enabled
all: dense cnn;
#
# Build dense
#
@@ -66,95 +61,82 @@ $(BUILDDIR)/dense_%.o: $(DENSE_SRCDIR)/%.c $(DENSE_SRCDIR)/include/%.h
$(CC) -c $< -o $@ $(CFLAGS)
#
# Build cnn
#
cnn: $(BUILDDIR)/cnn-main $(BUILDDIR)/cnn-main-cuda $(BUILDDIR)/cnn-preview $(BUILDDIR)/cnn-export;
$(BUILDDIR)/cnn-main: $(CNN_SRCDIR)/main.c \
$(BUILDDIR)/cnn_backpropagation.o \
$(BUILDDIR)/cnn_initialisation.o \
$(BUILDDIR)/cnn_train.o \
$(BUILDDIR)/cnn_test_network.o \
$(BUILDDIR)/cnn_convolution.o \
$(BUILDDIR)/cnn_cnn.o \
$(BUILDDIR)/cnn_creation.o \
$(BUILDDIR)/cnn_initialisation.o \
$(BUILDDIR)/cnn_make.o \
$(BUILDDIR)/cnn_neuron_io.o \
$(BUILDDIR)/cnn_function.o \
$(BUILDDIR)/cnn_creation.o \
$(BUILDDIR)/cnn_models.o \
$(BUILDDIR)/cnn_update.o \
$(BUILDDIR)/cnn_train.o \
$(BUILDDIR)/cnn_utils.o \
$(BUILDDIR)/cnn_make.o \
$(BUILDDIR)/cnn_update.o \
$(BUILDDIR)/cnn_free.o \
$(BUILDDIR)/cnn_jpeg.o \
$(BUILDDIR)/cnn_cnn.o \
\
$(BUILDDIR)/cnn_convolution.o \
$(BUILDDIR)/cnn_backpropagation.o \
$(BUILDDIR)/memory_management.o \
$(BUILDDIR)/colors.o \
$(BUILDDIR)/mnist.o \
$(BUILDDIR)/utils.o
$(CC) $^ -o $@ $(CFLAGS) $(LD_CFLAGS)
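# In the recipe above, $^ expands to every prerequisite (main.c plus all the objects listed) and $@ to the target binary.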
ifdef NVCC_INSTALLED
$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.cuda.o \
$(BUILDDIR)/cnn_initialisation.cuda.o \
$(BUILDDIR)/cnn_test_network.cuda.o \
$(BUILDDIR)/cnn_neuron_io.cuda.o \
$(BUILDDIR)/cnn_creation.cuda.o \
$(BUILDDIR)/cnn_models.cuda.o \
$(BUILDDIR)/cnn_update.cuda.o \
$(BUILDDIR)/cnn_train.cuda.o \
$(BUILDDIR)/cnn_test_network.cuda.o \
$(BUILDDIR)/cnn_cnn.cuda.o \
$(BUILDDIR)/cnn_creation.cuda.o \
$(BUILDDIR)/cnn_initialisation.cuda.o \
$(BUILDDIR)/cnn_cuda_make.o \
$(BUILDDIR)/cnn_neuron_io.cuda.o \
$(BUILDDIR)/cnn_cuda_function.o \
$(BUILDDIR)/cnn_utils.cuda.o \
$(BUILDDIR)/cnn_update.cuda.o \
$(BUILDDIR)/cnn_free.cuda.o \
$(BUILDDIR)/cnn_jpeg.cuda.o \
$(BUILDDIR)/cnn_cnn.cuda.o \
\
$(BUILDDIR)/cnn_cuda_backpropagation.o \
$(BUILDDIR)/cnn_cuda_convolution.o \
$(BUILDDIR)/cnn_cuda_function.o \
$(BUILDDIR)/cnn_cuda_make.o \
\
$(BUILDDIR)/cuda_memory_management.o \
$(BUILDDIR)/cnn_cuda_backpropagation.o \
$(BUILDDIR)/colors.cuda.o \
$(BUILDDIR)/cuda_memory_management.o \
$(BUILDDIR)/mnist.cuda.o \
$(BUILDDIR)/cuda_utils.o
$(BUILDDIR)/cuda_utils.o
$(NVCC) $(LD_NVCCFLAGS) $(NVCCFLAGS) $^ -o $@
else
$(BUILDDIR)/cnn-main-cuda:
@echo "$(NVCC) not found, skipping"
endif
$(BUILDDIR)/cnn-preview: $(CNN_SRCDIR)/preview.c $(BUILDDIR)/cnn_jpeg.o $(BUILDDIR)/colors.o $(BUILDDIR)/utils.o
$(CC) $^ -o $@ $(CFLAGS) $(LD_CFLAGS)
$(BUILDDIR)/cnn-export: $(CNN_SRCDIR)/export.c \
$(BUILDDIR)/cnn_backpropagation.o \
$(BUILDDIR)/cnn_convolution.o \
$(BUILDDIR)/cnn_neuron_io.o \
$(BUILDDIR)/cnn_function.o \
$(BUILDDIR)/cnn_free.o \
$(BUILDDIR)/cnn_make.o \
$(BUILDDIR)/cnn_cnn.o \
$(BUILDDIR)/cnn_jpeg.o \
\
$(BUILDDIR)/cnn_neuron_io.o \
$(BUILDDIR)/utils.o \
$(BUILDDIR)/memory_management.o \
$(BUILDDIR)/cnn_cnn.o \
$(BUILDDIR)/cnn_make.o \
$(BUILDDIR)/cnn_backpropagation.o \
$(BUILDDIR)/cnn_function.o \
$(BUILDDIR)/cnn_convolution.o \
$(BUILDDIR)/colors.o \
$(BUILDDIR)/mnist.o \
$(BUILDDIR)/utils.o
$(BUILDDIR)/cnn_jpeg.o
$(CC) $^ -o $@ $(CFLAGS) $(LD_CFLAGS)
$(BUILDDIR)/cnn_%.o: $(CNN_SRCDIR)/%.c $(CNN_SRCDIR)/include/%.h
$(CC) -c $< -o $@ $(CFLAGS)
$(BUILDDIR)/cnn_%.cuda.o: $(CNN_SRCDIR)/%.c $(CNN_SRCDIR)/include/%.h
$(CC) -c $< -o $@ $(CFLAGS) -DUSE_CUDA -lcuda -I$(CUDA_INCLUDE)
ifdef NVCC_INSTALLED
$(BUILDDIR)/cnn_cuda_%.o: $(CNN_SRCDIR)/%.cu $(CNN_SRCDIR)/include/%.h
$(NVCC) $(NVCCFLAGS) -c -dc $< -o $@
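# (-dc produces relocatable device code, so these objects can be device-linked together in the final $(NVCC) link step.)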
@@ -162,20 +144,15 @@ else
$(BUILDDIR)/cnn_cuda_%.o: $(CNN_SRCDIR)/%.cu $(CNN_SRCDIR)/include/%.h
@echo "$(NVCC) not found, skipping"
endif
#
# Build general files
#
$(BUILDDIR)/%.o: $(COMMON_SRCDIR)/%.c $(COMMON_SRCDIR)/include/%.h
$(CC) -c $< -o $@ $(CFLAGS)
$(BUILDDIR)/%.cuda.o: $(COMMON_SRCDIR)/%.c $(COMMON_SRCDIR)/include/%.h
$(CC) -c $< -o $@ $(CFLAGS) -DUSE_CUDA -lcuda -I$(CUDA_INCLUDE)
ifdef NVCC_INSTALLED
$(BUILDDIR)/cuda_%.o: $(COMMON_SRCDIR)/%.cu $(COMMON_SRCDIR)/include/%.h
$(NVCC) $(NVCCFLAGS) -c -dc $< -o $@
@@ -183,8 +160,6 @@ else
@echo "$(NVCC) not found, skipping"
endif
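The %.cuda.o pattern rules above compile the same C sources a second time with -DUSE_CUDA. A minimal sketch of the kind of switch that flag can drive, given as an assumed illustration rather than code from this repository:
#include <stdlib.h>
#ifdef USE_CUDA
#include <cuda_runtime.h>
#endif
/* Assumed illustration: one translation unit, two object files. The
 * -DUSE_CUDA build can reach the CUDA runtime, the plain build cannot. */
void* alloc_buffer(size_t size) {
#ifdef USE_CUDA
    void* ptr = NULL;
    cudaMallocManaged(&ptr, size, cudaMemAttachGlobal);
    return ptr;
#else
    return malloc(size);
#endif
}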
#
# Tests
#
@@ -232,8 +207,6 @@ $(BUILDDIR)/test-cnn_%: $(TEST_SRCDIR)/cnn_%.cu
@echo "$(NVCC) not found, skipping"
endif
#
# Utils
#
@@ -258,7 +231,6 @@ $(CACHE_DIR)/mnist-reseau-cnn.bin: $(BUILDDIR)/cnn-main
--out $(CACHE_DIR)/mnist-reseau-cnn.bin
#
# Clean project
#

View File

@@ -38,8 +38,6 @@ uint32_t|linearisation|
uint32_t|k_size|
uint32_t|rows|
uint32_t|columns|
uint32_t|stride|
uint32_t|padding|
#### If the layer is an nn:
type | variable name | comment
@@ -54,8 +52,6 @@ type | variable name | comment
:---:|:---:|:---:
uint32_t|linearisation|
uint32_t|pooling|
uint32_t|stride|
uint32_t|padding|
### Body

View File

@@ -34,6 +34,73 @@ Network* create_network(int max_size, float learning_rate, int dropout, int init
return network;
}
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(8, learning_rate, dropout, initialisation, input_width, input_depth);
add_convolution(network, 5, 6, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_convolution(network, 5, 16, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_dense_linearisation(network, 120, activation);
add_dense(network, 84, activation);
add_dense(network, 10, SOFTMAX);
return network;
}
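A minimal usage sketch for this constructor; TANH, GLOROT and free_network are assumed names from the project's other headers and do not appear in this diff:
#include "include/creation.h"
#include "include/function.h"        // assumed to define activation constants such as TANH
#include "include/initialisation.h"  // assumed to define initialisation constants such as GLOROT
#include "include/free.h"            // assumed to declare free_network()
int main(void) {
    // 32x32x1 matches a padded MNIST image; 0.01f and no dropout are arbitrary example values.
    Network* net = create_network_lenet5(0.01f, 0, TANH, GLOROT, 32, 1);
    /* ... training / inference ... */
    free_network(net);
    return 0;
}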
Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
Network* network = create_network(12, learning_rate, dropout, initialisation, 227, 3);
add_convolution(network, 11, 96, 4, 0, activation);
add_average_pooling(network, 3, 2, 0);
add_convolution(network, 5, 256, 1, 2, activation);
add_average_pooling(network, 3, 2, 0);
add_convolution(network, 3, 384, 1, 1, activation);
add_convolution(network, 3, 384, 1, 1, activation);
add_convolution(network, 3, 256, 1, 1, activation);
add_average_pooling(network, 3, 2, 0);
add_dense_linearisation(network, 4096, activation);
add_dense(network, 4096, activation);
add_dense(network, size_output, SOFTMAX);
return network;
}
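These AlexNet parameters can be sanity-checked with the standard output-size rule, assuming the size arguments are (kernel, filters, stride, padding) for convolutions and (kernel, stride, padding) for pooling. A small helper, given here only for the arithmetic:
// Standard rule: out = (in - kernel + 2*padding) / stride + 1
static int conv_out(int in, int kernel, int stride, int padding) {
    return (in - kernel + 2 * padding) / stride + 1;
}
// conv_out(227, 11, 4, 0) == 55; pooling 3/2 gives 27; conv 5/1/2 keeps 27;
// pooling 3/2 gives 13; the three 3x3 pad-1 convolutions keep 13; the final
// pooling 3/2 gives 6, so add_dense_linearisation flattens 6*6*256 = 9216 values into 4096.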
Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
Network* network = create_network(23, learning_rate, dropout, initialisation, 256, 3);
add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 128, 1, 0, activation); // Conv3-128
add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
add_average_pooling(network, 2, 2, 0); // Max Pool
add_dense_linearisation(network, 2048, activation);
add_dense(network, 2048, activation);
add_dense(network, 256, activation);
add_dense(network, size_output, SOFTMAX);
return network;
}
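Across these constructors, max_size appears to budget one slot per add_* call plus the input layer:
// LeNet-5: 7 + 1 = 8; AlexNet: 11 + 1 = 12; this VGG16 variant: 22 + 1 = 23; create_simple_one below: 2 + 1 = 3.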
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
add_dense_linearisation(network, 80, activation);
add_dense(network, 10, SOFTMAX);
return network;
}
void create_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
network->input[pos] = (float***)nalloc(depth, sizeof(float**));
for (int i=0; i < depth; i++) {

View File

@@ -9,6 +9,28 @@
*/
Network* create_network(int max_size, float learning_rate, int dropout, int initialisation, int input_width, int input_depth);
/*
* Returns a network following the LeNet5 architecture
*/
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
/*
* Returns a network following the AlexNet architecture
* i.e. a 3x227x227 input and an output of size 'size_output'
*/
Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output);
/*
* Returns a network following the VGG16 architecture, modified to take a 3x256x256 input
* and an output of size 'size_output'
*/
Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
/*
* Returns a network with no convolution, similar to the one used in src/dense
*/
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
/*
* Creates and allocates memory for an input cube layer
*/

View File

@@ -1,29 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include "struct.h"
#ifndef DEF_MODELS_H
#define DEF_MODELS_H
/*
* Returns a network following the LeNet5 architecture
*/
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
/*
* Returns a network following the AlexNet architecture
* i.e. a 3x227x227 input and an output of size 'size_output'
*/
Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output);
/*
* Returns a network following the VGG16 architecture, modified to take a 3x256x256 input
* and an output of size 'size_output'
*/
Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
/*
* Returns a network with no convolution, similar to the one used in src/dense
*/
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
#endif

View File

@@ -1,75 +0,0 @@
#include <stdlib.h>
#include <stdio.h>
#include "include/creation.h"
#include "include/function.h"
#include "include/struct.h"
#include "include/models.h"
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(8, learning_rate, dropout, initialisation, input_width, input_depth);
add_convolution(network, 5, 6, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_convolution(network, 5, 16, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_dense_linearisation(network, 120, activation);
add_dense(network, 84, activation);
add_dense(network, 10, SOFTMAX);
return network;
}
Network* create_network_alexnet(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
Network* network = create_network(12, learning_rate, dropout, initialisation, 227, 3);
add_convolution(network, 11, 96, 4, 0, activation);
add_average_pooling(network, 3, 2, 0);
add_convolution(network, 5, 256, 1, 2, activation);
add_average_pooling(network, 3, 2, 0);
add_convolution(network, 3, 384, 1, 1, activation);
add_convolution(network, 3, 384, 1, 1, activation);
add_convolution(network, 3, 256, 1, 1, activation);
add_average_pooling(network, 3, 2, 0);
add_dense_linearisation(network, 4096, activation);
add_dense(network, 4096, activation);
add_dense(network, size_output, SOFTMAX);
return network;
}
Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
Network* network = create_network(23, learning_rate, dropout, initialisation, 256, 3);
add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 128, 1, 0, activation); // Conv3-128
add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
add_average_pooling(network, 2, 2, 0); // Max Pool
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
add_average_pooling(network, 2, 2, 0); // Max Pool
add_dense_linearisation(network, 2048, activation);
add_dense(network, 2048, activation);
add_dense(network, 256, activation);
add_dense(network, size_output, SOFTMAX);
return network;
}
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
add_dense_linearisation(network, 80, activation);
add_dense(network, 10, SOFTMAX);
return network;
}

View File

@@ -10,8 +10,7 @@
#include "include/neuron_io.h"
#define INITIAL_MAGIC_NUMBER 1010
#define MAGIC_NUMBER 1013 // Increment this whenever you change the code
#define MAGIC_NUMBER 1012
#define CNN 0
#define NN 1
@@ -115,11 +114,13 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
Kernel_nn* nn = kernel->nn;
// Write the pre-body
uint32_t pre_buffer[4];
uint32_t pre_buffer[6];
pre_buffer[0] = kernel->activation;
pre_buffer[1] = kernel->linearisation;
pre_buffer[2] = nn->size_input;
pre_buffer[3] = nn->size_output;
pre_buffer[4] = kernel->stride;
pre_buffer[5] = kernel->padding;
fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
// Write the body
@@ -162,12 +163,7 @@ Network* read_network(char* filename) {
(void) !fread(&magic, sizeof(uint32_t), 1, ptr);
if (magic != MAGIC_NUMBER) {
printf_error((char*)"Incorrect magic number !\n");
if (magic >= INITIAL_MAGIC_NUMBER && magic < MAGIC_NUMBER) {
printf("\tThis backup is no longer supported\n");
printf("\tnPlease update it manually or re-train the network.\n");
printf("\t(You can update it with a script or manually with a Hex Editor)\n");
}
printf_error("Incorrect magic number !\n");
exit(1);
}
@@ -334,15 +330,15 @@ Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
// Read the "pre-body"
kernel->nn = (Kernel_nn*)nalloc(1, sizeof(Kernel_nn));
kernel->cnn = NULL;
uint32_t buffer[4];
uint32_t buffer[6];
(void) !fread(&buffer, sizeof(buffer), 1, ptr);
kernel->activation = buffer[0];
kernel->linearisation = buffer[1];
kernel->nn->size_input = buffer[2];
kernel->nn->size_output = buffer[3];
kernel->padding = -1;
kernel->stride = -1;
kernel->stride = buffer[4];
kernel->padding = buffer[5];
// Read the body
Kernel_nn* nn = kernel->nn;
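A quick size check of the change above: the nn pre-body grows from four to six 32-bit fields, so files written with one layout cannot be read with the other, which is what the MAGIC_NUMBER test in read_network() rejects. Stated as plain arithmetic:
#include <stdint.h>
// Illustrative only: sizes of the two pre-body layouts handled by write_couche/read_kernel.
_Static_assert(sizeof(uint32_t[4]) == 16, "previous nn pre-body: 16 bytes");
_Static_assert(sizeof(uint32_t[6]) == 24, "current nn pre-body: 24 bytes");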

View File

@@ -8,15 +8,15 @@
#include <omp.h>
#include "../common/include/memory_management.h"
#include "../common/include/colors.h"
#include "../common/include/utils.h"
#include "../common/include/mnist.h"
#include "include/initialisation.h"
#include "include/test_network.h"
#include "include/neuron_io.h"
#include "../common/include/colors.h"
#include "../common/include/utils.h"
#include "include/function.h"
#include "include/creation.h"
#include "include/update.h"
#include "include/models.h"
#include "include/utils.h"
#include "include/free.h"
#include "include/jpeg.h"

View File

@@ -214,7 +214,6 @@ void gree(void* ptr, bool already_freed) {
memory = free_memory(ptr, memory, already_freed);
pthread_mutex_unlock(&memory_lock);
#else
(void)already_freed;
free(ptr);
#endif
}
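The (void)already_freed; cast is the usual idiom for marking a parameter as intentionally unused in the plain-malloc build; the side of the diff without it can trigger -Wunused-parameter warnings under -Wextra.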

View File

@@ -214,7 +214,6 @@ void gree(void* ptr, bool already_freed) {
memory = free_memory(ptr, memory, already_freed);
pthread_mutex_unlock(&memory_lock);
#else
(void)already_freed;
free(ptr);
#endif
}

View File

@@ -6,7 +6,6 @@
#include "../src/common/include/colors.h"
#include "../src/cnn/include/neuron_io.h"
#include "../src/cnn/include/creation.h"
#include "../src/cnn/include/models.h"
#include "../src/cnn/include/utils.h"
#include "../src/cnn/include/free.h"

View File

@@ -5,7 +5,6 @@
#include "../src/common/include/colors.h"
#include "../src/cnn/include/creation.h"
#include "../src/cnn/include/models.h"
#include "../src/cnn/include/utils.h"
#include "../src/cnn/include/free.h"

View File

@@ -3,7 +3,6 @@
#include "../src/common/include/colors.h"
#include "../src/cnn/include/creation.h"
#include "../src/cnn/include/models.h"
#include "../src/cnn/include/utils.h"
#include "../src/cnn/include/free.h"