Compare commits


No commits in common. "2f333bfc1d48a9a76d632193e0f979af74650b90" and "003183d3fdda98a94844c1395ab626e120f07fbe" have entirely different histories.

15 changed files with 165 additions and 309 deletions

View File

@@ -1,6 +1,5 @@
#include <stdbool.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <float.h>
#include <math.h>
@@ -18,6 +17,8 @@
#include "include/cnn.h"
// Enlarges the dimensions of the input image
#define PADDING_INPUT 2
int indice_max(float* tab, int n) {
int indice = -1;
@@ -130,27 +131,25 @@ void write_image_in_network_32(int** image, int height, int width, float** input
}
}
void write_256_image_in_network(unsigned char* image, int img_width, int img_depth, int input_width, float*** input) {
assert(img_width <= input_width);
assert((input_width - img_width)%2 == 0);
int padding = (input_width - img_width)/2;
void write_image_in_network_260(unsigned char* image, int height, int width, float*** input) {
int size_input = 260;
int padding = (size_input - height)/2;
for (int i=0; i < padding; i++) {
for (int j=0; j < input_width; j++) {
for (int composante=0; composante < img_depth; composante++) {
for (int j=0; j < size_input; j++) {
for (int composante=0; composante < 3; composante++) {
input[composante][i][j] = 0.;
input[composante][input_width-1-i][j] = 0.;
input[composante][size_input-1-i][j] = 0.;
input[composante][j][i] = 0.;
input[composante][j][input_width-1-i] = 0.;
input[composante][j][size_input-1-i] = 0.;
}
}
}
for (int i=0; i < img_width; i++) {
for (int j=0; j < img_width; j++) {
for (int composante=0; composante < img_depth; composante++) {
input[composante][i+padding][j+padding] = (float)image[(i*img_width+j)*img_depth + composante] / 255.0f;
for (int i=0; i < width; i++) {
for (int j=0; j < height; j++) {
for (int composante=0; composante < 3; composante++) {
input[composante][i+2][j+2] = (float)image[(i*height+j)*3 + composante] / 255.0f;
}
}
}
@@ -220,7 +219,7 @@ void forward_propagation(Network* network) {
make_max_pooling(input, output, kernel_size, output_depth, output_width, stride, padding);
}
else {
printf_error((char*)"Impossible de reconnaître le type de couche de pooling: ");
printf_error("Impossible de reconnaître le type de couche de pooling: ");
printf("identifiant: %d, position: %d\n", pooling, i);
}
}
@@ -235,7 +234,7 @@ void backward_propagation(Network* network, int wanted_number) {
// Backward pass on the last layer, which always uses SOFTMAX
float* wanted_output = generate_wanted_output(wanted_number, network->width[network->size -1]); // Desired output, used to seed the error
softmax_backward_cross_entropy(network->input[n-1][0][0], wanted_output, network->width[n-1]);
gree(wanted_output, false);
gree(wanted_output);
/*
* At each step, we propagate:
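The call to softmax_backward_cross_entropy above relies on a standard identity: when a cross-entropy loss follows a softmax layer, the gradient with respect to the logits reduces to the predicted probabilities minus the one-hot target, which is why a single desired-output vector is enough to seed the error. A minimal sketch of that identity (hypothetical helper, not the project's implementation):

// Hypothetical sketch: gradient of cross-entropy composed with softmax.
// For probabilities p (softmax output) and one-hot target y:
//   dL/dz_i = p_i - y_i
void softmax_ce_gradient(const float* p, const float* wanted, float* grad, int n) {
    for (int i = 0; i < n; i++) {
        grad[i] = p[i] - wanted[i];
    }
}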

View File

@@ -126,7 +126,7 @@ void visual_propagation(char* modele_file, char* mnist_images_file, char* out_ba
free(mnist_parameters);
if (numero < 0 || numero >= nb_elem) {
printf_error((char*)"Numéro d'image spécifié invalide.");
printf_error("Numéro d'image spécifié invalide.");
printf(" Le fichier contient %d images.\n", nb_elem);
exit(1);
}
@@ -145,7 +145,7 @@ void visual_propagation(char* modele_file, char* mnist_images_file, char* out_ba
} else {
imgRawImage* image = loadJpegImageFile(jpeg_file);
write_256_image_in_network(image->lpData, image->width, image->numComponents, network->width[0], network->input[0]);
write_image_in_network_260(image->lpData, image->height, image->width, network->input[0]);
// Free allocated memory from image reading
free(image->lpData);

View File

@@ -1,4 +1,3 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
@@ -9,25 +8,25 @@
void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
for (int i=0; i < depth; i++) {
for (int j=0; j < dim; j++) {
gree(network->input[pos][i][j], true);
gree(network->input_z[pos][i][j], true);
gree(network->input[pos][i][j]);
gree(network->input_z[pos][i][j]);
}
gree(network->input[pos][i], true);
gree(network->input_z[pos][i], true);
gree(network->input[pos][i]);
gree(network->input_z[pos][i]);
}
gree(network->input[pos], true);
gree(network->input_z[pos], true);
gree(network->input[pos]);
gree(network->input_z[pos]);
}
void free_a_line_input_layer(Network* network, int pos) {
// Frees the memory of network->input[pos] and network->input_z[pos]
// when these layers are dense (i.e. one-dimensional matrices)
gree(network->input[pos][0][0], true);
gree(network->input_z[pos][0][0], true);
gree(network->input[pos][0], true);
gree(network->input_z[pos][0], true);
gree(network->input[pos], true);
gree(network->input_z[pos], true);
gree(network->input[pos][0][0]);
gree(network->input_z[pos][0][0]);
gree(network->input[pos][0]);
gree(network->input_z[pos][0]);
gree(network->input[pos]);
gree(network->input_z[pos]);
}
void free_pooling(Network* network, int pos) {
@@ -44,59 +43,59 @@ void free_convolution(Network* network, int pos) {
free_a_cube_input_layer(network, pos+1, network->depth[pos+1], network->width[pos+1]);
for (int i=0; i < c; i++) {
for (int j=0; j < bias_size; j++) {
gree(k_pos->bias[i][j], true);
gree(k_pos->d_bias[i][j], true);
gree(k_pos->bias[i][j]);
gree(k_pos->d_bias[i][j]);
#ifdef ADAM_CNN_BIAS
gree(k_pos->s_d_bias[i][j], true);
gree(k_pos->v_d_bias[i][j], true);
gree(k_pos->s_d_bias[i][j]);
gree(k_pos->v_d_bias[i][j]);
#endif
}
gree(k_pos->bias[i], true);
gree(k_pos->d_bias[i], true);
gree(k_pos->bias[i]);
gree(k_pos->d_bias[i]);
#ifdef ADAM_CNN_BIAS
gree(k_pos->s_d_bias[i], true);
gree(k_pos->v_d_bias[i], true);
gree(k_pos->s_d_bias[i]);
gree(k_pos->v_d_bias[i]);
#endif
}
gree(k_pos->bias, true);
gree(k_pos->d_bias, true);
gree(k_pos->bias);
gree(k_pos->d_bias);
#ifdef ADAM_CNN_BIAS
gree(k_pos->s_d_bias, true);
gree(k_pos->v_d_bias, true);
gree(k_pos->s_d_bias);
gree(k_pos->v_d_bias);
#endif
for (int i=0; i < r; i++) {
for (int j=0; j < c; j++) {
for (int k=0; k < k_size; k++) {
gree(k_pos->weights[i][j][k], true);
gree(k_pos->d_weights[i][j][k], true);
gree(k_pos->weights[i][j][k]);
gree(k_pos->d_weights[i][j][k]);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights[i][j][k], true);
gree(k_pos->v_d_weights[i][j][k], true);
gree(k_pos->s_d_weights[i][j][k]);
gree(k_pos->v_d_weights[i][j][k]);
#endif
}
gree(k_pos->weights[i][j], true);
gree(k_pos->d_weights[i][j], true);
gree(k_pos->weights[i][j]);
gree(k_pos->d_weights[i][j]);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights[i][j], true);
gree(k_pos->v_d_weights[i][j], true);
gree(k_pos->s_d_weights[i][j]);
gree(k_pos->v_d_weights[i][j]);
#endif
}
gree(k_pos->weights[i], true);
gree(k_pos->d_weights[i], true);
gree(k_pos->weights[i]);
gree(k_pos->d_weights[i]);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights[i], true);
gree(k_pos->v_d_weights[i], true);
gree(k_pos->s_d_weights[i]);
gree(k_pos->v_d_weights[i]);
#endif
}
gree(k_pos->weights, true);
gree(k_pos->d_weights, true);
gree(k_pos->weights);
gree(k_pos->d_weights);
#ifdef ADAM_CNN_WEIGHTS
gree(k_pos->s_d_weights, true);
gree(k_pos->v_d_weights, true);
gree(k_pos->s_d_weights);
gree(k_pos->v_d_weights);
#endif
gree(k_pos, true);
gree(k_pos);
}
void free_dense(Network* network, int pos) {
@@ -104,28 +103,28 @@ void free_dense(Network* network, int pos) {
Kernel_nn* k_pos = network->kernel[pos]->nn;
int dim = k_pos->size_input;
for (int i=0; i < dim; i++) {
gree(k_pos->weights[i], true);
gree(k_pos->d_weights[i], true);
gree(k_pos->weights[i]);
gree(k_pos->d_weights[i]);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights[i], true);
gree(k_pos->v_d_weights[i], true);
gree(k_pos->s_d_weights[i]);
gree(k_pos->v_d_weights[i]);
#endif
}
gree(k_pos->weights, true);
gree(k_pos->d_weights, true);
gree(k_pos->weights);
gree(k_pos->d_weights);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights, true);
gree(k_pos->v_d_weights, true);
gree(k_pos->s_d_weights);
gree(k_pos->v_d_weights);
#endif
gree(k_pos->bias, true);
gree(k_pos->d_bias, true);
gree(k_pos->bias);
gree(k_pos->d_bias);
#ifdef ADAM_DENSE_BIAS
gree(k_pos->s_d_bias, true);
gree(k_pos->v_d_bias, true);
gree(k_pos->s_d_bias);
gree(k_pos->v_d_bias);
#endif
gree(k_pos, true);
gree(k_pos);
}
void free_dense_linearisation(Network* network, int pos) {
@@ -133,28 +132,28 @@ void free_dense_linearisation(Network* network, int pos) {
Kernel_nn* k_pos = network->kernel[pos]->nn;
int dim = k_pos->size_input;
for (int i=0; i < dim; i++) {
gree(k_pos->weights[i], true);
gree(k_pos->d_weights[i], true);
gree(k_pos->weights[i]);
gree(k_pos->d_weights[i]);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights[i], true);
gree(k_pos->v_d_weights[i], true);
gree(k_pos->s_d_weights[i]);
gree(k_pos->v_d_weights[i]);
#endif
}
gree(k_pos->weights, true);
gree(k_pos->d_weights, true);
gree(k_pos->weights);
gree(k_pos->d_weights);
#ifdef ADAM_DENSE_WEIGHTS
gree(k_pos->s_d_weights, true);
gree(k_pos->v_d_weights, true);
gree(k_pos->s_d_weights);
gree(k_pos->v_d_weights);
#endif
gree(k_pos->bias, true);
gree(k_pos->d_bias, true);
gree(k_pos->bias);
gree(k_pos->d_bias);
#ifdef ADAM_DENSE_BIAS
gree(k_pos->s_d_bias, true);
gree(k_pos->v_d_bias, true);
gree(k_pos->s_d_bias);
gree(k_pos->v_d_bias);
#endif
gree(k_pos, true);
gree(k_pos);
}
void free_network_creation(Network* network) {
@@ -162,46 +161,36 @@ void free_network_creation(Network* network) {
free_a_cube_input_layer(network, 0, network->depth[0], network->width[0]);
for (int i=0; i < network->max_size-1; i++) {
gree(network->kernel[i], true);
gree(network->kernel[i]);
}
gree(network->width, true);
gree(network->depth, true);
gree(network->kernel, true);
gree(network->input, true);
gree(network->input_z, true);
gree(network->width);
gree(network->depth);
gree(network->kernel);
gree(network->input);
gree(network->input_z);
gree(network, true);
gree(network);
}
void free_network(Network* network) {
#if defined(USE_CUDA) || defined(TEST_MEMORY_MANAGEMENT)
// Free all the memory allocated with nalloc directly.
// There is then no need to traverse the whole network,
// but ALL of the network's memory must have been allocated this way,
// and ONLY the memory allocated to the network may have been.
// Saves about 45 min on VGG16
free_all_memory();
#else
for (int i=network->size-2; i>=0; i--) {
if (network->kernel[i]->cnn != NULL) {
// Convolution
free_convolution(network, i);
}
else if (network->kernel[i]->nn != NULL) {
// Dense
if (network->kernel[i]->linearisation == DOESNT_LINEARISE) {
// Regular dense layer
free_dense(network, i);
} else {
// Dense layer that linearises
free_dense_linearisation(network, i);
}
for (int i=network->size-2; i>=0; i--) {
if (network->kernel[i]->cnn != NULL) {
// Convolution
free_convolution(network, i);
}
else if (network->kernel[i]->nn != NULL) {
// Dense
if (network->kernel[i]->linearisation == DOESNT_LINEARISE) {
// Regular dense layer
free_dense(network, i);
} else {
// Pooling
free_pooling(network, i);
// Dense layer that linearises
free_dense_linearisation(network, i);
}
} else {
// Pooling
free_pooling(network, i);
}
free_network_creation(network);
#endif
}
free_network_creation(network);
}

View File

@@ -20,12 +20,9 @@ int will_be_drop(int dropout_prob);
void write_image_in_network_32(int** image, int height, int width, float** input, bool random_offset);
/*
* Writes a linearised image of img_width*img_width*img_depth pixels into an array of size input_width*input_width*3
* The following conditions must hold:
* - the image is at most the same size as the input
* - the size difference between the input and the image must be a multiple of 2 (so the image can be centred)
* Writes a linearised image of 256*256*3 pixels into an array of size 260*260*3
*/
void write_256_image_in_network(unsigned char* image, int img_width, int img_depth, int input_width, float*** input);
void write_image_in_network_260(unsigned char* image, int height, int width, float*** input);
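For reference, the removed write_256_image_in_network centred the image inside the input with zero padding, under the two conditions listed above. A minimal sketch of that centring scheme, assuming a row-major image with interleaved channels (hypothetical helper, not the repository's code):

#include <assert.h>

// Hedged sketch of centred zero-padding: the image is placed in the middle
// of the (larger) input, and the border is filled with zeros.
void write_centered_image(const unsigned char* image, int img_width, int img_depth,
                          int input_width, float*** input) {
    assert(img_width <= input_width && (input_width - img_width) % 2 == 0);
    int padding = (input_width - img_width) / 2;
    for (int c = 0; c < img_depth; c++) {
        for (int i = 0; i < input_width; i++) {
            for (int j = 0; j < input_width; j++) {
                input[c][i][j] = 0.f;  // zero everywhere first
            }
        }
    }
    for (int i = 0; i < img_width; i++) {
        for (int j = 0; j < img_width; j++) {
            for (int c = 0; c < img_depth; c++) {
                // pixel (i,j), channel c of the linearised image, scaled to [0,1]
                input[c][i + padding][j + padding] =
                    (float)image[(i * img_width + j) * img_depth + c] / 255.0f;
            }
        }
    }
}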
/*
* Forward-propagates the CNN. Dropout is only active while the network is in its training phase.

View File

@@ -40,9 +40,9 @@
//* CUDA parameters
// The product of the 3 dimensions must be at most 1024 (reachable with 8*8*16)
// Reducing it avoids "Out of memory" or "too many resources requested" errors when launching the kernels
#define BLOCKSIZE_x 8
#define BLOCKSIZE_y 8
#define BLOCKSIZE_z 8
// Reducing it avoids "Out of memory" errors when launching the kernels
#define BLOCKSIZE_x 10
#define BLOCKSIZE_y 10
#define BLOCKSIZE_z 10
#endif
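Since kernels launch with BLOCKSIZE_x * BLOCKSIZE_y * BLOCKSIZE_z threads per block and CUDA caps this product at 1024, a compile-time guard can catch invalid values early. This check is an illustration only, not part of the repository:

// Hypothetical guard: CUDA limits blockDim.x * blockDim.y * blockDim.z to 1024.
// Both 8*8*8 = 512 and 10*10*10 = 1000 satisfy it; 8*8*16 = 1024 is the maximum.
#if (BLOCKSIZE_x * BLOCKSIZE_y * BLOCKSIZE_z) > 1024
#error "BLOCKSIZE_x * BLOCKSIZE_y * BLOCKSIZE_z must not exceed 1024"
#endif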

View File

@@ -51,7 +51,7 @@ float* test_network_mnist(Network* network, char* images_file, char* labels_file
// Compute loss
wanted_output = generate_wanted_output(labels[i], 10);
loss += compute_mean_squared_error(network->input[network->size-1][0][0], wanted_output, 10);
gree(wanted_output, false);
gree(wanted_output);
for (int j=0; j < height; j++) {
free(images[i][j]);
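The loss accumulated above presumably averages the squared differences over the 10 output neurons. A minimal sketch under that assumption (hypothetical helper, not the project's compute_mean_squared_error):

// Hedged sketch of a mean squared error over n outputs:
//   MSE = (1/n) * sum_i (output_i - wanted_i)^2
float mean_squared_error(const float* output, const float* wanted, int n) {
    float sum = 0.f;
    for (int i = 0; i < n; i++) {
        float diff = output[i] - wanted[i];
        sum += diff * diff;
    }
    return sum / (float)n;
}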
@@ -79,7 +79,7 @@ float* test_network_jpg(Network* network, char* data_dir, bool preview_fails, bo
printf("Avancement: %.1f%%\r", 1000*i/(float)dataset->numImages);
fflush(stdout);
}
write_256_image_in_network(dataset->images[i], dataset->height, dataset->numComponents, network->width[0], network->input[0]);
write_image_in_network_260(dataset->images[i], dataset->height, dataset->height, network->input[0]);
forward_propagation(network);
maxi = indice_max(network->input[network->size-1][0][0], 50);
@@ -196,7 +196,7 @@ void recognize_jpg(Network* network, char* input_file, char* out) {
}
// Load image in the first layer of the Network
write_256_image_in_network(image->lpData, width, image->numComponents, network->width[0], network->input[0]);
write_image_in_network_260(image->lpData, height, width, network->input[0]);
forward_propagation(network);

View File

@@ -84,7 +84,7 @@ void* train_thread(void* parameters) {
wanted_output = generate_wanted_output(labels[index[i]], 10);
loss += compute_mean_squared_error(network->input[network->size-1][0][0], wanted_output, 10);
gree(wanted_output, false);
gree(wanted_output);
backward_propagation(network, labels[index[i]]);
@@ -103,7 +103,7 @@ void* train_thread(void* parameters) {
load_image_param->index = index[i+1];
pthread_create(&tid, NULL, load_image, (void*) load_image_param);
}
write_256_image_in_network(param->dataset->images[index[i]], width, param->dataset->numComponents, network->width[0], network->input[0]);
write_image_in_network_260(param->dataset->images[index[i]], height, width, network->input[0]);
forward_propagation(network);
maxi = indice_max(network->input[network->size-1][0][0], param->dataset->numCategories);
backward_propagation(network, param->dataset->labels[index[i]]);
@@ -185,12 +185,8 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
//* Network creation
Network* network;
if (!recover) {
if (dataset_type == 0) {
network = create_network_lenet5(LEARNING_RATE, 0, RELU, NORMALIZED_XAVIER, input_width, input_depth);
//network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_width, input_depth);
} else {
network = create_network_VGG16(LEARNING_RATE, 0, RELU, NORMALIZED_XAVIER, dataset->numCategories);
}
network = create_network_lenet5(LEARNING_RATE, 0, RELU, NORMALIZED_XAVIER, input_width, input_depth);
//network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_width, input_depth);
} else {
network = read_network(recover);
network->learning_rate = LEARNING_RATE;

View File

@@ -8,11 +8,6 @@
// https://forums.developer.nvidia.com/t/find-the-limit-of-shared-memory-that-can-be-used-per-block/48556
#define MEMORY_BLOCK 49152
// Memory is only allocated in the most recently created block, so the list is never traversed.
// This slightly increases memory usage but saves a substantial amount of time:
// for VGG16, roughly 1% more memory is used,
// and initialisation drops from 1h02 to 2.4s on my hardware
#define MEMORY_TAIL_OPT
// We define our memory with a linked list of memory blocks
typedef struct Memory {
@@ -64,20 +59,6 @@ void print_memory_rec(Memory* mem);
void print_memory();
#ifdef __CUDACC__
extern "C"
#endif
/*
* Frees every memory block
*/
void free_all_memory();
/*
* Corresponding recursive function
*/
void free_all_memory_rec(Memory* mem);
/*
* Creates a memory block of size size
*/
@@ -90,11 +71,8 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem);
/*
* Tries to free the pointer ptr inside mem
* If already_freed is set, the program does not raise an error
* when the block matching the element has already been freed
* (e.g. when free_all_memory() is used)
*/
Memory* free_memory(void* ptr, Memory* mem, bool already_freed);
Memory* free_memory(void* ptr, Memory* mem);
#ifdef __CUDACC__
extern "C"
@@ -109,10 +87,7 @@ extern "C"
#endif
/*
* Frees memory allocated with nalloc
* If already_freed is set, the program does not raise an error
* when the block matching the element has already been freed
* (e.g. when free_all_memory() is used)
*/
void gree(void* ptr, bool already_freed);
void gree(void* ptr);
#endif
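This diff removes the MEMORY_TAIL_OPT fast path, free_all_memory() and the already_freed flag of gree(). The idea behind the removed tail optimisation is to remember the last block of the linked list and allocate from it in O(1) instead of walking every block. A hedged sketch of that scheme follows; the struct fields and helper names are assumptions, not the repository's definitions:

#include <stdlib.h>

// Simplified block: base pointer, capacity, bytes handed out, next block.
typedef struct MemBlock {
    void* start;
    size_t size;
    size_t used;
    struct MemBlock* next;
} MemBlock;

static MemBlock* head = NULL;
static MemBlock* tail = NULL;  // last created block: O(1) allocation target

static MemBlock* create_block(size_t min_size) {
    size_t cap = min_size > 49152 ? min_size : 49152;  // MEMORY_BLOCK-sized default
    MemBlock* b = (MemBlock*)malloc(sizeof(MemBlock));
    b->start = malloc(cap);
    b->size = cap;
    b->used = 0;
    b->next = NULL;
    return b;
}

void* tail_alloc(size_t sz) {
    if (tail == NULL) {
        head = tail = create_block(sz);    // first block
    } else if (tail->used + sz > tail->size) {
        tail->next = create_block(sz);     // append a fresh block at the end
        tail = tail->next;                 // no list traversal needed
    }
    void* ptr = (char*)tail->start + tail->used;
    tail->used += sz;
    return ptr;
}

Without the tail pointer, allocation has to start from the head of the list each time, which is what this commit reverts to with allocate_memory(nb_elements, size, memory) below.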

View File

@@ -8,12 +8,8 @@
#include "include/utils.h"
pthread_mutex_t memory_lock = PTHREAD_MUTEX_INITIALIZER;
Memory* memory = NULL;
#ifdef MEMORY_TAIL_OPT
Memory* tail = NULL;
#endif
pthread_mutex_t memory_lock = PTHREAD_MUTEX_INITIALIZER;
int get_distinct_allocations(Memory* mem) {
@@ -40,7 +36,6 @@ int get_memory_blocks_number() {
return get_length(memory);
}
void print_memory_rec(Memory* mem) {
if (!mem) {
return;
@@ -52,43 +47,12 @@ void print_memory_rec(Memory* mem) {
print_memory_rec(mem->next);
}
void print_memory() {
printf(BLUE "==== MEMORY ====\n" RESET);
print_memory_rec(memory);
}
#ifdef __CUDACC__
extern "C"
#endif
void free_all_memory() {
pthread_mutex_lock(&memory_lock); // We don't want ANY interruption so we lock here
free_all_memory_rec(memory);
#ifdef MEMORY_TAIL_OPT
tail = NULL;
#endif
pthread_mutex_unlock(&memory_lock);
}
void free_all_memory_rec(Memory* mem) {
if (!mem) {
return;
}
Memory* next = mem->next;
#ifdef __CUDACC__
cudaFree(mem->start);
#else
free(mem->start);
#endif
free(mem);
free_all_memory_rec(next);
}
Memory* create_memory_block(size_t size) {
Memory* mem = (Memory*)malloc(sizeof(Memory));
#ifdef __CUDACC__
@@ -104,9 +68,6 @@ Memory* create_memory_block(size_t size) {
mem->nb_alloc = 0;
mem->next = NULL;
mem->id = rand() %100000;
#ifdef MEMORY_TAIL_OPT
tail = mem;
#endif
return mem;
}
@@ -144,8 +105,8 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem) {
}
Memory* free_memory(void* ptr, Memory* mem, bool already_freed) {
if (!mem && !already_freed) {
Memory* free_memory(void* ptr, Memory* mem) {
if (!mem) {
printf_error((char*)"Le pointeur ");
printf("%p a déjà été libéré ou n'a jamais été alloué\n", ptr);
return mem;
@@ -155,13 +116,6 @@ Memory* free_memory(void* ptr, Memory* mem, bool already_freed) {
// printf(GREEN "%p <= %p < %p\n" RESET, mem->start, ptr, (void*)((intptr_t)mem->start + mem->size));
if (mem->nb_alloc == 0) {
Memory* mem_next = mem->next;
#ifdef MEMORY_TAIL_OPT
if (tail == mem) {
tail = memory;
}
#endif
#ifdef __CUDACC__
cudaFree(mem->start);
#else
@@ -173,7 +127,7 @@ Memory* free_memory(void* ptr, Memory* mem, bool already_freed) {
return mem;
}
} else {
mem->next = free_memory(ptr, mem->next, already_freed);
mem->next = free_memory(ptr, mem->next);
return mem;
}
}
@@ -191,11 +145,7 @@ void* nalloc(int nb_elements, size_t size) {
}
//printf("Distinct allocations: %d Blocks: %d\n", get_distinct_allocations(memory), get_length(memory));
//printf("Requested memory of size %ld\n", sz);
#ifdef MEMORY_TAIL_OPT
void* ptr = allocate_memory(nb_elements, size, tail);
#else
void* ptr = allocate_memory(nb_elements, size, memory);
#endif
pthread_mutex_unlock(&memory_lock);
return ptr;
@@ -208,10 +158,10 @@ void* nalloc(int nb_elements, size_t size) {
#ifdef __CUDACC__
extern "C"
#endif
void gree(void* ptr, bool already_freed) {
void gree(void* ptr) {
#if defined(__CUDACC__) || defined(TEST_MEMORY_MANAGEMENT)
pthread_mutex_lock(&memory_lock);
memory = free_memory(ptr, memory, already_freed);
memory = free_memory(ptr, memory);
pthread_mutex_unlock(&memory_lock);
#else
free(ptr);

View File

@@ -8,12 +8,8 @@
#include "include/utils.h"
pthread_mutex_t memory_lock = PTHREAD_MUTEX_INITIALIZER;
Memory* memory = NULL;
#ifdef MEMORY_TAIL_OPT
Memory* tail = NULL;
#endif
pthread_mutex_t memory_lock = PTHREAD_MUTEX_INITIALIZER;
int get_distinct_allocations(Memory* mem) {
@@ -40,7 +36,6 @@ int get_memory_blocks_number() {
return get_length(memory);
}
void print_memory_rec(Memory* mem) {
if (!mem) {
return;
@@ -52,43 +47,12 @@ void print_memory_rec(Memory* mem) {
print_memory_rec(mem->next);
}
void print_memory() {
printf(BLUE "==== MEMORY ====\n" RESET);
print_memory_rec(memory);
}
#ifdef __CUDACC__
extern "C"
#endif
void free_all_memory() {
pthread_mutex_lock(&memory_lock); // We don't want ANY interruption so we lock here
free_all_memory_rec(memory);
#ifdef MEMORY_TAIL_OPT
tail = NULL;
#endif
pthread_mutex_unlock(&memory_lock);
}
void free_all_memory_rec(Memory* mem) {
if (!mem) {
return;
}
Memory* next = mem->next;
#ifdef __CUDACC__
cudaFree(mem->start);
#else
free(mem->start);
#endif
free(mem);
free_all_memory_rec(next);
}
Memory* create_memory_block(size_t size) {
Memory* mem = (Memory*)malloc(sizeof(Memory));
#ifdef __CUDACC__
@@ -104,9 +68,6 @@ Memory* create_memory_block(size_t size) {
mem->nb_alloc = 0;
mem->next = NULL;
mem->id = rand() %100000;
#ifdef MEMORY_TAIL_OPT
tail = mem;
#endif
return mem;
}
@@ -144,8 +105,8 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem) {
}
Memory* free_memory(void* ptr, Memory* mem, bool already_freed) {
if (!mem && !already_freed) {
Memory* free_memory(void* ptr, Memory* mem) {
if (!mem) {
printf_error((char*)"Le pointeur ");
printf("%p a déjà été libéré ou n'a jamais été alloué\n", ptr);
return mem;
@@ -155,13 +116,6 @@ Memory* free_memory(void* ptr, Memory* mem, bool already_freed) {
// printf(GREEN "%p <= %p < %p\n" RESET, mem->start, ptr, (void*)((intptr_t)mem->start + mem->size));
if (mem->nb_alloc == 0) {
Memory* mem_next = mem->next;
#ifdef MEMORY_TAIL_OPT
if (tail == mem) {
tail = memory;
}
#endif
#ifdef __CUDACC__
cudaFree(mem->start);
#else
@@ -173,7 +127,7 @@ Memory* free_memory(void* ptr, Memory* mem, bool already_freed) {
return mem;
}
} else {
mem->next = free_memory(ptr, mem->next, already_freed);
mem->next = free_memory(ptr, mem->next);
return mem;
}
}
@@ -191,11 +145,7 @@ void* nalloc(int nb_elements, size_t size) {
}
//printf("Distinct allocations: %d Blocks: %d\n", get_distinct_allocations(memory), get_length(memory));
//printf("Requested memory of size %ld\n", sz);
#ifdef MEMORY_TAIL_OPT
void* ptr = allocate_memory(nb_elements, size, tail);
#else
void* ptr = allocate_memory(nb_elements, size, memory);
#endif
pthread_mutex_unlock(&memory_lock);
return ptr;
@@ -208,10 +158,10 @@ void* nalloc(int nb_elements, size_t size) {
#ifdef __CUDACC__
extern "C"
#endif
void gree(void* ptr, bool already_freed) {
void gree(void* ptr) {
#if defined(__CUDACC__) || defined(TEST_MEMORY_MANAGEMENT)
pthread_mutex_lock(&memory_lock);
memory = free_memory(ptr, memory, already_freed);
memory = free_memory(ptr, memory);
pthread_mutex_unlock(&memory_lock);
#else
free(ptr);

View File

@@ -72,11 +72,11 @@ float*** create_empty_matrix(int n, int p, int q) {
void free_matrix(float*** matrix, int n, int p) {
for (int i=0; i < n; i++) {
for (int j=0; j < p; j++) {
gree(matrix[i][j], false);
gree(matrix[i][j]);
}
gree(matrix[i], false);
gree(matrix[i]);
}
gree(matrix, false);
gree(matrix);
}
bool check_matrices_equality(float*** m1, float*** m2, int n, int p, int q, int acceptation) {
@@ -140,7 +140,7 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
double cpu_time_used, gpu_time_used;
start_time = omp_get_wtime();
make_convolution_device(kernel, input, output_gpu, output_width, 1, 0);
make_convolution_device(kernel, input, output_gpu, output_width, 1);
end_time = omp_get_wtime();
@@ -149,7 +149,7 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
start_time = omp_get_wtime();
make_convolution_cpu(kernel, input, output_cpu, output_width, 1, 0);
make_convolution_cpu(kernel, input, output_cpu, output_width, 1);
end_time = omp_get_wtime();
cpu_time_used = end_time - start_time;
@@ -177,11 +177,11 @@ void run_convolution_test(int input_width, int output_width, int rows, int colum
free_matrix(kernel->v_d_weights[i], kernel->columns, kernel->k_size);
#endif
}
gree(kernel->weights, false);
gree(kernel->d_weights, false);
gree(kernel->weights);
gree(kernel->d_weights);
#ifdef ADAM_CNN_WEIGHTS
gree(kernel->s_d_weights, false);
gree(kernel->v_d_weights, false);
gree(kernel->s_d_weights);
gree(kernel->v_d_weights);
#endif
free_matrix(input, kernel->rows, input_width);

View File

@@ -76,13 +76,13 @@ void test1(int activation, bool use_local_kernel) {
exit(1);
}
}
gree(input[i][j], false);
gree(input[i][j]);
free(input_initial[i][j]);
}
gree(input[i], false);
gree(input[i]);
free(input_initial[i]);
}
gree(input, false);
gree(input);
free(input_initial);
printf("\t" GREEN "OK\n" RESET);

View File

@@ -104,24 +104,24 @@ void run_matrices_test(int n, int p, int q) {
// Free the allocated memory
for (int i=0; i < n; i++) {
gree(matrix1[i], false);
gree(matrix1[i]);
}
gree(matrix1, false);
gree(matrix1);
for (int i=0; i < p; i++) {
gree(matrix2[i], false);
gree(matrix2[i]);
}
gree(matrix2, false);
gree(matrix2);
for (int i=0; i < n; i++) {
gree(result_cpu[i], false);
gree(result_cpu[i]);
}
gree(result_cpu, false);
gree(result_cpu);
for (int i=0; i < n; i++) {
gree(result_gpu[i], false);
gree(result_gpu[i]);
}
gree(result_gpu, false);
gree(result_gpu);
}

View File

@@ -28,7 +28,7 @@ int main() {
printf_error((char*)"Plus d'un élément de mémoire alloué en une seule allocation\n");
exit(1);
}
gree(ptr, false);
gree(ptr);
if (! (get_memory_blocks_number() == blocks_used)) {
printf_error((char*)"La mémoire n'a pas été libérée correctement\n");
exit(1);
@@ -56,11 +56,11 @@ int main() {
// We test that the memory does not overlap itself
assert(pointeurs[i][j] == i);
}
gree(pointeurs[i], false);
gree(pointeurs[i]);
}
gree(ptr1, false);
gree(ptr2, false);
gree(ptr1);
gree(ptr2);
if (! (get_memory_distinct_allocations() == 0 && get_memory_blocks_number() == 0)) {
printf_error((char*)"La mémoire n'a pas été libérée correctement\n");
exit(1);

View File

@@ -45,7 +45,7 @@ int main() {
printf("Plus d'un élément de mémoire alloué en une seule allocation\n");
exit(1);
}
gree(ptr, false);
gree(ptr);
if (! (get_memory_blocks_number() == blocks_used)) {
printf("La mémoire n'a pas été libérée correctement\n");
exit(1);
@@ -86,11 +86,11 @@ int main() {
// We test that the memory does not overlap itself
assert(pointeurs[i][j] == i+1);
}
gree(pointeurs[i], false);
gree(pointeurs[i]);
}
gree(ptr1, false);
gree(ptr2, false);
gree(ptr1);
gree(ptr2);
if (! (get_memory_distinct_allocations() == 0 && get_memory_blocks_number() == 0)) {
printf("La mémoire n'a pas été libérée correctement\n");
exit(1);