Compare commits

..

3 Commits

Author             SHA1        Message                                               Date
julienChemillier   964687d1b4  mem management:avoid potential infinite allocation    2023-05-19 17:43:56 +02:00
julienChemillier   a4b42445c1  Add VGG16 architecture with 227x227 image input       2023-05-19 17:28:07 +02:00
                   4fcfba5b33  Attempt to fix VGG16 architecture                     2023-05-19 17:27:03 +02:00
4 changed files with 52 additions and 17 deletions

View File

@@ -22,6 +22,13 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
 */
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
+/*
+* Returns a network following the original VGG16 architecture, taking a 3x227x227 input
+* and producing an output of size 1 000
+*/
+Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation);
 /*
 * Returns a network without convolutions, similar to the one used in src/dense
 */

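For reference, a minimal calling sketch for the newly declared constructor. Everything here beyond the signature is an assumption for illustration: the header name and the numeric activation/initialisation codes are placeholders, since the project's real constants and file paths are not visible in this compare.

#include "models.h" // assumed header name; this compare does not show the file path

int main(void) {
    // Placeholder codes; the project defines its real activation and
    // initialisation constants elsewhere.
    int activation = 1;
    int initialisation = 1;

    // Original VGG16: 3x227x227 input, 1000-way softmax output.
    Network* network = create_network_VGG16_227(0.01f /* learning_rate */,
                                                0     /* dropout */,
                                                activation, initialisation);
    (void)network; // the deallocation API is not shown in this compare, so freeing is omitted
    return 0;
}
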
View File

@@ -36,37 +36,67 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
 }
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
-    Network* network = create_network(23, learning_rate, dropout, initialisation, 256, 3);
-    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
-    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3);
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 128, 1, 0, activation); // Conv3-128
+    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
     add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
-    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
     add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_dense_linearisation(network, 2048, activation);
-    add_dense(network, 2048, activation);
-    add_dense(network, 256, activation);
+    add_dense_linearisation(network, 4096, activation);
+    add_dense(network, 4096, activation);
     add_dense(network, size_output, SOFTMAX);
     return network;
 }
+Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation) {
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3);
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
+    add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+    add_dense_linearisation(network, 4096, activation);
+    add_dense(network, 4096, activation);
+    add_dense(network, 1000, SOFTMAX);
+    return network;
+}
 Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
     Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
     add_dense_linearisation(network, 80, activation);

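The architecture fix above is mostly about padding: with stride 1, a kxk convolution maps an input of width w to w + 2*padding - k + 1, so switching the 3x3 convolutions from padding 0 to padding 1 makes them size-preserving and leaves all downsampling to the pooling layers, exactly as the original VGG16 paper prescribes (the 1x1 convolutions were already size-preserving at padding 0). The layer-count argument also drops from 23 to 22 because the classifier loses one dense layer, moving from 2048/2048/256/output to the paper's 4096/4096/output. A quick sanity check of the width arithmetic, using a throwaway helper (conv_out is not part of the project):

#include <stdio.h>

/* Output width of a stride-1 convolution: out = in + 2*padding - kernel + 1 */
static int conv_out(int in, int kernel, int padding) {
    return in + 2 * padding - kernel + 1;
}

int main(void) {
    int w = 227; // input width used by create_network_VGG16_227
    printf("3x3, pad 0: %d -> %d\n", w, conv_out(w, 3, 0)); // 227 -> 225, shrinks at every layer
    printf("3x3, pad 1: %d -> %d\n", w, conv_out(w, 3, 1)); // 227 -> 227, size-preserving
    printf("1x1, pad 0: %d -> %d\n", w, conv_out(w, 1, 0)); // 227 -> 227, size-preserving
    return 0;
}
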
View File

@@ -136,8 +136,7 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem) {
     //printf("Available memory: %ld. Needed: %ld\n", mem->size - ((intptr_t)mem->cursor - (intptr_t)mem->start), nb_elements*size);
     // Otherwise, move on to the next element of the list
     if (!mem->next) {
-        //! WARNING: May cause infinite allocations when trying to allocate more than MEMORY_BLOCK size at once that is not naturally aligned (CUDA only)
-        mem->next = create_memory_block(MEMORY_BLOCK < nb_elements*size ? nb_elements*size : MEMORY_BLOCK);
+        mem->next = create_memory_block(MEMORY_BLOCK < (nb_elements+1)*size ? (nb_elements+1)*size : MEMORY_BLOCK);
     }
     return allocate_memory(nb_elements, size, mem->next);
 }

View File

@@ -136,8 +136,7 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem) {
     //printf("Available memory: %ld. Needed: %ld\n", mem->size - ((intptr_t)mem->cursor - (intptr_t)mem->start), nb_elements*size);
     // Otherwise, move on to the next element of the list
     if (!mem->next) {
-        //! WARNING: May cause infinite allocations when trying to allocate more than MEMORY_BLOCK size at once that is not naturally aligned (CUDA only)
-        mem->next = create_memory_block(MEMORY_BLOCK < nb_elements*size ? nb_elements*size : MEMORY_BLOCK);
+        mem->next = create_memory_block(MEMORY_BLOCK < (nb_elements+1)*size ? (nb_elements+1)*size : MEMORY_BLOCK);
     }
     return allocate_memory(nb_elements, size, mem->next);
 }
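
These last two hunks (apparently the CPU and CUDA variants of the same allocator) implement the "avoid potential infinite allocation" commit. The failure mode flagged by the removed warning: when a request exceeds MEMORY_BLOCK, the old code created a fresh block of exactly nb_elements*size bytes; if part of that block is then lost to pointer alignment (the CUDA case), the usable space comes up short, the retry fails again, and the recursion keeps appending too-small blocks forever. Sizing new blocks at (nb_elements+1)*size reserves one extra element's worth of bytes as alignment slack. A self-contained sketch of the arithmetic, with invented names rather than the project's real allocator:

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT 16 // stand-in for the alignment CUDA imposes on the block

/* Usable bytes once the block's base pointer is rounded up to ALIGNMENT. */
static size_t usable_after_alignment(size_t capacity, uintptr_t base) {
    uintptr_t aligned = (base + ALIGNMENT - 1) & ~(uintptr_t)(ALIGNMENT - 1);
    size_t slack = (size_t)(aligned - base);
    return slack > capacity ? 0 : capacity - slack;
}

int main(void) {
    size_t size = 8, nb_elements = 125;   // request: 125 * 8 = 1000 bytes
    size_t request = nb_elements * size;
    uintptr_t base = 0x1008;              // deliberately misaligned base address

    // Old sizing: a block of exactly nb_elements*size bytes. Alignment slack
    // makes it too small, the allocation fails again on the new block, and
    // the real allocator would recurse and append such blocks forever.
    size_t old_usable = usable_after_alignment(request, base);
    printf("old: usable %zu / needed %zu -> %s\n", old_usable, request,
           old_usable >= request ? "fits" : "too small, allocate again");

    // New sizing: (nb_elements+1)*size leaves `size` spare bytes for slack.
    size_t new_usable = usable_after_alignment((nb_elements + 1) * size, base);
    printf("new: usable %zu / needed %zu -> %s\n", new_usable, request,
           new_usable >= request ? "fits" : "too small");
    return 0;
}

One caveat, at least under this toy model's assumptions: the extra element only absorbs slack up to `size` bytes, so an alignment gap larger than one element would still fall short.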