Mirror of https://github.com/augustin64/projet-tipe
Synced 2025-01-23 23:26:25 +01:00

Comparing commits 0aa7df2869...964687d1b4 (3 commits: 964687d1b4, a4b42445c1, 4fcfba5b33)
@@ -22,6 +22,13 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
 */
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
+
+/*
+* Returns a network following the original VGG16 architecture, taking a 3x227x227 input
+* and an output of size 1,000
+*/
+Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation);
 
 /*
 * Returns a network with no convolutions, similar to the one used in src/dense
 */
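
For orientation, a hedged usage sketch of the new constructor. The integer activation and initialisation arguments take project-defined codes that this diff does not show, so the 1 and 0 below are placeholders, not the project's constants:

typedef struct Network Network; /* opaque here; defined by the project */
Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation);

int main(void) {
    /* learning rate 0.01, no dropout; 1 and 0 are placeholder codes */
    Network* net = create_network_VGG16_227(0.01f, 0, 1, 0);
    (void)net; /* training and cleanup are project-specific and omitted */
    return 0;
}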
@@ -36,37 +36,67 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
 }
 
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
-    Network* network = create_network(23, learning_rate, dropout, initialisation, 256, 3);
-    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
-    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3);
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 128, 1, 0, activation); // Conv3-128
+    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
     add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
-    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
     add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_dense_linearisation(network, 2048, activation);
-    add_dense(network, 2048, activation);
-    add_dense(network, 256, activation);
+    add_dense_linearisation(network, 4096, activation);
+    add_dense(network, 4096, activation);
     add_dense(network, size_output, SOFTMAX);
     return network;
 }
 
+Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation) {
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3);
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+
+    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
+    add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
+    add_average_pooling(network, 2, 2, 0); // Max Pool
+
+    add_dense_linearisation(network, 4096, activation);
+    add_dense(network, 4096, activation);
+    add_dense(network, 1000, SOFTMAX);
+    return network;
+}
+
 Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
     Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
     add_dense_linearisation(network, 80, activation);
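
Two things in these builders are worth sanity-checking. Assuming the integer arguments of add_convolution are (kernel_size, filters, stride, padding, activation), as the Conv3-*/Conv1-* comments suggest, the padding change from 0 to 1 is exactly what stops each 3x3 convolution from shrinking the feature maps, and five 2x2 stride-2 poolings take the 227-wide input of create_network_VGG16_227 down to 7. A standalone check using the standard convolution output-size formula (not code from this repository):

#include <stdio.h>

/* Standard convolution output size: out = (in + 2*pad - kernel) / stride + 1 */
static int conv_out(int in, int kernel, int stride, int pad) {
    return (in + 2 * pad - kernel) / stride + 1;
}

int main(void) {
    printf("%d\n", conv_out(227, 3, 1, 0)); /* 225: an unpadded 3x3 conv shrinks the map */
    printf("%d\n", conv_out(227, 3, 1, 1)); /* 227: padding 1 preserves it, as in VGG16  */
    printf("%d\n", conv_out(227, 1, 1, 0)); /* 227: 1x1 convs need no padding            */

    int width = 227; /* width after each 2x2 stride-2 pool: 227, 113, 56, 28, 14, 7 */
    for (int stage = 0; stage < 5; stage++)
        width /= 2;
    printf("%d x %d x 512 = %d\n", width, width, width * width * 512);
    /* 7 x 7 x 512 = 25088: the flattened size the first dense layer would see,
       if add_dense_linearisation flattens the final feature maps */
    return 0;
}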
@@ -136,8 +136,7 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem) {
     // printf("Available memory: %ld. Required: %ld\n", mem->size - ((intptr_t)mem->cursor - (intptr_t)mem->start), nb_elements*size);
     // Otherwise, continue with the next element in the list
     if (!mem->next) {
-        //! WARNING: May cause infinite allocations when trying to allocate more than MEMORY_BLOCK size at once that is not naturally aligned (CUDA only)
-        mem->next = create_memory_block(MEMORY_BLOCK < nb_elements*size ? nb_elements*size : MEMORY_BLOCK);
+        mem->next = create_memory_block(MEMORY_BLOCK < (nb_elements+1)*size ? (nb_elements+1)*size : MEMORY_BLOCK);
     }
     return allocate_memory(nb_elements, size, mem->next);
 }
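
This hunk changes the growth policy used when no existing block can satisfy a request: the freshly chained block is now sized max(MEMORY_BLOCK, (nb_elements+1)*size) rather than max(MEMORY_BLOCK, nb_elements*size). Over-allocating by one element leaves room for alignment padding, so a request slightly larger than MEMORY_BLOCK can no longer yield an endless chain of too-small blocks, which is why the CUDA warning is dropped. A standalone sketch of that policy under simplified types (Block, block_create and block_alloc are illustrative names; the project's real Memory struct, cursor handling, CUDA alignment, and error handling are omitted):

#include <stddef.h>
#include <stdlib.h>

#define MEMORY_BLOCK (1 << 24) /* placeholder; the project defines its own size */

typedef struct Block {
    char*  start;          /* base of the backing buffer   */
    size_t size;           /* total capacity of this block */
    size_t used;           /* bytes already handed out     */
    struct Block* next;    /* next block in the chain      */
} Block;

static Block* block_create(size_t size) {
    Block* b = malloc(sizeof(Block));
    b->start = malloc(size);
    b->size  = size;
    b->used  = 0;
    b->next  = NULL;
    return b;
}

static void* block_alloc(Block* b, size_t nb_elements, size_t size) {
    size_t needed = nb_elements * size;
    if (b->size - b->used >= needed) {      /* request fits in this block */
        void* p = b->start + b->used;
        b->used += needed;
        return p;
    }
    if (!b->next) {
        /* Chain a new block: at least MEMORY_BLOCK, and one element larger
           than the request, so padding can never leave it too small.      */
        size_t grown = (nb_elements + 1) * size;
        b->next = block_create(MEMORY_BLOCK < grown ? grown : MEMORY_BLOCK);
    }
    return block_alloc(b->next, nb_elements, size); /* retry in the next block */
}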