Mirror of https://github.com/augustin64/projet-tipe, synced 2025-01-23 23:26:25 +01:00

Compare commits

No commits in common. "964687d1b4277a9e4c6a98c54b48b172b379487b" and "0aa7df2869df73e56f83baf62fe3a930b484a19b" have entirely different histories.
@@ -22,13 +22,6 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
  */
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output);
 
-
-/*
- * Returns a network following the original VGG16 architecture, taking a 3x227x227 input
- * and producing an output of size 1,000
- */
-Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation);
-
 /*
  * Returns a network without convolutions, similar to the one used in src/dense
  */
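The hunk above only touches declarations. For orientation, a minimal driver for the surviving constructor might look like the sketch below; the header path, the integer activation/initialisation codes, and the omitted cleanup are assumptions, since only the prototypes and the SOFTMAX constant are visible in this diff:

#include "neuron.h"  // hypothetical include; the real header path is not shown here

int main(void) {
    // learning rate 0.1, no dropout; 1 and 2 stand in for whichever
    // activation and initialisation codes the project defines elsewhere
    Network* net = create_network_VGG16(0.1f, 0, 1, 2, 10 /* size_output */);
    (void)net;  // training and deallocation helpers are outside this diff
    return 0;
}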
@@ -36,67 +36,37 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
 }
 
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
-    Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3);
-    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
-    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    Network* network = create_network(23, learning_rate, dropout, initialisation, 256, 3);
+    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
+    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
+    add_convolution(network, 3, 128, 1, 0, activation); // Conv3-128
     add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
-    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
     add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
 
-    add_dense_linearisation(network, 4096, activation);
-    add_dense(network, 4096, activation);
+    add_dense_linearisation(network, 2048, activation);
+    add_dense(network, 2048, activation);
+    add_dense(network, 256, activation);
     add_dense(network, size_output, SOFTMAX);
     return network;
 }
 
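Two coordinated changes land in create_network_VGG16: every 3x3 convolution drops its padding (fifth argument 1 -> 0), and the head shrinks from 4096/4096 to 2048/2048/256, one dense layer more, which matches the first argument of create_network moving from 22 to 23. A width trace (my own arithmetic: a pad-0 3x3 conv maps w to w-2, a 1x1 conv keeps w, and each 2x2 pool halves with integer division) shows what now reaches the flattening layer:

#include <stdio.h>

// Width trace through the new (pad 0) VGG16 variant for a 256-wide input.
int main(void) {
    int w = 256;
    w = (w - 2 - 2) / 2;  // two Conv3-64, pool              -> 126
    w = (w - 2) / 2;      // Conv3-128 (Conv1 keeps w), pool -> 62
    w = (w - 2 - 2) / 2;  // two Conv3-256, pool             -> 29
    w = (w - 2 - 2) / 2;  // two Conv3-512, pool             -> 12
    w = (w - 2 - 2) / 2;  // two Conv3-512, pool             -> 4
    printf("%d -> %d values flattened\n", w, w * w * 512);  // 4 -> 8192
    return 0;
}

So add_dense_linearisation(network, 2048, ...) now starts from 4*4*512 = 8192 inputs, whereas the padded variant preserved width through the convolutions (256 halved five times gives 8) and fed 8*8*512 = 32768 inputs into its 4096-unit layer.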
-Network* create_network_VGG16_227(float learning_rate, int dropout, int activation, int initialisation) {
-    Network* network = create_network(22, learning_rate, dropout, initialisation, 227, 3);
-    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
-    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
-    add_average_pooling(network, 2, 2, 0); // Max Pool
-
-    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
-    add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
-    add_average_pooling(network, 2, 2, 0); // Max Pool
-
-    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
-    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
-    add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
-    add_average_pooling(network, 2, 2, 0); // Max Pool
-
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
-    add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
-    add_average_pooling(network, 2, 2, 0); // Max Pool
-
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
-    add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
-    add_average_pooling(network, 2, 2, 0); // Max Pool
-
-    add_dense_linearisation(network, 4096, activation);
-    add_dense(network, 4096, activation);
-    add_dense(network, 1000, SOFTMAX);
-    return network;
-}
-
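The removed create_network_VGG16_227 kept the classic VGG16 geometry instead: pad-1 3x3 convolutions preserve width, so only the five poolings shrink the input. Tracing its 227-wide input (my own arithmetic; the original VGG16 paper uses 224x224, and both widths end at 7 after five halvings under integer division):

#include <stdio.h>

// 227 -> 113 -> 56 -> 28 -> 14 -> 7 across the five 2x2 poolings.
int main(void) {
    int w = 227;
    for (int i = 0; i < 5; i++)
        w /= 2;
    printf("%d x %d x 512 = %d\n", w, w, w * w * 512);  // 7 x 7 x 512 = 25088
    return 0;
}

Those 25088 values were flattened into the first 4096-unit dense layer, matching the published VGG16 classifier head of 4096 -> 4096 -> 1000.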
 Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
     Network* network = create_network(3, learning_rate, dropout, initialisation, input_width, input_depth);
     add_dense_linearisation(network, 80, activation);
@@ -136,7 +136,8 @@ void* allocate_memory(int nb_elements, size_t size, Memory* mem) {
         //printf("Available memory: %ld. Needed: %ld\n", mem->size - ((intptr_t)mem->cursor - (intptr_t)mem->start), nb_elements*size);
         // Otherwise, move on to the next block in the list
         if (!mem->next) {
-            mem->next = create_memory_block(MEMORY_BLOCK < (nb_elements+1)*size ? (nb_elements+1)*size : MEMORY_BLOCK);
+            //! WARNING: May cause infinite allocations when trying to allocate more than MEMORY_BLOCK size at once that is not naturally aligned (CUDA only)
+            mem->next = create_memory_block(MEMORY_BLOCK < nb_elements*size ? nb_elements*size : MEMORY_BLOCK);
         }
         return allocate_memory(nb_elements, size, mem->next);
     }
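The warning flags a real failure mode of this design: allocate_memory recurses down the linked list of Memory blocks and, when nothing fits, appends a block sized max(MEMORY_BLOCK, nb_elements*size). If alignment then bumps the cursor inside that exactly-sized block, the request can never fit and every recursion chains yet another block. A self-contained sketch of the arithmetic (ALIGNMENT 16 and the concrete numbers are my stand-ins; the project's real Memory struct, CUDA alignment rule, and create_memory_block are not fully shown in this diff):

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT 16  // stand-in for the CUDA alignment the warning refers to

int main(void) {
    size_t needed = 1000;        // nb_elements * size, larger than usual
    size_t block_size = needed;  // block created with the exact request
    uintptr_t start = 8;         // block start that is not naturally aligned
    uintptr_t cursor = (start + ALIGNMENT - 1) & ~(uintptr_t)(ALIGNMENT - 1);
    size_t usable = block_size - (size_t)(cursor - start);
    // usable (992) < needed (1000): the fit test fails on the fresh block,
    // "if (!mem->next)" fires again, and the recursion never terminates.
    // Over-allocating, as the (nb_elements+1)*size line on the other side
    // of this hunk does, absorbs the alignment slack.
    printf("needed=%zu usable=%zu -> fits: %s\n",
           needed, usable, usable >= needed ? "yes" : "no");
    return 0;
}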