Attempt to fix VGG16 architecture

julienChemillier 2023-05-19 17:27:03 +02:00
parent 0aa7df2869
commit 4fcfba5b33


@@ -36,33 +36,32 @@ Network* create_network_alexnet(float learning_rate, int dropout, int activation
 }
 Network* create_network_VGG16(float learning_rate, int dropout, int activation, int initialisation, int size_output) {
-    Network* network = create_network(23, learning_rate, dropout, initialisation, 256, 3);
-    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
-    add_convolution(network, 3, 64, 1, 0, activation); // Conv3-64
+    Network* network = create_network(22, learning_rate, dropout, initialisation, 256, 3);
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
+    add_convolution(network, 3, 64, 1, 1, activation); // Conv3-64
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 128, 1, 0, activation); // Conv3-128
+    add_convolution(network, 3, 128, 1, 1, activation); // Conv3-128
     add_convolution(network, 1, 128, 1, 0, activation); // Conv1-128
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
-    add_convolution(network, 3, 256, 1, 0, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
+    add_convolution(network, 3, 256, 1, 1, activation); // Conv3-256
     add_convolution(network, 1, 256, 1, 0, activation); // Conv1-256
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
-    add_convolution(network, 3, 512, 1, 0, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
+    add_convolution(network, 3, 512, 1, 1, activation); // Conv3-512
     add_convolution(network, 1, 512, 1, 0, activation); // Conv1-512
     add_average_pooling(network, 2, 2, 0); // Max Pool
-    add_dense_linearisation(network, 2048, activation);
-    add_dense(network, 2048, activation);
-    add_dense(network, 256, activation);
+    add_dense_linearisation(network, 4096, activation);
+    add_dense(network, 4096, activation);
     add_dense(network, size_output, SOFTMAX);
     return network;
 }
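
A quick sanity check of the padding change (not part of the commit; the out_size helper below is hypothetical, and the assumed argument order of add_convolution — kernel size, filter count, stride, padding, activation — is inferred from the Conv3/Conv1 comments above): with the 256x256x3 input given to create_network, every 3x3 convolution with padding 1 keeps the spatial size, the 1x1 convolutions with padding 0 do too, and each 2x2 pooling with stride 2 halves it, which leaves an 8x8x512 volume in front of the dense layers.

#include <stdio.h>

/* Hypothetical check, not part of the repository: trace the feature-map size
 * through the five convolution/pooling blocks of create_network_VGG16 above,
 * assuming add_convolution takes (kernel, filters, stride, padding, activation)
 * and the usual formula out = (in + 2*padding - kernel) / stride + 1. */
static int out_size(int in, int kernel, int stride, int padding) {
    return (in + 2 * padding - kernel) / stride + 1;
}

int main(void) {
    int size = 256; /* input width/height passed to create_network(..., 256, 3) */
    for (int block = 0; block < 5; block++) {
        /* Every convolution in a block preserves the size, so one of each
         * kind is enough here; only the pooling at the end changes it. */
        size = out_size(size, 3, 1, 1); /* 3x3 conv, padding 1 -> unchanged */
        size = out_size(size, 1, 1, 0); /* 1x1 conv, padding 0 -> unchanged */
        size = out_size(size, 2, 2, 0); /* 2x2 pooling, stride 2 -> halved  */
    }
    /* Prints 8, i.e. an 8x8x512 volume (32768 values) before the dense layers. */
    printf("spatial size after 5 blocks: %d\n", size);
    return 0;
}

Under the same assumptions, the previous padding of 0 would have shrunk the feature map by 2 pixels at every 3x3 convolution, which appears to be what this commit is correcting.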