mirror of https://github.com/augustin64/projet-tipe (synced 2025-01-23 15:16:26 +01:00)

Change 'output_units' to 'size_output'

This commit is contained in:
parent c67d2bf697
commit 9ed53ceabb
@@ -45,7 +45,7 @@ type | nom de la variable | commentaire
 uint32_t|activation|
 uint32_t|linearisation|
 uint32_t|size_input|
-uint32_t|output_units|
+uint32_t|size_output|
 
 #### Si la couche est de type pooling:
 
 type | nom de la variable | commentaire
@@ -76,7 +76,7 @@ type | nom de la variable | commentaire
 :---:|:---:|:---:
 float|bias[0]|biais
 float|...|
-float|bias[nn->output_units-1]|biais
+float|bias[nn->size_output-1]|biais
 float|weights[0][0]|poids
 float|...|
-float|weights[nn->size_input-1][nn->output_units-1]|
+float|weights[nn->size_input-1][nn->size_output-1]|
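Concretely, a worked example with illustrative sizes (not values from the commit): a dense layer with `size_input = 784` and `size_output = 10` stores 10 bias floats followed by 784 × 10 = 7840 weight floats, i.e. 7850 floats in the body after the header fields above.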
@@ -202,7 +202,7 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
     network->size++;
 }
 
-void add_dense(Network* network, int output_units, int activation) {
+void add_dense(Network* network, int size_output, int activation) {
     int n = network->size;
     int k_pos = n-1;
     int size_input = network->width[k_pos];
@@ -217,31 +217,31 @@ void add_dense(Network* network, int output_units, int activation) {
     network->kernel[k_pos]->linearisation = 0;
     network->kernel[k_pos]->pooling = 0;
     nn->size_input = size_input;
-    nn->output_units = output_units;
-    nn->bias = (float*)nalloc(sizeof(float)*output_units);
-    nn->d_bias = (float*)nalloc(sizeof(float)*output_units);
-    for (int i=0; i < output_units; i++) {
+    nn->size_output = size_output;
+    nn->bias = (float*)nalloc(sizeof(float)*size_output);
+    nn->d_bias = (float*)nalloc(sizeof(float)*size_output);
+    for (int i=0; i < size_output; i++) {
         nn->d_bias[i] = 0.;
     }
 
     nn->weights = (float**)nalloc(sizeof(float*)*size_input);
     nn->d_weights = (float**)nalloc(sizeof(float*)*size_input);
     for (int i=0; i < size_input; i++) {
-        nn->weights[i] = (float*)nalloc(sizeof(float)*output_units);
-        nn->d_weights[i] = (float*)nalloc(sizeof(float)*output_units);
-        for (int j=0; j < output_units; j++) {
+        nn->weights[i] = (float*)nalloc(sizeof(float)*size_output);
+        nn->d_weights[i] = (float*)nalloc(sizeof(float)*size_output);
+        for (int j=0; j < size_output; j++) {
             nn->d_weights[i][j] = 0.;
         }
     }
 
-    initialisation_1d_matrix(network->initialisation, nn->bias, output_units, size_input);
-    initialisation_2d_matrix(network->initialisation, nn->weights, size_input, output_units, size_input, output_units);
-    create_a_line_input_layer(network, n, output_units);
-    create_a_line_input_z_layer(network, n, output_units);
+    initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input);
+    initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
+    create_a_line_input_layer(network, n, size_output);
+    create_a_line_input_z_layer(network, n, size_output);
     network->size++;
 }
 
-void add_dense_linearisation(Network* network, int output_units, int activation) {
+void add_dense_linearisation(Network* network, int size_output, int activation) {
     // Can replace size_input by a research of this dim
 
     int n = network->size;
@@ -258,25 +258,25 @@ void add_dense_linearisation(Network* network, int output_units, int activation)
     network->kernel[k_pos]->linearisation = 1;
     network->kernel[k_pos]->pooling = 0;
     nn->size_input = size_input;
-    nn->output_units = output_units;
+    nn->size_output = size_output;
 
-    nn->bias = (float*)nalloc(sizeof(float)*output_units);
-    nn->d_bias = (float*)nalloc(sizeof(float)*output_units);
-    for (int i=0; i < output_units; i++) {
+    nn->bias = (float*)nalloc(sizeof(float)*size_output);
+    nn->d_bias = (float*)nalloc(sizeof(float)*size_output);
+    for (int i=0; i < size_output; i++) {
         nn->d_bias[i] = 0.;
     }
     nn->weights = (float**)nalloc(sizeof(float*)*size_input);
     nn->d_weights = (float**)nalloc(sizeof(float*)*size_input);
     for (int i=0; i < size_input; i++) {
-        nn->weights[i] = (float*)nalloc(sizeof(float)*output_units);
-        nn->d_weights[i] = (float*)nalloc(sizeof(float)*output_units);
-        for (int j=0; j < output_units; j++) {
+        nn->weights[i] = (float*)nalloc(sizeof(float)*size_output);
+        nn->d_weights[i] = (float*)nalloc(sizeof(float)*size_output);
+        for (int j=0; j < size_output; j++) {
             nn->d_weights[i][j] = 0.;
         }
     }
-    initialisation_1d_matrix(network->initialisation, nn->bias, output_units, size_input);
-    initialisation_2d_matrix(network->initialisation, nn->weights, size_input, output_units, size_input, output_units);
-    create_a_line_input_layer(network, n, output_units);
-    create_a_line_input_z_layer(network, n, output_units);
+    initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input);
+    initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
+    create_a_line_input_layer(network, n, size_output);
+    create_a_line_input_z_layer(network, n, size_output);
     network->size++;
 }
@@ -52,11 +52,11 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
 /*
 * Ajoute au réseau une couche dense et initialise les poids et les biais
 */
-void add_dense(Network* network, int output_units, int activation);
+void add_dense(Network* network, int size_output, int activation);
 
 /*
 * Ajoute au réseau une couche dense qui aplatit
 */
-void add_dense_linearisation(Network* network, int output_units, int activation);
+void add_dense_linearisation(Network* network, int size_output, int activation);
 
 #endif
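For orientation only, a hedged usage sketch of the two prototypes declared above (not code from the commit): `network` and `activation` are assumed to be prepared by the project's other creation helpers, and the layer sizes are placeholders.

```c
// Hedged sketch: append a classifier head using add_dense_linearisation and
// add_dense as declared above. Sizes (256, 10) are illustrative placeholders.
void add_classifier_head(Network* network, int activation) {
    add_dense_linearisation(network, 256, activation);  // flatten previous output into 256 units
    add_dense(network, 10, activation);                 // final dense layer with 10 outputs
}
```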
@@ -13,11 +13,11 @@ typedef struct Kernel_cnn {
 
 typedef struct Kernel_nn {
     int size_input; // Nombre d'éléments en entrée
-    int output_units; // Nombre d'éléments en sortie
-    float* bias; // bias[output_units]
-    float* d_bias; // d_bias[output_units]
-    float** weights; // weight[size_input][output_units]
-    float** d_weights; // d_weights[size_input][output_units]
+    int size_output; // Nombre d'éléments en sortie
+    float* bias; // bias[size_output]
+    float* d_bias; // d_bias[size_output]
+    float** weights; // weight[size_input][size_output]
+    float** d_weights; // d_weights[size_input][size_output]
 } Kernel_nn;
 
 typedef struct Kernel {
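As a reading aid, this is roughly how the renamed fields are consumed: `weights` holds `size_input` rows of `size_output` columns and `bias` holds `size_output` entries. The sketch below is not from the repository; its real forward pass and activation handling live elsewhere.

```c
// Hedged sketch only: plain dense forward pass over the renamed Kernel_nn fields.
// Assumes `input` has nn->size_input values and `output` has nn->size_output values.
void dense_forward_sketch(Kernel_nn* nn, const float* input, float* output) {
    for (int j = 0; j < nn->size_output; j++) {
        float sum = nn->bias[j];
        for (int i = 0; i < nn->size_input; i++) {
            sum += nn->weights[i][j] * input[i];
        }
        output[j] = sum;  // activation intentionally omitted in this sketch
    }
}
```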
@@ -112,20 +112,20 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
     pre_buffer[0] = kernel->activation;
     pre_buffer[1] = kernel->linearisation;
     pre_buffer[2] = nn->size_input;
-    pre_buffer[3] = nn->output_units;
+    pre_buffer[3] = nn->size_output;
     fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
 
     // Écriture du corps
-    float buffer[nn->output_units];
-    for (int i=0; i < nn->output_units; i++) {
+    float buffer[nn->size_output];
+    for (int i=0; i < nn->size_output; i++) {
         bufferAdd(nn->bias[i]);
     }
     fwrite(buffer, sizeof(buffer), 1, ptr);
 
     for (int i=0; i < nn->size_input; i++) {
         indice_buffer = 0;
-        float buffer[nn->output_units];
-        for (int j=0; j < nn->output_units; j++) {
+        float buffer[nn->size_output];
+        for (int j=0; j < nn->size_output; j++) {
             bufferAdd(nn->weights[i][j]);
         }
         fwrite(buffer, sizeof(buffer), 1, ptr);
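In this hunk the dense-layer record is a small integer header followed by the bias vector and then one row of weights per input. A hedged sketch of the resulting byte count (hypothetical helper, not part of the repository, assuming the header holds exactly the four values shown):

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical helper: bytes occupied by one serialized dense-layer record,
// assuming a 4 x uint32_t header, then size_output bias floats, then
// size_input rows of size_output weight floats, as in write_couche above.
size_t nn_record_size(uint32_t size_input, uint32_t size_output) {
    size_t header  = 4 * sizeof(uint32_t);                              // activation, linearisation, size_input, size_output
    size_t bias    = (size_t)size_output * sizeof(float);               // bias[size_output]
    size_t weights = (size_t)size_input * size_output * sizeof(float);  // weights[size_input][size_output]
    return header + bias + weights;
}
```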
@@ -288,15 +288,15 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
     kernel->activation = buffer[0];
     kernel->linearisation = buffer[1];
     kernel->nn->size_input = buffer[2];
-    kernel->nn->output_units = buffer[3];
+    kernel->nn->size_output = buffer[3];
 
     // Lecture du corps
     Kernel_nn* nn = kernel->nn;
     float tmp;
 
-    nn->bias = (float*)nalloc(sizeof(float)*nn->output_units);
-    nn->d_bias = (float*)nalloc(sizeof(float)*nn->output_units);
-    for (int i=0; i < nn->output_units; i++) {
+    nn->bias = (float*)nalloc(sizeof(float)*nn->size_output);
+    nn->d_bias = (float*)nalloc(sizeof(float)*nn->size_output);
+    for (int i=0; i < nn->size_output; i++) {
         fread(&tmp, sizeof(tmp), 1, ptr);
         nn->bias[i] = tmp;
         nn->d_bias[i] = 0.;
@@ -305,9 +305,9 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
     nn->weights = (float**)nalloc(sizeof(float*)*nn->size_input);
     nn->d_weights = (float**)nalloc(sizeof(float*)*nn->size_input);
     for (int i=0; i < nn->size_input; i++) {
-        nn->weights[i] = (float*)nalloc(sizeof(float)*nn->output_units);
-        nn->d_weights[i] = (float*)nalloc(sizeof(float)*nn->output_units);
-        for (int j=0; j < nn->output_units; j++) {
+        nn->weights[i] = (float*)nalloc(sizeof(float)*nn->size_output);
+        nn->d_weights[i] = (float*)nalloc(sizeof(float)*nn->size_output);
+        for (int j=0; j < nn->size_output; j++) {
             fread(&tmp, sizeof(tmp), 1, ptr);
             nn->weights[i][j] = tmp;
             nn->d_weights[i][j] = 0.;
@@ -57,12 +57,12 @@ bool equals_networks(Network* network1, Network* network2) {
         } else if (!network1->kernel[i]->cnn) {
             // Type NN
             checkEquals(kernel[i]->nn->size_input, "kernel[i]->nn->size_input", i);
-            checkEquals(kernel[i]->nn->output_units, "kernel[i]->nn->output_units", i);
-            for (int j=0; j < network1->kernel[i]->nn->output_units; j++) {
+            checkEquals(kernel[i]->nn->size_output, "kernel[i]->nn->size_output", i);
+            for (int j=0; j < network1->kernel[i]->nn->size_output; j++) {
                 checkEquals(kernel[i]->nn->bias[j], "kernel[i]->nn->bias[j]", j);
             }
             for (int j=0; j < network1->kernel[i]->nn->size_input; j++) {
-                for (int k=0; k < network1->kernel[i]->nn->output_units; k++) {
+                for (int k=0; k < network1->kernel[i]->nn->size_output; k++) {
                     checkEquals(kernel[i]->nn->weights[j][k], "kernel[i]->nn->weights[j][k]", k);
                 }
             }
@@ -101,7 +101,7 @@ Network* copy_network(Network* network) {
     int size = network->size;
     // Paramètres des couches NN
     int size_input;
-    int output_units;
+    int size_output;
     // Paramètres des couches CNN
     int rows;
     int k_size;
@@ -138,17 +138,17 @@ Network* copy_network(Network* network) {
             copyVar(kernel[i]->linearisation); // 0
 
             size_input = network->kernel[i]->nn->size_input;
-            output_units = network->kernel[i]->nn->output_units;
+            size_output = network->kernel[i]->nn->size_output;
 
             network_cp->kernel[i]->cnn = NULL;
             network_cp->kernel[i]->nn = (Kernel_nn*)nalloc(sizeof(Kernel_nn));
 
             copyVar(kernel[i]->nn->size_input);
-            copyVar(kernel[i]->nn->output_units);
+            copyVar(kernel[i]->nn->size_output);
 
-            network_cp->kernel[i]->nn->bias = (float*)nalloc(sizeof(float)*output_units);
-            network_cp->kernel[i]->nn->d_bias = (float*)nalloc(sizeof(float)*output_units);
-            for (int j=0; j < output_units; j++) {
+            network_cp->kernel[i]->nn->bias = (float*)nalloc(sizeof(float)*size_output);
+            network_cp->kernel[i]->nn->d_bias = (float*)nalloc(sizeof(float)*size_output);
+            for (int j=0; j < size_output; j++) {
                 copyVar(kernel[i]->nn->bias[j]);
                 network_cp->kernel[i]->nn->d_bias[j] = 0.;
             }
@@ -156,9 +156,9 @@ Network* copy_network(Network* network) {
             network_cp->kernel[i]->nn->weights = (float**)nalloc(sizeof(float*)*size_input);
             network_cp->kernel[i]->nn->d_weights = (float**)nalloc(sizeof(float*)*size_input);
             for (int j=0; j < size_input; j++) {
-                network_cp->kernel[i]->nn->weights[j] = (float*)nalloc(sizeof(float)*output_units);
-                network_cp->kernel[i]->nn->d_weights[j] = (float*)nalloc(sizeof(float)*output_units);
-                for (int k=0; k < output_units; k++) {
+                network_cp->kernel[i]->nn->weights[j] = (float*)nalloc(sizeof(float)*size_output);
+                network_cp->kernel[i]->nn->d_weights[j] = (float*)nalloc(sizeof(float)*size_output);
+                for (int k=0; k < size_output; k++) {
                     copyVar(kernel[i]->nn->weights[j][k]);
                     network_cp->kernel[i]->nn->d_weights[j][k] = 0.;
                 }
@@ -255,7 +255,7 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
     int size = network_src->size;
    // Paramètres des couches NN
    int size_input;
-    int output_units;
+    int size_output;
    // Paramètres des couches CNN
    int rows;
    int k_size;
@@ -268,13 +268,13 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
         if (!network_src->kernel[i]->cnn && network_src->kernel[i]->nn) { // Cas du NN
 
             size_input = network_src->kernel[i]->nn->size_input;
-            output_units = network_src->kernel[i]->nn->output_units;
+            size_output = network_src->kernel[i]->nn->size_output;
 
-            for (int j=0; j < output_units; j++) {
+            for (int j=0; j < size_output; j++) {
                 copyVarParams(kernel[i]->nn->bias[j]);
             }
             for (int j=0; j < size_input; j++) {
-                for (int k=0; k < output_units; k++) {
+                for (int k=0; k < size_output; k++) {
                     copyVarParams(kernel[i]->nn->weights[j][k]);
                 }
             }
@@ -316,7 +316,7 @@ int count_null_weights(Network* network) {
     int size = network->size;
    // Paramètres des couches NN
    int size_input;
-    int output_units;
+    int size_output;
    // Paramètres des couches CNN
    int rows;
    int k_size;
@@ -327,13 +327,13 @@ int count_null_weights(Network* network) {
         if (!network->kernel[i]->cnn && network->kernel[i]->nn) { // Cas du NN
 
             size_input = network->kernel[i]->nn->size_input;
-            output_units = network->kernel[i]->nn->output_units;
+            size_output = network->kernel[i]->nn->size_output;
 
-            for (int j=0; j < output_units; j++) {
+            for (int j=0; j < size_output; j++) {
                 null_bias += fabs(network->kernel[i]->nn->bias[j]) <= epsilon;
             }
             for (int j=0; j < size_input; j++) {
-                for (int k=0; k < output_units; k++) {
+                for (int k=0; k < size_output; k++) {
                     null_weights += fabs(network->kernel[i]->nn->weights[j][k]) <= epsilon;
                 }
             }
@@ -28,7 +28,7 @@ int main() {
         } else if (!kernel->cnn) {
             printf("\n==== Couche %d de type "GREEN"NN"RESET" ====\n", i);
             printf("input: %d\n", kernel->nn->size_input);
-            printf("output: %d\n", kernel->nn->output_units);
+            printf("output: %d\n", kernel->nn->size_output);
         } else {
             printf("\n==== Couche %d de type "BLUE"CNN"RESET" ====\n", i);
             printf("k_size: %d\n", kernel->cnn->k_size);
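For a dense layer, the two printf calls above now report `size_output`; with purely illustrative values (layer index 3, 784 inputs, 10 outputs, colors stripped) the output would look like:

```text
==== Couche 3 de type NN ====
input: 784
output: 10
```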