Modification in the structure

Julien Chemillier 2022-10-08 14:13:02 +02:00
parent 638cada723
commit e618989632
3 changed files with 31 additions and 1 deletion

View File

@@ -105,26 +105,33 @@ void add_convolution(Network* network, int depth_output, int dim_output, int activation) {
     cnn->columns = depth_output;
     cnn->w = (float****)malloc(sizeof(float***)*depth_input);
     cnn->d_w = (float****)malloc(sizeof(float***)*depth_input);
+    cnn->last_d_w = (float****)malloc(sizeof(float***)*depth_input);
     for (int i=0; i < depth_input; i++) {
         cnn->w[i] = (float***)malloc(sizeof(float**)*depth_output);
         cnn->d_w[i] = (float***)malloc(sizeof(float**)*depth_output);
+        cnn->last_d_w[i] = (float***)malloc(sizeof(float**)*depth_output);
         for (int j=0; j < depth_output; j++) {
             cnn->w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             cnn->d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
+            cnn->last_d_w[i][j] = (float**)malloc(sizeof(float*)*kernel_size);
             for (int k=0; k < kernel_size; k++) {
                 cnn->w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
                 cnn->d_w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
+                cnn->last_d_w[i][j][k] = (float*)malloc(sizeof(float)*kernel_size);
             }
         }
     }
     cnn->bias = (float***)malloc(sizeof(float**)*depth_output);
     cnn->d_bias = (float***)malloc(sizeof(float**)*depth_output);
+    cnn->last_d_bias = (float***)malloc(sizeof(float**)*depth_output);
     for (int i=0; i < depth_output; i++) {
         cnn->bias[i] = (float**)malloc(sizeof(float*)*bias_size);
         cnn->d_bias[i] = (float**)malloc(sizeof(float*)*bias_size);
+        cnn->last_d_bias[i] = (float**)malloc(sizeof(float*)*bias_size);
         for (int j=0; j < bias_size; j++) {
             cnn->bias[i][j] = (float*)malloc(sizeof(float)*bias_size);
             cnn->d_bias[i][j] = (float*)malloc(sizeof(float)*bias_size);
+            cnn->last_d_bias[i][j] = (float*)malloc(sizeof(float)*bias_size);
         }
     }
     create_a_cube_input_layer(network, n, depth_output, bias_size);
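Note: malloc leaves the new last_d_w and last_d_bias buffers uninitialized, and a "previous update" term only makes sense if it starts at zero. A minimal sketch of the extra zeroing this would need, mirroring the allocation loops above (the momentum reading of last_d_* is an assumption, not stated in this commit):

    // Hypothetical zeroing pass for last_d_w, same bounds as the
    // allocation above; assumes last_d_w must be 0.0f before the
    // first training step.
    for (int i=0; i < depth_input; i++) {
        for (int j=0; j < depth_output; j++) {
            for (int k=0; k < kernel_size; k++) {
                for (int l=0; l < kernel_size; l++) {
                    cnn->last_d_w[i][j][k][l] = 0.0f;
                }
            }
        }
    }
    // Alternatively, swapping the innermost malloc for calloc zeroes
    // each row in the same call:
    // cnn->last_d_w[i][j][k] = (float*)calloc(kernel_size, sizeof(float));
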
@@ -155,11 +162,14 @@ void add_dense(Network* network, int output_units, int activation) {
     nn->output_units = output_units;
     nn->bias = (float*)malloc(sizeof(float)*output_units);
     nn->d_bias = (float*)malloc(sizeof(float)*output_units);
+    nn->last_d_bias = (float*)malloc(sizeof(float)*output_units);
     nn->weights = (float**)malloc(sizeof(float*)*input_units);
     nn->d_weights = (float**)malloc(sizeof(float*)*input_units);
+    nn->last_d_weights = (float**)malloc(sizeof(float*)*input_units);
     for (int i=0; i < input_units; i++) {
         nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
+        nn->last_d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
     create_a_line_input_layer(network, n, output_units);
     /* Not currently used
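The commit only allocates last_d_bias and last_d_weights; it does not show how they are consumed. If they cache the previous update for classical momentum (v = momentum*v + learning_rate*g; w -= v), a dense-layer step could look like the sketch below. This is an assumption: the update rule is not part of this commit, and learning_rate and momentum are hypothetical hyperparameters.

    // Hypothetical SGD-with-momentum step for a Kernel_nn; nn,
    // input_units and output_units as in add_dense above.
    for (int i=0; i < input_units; i++) {
        for (int j=0; j < output_units; j++) {
            float update = momentum * nn->last_d_weights[i][j]
                         + learning_rate * nn->d_weights[i][j];
            nn->weights[i][j] -= update;
            nn->last_d_weights[i][j] = update;
        }
    }
    for (int j=0; j < output_units; j++) {
        float update = momentum * nn->last_d_bias[j]
                     + learning_rate * nn->d_bias[j];
        nn->bias[j] -= update;
        nn->last_d_bias[j] = update;
    }
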
@@ -190,11 +200,14 @@ void add_dense_linearisation(Network* network, int output_units, int activation)
     nn->bias = (float*)malloc(sizeof(float)*output_units);
     nn->d_bias = (float*)malloc(sizeof(float)*output_units);
+    nn->last_d_bias = (float*)malloc(sizeof(float)*output_units);
     nn->weights = (float**)malloc(sizeof(float*)*input_units);
     nn->d_weights = (float**)malloc(sizeof(float*)*input_units);
+    nn->last_d_weights = (float**)malloc(sizeof(float*)*input_units);
     for (int i=0; i < input_units; i++) {
         nn->weights[i] = (float*)malloc(sizeof(float)*output_units);
         nn->d_weights[i] = (float*)malloc(sizeof(float)*output_units);
+        nn->last_d_weights[i] = (float*)malloc(sizeof(float)*output_units);
     }
     /* Not currently used
     initialisation_1d_matrix(network->initialisation, nn->bias, output_units, output_units+input_units);
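A side note on the allocation pattern used throughout: each weights/d_weights/last_d_weights matrix costs input_units+1 separate mallocs. A single flat block with manual indexing is a common alternative; a sketch, not how this repository does it:

    // Hypothetical flat layout: one malloc per matrix instead of
    // input_units+1, with element (i, j) at index i*output_units + j.
    float* weights_flat = (float*)malloc(sizeof(float)*input_units*output_units);
    for (int i=0; i < input_units; i++) {
        for (int j=0; j < output_units; j++) {
            weights_flat[i*output_units + j] = 0.0f;
        }
    }
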

View File

@@ -33,27 +33,34 @@ void free_convolution(Network* network, int pos) {
         for (int j=0; j < bias_size; j++) {
             free(k_pos->bias[i][j]);
             free(k_pos->d_bias[i][j]);
+            free(k_pos->last_d_bias[i][j]);
         }
         free(k_pos->bias[i]);
         free(k_pos->d_bias[i]);
+        free(k_pos->last_d_bias[i]);
     }
     free(k_pos->bias);
     free(k_pos->d_bias);
+    free(k_pos->last_d_bias);
     for (int i=0; i < r; i++) {
         for (int j=0; j < c; j++) {
             for (int k=0; k < k_size; k++) {
                 free(k_pos->w[i][j][k]);
                 free(k_pos->d_w[i][j][k]);
+                free(k_pos->last_d_w[i][j][k]);
             }
             free(k_pos->w[i][j]);
             free(k_pos->d_w[i][j]);
+            free(k_pos->last_d_w[i][j]);
         }
         free(k_pos->w[i]);
         free(k_pos->d_w[i]);
+        free(k_pos->last_d_w[i]);
     }
     free(k_pos->w);
     free(k_pos->d_w);
+    free(k_pos->last_d_w);
     free(k_pos);
 }
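The three free cascades above are identical except for the field name. A small helper would keep the triplication out of free_convolution; a sketch, where free_tensor_4d is a hypothetical name, not a function in this repository:

    // Hypothetical helper: frees a tensor allocated as
    // t[rows][cols][k_size][k_size] by the nested mallocs
    // in add_convolution.
    void free_tensor_4d(float**** t, int rows, int cols, int k_size) {
        for (int i=0; i < rows; i++) {
            for (int j=0; j < cols; j++) {
                for (int k=0; k < k_size; k++) {
                    free(t[i][j][k]);
                }
                free(t[i][j]);
            }
            free(t[i]);
        }
        free(t);
    }
    // Usage in free_convolution would then be:
    // free_tensor_4d(k_pos->w, r, c, k_size);
    // free_tensor_4d(k_pos->d_w, r, c, k_size);
    // free_tensor_4d(k_pos->last_d_w, r, c, k_size);
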
@@ -65,12 +72,15 @@ void free_dense(Network* network, int pos) {
     for (int i=0; i < dim; i++) {
         free(k_pos->weights[i]);
         free(k_pos->d_weights[i]);
+        free(k_pos->last_d_weights[i]);
     }
     free(k_pos->weights);
     free(k_pos->d_weights);
+    free(k_pos->last_d_weights);
     free(k_pos->bias);
     free(k_pos->d_bias);
+    free(k_pos->last_d_bias);
     free(k_pos);
 }
@@ -82,12 +92,15 @@ void free_dense_linearisation(Network* network, int pos) {
     for (int i=0; i < dim; i++) {
         free(k_pos->weights[i]);
         free(k_pos->d_weights[i]);
+        free(k_pos->last_d_weights[i]);
     }
     free(k_pos->weights);
     free(k_pos->d_weights);
+    free(k_pos->last_d_weights);
     free(k_pos->bias);
     free(k_pos->d_bias);
+    free(k_pos->last_d_bias);
     free(k_pos);
 }

View File

@@ -7,8 +7,10 @@ typedef struct Kernel_cnn {
     int columns; // Depth of the output
     float*** bias; // bias[columns][k_size][k_size]
     float*** d_bias; // d_bias[columns][k_size][k_size]
+    float*** last_d_bias; // last_d_bias[columns][k_size][k_size]
     float**** w; // w[rows][columns][k_size][k_size]
-    float**** d_w; // dw[rows][columns][k_size][k_size]
+    float**** d_w; // d_w[rows][columns][k_size][k_size]
+    float**** last_d_w; // last_d_w[rows][columns][k_size][k_size]
 } Kernel_cnn;

 typedef struct Kernel_nn {
@@ -16,8 +18,10 @@ typedef struct Kernel_nn {
     int output_units; // Number of output elements
     float* bias; // bias[output_units]
     float* d_bias; // d_bias[output_units]
+    float* last_d_bias; // last_d_bias[output_units]
     float** weights; // weights[input_units][output_units]
     float** d_weights; // d_weights[input_units][output_units]
+    float** last_d_weights; // last_d_weights[input_units][output_units]
 } Kernel_nn;

 typedef struct Kernel {
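With these definitions each kernel now carries three tensors of identical shape: the parameter, its gradient, and the previous update, so weight-related memory triples. Assuming last_d_* feeds a momentum update (not shown in this commit, and with rows, columns, k_size, learning_rate and momentum taken from context), the per-element rule for the convolution weights would mirror the dense case:

    // Hypothetical momentum rule over w[rows][columns][k_size][k_size].
    for (int i=0; i < rows; i++) {
        for (int j=0; j < columns; j++) {
            for (int k=0; k < k_size; k++) {
                for (int l=0; l < k_size; l++) {
                    float update = momentum * cnn->last_d_w[i][j][k][l]
                                 + learning_rate * cnn->d_w[i][j][k][l];
                    cnn->w[i][j][k][l] -= update;
                    cnn->last_d_w[i][j][k][l] = update;
                }
            }
        }
    }
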