tipe/src/cnn/neuron_io.c

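// neuron_io.c
// Binary (de)serialization of a Network: write_network()/write_couche() save the
// architecture and parameters to a file, read_network()/read_kernel() load them back.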
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include "../include/colors.h"
#include "../include/utils.h"
#include "include/function.h"
#include "include/struct.h"
#include "include/neuron_io.h"

#define MAGIC_NUMBER 1012
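
// Helper macro: appends `val` at the current position of the local array `buffer`
// and advances `indice_buffer`; both identifiers must exist in the calling scope.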
#define bufferAdd(val) {buffer[indice_buffer] = val; indice_buffer++;}

void write_network(char* filename, Network* network) {
    FILE *ptr;
    int size = network->size;
    int type_couche[size-1];
    int indice_buffer = 0;

    ptr = fopen(filename, "wb");

    // The buffer is laid out as follows:
    // - MAGIC_NUMBER (1)
    // - size (2)
    // - network->initialisation (3)
    // - network->dropout (4)
    // - network->width[i] & network->depth[i] (4+network->size*2)
    // - type_couche[i] (3+network->size*3) - the last layer is excluded
    uint32_t buffer[(network->size)*3+3];

    bufferAdd(MAGIC_NUMBER);
    bufferAdd(size);
    bufferAdd(network->initialisation);
    bufferAdd(network->dropout);

    // Write the header
    for (int i=0; i < size; i++) {
        bufferAdd(network->width[i]);
        bufferAdd(network->depth[i]);
    }

    for (int i=0; i < size-1; i++) {
        if ((!network->kernel[i]->cnn)&&(!network->kernel[i]->nn)) {
            type_couche[i] = 2;
        } else if (!network->kernel[i]->cnn) {
            type_couche[i] = 1;
        } else {
            type_couche[i] = 0;
        }
        bufferAdd(type_couche[i]);
    }
    fwrite(buffer, sizeof(buffer), 1, ptr);

    // Write the pre-body and body of each layer
    for (int i=0; i < size-1; i++) {
        write_couche(network, i, type_couche[i], ptr);
    }
    fclose(ptr);
}

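// Per-layer record written by write_couche():
// - type_couche == 0 (CNN):     pre-body {activation, linearisation, k_size, rows, columns},
//                               then the biases, then the weights, as 32-bit floats
// - type_couche == 1 (NN):      pre-body {activation, linearisation, input_units, output_units},
//                               then the biases, then the weights, as 32-bit floats
// - type_couche == 2 (pooling): pre-body {linearisation, pooling} only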
void write_couche(Network* network, int indice_couche, int type_couche, FILE* ptr) {
    Kernel* kernel = network->kernel[indice_couche];
    int indice_buffer = 0;

    if (type_couche == 0) { // CNN layer
        Kernel_cnn* cnn = kernel->cnn;
        int output_dim = network->width[indice_couche+1];

        // Write the pre-body
        uint32_t pre_buffer[5];
        pre_buffer[0] = kernel->activation;
        pre_buffer[1] = kernel->linearisation;
        pre_buffer[2] = cnn->k_size;
        pre_buffer[3] = cnn->rows;
        pre_buffer[4] = cnn->columns;
        fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);

        // Write the body
        // Split the writes into small buffers to keep some memory free
        for (int i=0; i < cnn->columns; i++) {
            indice_buffer = 0;
            float buffer[output_dim*output_dim];
            for (int j=0; j < output_dim; j++) {
                for (int k=0; k < output_dim; k++) {
                    bufferAdd(cnn->bias[i][j][k]);
                }
            }
            fwrite(buffer, sizeof(buffer), 1, ptr);
        }
        for (int i=0; i < cnn->rows; i++) {
            indice_buffer = 0;
            float buffer[cnn->columns*cnn->k_size*cnn->k_size];
            for (int j=0; j < cnn->columns; j++) {
                for (int k=0; k < cnn->k_size; k++) {
                    for (int l=0; l < cnn->k_size; l++) {
                        bufferAdd(cnn->w[i][j][k][l]);
                    }
                }
            }
            fwrite(buffer, sizeof(buffer), 1, ptr);
        }
    } else if (type_couche == 1) { // Fully connected (NN) layer
        Kernel_nn* nn = kernel->nn;

        // Write the pre-body
        uint32_t pre_buffer[4];
        pre_buffer[0] = kernel->activation;
        pre_buffer[1] = kernel->linearisation;
        pre_buffer[2] = nn->input_units;
        pre_buffer[3] = nn->output_units;
        fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);

        // Write the body
        float buffer[nn->output_units];
        for (int i=0; i < nn->output_units; i++) {
            bufferAdd(nn->bias[i]);
        }
        fwrite(buffer, sizeof(buffer), 1, ptr);

        for (int i=0; i < nn->input_units; i++) {
            indice_buffer = 0;
            float buffer[nn->output_units];
            for (int j=0; j < nn->output_units; j++) {
                bufferAdd(nn->weights[i][j]);
            }
            fwrite(buffer, sizeof(buffer), 1, ptr);
        }
    } else if (type_couche == 2) { // Pooling layer
        uint32_t pre_buffer[2];
        pre_buffer[0] = kernel->linearisation;
        pre_buffer[1] = kernel->pooling;
        fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
    }
}

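/*
 * Round-trip usage sketch (hypothetical caller, not part of this file;
 * "reseau.bin" is an arbitrary example path):
 *     write_network("reseau.bin", network);
 *     Network* network2 = read_network("reseau.bin");
 */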
Network* read_network(char* filename) {
    FILE *ptr;
    Network* network = (Network*)nalloc(sizeof(Network));

    ptr = fopen(filename, "rb");

    uint32_t magic;
    uint32_t size;
    uint32_t initialisation;
    uint32_t dropout;
    uint32_t tmp;

    fread(&magic, sizeof(uint32_t), 1, ptr);
    if (magic != MAGIC_NUMBER) {
        printf("Incorrect magic number!\n");
        exit(1);
    }

    // Read the network constants
    fread(&size, sizeof(uint32_t), 1, ptr);
    network->size = size;
    network->max_size = size;
    fread(&initialisation, sizeof(uint32_t), 1, ptr);
    network->initialisation = initialisation;
    fread(&dropout, sizeof(uint32_t), 1, ptr);
    network->dropout = dropout;

    // Read the input size of the different matrices
    network->width = (int*)nalloc(sizeof(int)*size);
    network->depth = (int*)nalloc(sizeof(int)*size);

    for (int i=0; i < (int)size; i++) {
        fread(&tmp, sizeof(uint32_t), 1, ptr);
        network->width[i] = tmp;
        fread(&tmp, sizeof(uint32_t), 1, ptr);
        network->depth[i] = tmp;
    }

    // Read the type of each layer
    uint32_t type_couche[size-1];

    for (int i=0; i < (int)size-1; i++) {
        fread(&tmp, sizeof(tmp), 1, ptr);
        type_couche[i] = tmp;
    }

    // Read each layer
    network->kernel = (Kernel**)nalloc(sizeof(Kernel*)*(size-1));

    for (int i=0; i < (int)size-1; i++) {
        network->kernel[i] = read_kernel(type_couche[i], network->width[i+1], ptr);
    }

    network->input = (float****)nalloc(sizeof(float***)*size);
    for (int i=0; i < (int)size; i++) { // input[size][couche->depth][couche->dim][couche->dim]
        network->input[i] = (float***)nalloc(sizeof(float**)*network->depth[i]);
        for (int j=0; j < network->depth[i]; j++) {
            network->input[i][j] = (float**)nalloc(sizeof(float*)*network->width[i]);
            for (int k=0; k < network->width[i]; k++) {
                network->input[i][j][k] = (float*)nalloc(sizeof(float)*network->width[i]);
                for (int l=0; l < network->width[i]; l++) {
                    network->input[i][j][k][l] = 0.;
                }
            }
        }
    }

    network->input_z = (float****)nalloc(sizeof(float***)*size);
    for (int i=0; i < (int)size; i++) { // input_z[size][couche->depth][couche->dim][couche->dim]
        network->input_z[i] = (float***)nalloc(sizeof(float**)*network->depth[i]);
        for (int j=0; j < network->depth[i]; j++) {
            network->input_z[i][j] = (float**)nalloc(sizeof(float*)*network->width[i]);
            for (int k=0; k < network->width[i]; k++) {
                network->input_z[i][j][k] = (float*)nalloc(sizeof(float)*network->width[i]);
                for (int l=0; l < network->width[i]; l++) {
                    network->input_z[i][j][k][l] = 0.;
                }
            }
        }
    }

    fclose(ptr);
    return network;
}

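// Reads one layer back from `ptr`: allocates the Kernel (and its cnn/nn sub-structure),
// fills the weights and biases from the file and zero-initialises the d_* gradient
// buffers. `output_dim` is the spatial width of the layer's output (network->width[i+1]).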
Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
    Kernel* kernel = (Kernel*)nalloc(sizeof(Kernel));

    if (type_couche == 0) { // CNN layer
        // Read the "pre-body"
        kernel->cnn = (Kernel_cnn*)nalloc(sizeof(Kernel_cnn));
        kernel->nn = NULL;

        uint32_t buffer[5];
        fread(&buffer, sizeof(buffer), 1, ptr);

        kernel->activation = buffer[0];
        kernel->linearisation = buffer[1];
        kernel->cnn->k_size = buffer[2];
        kernel->cnn->rows = buffer[3];
        kernel->cnn->columns = buffer[4];

        // Read the body
        Kernel_cnn* cnn = kernel->cnn;
        float tmp;

        cnn->bias = (float***)nalloc(sizeof(float**)*cnn->columns);
        cnn->d_bias = (float***)nalloc(sizeof(float**)*cnn->columns);
        for (int i=0; i < cnn->columns; i++) {
            cnn->bias[i] = (float**)nalloc(sizeof(float*)*output_dim);
            cnn->d_bias[i] = (float**)nalloc(sizeof(float*)*output_dim);
            for (int j=0; j < output_dim; j++) {
                cnn->bias[i][j] = (float*)nalloc(sizeof(float)*output_dim);
                cnn->d_bias[i][j] = (float*)nalloc(sizeof(float)*output_dim);
                for (int k=0; k < output_dim; k++) {
                    fread(&tmp, sizeof(tmp), 1, ptr);
                    cnn->bias[i][j][k] = tmp;
                    cnn->d_bias[i][j][k] = 0.;
                }
            }
        }

        cnn->w = (float****)nalloc(sizeof(float***)*cnn->rows);
        cnn->d_w = (float****)nalloc(sizeof(float***)*cnn->rows);
        for (int i=0; i < cnn->rows; i++) {
            cnn->w[i] = (float***)nalloc(sizeof(float**)*cnn->columns);
            cnn->d_w[i] = (float***)nalloc(sizeof(float**)*cnn->columns);
            for (int j=0; j < cnn->columns; j++) {
                cnn->w[i][j] = (float**)nalloc(sizeof(float*)*cnn->k_size);
                cnn->d_w[i][j] = (float**)nalloc(sizeof(float*)*cnn->k_size);
                for (int k=0; k < cnn->k_size; k++) {
                    cnn->w[i][j][k] = (float*)nalloc(sizeof(float)*cnn->k_size);
                    cnn->d_w[i][j][k] = (float*)nalloc(sizeof(float)*cnn->k_size);
                    for (int l=0; l < cnn->k_size; l++) {
                        fread(&tmp, sizeof(tmp), 1, ptr);
                        cnn->w[i][j][k][l] = tmp;
                        cnn->d_w[i][j][k][l] = 0.;
                    }
                }
            }
        }
    } else if (type_couche == 1) { // Fully connected (NN) layer
        // Read the "pre-body"
        kernel->nn = (Kernel_nn*)nalloc(sizeof(Kernel_nn));
        kernel->cnn = NULL;

        uint32_t buffer[4];
        fread(&buffer, sizeof(buffer), 1, ptr);

        kernel->activation = buffer[0];
        kernel->linearisation = buffer[1];
        kernel->nn->input_units = buffer[2];
        kernel->nn->output_units = buffer[3];

        // Read the body
        Kernel_nn* nn = kernel->nn;
        float tmp;

        nn->bias = (float*)nalloc(sizeof(float)*nn->output_units);
        nn->d_bias = (float*)nalloc(sizeof(float)*nn->output_units);
        for (int i=0; i < nn->output_units; i++) {
            fread(&tmp, sizeof(tmp), 1, ptr);
            nn->bias[i] = tmp;
            nn->d_bias[i] = 0.;
        }

        nn->weights = (float**)nalloc(sizeof(float*)*nn->input_units);
        nn->d_weights = (float**)nalloc(sizeof(float*)*nn->input_units);
        for (int i=0; i < nn->input_units; i++) {
            nn->weights[i] = (float*)nalloc(sizeof(float)*nn->output_units);
            nn->d_weights[i] = (float*)nalloc(sizeof(float)*nn->output_units);
            for (int j=0; j < nn->output_units; j++) {
                fread(&tmp, sizeof(tmp), 1, ptr);
                nn->weights[i][j] = tmp;
                nn->d_weights[i][j] = 0.;
            }
        }
    } else if (type_couche == 2) { // Pooling layer
        uint32_t pooling, linearisation;
        fread(&linearisation, sizeof(linearisation), 1, ptr);
        fread(&pooling, sizeof(pooling), 1, ptr);

        kernel->cnn = NULL;
        kernel->nn = NULL;
        kernel->activation = IDENTITY;
        kernel->pooling = pooling;
        kernel->linearisation = linearisation;
    }
    return kernel;
}