Mirror of https://github.com/augustin64/projet-tipe, synced 2025-01-24 07:36:24 +01:00
Merge branch 'main' of https://github.com/julienChemillier/TIPE
Commit 4e8f440db7
Makefile: 19 lines changed
@@ -3,7 +3,7 @@ SRCDIR := ./src
 CACHE_DIR := ./cache
 NVCC := nvcc
 
-NVCC_INSTALLED := $(shell command -v nvcc 2> /dev/null)
+NVCC_INSTALLED := $(shell command -v $(NVCC) 2> /dev/null)
 
 MNIST_SRCDIR := $(SRCDIR)/mnist
 CNN_SRCDIR := $(SRCDIR)/cnn

@@ -68,7 +68,7 @@ cnn: $(BUILDDIR)/cnn-main;
 $(BUILDDIR)/cnn-main: $(CNN_SRCDIR)/main.c $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
 	$(CC) $^ -o $@ $(CFLAGS)
 
-$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.o $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_cuda_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
+$(BUILDDIR)/cnn-main-cuda: $(BUILDDIR)/cnn_main.o $(BUILDDIR)/cnn_train.o $(BUILDDIR)/cnn_cnn.o $(BUILDDIR)/cnn_creation.o $(BUILDDIR)/cnn_initialisation.o $(BUILDDIR)/cnn_make.o $(BUILDDIR)/cnn_neuron_io.o $(BUILDDIR)/cnn_function.o $(BUILDDIR)/cnn_utils.o $(BUILDDIR)/cnn_free.o $(BUILDDIR)/cnn_cuda_convolution.o $(BUILDDIR)/cnn_backpropagation.o $(BUILDDIR)/cuda_utils.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
 	$(NVCC) $(NVCCFLAGS) $^ -o $@
 
 $(BUILDDIR)/cnn_%.o: $(CNN_SRCDIR)/%.c $(CNN_SRCDIR)/include/%.h

@@ -76,7 +76,7 @@ $(BUILDDIR)/cnn_%.o: $(CNN_SRCDIR)/%.c $(CNN_SRCDIR)/include/%.h
 
 $(BUILDDIR)/cnn_cuda_%.o: $(CNN_SRCDIR)/%.cu $(CNN_SRCDIR)/include/%.h
 ifndef NVCC_INSTALLED
-	@echo "nvcc not found, skipping"
+	@echo "$(NVCC) not found, skipping"
 else
 	$(NVCC) $(NVCCFLAGS) -c $< -o $@
 endif

@@ -86,12 +86,19 @@ endif
 $(BUILDDIR)/%.o: $(SRCDIR)/%.c $(SRCDIR)/include/%.h
 	$(CC) -c $< -o $@ $(CFLAGS)
 
+$(BUILDDIR)/cuda_%.o: $(SRCDIR)/%.cu $(SRCDIR)/include/%.h
+ifndef NVCC_INSTALLED
+	@echo "$(NVCC) not found, skipping"
+else
+	$(NVCC) $(NVCCFLAGS) -c $< -o $@
+endif
+
 #
 # Tests
 #
 run-tests: build-tests
-	$(foreach file, $(wildcard $(BUILDDIR)/test-*), $(file);)
 	$(foreach file, $(wildcard $(TEST_SRCDIR)/*.sh), $(file);)
+	@echo "$$(for file in build/test-*; do echo -e \\033[33m#####\\033[0m $$file \\033[33m#####\\033[0m; $$file; done)"
 
 build-tests: prepare-tests $(TESTS_OBJ) $(BUILDDIR)/test-cnn_matrix_multiplication $(BUILDDIR)/test-cnn_convolution
 

@@ -107,9 +114,9 @@ build/test-cnn_%: test/cnn_%.c $(CNN_OBJ) $(BUILDDIR)/colors.o $(BUILDDIR)/mnist
 build/test-mnist_%: test/mnist_%.c $(MNIST_OBJ) $(BUILDDIR)/colors.o
 	$(CC) $^ -o $@ $(CFLAGS)
 
-$(BUILDDIR)/test-cnn_%: test/cnn_%.cu $(BUILDDIR)/cnn_cuda_%.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
+$(BUILDDIR)/test-cnn_%: test/cnn_%.cu $(BUILDDIR)/cnn_cuda_%.o $(BUILDDIR)/cuda_utils.o $(BUILDDIR)/colors.o $(BUILDDIR)/mnist.o
 ifndef NVCC_INSTALLED
-	@echo "nvcc not found, skipping"
+	@echo "$(NVCC) not found, skipping"
 else
 	$(NVCC) $(NVCCFLAGS) $^ -o $@
 endif
@@ -1,26 +1,44 @@
-/* This file is a copy of src/cnn/convolution.cu */
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdbool.h>
 
 #include "include/struct.h"
+#ifdef __CUDACC__
+#include "../include/utils.h"
+#else
+bool check_cuda_compatibility() {
+#ifdef __CUDACC__
+    int nDevices;
+    cudaDeviceProp prop;
+
+    cudaGetDeviceCount(&nDevices);
+    if (nDevices == 0) {
+        printf("Pas d'utilisation du GPU\n\n");
+        return false;
+    }
+
+    printf("GPUs disponibles:\n");
+
+    for (int i=0; i < nDevices; i++) {
+        cudaGetDeviceProperties(&prop, i);
+        printf(" - %s\n", prop.name);
+    }
+
+    cudaGetDeviceProperties(&prop, 0);
+    printf("Utilisation du GPU: " BLUE "%s" RESET " (Compute capability: %d.%d)\n\n", prop.name, prop.major, prop.minor);
+    return true;
+#else
+    printf("Pas d'utilisation du GPU\n\n");
+    return false;
+#endif
+}
+#endif
 
 #define BLOCKSIZE_x 16
 #define BLOCKSIZE_y 8
 #define BLOCKSIZE_z 8
 
-
-#ifdef __CUDACC__
-/* CUDA memcheck */
-#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
-inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
-    if (code != cudaSuccess) {
-        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
-        if (abort) exit(code);
-    }
-}
-#endif
 
 void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim) {
     // c'est le kernel de input
     // input[kernel->rows][kernel_k_size + output_dim-1][kernel_k_size + output_dim-1]

@@ -45,9 +63,6 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
 }
 
 #ifdef __CUDACC__
-int i_div_up(int a, int b) { // Partie entière supérieure de a/b
-    return ((a % b) != 0) ? (a / b + 1) : (a / b);
-}
 
 __global__ void make_convolution_kernel(int k_size, int columns, int rows, float*** bias, size_t pitch_bias, float**** w, size_t pitch_w, float*** input, size_t pitch_input, float*** output, size_t pitch_output, int output_dim) {
     // Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu

@@ -3,21 +3,41 @@
 #include <stdbool.h>
 
 #include "include/struct.h"
+#ifdef __CUDACC__
+#include "../include/utils.h"
+#else
+bool check_cuda_compatibility() {
+#ifdef __CUDACC__
+    int nDevices;
+    cudaDeviceProp prop;
+
+    cudaGetDeviceCount(&nDevices);
+    if (nDevices == 0) {
+        printf("Pas d'utilisation du GPU\n\n");
+        return false;
+    }
+
+    printf("GPUs disponibles:\n");
+
+    for (int i=0; i < nDevices; i++) {
+        cudaGetDeviceProperties(&prop, i);
+        printf(" - %s\n", prop.name);
+    }
+
+    cudaGetDeviceProperties(&prop, 0);
+    printf("Utilisation du GPU: " BLUE "%s" RESET " (Compute capability: %d.%d)\n\n", prop.name, prop.major, prop.minor);
+    return true;
+#else
+    printf("Pas d'utilisation du GPU\n\n");
+    return false;
+#endif
+}
+#endif
 
 #define BLOCKSIZE_x 16
 #define BLOCKSIZE_y 8
 #define BLOCKSIZE_z 8
 
-#ifdef __CUDACC__
-/* CUDA memcheck */
-#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
-inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
-    if (code != cudaSuccess) {
-        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
-        if (abort) exit(code);
-    }
-}
-#endif
 
 void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim) {
     // c'est le kernel de input

@@ -43,9 +63,6 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
 }
 
 #ifdef __CUDACC__
-int i_div_up(int a, int b) { // Partie entière supérieure de a/b
-    return ((a % b) != 0) ? (a / b + 1) : (a / b);
-}
 
 __global__ void make_convolution_kernel(int k_size, int columns, int rows, float*** bias, size_t pitch_bias, float**** w, size_t pitch_w, float*** input, size_t pitch_input, float*** output, size_t pitch_output, int output_dim) {
     // Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu

@@ -112,10 +112,10 @@ ptr get_function_activation(int activation) {
     } else if (activation == -SIGMOID) {
         return &sigmoid_derivative;
     } else if (activation == SOFTMAX) {
-        printf("Erreur, impossible de renvoyer la fonction softmax");
+        printf("Erreur, impossible de renvoyer la fonction softmax\n");
         return NULL;
     } else if (activation == -SOFTMAX) {
-        printf("Erreur, impossible de renvoyer la dérivée de la fonction softmax");
+        printf("Erreur, impossible de renvoyer la dérivée de la fonction softmax\n");
         return NULL;
     } else if (activation == TANH) {
         return &tanh_;

@@ -6,11 +6,6 @@
 void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim);
 
 #ifdef __CUDACC__
-/*
-* Partie entière supérieure de a/b
-*/
-int i_div_up(int a, int b);
-
 /*
 * Kernel de la convolution sur carte graphique
 */

@@ -7,10 +7,6 @@
 
 
 #ifdef __CUDACC__
-/*
-* Partie entière supérieure de a/b
-*/
-int i_div_up(int a, int b);
 
 /*
 * Fonction exécutée par chaque thread lancé dans `matrix_multiplication_device`

@@ -23,11 +19,6 @@ __global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int n, int p,
 void matrix_multiplication_device(float** m1, float** m2, float** result, int n, int p, int q);
 #endif
 
-/*
-* Vérification de la compatibilité CUDA
-*/
-bool check_cuda_compatibility();
-
 /*
 * Multiplication naïve de matrices sur le CPU (1 seul coeur)
 */

@@ -2,27 +2,13 @@
 #include <stdio.h>
 #include <stdbool.h>
 
+#include "../include/colors.h"
+#include "../include/utils.h"
+
 #define BLOCKSIZE_x 16
 #define BLOCKSIZE_y 16
 
 #ifdef __CUDACC__
-/* CUDA memcheck */
-#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
-inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
-    if (code != cudaSuccess) {
-        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
-        if (abort) exit(code);
-    }
-}
-#endif
-
-
-#ifdef __CUDACC__
-int i_div_up(int a, int b) { // Partie entière supérieure de a/b
-    return ((a % b) != 0) ? (a / b + 1) : (a / b);
-}
-
-
 __global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
     // Chaque thread calcule toutes les multiplications utilisant l'élément Nd[tx][ty]
     int tx = (blockIdx.x*blockDim.x) + threadIdx.x; // Indice de colonne

@@ -90,34 +76,6 @@ void matrix_multiplication_device(float** m1, float** m2, float** result, int n,
 #endif
 
 
-bool check_cuda_compatibility() {
-#ifdef __CUDACC__
-    int nDevices;
-    cudaDeviceProp prop;
-
-    cudaGetDeviceCount(&nDevices);
-    if (nDevices == 0) {
-        printf("Pas d'utilisation du GPU\n\n");
-        return false;
-    }
-
-    printf("GPUs disponibles:\n");
-
-    for (int i=0; i < nDevices; i++) {
-        cudaGetDeviceProperties(&prop, i);
-        printf(" - %s\n", prop.name);
-    }
-
-    cudaGetDeviceProperties(&prop, 0);
-    printf("Utilisation du GPU: %s (Compute capability: %d.%d)\n\n", prop.name, prop.major, prop.minor);
-    return true;
-#else
-    printf("Pas d'utilisation du GPU\n\n");
-    return false;
-#endif
-}
-
-
 void matrix_multiplication_host(float** m1, float** m2, float** result, int n, int p, int q) {
     for (int i=0; i < n; i++) {
         for (int j=0; j < q; j++) {

@@ -59,7 +59,7 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
     int indice_buffer = 0;
     if (type_couche == 0) { // Cas du CNN
         Kernel_cnn* cnn = kernel->cnn;
-        int output_dim = network->width[indice_couche];
+        int output_dim = network->width[indice_couche+1];
 
         // Écriture du pré-corps
         uint32_t pre_buffer[4];

@@ -75,7 +75,6 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
         for (int i=0; i < cnn->columns; i++) {
             for (int j=0; j < output_dim; j++) {
                 for (int k=0; k < output_dim; k++) {
-                    printf("%f\n", cnn->bias[i][j][k]);
                     bufferAdd(cnn->bias[i][j][k]);
                 }
             }

@@ -169,7 +168,7 @@ Network* read_network(char* filename) {
     network->kernel = (Kernel**)malloc(sizeof(Kernel*)*size);
 
     for (int i=0; i < (int)size; i++) {
-        network->kernel[i] = read_kernel(type_couche[i], network->width[i], ptr);
+        network->kernel[i] = read_kernel(type_couche[i], network->width[i+1], ptr);
     }
 
     network->input = (float****)malloc(sizeof(float***)*size);

@@ -51,7 +51,7 @@ bool equals_networks(Network* network1, Network* network2) {
             }
         } else {
             // Type CNN
-            output_dim = network1->width[i];
+            output_dim = network1->width[i+1];
             checkEquals(kernel[i]->cnn->k_size, "kernel[i]->k_size", i);
             checkEquals(kernel[i]->cnn->rows, "kernel[i]->rows", i);
             checkEquals(kernel[i]->cnn->columns, "kernel[i]->columns", i);

@@ -152,7 +152,7 @@ Network* copy_network(Network* network) {
             rows = network->kernel[i]->cnn->rows;
             k_size = network->kernel[i]->cnn->k_size;
             columns = network->kernel[i]->cnn->columns;
-            output_dim = network->width[i];
+            output_dim = network->width[i+1];
 
 
             network_cp->kernel[i]->nn = NULL;
src/include/utils.h: new file, 25 lines

@@ -0,0 +1,25 @@
+#ifndef DEF_UTILS_CU_H
+#define DEF_UTILS_CU_H
+
+#ifdef __CUDACC__
+/* CUDA memcheck */
+#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
+inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
+    if (code != cudaSuccess) {
+        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
+        if (abort) exit(code);
+    }
+}
+#endif
+
+/*
+* Partie entière supérieure de a/b
+*/
+int i_div_up(int a, int b);
+
+/*
+* Vérification de la compatibilité CUDA
+*/
+bool check_cuda_compatibility();
+
+#endif
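The new header above centralises the GPU helpers (gpuErrchk, i_div_up, check_cuda_compatibility) that were previously duplicated in the individual .cu files. As a minimal usage sketch, not part of this commit (the kernel, the buffer size and the include path are illustrative assumptions), the three helpers combine as follows in a file compiled with nvcc:

#include <stdio.h>
#include <stdlib.h>
#include "include/utils.h"  // path as seen from src/; adjust from other directories

// Illustrative kernel: fills a device buffer with a constant value.
__global__ void fill_kernel(float* data, int n, float value) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        data[idx] = value;
    }
}

int main() {
    if (!check_cuda_compatibility()) {  // no usable GPU: skip the GPU path
        return 0;
    }

    const int n = 1000;
    float* d_data;
    gpuErrchk( cudaMalloc((void**)&d_data, n * sizeof(float)) );

    // i_div_up rounds the grid size up so that every element is covered.
    dim3 block(256);
    dim3 grid(i_div_up(n, block.x));
    fill_kernel<<<grid, block>>>(d_data, n, 1.0f);
    gpuErrchk( cudaPeekAtLastError() );    // launch errors
    gpuErrchk( cudaDeviceSynchronize() );  // asynchronous execution errors

    gpuErrchk( cudaFree(d_data) );
    return 0;
}

The BLOCKSIZE_x/y/z constants kept in the convolution and matrix multiplication sources presumably play the role of the block dimensions in such launches.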
src/utils.cu: new file, 37 lines

@@ -0,0 +1,37 @@
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "include/utils.h"
+#include "include/colors.h"
+
+
+int i_div_up(int a, int b) { // Partie entière supérieure de a/b
+    return ((a % b) != 0) ? (a / b + 1) : (a / b);
+}
+
+bool check_cuda_compatibility() {
+#ifdef __CUDACC__
+    int nDevices;
+    cudaDeviceProp prop;
+
+    cudaGetDeviceCount(&nDevices);
+    if (nDevices == 0) {
+        printf("Pas d'utilisation du GPU\n\n");
+        return false;
+    }
+
+    printf("GPUs disponibles:\n");
+
+    for (int i=0; i < nDevices; i++) {
+        cudaGetDeviceProperties(&prop, i);
+        printf(" - %s\n", prop.name);
+    }
+
+    cudaGetDeviceProperties(&prop, 0);
+    printf("Utilisation du GPU: " BLUE "%s" RESET " (Compute capability: %d.%d)\n\n", prop.name, prop.major, prop.minor);
+    return true;
+#else
+    printf("Pas d'utilisation du GPU\n\n");
+    return false;
+#endif
+}
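check_cuda_compatibility() also gives callers a runtime switch between the GPU and CPU code paths that coexist in this project. The sketch below is a hypothetical dispatch wrapper, not present in the repository; it is assumed to live in src/cnn/ so the include paths follow the pattern used above, and matrix_multiplication_device / matrix_multiplication_host are the functions declared in the matrix multiplication header shown earlier:

#include "../include/utils.h"               // check_cuda_compatibility
#include "include/matrix_multiplication.h"  // *_device and *_host declarations

// Illustrative only: use the GPU implementation when a device is available,
// otherwise fall back to the single-core CPU version.
void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q) {
#ifdef __CUDACC__
    if (check_cuda_compatibility()) {
        matrix_multiplication_device(m1, m2, result, n, p, q);
        return;
    }
#endif
    matrix_multiplication_host(m1, m2, result, n, p, q);
}

The #ifdef __CUDACC__ guard mirrors the one in that header: matrix_multiplication_device is only declared when the file is compiled with nvcc, and check_cuda_compatibility() itself returns false in a build without CUDA support.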
@@ -7,6 +7,8 @@
 
 #include "../src/cnn/include/convolution.h"
 #include "../src/cnn/include/struct.h"
+#include "../src/include/colors.h"
+#include "../src/include/utils.h"
 
 
 float random_float(float low, float high) {

@@ -75,13 +77,12 @@ void free_matrix(float*** matrix, int n, int p) {
     free(matrix);
 }
 
-
 bool check_matrices_equality(float*** m1, float*** m2, int n, int p, int q, int acceptation) {
     for (int i=0; i < n; i++) {
         for (int j=0; j < p; j++) {
             for (int k=0; k < q; k++) {
                 if (fabs(m1[i][j][k] - m2[i][j][k]) > 0.01*acceptation) {
-                    printf("diff %d %d %d: %f val: %f et %f\n", i, j, k, fabs(m1[i][j][k] - m2[i][j][k]), m1[i][j][k], m2[i][j][k]);
+                    printf(RED "diff %d %d %d: %f val: %f et %f\n" RESET, i, j, k, fabs(m1[i][j][k] - m2[i][j][k]), m1[i][j][k], m2[i][j][k]);
                     return false;
                 }
             }

@@ -144,7 +145,7 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
     if (!check_matrices_equality(output_gpu, output_cpu, kernel->columns, output_dim, output_dim, kernel->k_size)) {// TODO: change acceptation
         exit(1);
     }
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     free_matrix(kernel->bias, kernel->columns, output_dim);
     free_matrix(kernel->d_bias, kernel->columns, output_dim);

@@ -163,11 +164,19 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
 
 
 int main() {
+    printf("Checking CUDA compatibility.\n");
+    bool cuda_compatible = check_cuda_compatibility();
+    if (!cuda_compatible) {
+        printf(RED "CUDA not compatible, skipping tests.\n" RESET);
+        return 0;
+    }
+    printf(GREEN "OK\n" RESET);
+
     srand(time(NULL));
 
     run_convolution_test(20, 15, 30, 40);
     run_convolution_test(30, 25, 40, 50);
-    run_convolution_test(200, 10, 40, 50);
+    run_convolution_test(250, 200, 3, 3);
 
     return 0;
 }

@@ -5,6 +5,8 @@
 #include <time.h>
 
 #include "../src/cnn/include/matrix_multiplication.h"
+#include "../src/include/colors.h"
+#include "../src/include/utils.h"
 
 
 float random_float(float low, float high) {

@@ -76,30 +78,27 @@ void run_matrices_test(int n, int p, int q) {
     float** result_gpu = create_empty_matrix(n, q);
     float** result_cpu = create_empty_matrix(n, q);
 
-    printf("(%d,%d)x(%d,%d) Computing on GPU.\n", n, p, p, q);
+    printf("(%d,%d)x(%d,%d) Data generation complete.\n", n, p, p, q);
     start = clock();
     matrix_multiplication_device(matrix1, matrix2, result_gpu, n, p, q);
     end = clock();
 
     cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
     printf("(%d,%d)x(%d,%d) Time used for GPU: %lf seconds\n", n, p, p, q, cpu_time_used);
-    printf("OK\n");
 
-    printf("(%d,%d)x(%d,%d) Computing on CPU.\n", n, p, p, q);
     start = clock();
     matrix_multiplication_host(matrix1, matrix2, result_cpu, n, p, q);
     end = clock();
 
     gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
     printf("(%d,%d)x(%d,%d) Time used for CPU: %lf seconds\n", n, p, p, q, gpu_time_used);
-    printf("OK\n");
 
     // Vérification de l'égalité des matrices
     printf("(%d,%d)x(%d,%d) Checking equality.\n", n, p, p, q);
     if (!check_matrices_equality(result_gpu, result_cpu, n, q, p)) {
         exit(1);
     }
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     // On libère l'espace mémoire alloué
     for (int i=0; i < n; i++) {

@@ -128,10 +127,10 @@ int main() {
     printf("Checking CUDA compatibility.\n");
     bool cuda_compatible = check_cuda_compatibility();
     if (!cuda_compatible) {
-        printf("CUDA not compatible, skipping tests.\n");
+        printf(RED "CUDA not compatible, skipping tests.\n" RESET);
         return 0;
     }
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     srand(time(NULL));
     run_matrices_test(200, 1000, 200);

@@ -7,27 +7,28 @@
 #include "../src/cnn/include/neuron_io.h"
 #include "../src/cnn/include/creation.h"
 #include "../src/cnn/include/utils.h"
+#include "../src/include/colors.h"
 
 
 int main() {
     printf("Création du réseau\n");
     Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     printf("Écriture du réseau\n");
-    write_network(".test-cache/cnn_neuron_io.bin", network);
-    printf("OK\n");
+    write_network((char*)".test-cache/cnn_neuron_io.bin", network);
+    printf(GREEN "OK\n" RESET);
 
     printf("Vérification de l'accès en lecture\n");
-    Network* network2 = read_network(".test-cache/cnn_neuron_io.bin");
-    printf("OK\n");
+    Network* network2 = read_network((char*)".test-cache/cnn_neuron_io.bin");
+    printf(GREEN "OK\n" RESET);
 
     printf("Vérification de l'égalité des réseaux\n");
     if (! equals_networks(network, network2)) {
-        printf_error("Les deux réseaux obtenus ne sont pas égaux.\n");
+        printf_error(RED "Les deux réseaux obtenus ne sont pas égaux.\n" RESET);
         exit(1);
     }
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     return 0;
 }

@@ -4,22 +4,23 @@
 #include "../src/include/colors.h"
 #include "../src/cnn/include/creation.h"
 #include "../src/cnn/include/utils.h"
+#include "../src/include/colors.h"
 
 int main() {
     printf("Création du réseau\n");
     Network* network = create_network_lenet5(0, 0, 3, 2, 32, 1);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     printf("Copie du réseau\n");
     Network* network_cp = copy_network(network);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     printf("Vérification de l'égalité des réseaux\n");
     if (! equals_networks(network, network_cp)) {
-        printf_error("Les deux réseaux obtenus ne sont pas égaux.\n");
+        printf_error(RED "Les deux réseaux obtenus ne sont pas égaux.\n" RESET);
         exit(1);
     }
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     return 0;
 }

@@ -3,6 +3,7 @@
 #include <inttypes.h>
 
 #include "../src/mnist/include/mnist.h"
+#include "../src/include/colors.h"
 
 
 void read_test(int nb_images, int width, int height, int*** images, unsigned int* labels) {

@@ -10,7 +11,7 @@ void read_test(int nb_images, int width, int height, int*** images, unsigned int
     for (int i=0; i < nb_images; i++) {
         (void)labels[i];
     }
-    printf("\tOK\n");
+    printf(GREEN "\tOK\n" RESET);
     printf("\tLecture des images\n");
     for (int i=0; i < nb_images; i++) {
         for (int j=0; j < height; j++) {

@@ -19,12 +20,12 @@ void read_test(int nb_images, int width, int height, int*** images, unsigned int
             }
         }
     }
-    printf("\tOK\n");
+    printf(GREEN "\tOK\n" RESET);
 }
 
 int main() {
-    char* image_file = "data/mnist/t10k-images-idx3-ubyte";
-    char* labels_file = "data/mnist/t10k-labels-idx1-ubyte";
+    char* image_file = (char*)"data/mnist/t10k-images-idx3-ubyte";
+    char* labels_file = (char*)"data/mnist/t10k-labels-idx1-ubyte";
     printf("Chargement des paramètres\n");
 
     int* parameters = read_mnist_images_parameters(image_file);

@@ -32,21 +33,21 @@ int main() {
     int height = parameters[1];
     int width = parameters[2];
 
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
     printf("Chargement des images\n");
 
     int*** images = read_mnist_images(image_file);
 
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
     printf("Chargement des labels\n");
 
     unsigned int* labels = read_mnist_labels(labels_file);
 
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
     printf("Vérification de l'accès en lecture\n");
 
     read_test(nb_images, width, height, images, labels);
 
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
     return 0;
 }

@@ -5,17 +5,18 @@
 
 #include "../src/mnist/include/neural_network.h"
 #include "../src/mnist/include/neuron_io.h"
+#include "../src/include/colors.h"
 
 int main() {
     printf("Création du réseau\n");
     Network* network = (Network*)malloc(sizeof(Network));
     int tab[5] = {30, 25, 20, 15, 10};
     network_creation(network, tab, 5);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     printf("Initialisation du réseau\n");
     network_initialisation(network);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     deletion_of_network(network);
     return 0;

@@ -6,6 +6,7 @@
 
 #include "../src/mnist/include/neuron_io.h"
 #include "../src/mnist/include/neural_network.h"
+#include "../src/include/colors.h"
 
 
 Neuron* creer_neuron(int nb_sortants) {

@@ -63,20 +64,20 @@ Network* create_network(int nb_layers, int nb_max_neurons, int nb_min_neurons) {
 int main() {
     printf("Création du réseau\n");
     Network* network = create_network(5, 300, 10);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     printf("Écriture du réseau\n");
-    write_network(".test-cache/neuron_io.bin", network);
-    printf("OK\n");
+    write_network((char*)".test-cache/neuron_io.bin", network);
+    printf(GREEN "OK\n" RESET);
 
     printf("Vérification de l'accès en lecture\n");
-    Network* network2 = read_network(".test-cache/neuron_io.bin");
-    printf("OK\n");
+    Network* network2 = read_network((char*)".test-cache/neuron_io.bin");
+    printf(GREEN "OK\n" RESET);
 
     printf("Suppression des réseaux\n");
     deletion_of_network(network);
     deletion_of_network(network2);
-    printf("OK\n");
+    printf(GREEN "OK\n" RESET);
 
     return 0;
 }

@@ -7,17 +7,17 @@ make $OUT/mnist-utils
 
 echo "Compte des labels"
 "$OUT/mnist-utils" count-labels -l data/mnist/t10k-labels-idx1-ubyte
-echo "OK"
+echo -e "\033[32mOK\033[0m"
 
 echo "Création du réseau"
 mkdir -p .test-cache
 "$OUT/mnist-utils" creer-reseau -n 3 -o .test-cache/reseau.bin
-echo "OK"
+echo -e "\033[32mOK\033[0m"
 
 echo "Affichage poids"
 "$OUT/mnist-utils" print-poids -r .test-cache/reseau.bin > /dev/null
-echo "OK"
+echo -e "\033[32mOK\033[0m"
 
 echo "Affichage biais"
 "$OUT/mnist-utils" print-biais -r .test-cache/reseau.bin > /dev/null
-echo "OK"
+echo -e "\033[32mOK\033[0m"