Update Makefile

augustin64 2022-11-09 12:55:55 +01:00
parent 43ffdc1b56
commit 17bca42253
6 changed files with 68 additions and 8 deletions

View File

@@ -90,8 +90,8 @@ $(BUILDDIR)/%.o: $(SRCDIR)/%.c $(SRCDIR)/include/%.h
# Tests
#
run-tests: build-tests
$(foreach file, $(wildcard $(BUILDDIR)/test-*), $(file);)
$(foreach file, $(wildcard $(TEST_SRCDIR)/*.sh), $(file);)
@echo "$$(for file in build/test-*; do echo $$file; $$file; done)"
build-tests: prepare-tests $(TESTS_OBJ) $(BUILDDIR)/test-cnn_matrix_multiplication $(BUILDDIR)/test-cnn_convolution

View File

@@ -21,6 +21,33 @@ inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=t
}
#endif
bool check_cuda_compatibility() {
#ifdef __CUDACC__
int nDevices;
cudaDeviceProp prop;
cudaGetDeviceCount(&nDevices);
if (nDevices == 0) {
printf("Pas d'utilisation du GPU\n\n");
return false;
}
printf("GPUs disponibles:\n");
for (int i=0; i < nDevices; i++) {
cudaGetDeviceProperties(&prop, i);
printf(" - %s\n", prop.name);
}
cudaGetDeviceProperties(&prop, 0);
printf("Utilisation du GPU: %s (Compute capability: %d.%d)\n\n", prop.name, prop.major, prop.minor);
return true;
#else
printf("Pas d'utilisation du GPU\n\n");
return false;
#endif
}
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim) {
// kernel is the convolution kernel applied to input
// input[kernel->rows][kernel_k_size + output_dim-1][kernel_k_size + output_dim-1]
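
check_cuda_compatibility() only reports whether a usable device is present; the caller still has to pick the CPU or GPU code path. A minimal dispatch sketch, assuming a make_convolution_device counterpart exists (that name is an assumption, not taken from this diff):

void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim) {
    // Hypothetical dispatcher: use the GPU only when compiled with nvcc
    // and check_cuda_compatibility() finds at least one device.
    #ifdef __CUDACC__
    if (check_cuda_compatibility()) {
        make_convolution_device(kernel, input, output, output_dim); // assumed GPU variant
        return;
    }
    #endif
    make_convolution_cpu(kernel, input, output, output_dim);
}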

View File

@@ -112,10 +112,10 @@ ptr get_function_activation(int activation) {
} else if (activation == -SIGMOID) {
return &sigmoid_derivative;
} else if (activation == SOFTMAX) {
printf("Erreur, impossible de renvoyer la fonction softmax");
printf("Erreur, impossible de renvoyer la fonction softmax\n");
return NULL;
} else if (activation == -SOFTMAX) {
printf("Erreur, impossible de renvoyer la dérivée de la fonction softmax");
printf("Erreur, impossible de renvoyer la dérivée de la fonction softmax\n");
return NULL;
} else if (activation == TANH) {
return &tanh_;
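
Returning NULL for SOFTMAX and -SOFTMAX is deliberate: get_function_activation() hands back a pointer to a function applied element by element, while softmax normalises over a whole vector, so it cannot fit that signature. A minimal vector-level sketch, assuming a hypothetical apply_softmax helper that is not part of this codebase:

#include <math.h>

void apply_softmax(float* input, float* output, int n) {
    // Softmax needs every element at once: shift by the maximum for
    // numerical stability, exponentiate, then normalise by the sum.
    float max = input[0];
    for (int i=1; i < n; i++) {
        if (input[i] > max) max = input[i];
    }
    float sum = 0.;
    for (int i=0; i < n; i++) {
        output[i] = expf(input[i] - max);
        sum += output[i];
    }
    for (int i=0; i < n; i++) {
        output[i] /= sum;
    }
}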

View File

@@ -59,7 +59,7 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
int indice_buffer = 0;
if (type_couche == 0) { // CNN case
Kernel_cnn* cnn = kernel->cnn;
int output_dim = network->width[indice_couche];
int output_dim = network->width[indice_couche+1];
// Write the pre-body
uint32_t pre_buffer[4];
@@ -75,7 +75,6 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
for (int i=0; i < cnn->columns; i++) {
for (int j=0; j < output_dim; j++) {
for (int k=0; k < output_dim; k++) {
printf("%f\n", cnn->bias[i][j][k]);
bufferAdd(cnn->bias[i][j][k]);
}
}
@@ -169,7 +168,7 @@ Network* read_network(char* filename) {
network->kernel = (Kernel**)malloc(sizeof(Kernel*)*size);
for (int i=0; i < (int)size; i++) {
network->kernel[i] = read_kernel(type_couche[i], network->width[i], ptr);
network->kernel[i] = read_kernel(type_couche[i], network->width[i+1], ptr);
}
network->input = (float****)malloc(sizeof(float***)*size);
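
The width[i] → width[i+1] changes in this file (and in the ones below) follow a single convention: network->width[i] is the side length of the volume entering layer i, so the output written by kernel i has side network->width[i+1], which is the dimension its bias and weight buffers must use. A minimal illustration of that convention, assuming the Network struct exposes size and width as used in read_network above (print_layer_dims is a hypothetical helper):

void print_layer_dims(Network* network) {
    // For kernel i, width[i] is the input side and width[i+1] the output side;
    // this is why the CNN buffers are sized with width[i+1].
    for (int i=0; i < network->size - 1; i++) {
        printf("layer %d: input %dx%d -> output %dx%d\n",
               i, network->width[i], network->width[i],
               network->width[i+1], network->width[i+1]);
    }
}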

View File

@@ -51,7 +51,7 @@ bool equals_networks(Network* network1, Network* network2) {
}
} else {
// Type CNN
output_dim = network1->width[i];
output_dim = network1->width[i+1];
checkEquals(kernel[i]->cnn->k_size, "kernel[i]->k_size", i);
checkEquals(kernel[i]->cnn->rows, "kernel[i]->rows", i);
checkEquals(kernel[i]->cnn->columns, "kernel[i]->columns", i);
@@ -152,7 +152,7 @@ Network* copy_network(Network* network) {
rows = network->kernel[i]->cnn->rows;
k_size = network->kernel[i]->cnn->k_size;
columns = network->kernel[i]->cnn->columns;
output_dim = network->width[i];
output_dim = network->width[i+1];
network_cp->kernel[i]->nn = NULL;

View File

@@ -75,6 +75,32 @@ void free_matrix(float*** matrix, int n, int p) {
free(matrix);
}
bool check_cuda_compatibility() {
#ifdef __CUDACC__
int nDevices;
cudaDeviceProp prop;
cudaGetDeviceCount(&nDevices);
if (nDevices == 0) {
printf("Pas d'utilisation du GPU\n\n");
return false;
}
printf("GPUs disponibles:\n");
for (int i=0; i < nDevices; i++) {
cudaGetDeviceProperties(&prop, i);
printf(" - %s\n", prop.name);
}
cudaGetDeviceProperties(&prop, 0);
printf("Utilisation du GPU: %s (Compute capability: %d.%d)\n\n", prop.name, prop.major, prop.minor);
return true;
#else
printf("Pas d'utilisation du GPU\n\n");
return false;
#endif
}
bool check_matrices_equality(float*** m1, float*** m2, int n, int p, int q, int acceptation) {
for (int i=0; i < n; i++) {
@@ -163,6 +189,14 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
int main() {
printf("Checking CUDA compatibility.\n");
bool cuda_compatible = check_cuda_compatibility();
if (!cuda_compatible) {
printf("CUDA not compatible, skipping tests.\n");
return 0;
}
printf("OK\n");
srand(time(NULL));
run_convolution_test(20, 15, 30, 40);
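
For context, test-cnn_convolution presumably fills a random input, runs the convolution on both back ends and compares the results with check_matrices_equality(). A minimal sketch under that assumption (make_convolution_device and the tolerance value of 10 are assumptions, not taken from this diff):

bool compare_convolution_outputs(Kernel_cnn* kernel, float*** input,
                                 float*** output_cpu, float*** output_gpu,
                                 int output_dim) {
    // Run the same convolution on both back ends, then compare element-wise
    // within the tolerance expected by check_matrices_equality().
    make_convolution_cpu(kernel, input, output_cpu, output_dim);
    make_convolution_device(kernel, input, output_gpu, output_dim); // assumed GPU variant
    return check_matrices_equality(output_cpu, output_gpu,
                                   kernel->columns, output_dim, output_dim, 10);
}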