diff --git a/src/cnn/matrix_multiplication.cu b/src/cnn/matrix_multiplication.cu
index 61394dc..985b45d 100644
--- a/src/cnn/matrix_multiplication.cu
+++ b/src/cnn/matrix_multiplication.cu
@@ -1,5 +1,6 @@
 #include <stdio.h>
 #include <stdlib.h>
+#include <stdbool.h>
 #include <time.h>
 
 #define BLOCKSIZE_x 16
@@ -16,21 +17,16 @@ inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
 }
 #endif
 
-int iDivUp(int hostPtr, int b){
-    return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b);
-}
-
-
-float RandFloat(float low, float high) {
+float random_float(float low, float high) {
     float t = (float)rand() / (float)RAND_MAX;
     return (1.0f - t) * low + t * high;
 }
 
-void fillMatrixWithRandomValues(float **matrix, int n, int p) {
+void fill_matrix_random(float **matrix, int n, int p) {
     for (int i=0; i < n; i++) {
         for (int j=0; j < p; j++) {
-            matrix[i][j] = RandFloat(0.0f, 15.0f);
+            matrix[i][j] = random_float(0.0f, 15.0f);
         }
     }
 }
 
@@ -53,7 +49,7 @@ float** create_matrix(int n, int p) {
         matrix[i] = (float*)malloc(sizeof(float)*p);
     }
 
-    fillMatrixWithRandomValues(matrix, n, p);
+    fill_matrix_random(matrix, n, p);
     return matrix;
 }
 
@@ -71,7 +67,12 @@ float** create_empty_matrix(int n, int p) {
 
 #ifdef __CUDACC__
-__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
+int i_div_up(int hostPtr, int b){
+    return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b);
+}
+
+
+__global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
     // 2D Thread ID
     int tx = blockIdx.x*blockDim.x + threadIdx.x;
     int ty = blockIdx.y*blockDim.y + threadIdx.y;
 
@@ -92,7 +93,7 @@ __global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
 }
 
 
-void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q) {
+void matrix_multiplication_device(float** m1, float** m2, float** result, int n, int p, int q) {
     // Préparation des matrices
     size_t pitch_m1_dev;
     size_t pitch_m2_dev;
@@ -114,10 +115,10 @@ void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q) {
     gpuErrchk( cudaMallocPitch((void**)&result_dev, &pitch_result_dev, q * sizeof(float), n));
 
     // Traitement
-    dim3 gridSize(iDivUp(n, BLOCKSIZE_x), iDivUp(q, BLOCKSIZE_y));
+    dim3 gridSize(i_div_up(n, BLOCKSIZE_x), i_div_up(q, BLOCKSIZE_y));
     dim3 blockSize(BLOCKSIZE_y, BLOCKSIZE_x);
 
-    MatrixMulKernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
+    matrix_mul_kernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
 
@@ -132,18 +133,60 @@ void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q) {
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
 }
+#endif
 
-#else
-void matrix_multiplication(float* m1, float* m2, float* result, int n, int p, int q) {
+
+
+bool check_cuda_compatibility() {
+    #ifdef __CUDACC__
+    int nDevices;
+    cudaDeviceProp prop;
+
+    cudaGetDeviceCount(&nDevices);
+    if (nDevices == 0) {
+        printf("Pas d'utilisation du GPU\n\n");
+        return false;
+    }
+
+    printf("GPUs disponibles:\n");
+
+    for (int i=0; i < nDevices; i++) {
+        cudaGetDeviceProperties(&prop, i);
+        printf(" - %s\n", prop.name);
+    }
+
+    cudaGetDeviceProperties(&prop, 0);
+    printf("Utilisation du GPU: %s\n\n", prop.name);
+    return true;
+    #else
+    printf("Pas d'utilisation du GPU\n\n");
+    return false;
+    #endif
+}
+
+
+void matrix_multiplication_host(float** m1, float** m2, float** result, int n, int p, int q) {
     for (int i=0; i < n; i++) {
         for (int j=0; j < q; j++) {
+            result[i][j] = 0.0f;
             for (int k=0; k < p; k++) {
-                result[i*q+j] += m1[i*p+k] + m2[k*q+j];
+                result[i][j] += m1[i][k] * m2[k][j];
             }
         }
     }
 }
-#endif
+
+
+void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q, bool use_cuda) {
+    #ifdef __CUDACC__
+    if (use_cuda) {
+        matrix_multiplication_device(m1, m2, result, n, p, q);
+    } else {
+        matrix_multiplication_host(m1, m2, result, n, p, q);
+    }
+    #else
+    matrix_multiplication_host(m1, m2, result, n, p, q);
+    #endif
+}
 
 
 int main() {
@@ -159,7 +202,7 @@ int main() {
     double cpu_time_used;
 
     start = clock();
-    matrix_multiplication(matrix1, matrix2, result, n, p, q);
+    matrix_multiplication(matrix1, matrix2, result, n, p, q, check_cuda_compatibility());
     end = clock();
     cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;