Mirror of https://github.com/augustin64/projet-tipe
Synced 2025-01-24 15:36:25 +01:00

Add check_cuda_compatibility()

This commit is contained in:
parent 4839872c9b
commit 31e11f8d90
@@ -1,5 +1,6 @@
 #include <stdlib.h>
 #include <stdio.h>
+#include <stdbool.h>
 #include <time.h>
 
 #define BLOCKSIZE_x 16
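The new include is what makes the bool return type of check_cuda_compatibility() legal when this file is built as plain C; nvcc compiles it as C++, where bool is built in, so the guard costs nothing there. A minimal illustration (the prototype is copied from this commit):

#include <stdbool.h>  /* C has no built-in bool/true/false before C23 */

bool check_cuda_compatibility(void);  /* now valid under both gcc and nvcc */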
@@ -16,21 +17,16 @@ inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=t
 }
 #endif
 
-int iDivUp(int hostPtr, int b){
-    return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b);
-}
-
-
-float RandFloat(float low, float high) {
+float random_float(float low, float high) {
     float t = (float)rand() / (float)RAND_MAX;
     return (1.0f - t) * low + t * high;
 }
 
 
-void fillMatrixWithRandomValues(float **matrix, int n, int p) {
+void fill_matrix_random(float **matrix, int n, int p) {
     for (int i=0; i < n; i++) {
         for (int j=0; j < p; j++) {
-            matrix[i][j] = RandFloat(0.0f, 15.0f);
+            matrix[i][j] = random_float(0.0f, 15.0f);
         }
     }
 }
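A note on random_float: the blend (1.0f - t) * low + t * high maps t in [0, 1] linearly onto [low, high], but rand() replays the same sequence on every run unless the generator is seeded. A standalone sketch; the srand call is an assumption, since this diff does not show whether main() seeds:

#include <stdlib.h>
#include <stdio.h>
#include <time.h>

float random_float(float low, float high) {
    float t = (float)rand() / (float)RAND_MAX;  // t in [0, 1]
    return (1.0f - t) * low + t * high;         // linear blend onto [low, high]
}

int main() {
    srand(time(NULL));  // assumption: seed once so each run differs
    for (int i = 0; i < 3; i++)
        printf("%f\n", random_float(0.0f, 15.0f));
    return 0;
}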
@@ -53,7 +49,7 @@ float** create_matrix(int n, int p) {
         matrix[i] = (float*)malloc(sizeof(float)*p);
     }
 
-    fillMatrixWithRandomValues(matrix, n, p);
+    fill_matrix_random(matrix, n, p);
     return matrix;
 }
 
@@ -71,7 +67,12 @@ float** create_empty_matrix(int n, int p) {
 
 
 #ifdef __CUDACC__
-__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
+int i_div_up(int hostPtr, int b){
+    return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b);
+}
+
+
+__global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
     // 2D Thread ID
     int tx = blockIdx.x*blockDim.x + threadIdx.x;
     int ty = blockIdx.y*blockDim.y + threadIdx.y;
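The renamed i_div_up is a ceiling division: it rounds the grid dimensions up so that a partial block is still launched when n or q is not a multiple of BLOCKSIZE_x/BLOCKSIZE_y, leaving the kernel to discard the overhanging threads with a bounds check (not visible in this hunk). A self-contained check of that behaviour:

#include <assert.h>

// Smallest g such that g * b >= a; equivalent to (a + b - 1) / b
// for positive a and b.
int i_div_up(int a, int b) {
    return ((a % b) != 0) ? (a / b + 1) : (a / b);
}

int main() {
    assert(i_div_up(32, 16) == 2);  // exact multiple: no extra block
    assert(i_div_up(33, 16) == 3);  // one leftover row costs a whole block
    assert(i_div_up(1, 16) == 1);
    return 0;
}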
@@ -92,7 +93,7 @@ __global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int n, int p, i
 }
 
 
-void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q) {
+void matrix_multiplication_device(float** m1, float** m2, float** result, int n, int p, int q) {
     // Matrix preparation
     size_t pitch_m1_dev;
     size_t pitch_m2_dev;
@@ -114,10 +115,10 @@ void matrix_multiplication(float** m1, float** m2, float** result, int n, int p,
     gpuErrchk( cudaMallocPitch((void**)&result_dev, &pitch_result_dev, q * sizeof(float), n));
 
     // Processing
-    dim3 gridSize(iDivUp(n, BLOCKSIZE_x), iDivUp(q, BLOCKSIZE_y));
+    dim3 gridSize(i_div_up(n, BLOCKSIZE_x), i_div_up(q, BLOCKSIZE_y));
     dim3 blockSize(BLOCKSIZE_y, BLOCKSIZE_x);
 
-    MatrixMulKernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
+    matrix_mul_kernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
 
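For context on the pitch_* arguments threaded through the kernel: cudaMallocPitch pads each matrix row to an alignment-friendly byte stride, so device code cannot index the allocation as a flat array. Rows are found by byte offset, along these lines (pitched_row is a hypothetical helper for illustration, not code from this file):

// pitch is in bytes, with pitch >= width * sizeof(float), hence the char* cast.
__device__ float* pitched_row(float* base, size_t pitch, int row) {
    return (float*)((char*)base + (size_t)row * pitch);
}

// Inside matrix_mul_kernel one would then read, e.g.:
//     float a = pitched_row(Md, pitch_m, tx)[k];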
@@ -132,18 +133,60 @@ void matrix_multiplication(float** m1, float** m2, float** result, int n, int p,
     gpuErrchk( cudaPeekAtLastError() );
     gpuErrchk( cudaDeviceSynchronize() );
 }
+#endif
 
-#else
-void matrix_multiplication(float* m1, float* m2, float* result, int n, int p, int q) {
+bool check_cuda_compatibility() {
+#ifdef __CUDACC__
+    int nDevices;
+    cudaDeviceProp prop;
+
+    cudaGetDeviceCount(&nDevices);
+    if (nDevices == 0) {
+        printf("Not using the GPU\n\n");
+        return false;
+    }
+
+    printf("Available GPUs:\n");
+
+    for (int i=0; i < nDevices; i++) {
+        cudaGetDeviceProperties(&prop, i);
+        printf(" - %s\n", prop.name);
+    }
+
+    cudaGetDeviceProperties(&prop, 0);
+    printf("Using GPU: %s\n\n", prop.name);
+    return true;
+#else
+    printf("Not using the GPU\n\n");
+    return false;
+#endif
+}
+
+
+void matrix_multiplication_host(float** m1, float** m2, float** result, int n, int p, int q) {
     for (int i=0; i < n; i++) {
         for (int j=0; j < q; j++) {
+            result[i][j] = 0.;
             for (int k=0; k < p; k++) {
-                result[i*q+j] += m1[i*p+k] + m2[k*q+j];
+                result[i][j] += m1[i][k] * m2[k][j];
             }
         }
     }
 }
-#endif
+
+
+void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q, bool use_cuda) {
+#ifdef __CUDACC__
+    if (use_cuda) {
+        matrix_multiplication_device(m1, m2, result, n, p, q);
+    } else {
+        matrix_multiplication_host(m1, m2, result, n, p, q);
+    }
+#else
+    matrix_multiplication_host(m1, m2, result, n, p, q);
+#endif
+}
 
 
 int main() {
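The shape of the new API: check_cuda_compatibility() degrades gracefully, returning false both when the translation unit is built without nvcc (no __CUDACC__) and when no device is found, so a single call site serves CPU-only and GPU builds alike. A usage sketch against the functions above; the sizes are made-up values, not from this commit:

int n = 200, p = 1000, q = 200;                 // assumed dimensions
float** matrix1 = create_matrix(n, p);
float** matrix2 = create_matrix(p, q);
float** result  = create_empty_matrix(n, q);

// The bool picks the backend at runtime; under gcc the wrapper
// compiles down to the host path unconditionally.
matrix_multiplication(matrix1, matrix2, result, n, p, q, check_cuda_compatibility());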
@@ -159,7 +202,7 @@ int main() {
     double cpu_time_used;
 
     start = clock();
-    matrix_multiplication(matrix1, matrix2, result, n, p, q);
+    matrix_multiplication(matrix1, matrix2, result, n, p, q, check_cuda_compatibility());
     end = clock();
 
     cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
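One caveat on the benchmark in main(): clock() measures processor time. It still brackets the GPU path here because matrix_multiplication_device synchronizes before returning, but kernel-only timing is usually done with CUDA events, roughly as below (a sketch reusing the variables of matrix_multiplication_device; the event names are illustrative, not from this commit):

cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);

cudaEventRecord(start_ev);
matrix_mul_kernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q,
                                           pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);                 // wait for the kernel to finish

float ms = 0.0f;
cudaEventElapsedTime(&ms, start_ev, stop_ev);  // milliseconds between the events
printf("kernel time: %f ms\n", ms);

cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);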