#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>

#include "../include/colors.h"
#include "../include/utils.h"
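
// gpuErrchk() and i_div_up(), used below, are assumed to be provided by the
// project headers above (the usual CUDA error-check macro and an integer
// ceiling-division helper).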

#define BLOCKSIZE_x 16
#define BLOCKSIZE_y 16

#ifdef __CUDACC__
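/*
 * One thread per element Nd[tx][ty] of the second matrix.
 * Each thread reads its N element once, then loops over every row i of M and
 * accumulates M[i][tx] * N[tx][ty] into P[i][ty]. atomicAdd is required because
 * threads with different tx contribute to the same output element P[i][ty].
 * Md, Nd and Pd are pitched device allocations; pitch_* are row strides in bytes.
 */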
__global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
    // Each thread handles every multiplication that uses the element Nd[tx][ty]
    int tx = (blockIdx.x*blockDim.x) + threadIdx.x; // Row index in N (= column index in M)
    int ty = (blockIdx.y*blockDim.y) + threadIdx.y; // Column index in N and P

    if (tx >= p || ty >= q) {
        return;
    }

    // Row pointers into the pitched allocations
    float* M_offset;
    float* P_offset;
    float* N_offset = (float *)((char*)Nd + tx * pitch_n);
    float Nxy = N_offset[ty]; // N[tx][ty]

    for (int i = 0; i < n; i++) {
        M_offset = (float *)((char*)Md + i * pitch_m);
        P_offset = (float*)((char*)Pd + i * pitch_p); // P[i], pitch_p is a byte offset
        atomicAdd(&P_offset[ty], M_offset[tx] * Nxy); // P[i][ty] += M[i][tx] * N[tx][ty]
    }
}

void matrix_multiplication_device(float** m1, float** m2, float** result, int n, int p, int q) {
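    // GPU path: copy m1 (n x p) and m2 (p x q) into pitched device buffers,
    // run matrix_mul_kernel over a p x q grid of threads, then copy the
    // n x q result back to the host row by row.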

    // Prepare the matrices
    size_t pitch_m1_dev;
    size_t pitch_m2_dev;
    size_t pitch_result_dev;
    float* m1_dev;
    float* m2_dev;
    float* result_dev;
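
    // m1 (n x p): cudaMallocPitch may pad each device row to pitch_m1_dev bytes,
    // and the host matrix is an array of row pointers, so copy it row by row.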
    gpuErrchk( cudaMallocPitch((void**)&m1_dev, &pitch_m1_dev, p * sizeof(float), n));
    for (int i=0; i < n; i++) {
        gpuErrchk( cudaMemcpy((void*)((char*)m1_dev + i*pitch_m1_dev), (const void*)&(m1[i][0]), p*sizeof(float), cudaMemcpyHostToDevice));
    }
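
    // Same for m2 (p x q)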
    gpuErrchk( cudaMallocPitch((void**)&m2_dev, &pitch_m2_dev, q * sizeof(float), p));
    for (int i=0; i < p; i++) {
        gpuErrchk( cudaMemcpy((void*)((char*)m2_dev + i*pitch_m2_dev), (const void*)&(m2[i][0]), q*sizeof(float), cudaMemcpyHostToDevice));
    }
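
    // Allocate the n x q result buffer and zero it: the kernel accumulates into
    // it with atomicAdd, so it must start at 0.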
    gpuErrchk( cudaMallocPitch((void**)&result_dev, &pitch_result_dev, q * sizeof(float), n));
    gpuErrchk( cudaMemset(result_dev, 0, pitch_result_dev*n));

    // Processing
    dim3 gridSize(i_div_up(p, BLOCKSIZE_x), i_div_up(q, BLOCKSIZE_y));
    dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y);
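
    // Launch one thread per element of m2; i_div_up rounds the block count up
    // so the 16x16 blocks fully cover the p x q domain.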
    matrix_mul_kernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    // Post-processing: copy the result back to the host, row by row
    for (int i=0; i < n; i++) {
        gpuErrchk( cudaMemcpy((void*)&(result[i][0]), (const void*)((char*)result_dev + i*pitch_result_dev), sizeof(float)*q, cudaMemcpyDeviceToHost));
    }

    gpuErrchk( cudaFree(result_dev) );
    gpuErrchk( cudaFree(m1_dev) );
    gpuErrchk( cudaFree(m2_dev) );
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
}
#endif

void matrix_multiplication_host(float** m1, float** m2, float** result, int n, int p, int q) {
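    // CPU fallback: naive triple loop, result (n x q) = m1 (n x p) * m2 (p x q)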
    for (int i=0; i < n; i++) {
        for (int j=0; j < q; j++) {
            result[i][j] = 0.;
            for (int k=0; k < p; k++) {
                result[i][j] += m1[i][k] * m2[k][j];
            }
        }
    }
}

void matrix_multiplication(float** m1, float** m2, float** result, int n, int p, int q, bool use_cuda) {
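    // The GPU path is only available when compiled with nvcc (__CUDACC__);
    // otherwise, or when use_cuda is false, fall back to the CPU implementation.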
    #ifdef __CUDACC__
    if (use_cuda) {
        matrix_multiplication_device(m1, m2, result, n, p, q);
    } else {
        matrix_multiplication_host(m1, m2, result, n, p, q);
    }
    #else
    matrix_multiplication_host(m1, m2, result, n, p, q);
    #endif
}