Update matrix_multiplication.cu

This commit is contained in:
augustin64 2022-10-19 13:05:59 +02:00
parent 0f5867ebb6
commit 6ebbfda127
3 changed files with 194 additions and 2 deletions

View File

@ -23,10 +23,15 @@ int i_div_up(int a, int b) { // Partie entière supérieure de a/b
}
__global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int p, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
__global__ void matrix_mul_kernel(float* Md, float* Nd, float* Pd, int n, int p, int q, size_t pitch_m, size_t pitch_n, size_t pitch_p) {
// 2D Thread ID
int tx = blockIdx.x*blockDim.x + threadIdx.x; // Indice de colonne
int ty = blockIdx.y*blockDim.y + threadIdx.y; // Indice de ligne
if (tx >= n || ty >= q) {
return;
}
// Pvalue stores the Pd element that is computed by the thread
float Pvalue = 0.;
float* M_offset;
@ -69,7 +74,7 @@ void matrix_multiplication_device(float** m1, float** m2, float** result, int n,
dim3 gridSize(i_div_up(n, BLOCKSIZE_x), i_div_up(q, BLOCKSIZE_y));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y);
matrix_mul_kernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, p, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
matrix_mul_kernel<<<gridSize, blockSize>>>(m1_dev, m2_dev, result_dev, n, p, q, pitch_m1_dev, pitch_m2_dev, pitch_result_dev);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );

View File

@ -0,0 +1,64 @@
#!/usr/bin/python3
import subprocess
import json
from matplotlib import pyplot as plt
def average(l):
    """Arithmetic mean of the values in `l`, rounded to 10 decimal places."""
    total = 0
    for value in l:
        total += value
    return round(total / len(l), 10)
def avg(vals):
    """Average the measured fields over a list of benchmark run dicts.

    The timing/error fields are averaged (rounded to 10 decimals); the
    matrix dimensions are taken from the first run, since all runs in
    `vals` share the same shape.
    """
    def _mean(key):
        samples = [entry[key] for entry in vals]
        return round(sum(samples) / len(samples), 10)

    return {
        "GPUtime": _mean("GPUtime"),
        "CPUtime": _mean("CPUtime"),
        "errMax": _mean("errMax"),
        "errMoy": _mean("errMoy"),
        "width": vals[0]["width"],
        "depth": vals[0]["depth"],
    }
def mul_matrix(n, p, q):
    """Run the compiled benchmark `./a.out n p q` and parse its output.

    The binary prints one `key:value` line per measure, in this order:
    GPU time, CPU time, max error, mean error. Returns them as a dict
    together with the result-matrix width `q` and the shared depth `p`.
    """
    raw = subprocess.check_output(["./a.out", str(n), str(p), str(q)])
    lines = [line for line in raw.decode("utf8").split("\n") if line != ""]
    measures = [float(line.split(":")[-1]) for line in lines]
    return {
        "GPUtime": measures[0],
        "CPUtime": measures[1],
        "errMax": measures[2],
        "errMoy": measures[3],
        "width": q,
        "depth": p,
    }
def generate_data():
    """Benchmark square matrix products of growing width and dump the
    averaged measures (10 runs each) to result.json."""
    depth = 40
    values = []
    for step in range(60):
        runs = [mul_matrix((step + 1) * 100, depth, (step + 1) * 100) for _ in range(10)]
        values.append(avg(runs))
        print(f"Added M({(step+1)*100}x{depth}) x M({depth}x{(step+1)*100})")
    with open("result.json", "w") as file:
        json.dump(values, file, indent=4)
def plot_temps_exec(data):
    """Plot GPU and CPU execution times against result-matrix width."""
    widths = [entry["width"] for entry in data]
    gpu_times = [entry["GPUtime"] for entry in data]
    cpu_times = [entry["CPUtime"] for entry in data]
    plt.plot(widths, gpu_times)
    plt.plot(widths, cpu_times)
    plt.show()
def plot_erreur(data):
    """Plot mean and max GPU-vs-CPU error against result-matrix width."""
    widths = [entry["width"] for entry in data]
    mean_errors = [entry["errMoy"] for entry in data]
    max_errors = [entry["errMax"] for entry in data]
    plt.plot(widths, mean_errors)
    plt.plot(widths, max_errors)
    plt.show()
def load_data():
    """Load previously generated benchmark results from result.json."""
    with open("result.json", "r") as f:
        return json.load(f)

View File

@ -0,0 +1,123 @@
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>
#include "../cnn/matrix_multiplication.cu"
// Uniform pseudo-random float in [low, high], driven by rand().
// Seed with srand() before use for reproducibility.
float random_float(float low, float high) {
    float ratio = (float)rand() / (float)RAND_MAX;
    return (1.0f - ratio) * low + ratio * high;
}
// Fill every cell of an n x p matrix with a uniform random value in [0, 15].
void fill_matrix_random(float **matrix, int n, int p) {
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < p; col++) {
            matrix[row][col] = random_float(0.0f, 15.0f);
        }
    }
}
// Pretty-print an n x p matrix to stdout, one bracketed row per line,
// values tab-separated with one decimal place.
void print_matrix(float** mat, int n, int p) {
    for (int row = 0; row < n; row++) {
        printf("[\t");
        for (int col = 0; col < p; col++) {
            printf("%0.1f\t", mat[row][col]);
        }
        printf("]\n");
    }
}
// Allocate an n x p matrix as an array of row pointers and fill it with
// random values in [0, 15]. Caller owns (and must free) the memory.
float** create_matrix(int n, int p) {
    float** rows = (float**)malloc(n * sizeof(float*));
    for (int i = 0; i < n; i++) {
        rows[i] = (float*)malloc(p * sizeof(float));
    }
    fill_matrix_random(rows, n, p);
    return rows;
}
// Allocate an n x p matrix initialised to zero (calloc's all-zero bytes
// are 0.0f under IEEE 754). Caller owns (and must free) the memory.
float** create_empty_matrix(int n, int p) {
    float** rows = (float**)malloc(n * sizeof(float*));
    for (int i = 0; i < n; i++) {
        rows[i] = (float*)calloc(p, sizeof(float));
    }
    return rows;
}
// Return the larger of two floats.
float max_float(float a, float b) {
    if (a > b) {
        return a;
    }
    return b;
}
// Compare two n x p matrices element-wise and print the maximum and mean
// absolute differences. NOTE: always returns true — the early exit on a
// large difference is deliberately disabled, because the GPU and CPU
// results legitimately differ by small float rounding errors (see the
// trailing notes in this file).
bool check_matrices_equality(float** m1, float** m2, int n, int p) {
    float err_max = 0.;
    float err_moy = 0.;
    for (int i=0; i < n; i++) {
        for (int j=0; j < p; j++) {
            double delta = fabs(m1[i][j] - m2[i][j]);
            if (delta > 0.8) {
                //printf("%d %d\n", i, j);
                //return false;
            }
            // Track the running maximum (narrowing to float, as before).
            if ((float)delta > err_max) {
                err_max = (float)delta;
            }
            err_moy += delta;
        }
    }
    printf("err_max:%lf\n", err_max);
    printf("err_moy:%lf\n", err_moy/(n*p));
    return true;
}
// Benchmark entry point.
// Usage: ./a.out n p q — multiplies a random (n x p) matrix by a random
// (p x q) matrix on the GPU, timing the call with clock(). Output lines
// ("GPU:<t>", "CPU:<t>", ...) are parsed by the companion Python script.
int main(int argc, char* argv[]) {
    if (argc < 4) {
        return 1; // the three matrix dimensions are required
    }
    // Matrix dimensions: result is (n x q), shared inner dimension p.
    int n = strtol(argv[1], NULL, 10);
    int p = strtol(argv[2], NULL, 10);
    int q = strtol(argv[3], NULL, 10);
    clock_t start, end;
    double cpu_time_used;
    srand(time(NULL));
    float** matrix1 = create_matrix(n, p);
    float** matrix2 = create_matrix(p, q);
    float** result_gpu = create_empty_matrix(n, q);
    float** result_cpu = create_empty_matrix(n, q);
    // Time the GPU multiplication as seen from the host; presumably this
    // includes host<->device transfers inside matrix_multiplication_device
    // — TODO confirm against its implementation.
    start = clock();
    matrix_multiplication_device(matrix1, matrix2, result_gpu, n, p, q);
    end = clock();
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("GPU:%lf\n", cpu_time_used);
    // CPU reference run is currently disabled, so the CPU timing below
    // measures an empty interval and the equality check is skipped.
    start = clock();
    //matrix_multiplication_host(matrix1, matrix2, result_cpu, n, p, q);
    end = clock();
    cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("CPU:%lf\n", cpu_time_used);
    //check_matrices_equality(result_gpu, result_cpu, n, q);
    // NOTE(review): the four matrices are never freed — acceptable for a
    // one-shot benchmark process, but a leak if this code is reused.
    return 0;
}
// On obtient une différence entre le calcul fait par le GPU et par le CPU.
// Cette différence est linéaire en p. (err_moy = p*1.639e-6)
// Elle ne varie pas en fonction de n et q.
// Cette erreur est sûrement due à des différences mineures dans la précision du stockage des flottants
// dans la mémoire RAM et la VRAM (du GPU).