#include <stdio.h>
#include <float.h>
#include <math.h>

#include "include/convolution.h"
#include "../include/colors.h"
#include "../include/utils.h"

#include "include/make.h"
#include "include/config.h"

/*
* Average Pooling
*/

#ifdef __CUDACC__
__global__ void make_average_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width) {
    // Respective equivalents of i, j and k in the loop performed by the CPU version
    int idx = threadIdx.x + blockDim.x*blockIdx.x; // < output_depth
    int idy = threadIdx.y + blockDim.y*blockIdx.y; // < output_width
    int idz = threadIdx.z + blockDim.z*blockIdx.z; // < output_width
    int n = size*size;

    if (idx >= output_depth || idy >= output_width || idz >= output_width) {
        return;
    }

    float sum = 0;

    for (int a=0; a < size; a++) {
        for (int b=0; b < size; b++) {
            sum += input[idx][size*idy +a][size*idz +b];
        }
    }
    output[idx][idy][idz] = sum/(float)n;
}

void make_average_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width) {
    // Make computation
    dim3 gridSize(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
    dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);

    make_average_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
}
#endif
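
/*
 * About the launch configuration above: i_div_up (presumably provided by utils.h)
 * is taken here to be a ceiling division, so the grid covers at least
 * output_depth x output_width x output_width threads and the kernel's bounds
 * check discards the excess. For example, with output_depth = 10 and
 * BLOCKSIZE_x = 8, gridDim.x = 2, i.e. 16 threads along x of which 6 return
 * immediately.
 */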

void make_average_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width) {
    // input[output_depth][output_width*size][output_width*size]
    // output[output_depth][output_width][output_width]
    float sum;
    int n = size*size;

    for (int i=0; i < output_depth; i++) {
        for (int j=0; j < output_width; j++) {
            for (int k=0; k < output_width; k++) {
                sum = 0;
                for (int a=0; a < size; a++) {
                    for (int b=0; b < size; b++) {
                        sum += input[i][size*j +a][size*k +b];
                    }
                }
                output[i][j][k] = sum/(float)n;
            }
        }
    }
}

#ifdef __CUDACC__
extern "C"
#endif
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_width) {
    #ifndef __CUDACC__
    make_average_pooling_cpu(input, output, size, output_depth, output_width);
    #else
    make_average_pooling_device(input, output, size, output_depth, output_width);
    #endif
}
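
/*
 * Hypothetical usage sketch (illustrative only, not part of the original file):
 * non-overlapping pooling maps a [depth][size*output_width][size*output_width]
 * input to a [depth][output_width][output_width] output. The guard macro and the
 * alloc_3d helper below are made up for this sketch; the project may provide its
 * own allocation helpers.
 */
#ifdef POOLING_USAGE_EXAMPLE
#include <stdlib.h>

static float*** alloc_3d(int depth, int rows, int cols) {
    // Heap-allocates a zero-initialised depth x rows x cols tensor
    float*** t = (float***)malloc(depth*sizeof(float**));
    for (int i=0; i < depth; i++) {
        t[i] = (float**)malloc(rows*sizeof(float*));
        for (int j=0; j < rows; j++) {
            t[i][j] = (float*)calloc(cols, sizeof(float));
        }
    }
    return t;
}

static void average_pooling_example(void) {
    int size = 2;
    int depth = 1;
    int output_width = 2;
    float*** input = alloc_3d(depth, size*output_width, size*output_width);
    float*** output = alloc_3d(depth, output_width, output_width);

    // Fill the top-left 2x2 block: its average is (1+3+5+7)/4 = 4
    input[0][0][0] = 1.f; input[0][0][1] = 3.f;
    input[0][1][0] = 5.f; input[0][1][1] = 7.f;

    make_average_pooling_cpu(input, output, size, depth, output_width);
    printf("%f\n", output[0][0][0]); // prints 4.000000
    // (deallocation omitted for brevity)
}
#endif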

/*
* Max Pooling
*/

#ifdef __CUDACC__
__global__ void make_max_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width) {
    // Respective equivalents of i, j and k in the loop performed by the CPU version
    int idx = threadIdx.x + blockDim.x*blockIdx.x; // < output_depth
    int idy = threadIdx.y + blockDim.y*blockIdx.y; // < output_width
    int idz = threadIdx.z + blockDim.z*blockIdx.z; // < output_width

    if (idx >= output_depth || idy >= output_width || idz >= output_width) {
        return;
    }

    float m = -FLT_MAX;
    float temp;

    for (int a=0; a < size; a++) {
        for (int b=0; b < size; b++) {
            temp = input[idx][size*idy +a][size*idz +b];
            m = m > temp ? m : temp; // max(m, temp)
        }
    }
    output[idx][idy][idz] = m;
}

void make_max_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width) {
    // Make computation
    dim3 gridSize(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
    dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);

    make_max_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
}
#endif

void make_max_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width) {
    // input[output_depth][output_width*size][output_width*size]
    // output[output_depth][output_width][output_width]
    float m;

    for (int i=0; i < output_depth; i++) {
        for (int j=0; j < output_width; j++) {
            for (int k=0; k < output_width; k++) {
                m = -FLT_MAX;
                for (int a=0; a < size; a++) {
                    for (int b=0; b < size; b++) {
                        m = fmaxf(m, input[i][size*j +a][size*k +b]);
                    }
                }
                output[i][j][k] = m;
            }
        }
    }
}

#ifdef __CUDACC__
extern "C"
#endif
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_width) {
    #ifndef __CUDACC__
    make_max_pooling_cpu(input, output, size, output_depth, output_width);
    #else
    make_max_pooling_device(input, output, size, output_depth, output_width);
    #endif
}
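
/*
 * Worked example: 2x2 max pooling (size = 2, output_width = 2) of one 4x4 channel.
 * Each output element is the maximum of a non-overlapping 2x2 block of the input:
 *
 *     input[0] =  1 3 | 2 0          output[0] =  7 2
 *                 5 7 | 1 2                       8 6
 *                 ----+----
 *                 8 0 | 4 6
 *                 2 1 | 5 3
 *
 * e.g. output[0][0][0] = max(1, 3, 5, 7) = 7 and output[0][1][1] = max(4, 6, 5, 3) = 6.
 */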

/*
* Dense
*/

#ifdef __CUDACC__
__global__ void make_dense_kernel(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output) {
    // Equivalent of i in the loop performed by the CPU version
    int idx = threadIdx.x + blockDim.x*blockIdx.x; // < size_output

    if (idx >= size_output) {
        return;
    }
    float f = kernel->bias[idx];

    for (int j=0; j < size_input; j++) {
        f += kernel->weights[j][idx]*input[j];
    }
    output[idx] = f;
}

void make_dense_device(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output) {
    // Make computation
    dim3 gridSize(i_div_up(size_output, BLOCKSIZE_x*BLOCKSIZE_y), 1, 1);
    dim3 blockSize(BLOCKSIZE_x*BLOCKSIZE_y, 1, BLOCKSIZE_z);

    make_dense_kernel<<<gridSize, blockSize>>>(kernel, input, output, size_input, size_output);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
}
#endif

#ifdef __CUDACC__
extern "C"
#endif
void make_dense_cpu(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output) {
    // input[size_input]
    // output[size_output]
    float f;

    for (int i=0; i < size_output; i++) {
        f = kernel->bias[i];
        for (int j=0; j < size_input; j++) {
            f += kernel->weights[j][i]*input[j];
        }
        output[i] = f;
    }
}

#ifdef __CUDACC__
extern "C"
#endif
void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output) {
    #ifndef __CUDACC__
    make_dense_cpu(kernel, input, output, size_input, size_output);
    #else
    make_dense_device(kernel, input, output, size_input, size_output);
    #endif
}
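
/*
 * For reference, the dense (fully connected) layer above computes, for each
 * output neuron i:
 *
 *     output[i] = kernel->bias[i] + sum_{j=0..size_input-1} kernel->weights[j][i] * input[j]
 *
 * i.e. the weight matrix is stored input-index first ([size_input][size_output]).
 * Tiny example: input = {1, 2}, weights = {{3}, {4}}, bias = {0.5}
 * gives output[0] = 0.5 + 1*3 + 2*4 = 11.5.
 */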

/*
* Dense linearized
*/

#ifdef __CUDACC__
__global__ void make_dense_linearized_kernel(float** weights, float* bias, float*** input, float* output, int depth_input, int dim_input, int size_output) {
    // Equivalent of l in the loop performed by the CPU version
    int idx = threadIdx.x + blockDim.x*blockIdx.x; // < size_output

    if (idx >= size_output) {
        return;
    }
    float f = bias[idx];

    for (int i=0; i < depth_input; i++) {
        for (int j=0; j < dim_input; j++) {
            for (int k=0; k < dim_input; k++) {
                // Row index of the flattened [depth_input][dim_input][dim_input] input
                f += input[i][j][k]*weights[k + j*dim_input + i*dim_input*dim_input][idx];
            }
        }
    }
    output[idx] = f;
}

void make_dense_linearized_device(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
    // Make computation
    dim3 gridSize(i_div_up(size_output, BLOCKSIZE_x*BLOCKSIZE_y), 1, 1);
    dim3 blockSize(BLOCKSIZE_x*BLOCKSIZE_y, 1, BLOCKSIZE_z);

    make_dense_linearized_kernel<<<gridSize, blockSize>>>(kernel->weights, kernel->bias, input, output, depth_input, dim_input, size_output);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
}
#endif

void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
    // input[depth_input][dim_input][dim_input]
    // output[size_output]
    float f;

    for (int l=0; l < size_output; l++) {
        f = kernel->bias[l];
        for (int i=0; i < depth_input; i++) {
            for (int j=0; j < dim_input; j++) {
                for (int k=0; k < dim_input; k++) {
                    // Row index of the flattened [depth_input][dim_input][dim_input] input
                    f += input[i][j][k]*kernel->weights[k + j*dim_input + i*dim_input*dim_input][l];
                }
            }
        }
        output[l] = f;
    }
}

#ifdef __CUDACC__
extern "C"
#endif
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
    #ifndef __CUDACC__
    make_dense_linearized_cpu(kernel, input, output, depth_input, dim_input, size_output);
    #else
    make_dense_linearized_device(kernel, input, output, depth_input, dim_input, size_output);
    #endif
}
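
/*
 * Note on the weight indexing used above: input[i][j][k], of shape
 * [depth_input][dim_input][dim_input], is paired with row
 * k + j*dim_input + i*dim_input*dim_input of kernel->weights, i.e. a row-major
 * flattening giving dim_input*dim_input*depth_input rows and size_output columns.
 * This assumes kernel->weights was allocated and initialised with the same
 * flattening convention.
 */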