Compare commits

...

10 Commits

Author            SHA1        Message                                       Date
julienChemillier  321994df2b  Fix cuda compilation                          2023-05-13 22:42:13 +02:00
julienChemillier  c0808b9240  Change notation toward a consensus            2023-05-13 17:22:47 +02:00
julienChemillier  521a1bb729  Add definition of 'convolution_not_outside'   2023-05-13 15:50:01 +02:00
julienChemillier  3dd2e33fa9  Fix issues with the network not converging    2023-05-13 15:39:22 +02:00
julienChemillier  f316882eeb  Add 'stride' and 'padding' to the forward     2023-05-13 13:37:46 +02:00
julienChemillier  a68805894f  Change in comments of 'free.c' and 'free.h'   2023-05-13 11:17:32 +02:00
julienChemillier  7d3a7c1aff  Change a variable to a more explicit one      2023-05-13 10:41:24 +02:00
julienChemillier  fa169e3a37  Removal of useless comments                   2023-05-13 10:36:27 +02:00
julienChemillier  0a63988d3c  Removal of useless comments                   2023-05-13 10:30:29 +02:00
                  2790883586  Removal of warnings                           2023-05-13 10:05:54 +02:00
26 changed files with 687 additions and 491 deletions
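The diffs below thread `stride` and `padding` through the convolution and pooling code. As a reading aid, here is a minimal sketch (not part of the changeset; the helper name is illustrative) of the width relation that the various rearrangements in `creation.c`, `cnn.c` and `convolution.c` all derive from:

```c
#include <assert.h>
#include <stdio.h>

// Illustrative helper, not from the diff: the usual padded/strided width formula.
// creation.c writes it as (2*padding + input_width - (kernel_size - stride))/stride,
// which is the same expression rearranged.
int conv_output_width(int input_width, int kernel_size, int stride, int padding) {
    return (input_width + 2*padding - kernel_size)/stride + 1;
}

int main(void) {
    // LeNet-5-style sizes: 32x32 input, 5x5 kernel, stride 1, no padding -> 28x28.
    int output_width = conv_output_width(32, 5, 1, 0);
    assert(output_width == 28);
    // convolution.c recovers the input width from the output width the same way:
    // input_width = output_width*stride - 2*padding + k_size - stride.
    int recovered_input = output_width*1 - 2*0 + 5 - 1;
    assert(recovered_input == 32);
    printf("output_width=%d, recovered input_width=%d\n", output_width, recovered_input);
    return 0;
}
```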

View File

@ -173,8 +173,8 @@ Résultats pour un réseau assez conséquent, avec des images de 256x256 pixels:
<details>
```c
Network* create_large_network(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth) {
Network* network = create_network(16, learning_rate, dropout, activation, initialisation, input_dim, input_depth);
Network* create_large_network(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(16, learning_rate, dropout, activation, initialisation, input_width, input_depth);
add_convolution(network, 6, 258, activation);
add_convolution(network, 16, 256, activation);
add_average_pooling(network, 64);

View File

@ -358,16 +358,16 @@ void backward_dense(Kernel_nn* ker, float* input, float* input_z, float* output,
* Backward linearisation
*/
#ifdef __CUDACC__
__global__ void backward_linearisation_kernel_1(Kernel_nn* ker, float*** input, float* output, int depth_input, int dim_input, int size_output) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < depth_input
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < dim_input
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < dim_input
__global__ void backward_linearisation_kernel_1(Kernel_nn* ker, float*** input, float* output, int input_depth, int input_width, int size_output) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < input_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < input_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < input_width
if (idx >= depth_input || idy >= dim_input || idz >= dim_input) {
if (idx >= input_depth || idy >= input_width || idz >= input_width) {
return;
}
int id = idx*dim_input*dim_input + idy*dim_input + idz;
int id = idx*input_width*input_width + idy*input_width + idz;
for (int j=0; j < size_output; j++) {
ker->d_weights[id][j] += input[idx][idy][idz]*output[j];
@ -379,15 +379,15 @@ __global__ void backward_linearisation_kernel_1(Kernel_nn* ker, float*** input,
}
}
__global__ void backward_linearisation_kernel_2(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, funcPtr d_f) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < depth_input
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < dim_input
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < dim_input
__global__ void backward_linearisation_kernel_2(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, funcPtr d_f) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < input_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < input_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < input_width
if (idx >= depth_input || idy >= dim_input || idz >= dim_input) {
if (idx >= input_depth || idy >= input_width || idz >= input_width) {
return;
}
int id = idx*dim_input*dim_input + idy*dim_input + idz;
int id = idx*input_width*input_width + idy*input_width + idz;
float tmp=0;
for (int j=0; j < size_output; j++) {
@ -396,12 +396,12 @@ __global__ void backward_linearisation_kernel_2(Kernel_nn* ker, float*** input,
input[idx][idy][idz] = tmp*( (*d_f)(input_z[idx][idy][idz]) );
}
void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation) {
void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
// Make computation
dim3 gridSize(i_div_up(depth_input, BLOCKSIZE_x), i_div_up(dim_input, BLOCKSIZE_y), i_div_up(dim_input, BLOCKSIZE_y));
dim3 gridSize(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(input_width, BLOCKSIZE_y), i_div_up(input_width, BLOCKSIZE_y));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
backward_linearisation_kernel_1<<<gridSize, blockSize>>>(ker, input, output, depth_input, dim_input, size_output);
backward_linearisation_kernel_1<<<gridSize, blockSize>>>(ker, input, output, input_depth, input_width, size_output);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
@ -409,14 +409,14 @@ void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** inpu
// Second kernel
funcPtr d_function = get_activation_function_cuda(activation);
backward_linearisation_kernel_2<<<gridSize, blockSize>>>(ker, input, input_z, output, depth_input, dim_input, size_output, d_function);
backward_linearisation_kernel_2<<<gridSize, blockSize>>>(ker, input, input_z, output, input_depth, input_width, size_output, d_function);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation) {
void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
funcPtr d_function = get_activation_function(activation);
@ -427,9 +427,9 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
// Weights
int cpt = 0;
for (int i=0; i < depth_input; i++) {
for (int k=0; k < dim_input; k++) {
for (int l=0; l < dim_input; l++) {
for (int i=0; i < input_depth; i++) {
for (int k=0; k < input_width; k++) {
for (int l=0; l < input_width; l++) {
for (int j=0; j < size_output; j++) {
ker->d_weights[cpt][j] += input[i][k][l]*output[j];
}
@ -440,9 +440,9 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
// Input
cpt = 0;
for (int i=0; i < depth_input; i++) {
for (int k=0; k < dim_input; k++) {
for (int l=0; l < dim_input; l++) {
for (int i=0; i < input_depth; i++) {
for (int k=0; k < input_width; k++) {
for (int l=0; l < input_width; l++) {
float tmp=0;
for (int j=0; j < size_output; j++) {
tmp += output[j]*ker->weights[cpt][j];
@ -457,11 +457,11 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
#ifdef __CUDACC__
extern "C"
#endif
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation) {
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
#ifndef __CUDACC__
backward_linearisation_cpu(ker, input, input_z, output, depth_input, dim_input, size_output, activation);
backward_linearisation_cpu(ker, input, input_z, output, input_depth, input_width, size_output, activation);
#else
backward_linearisation_device(ker, input, input_z, output, depth_input, dim_input, size_output, activation);
backward_linearisation_device(ker, input, input_z, output, input_depth, input_width, size_output, activation);
#endif
}
@ -469,18 +469,18 @@ void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, fl
* Backward convolution
*/
#ifdef __CUDACC__
__global__ void backward_convolution_dbias_kernel(Kernel_cnn* ker, float*** output, int depth_output, int dim_output) {
__global__ void backward_convolution_dbias_kernel(Kernel_cnn* ker, float*** output, int output_depth, int output_width) {
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idz = threadIdx.z + blockDim.z*blockIdx.z;
if (idx >= depth_output || idy >= dim_output || idz >= dim_output) {
if (idx >= output_depth || idy >= output_width || idz >= output_width) {
return;
}
ker->d_bias[idx][idy][idz] += output[idx][idy][idz];
}
__global__ void backward_convolution_dweight_kernel(Kernel_cnn* ker, float*** input, float*** output, int depth_input, int depth_output, int dim_output, int k_size) {
__global__ void backward_convolution_dweight_kernel(Kernel_cnn* ker, float*** input, float*** output, int input_depth, int output_depth, int output_width, int k_size) {
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idz = threadIdx.z + blockDim.z*blockIdx.z;
@ -488,35 +488,35 @@ __global__ void backward_convolution_dweight_kernel(Kernel_cnn* ker, float*** in
int idz1 = idz / k_size;
int idz2 = idz % k_size;
if (idx >= depth_input || idy >= depth_output || idz1 >= k_size || idz2 >= k_size) {
if (idx >= input_depth || idy >= output_depth || idz1 >= k_size || idz2 >= k_size) {
return;
}
float tmp = 0;
for (int l=0; l < dim_output; l++) {
for (int m=0; m < dim_output; m++) {
for (int l=0; l < output_width; l++) {
for (int m=0; m < output_width; m++) {
tmp += input[idx][l+idz1][m+idz2]*output[idy][l][m];
}
}
ker->d_weights[idx][idy][idz1][idz2] += tmp;
}
__global__ void backward_convolution_propagate_kernel(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int k_size, funcPtr d_f) {
__global__ void backward_convolution_propagate_kernel(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int k_size, funcPtr d_f) {
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idz = threadIdx.z + blockDim.z*blockIdx.z;
if (idx >= depth_input || idy >= dim_input || idz >= dim_input) {
if (idx >= input_depth || idy >= input_width || idz >= input_width) {
return;
}
int min_m, max_m, min_n, max_n;
float tmp = 0;
for (int l=0; l < depth_output; l++) {
for (int l=0; l < output_depth; l++) {
min_m = max(0, k_size-1-idy);
max_m = min(k_size, dim_input - idy);
max_m = min(k_size, input_width - idy);
min_n = max(0, k_size-1-idz);
max_n = min(k_size, dim_input-idz);
max_n = min(k_size, input_width-idz);
for (int m=min_m; m < max_m; m++) {
for (int n=min_n; n < max_n; n++) {
tmp += output[l][idy-k_size+m+1][idz-k_size+n+1]*ker->weights[idx][l][m][n];
@ -526,35 +526,35 @@ __global__ void backward_convolution_propagate_kernel(Kernel_cnn* ker, float***
input[idx][idy][idz] = tmp*( (*d_f)(input_z[idx][idy][idz]) );
}
void backward_convolution_device(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first) {
void backward_convolution_device(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first) {
// Bias Kernel
dim3 gridSize1(i_div_up(depth_output, BLOCKSIZE_x), i_div_up(dim_output, BLOCKSIZE_y), i_div_up(dim_output, BLOCKSIZE_y));
dim3 gridSize1(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_y));
dim3 blockSize1(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(ker, output, depth_output, dim_output);
backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(ker, output, output_depth, output_width);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Weights Kernel
int k_size = dim_input - dim_output +1;
int k_size = input_width - output_width +1;
dim3 gridSize2(i_div_up(depth_input, BLOCKSIZE_x), i_div_up(depth_output, BLOCKSIZE_y), i_div_up(k_size*k_size, BLOCKSIZE_y));
dim3 gridSize2(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(output_depth, BLOCKSIZE_y), i_div_up(k_size*k_size, BLOCKSIZE_y));
dim3 blockSize2(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(ker, input, output, depth_input, depth_output, dim_output, k_size);
backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(ker, input, output, input_depth, output_depth, output_width, k_size);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// input propagation Kernel
if (is_first != 1) {
dim3 gridSize3(i_div_up(depth_input, BLOCKSIZE_x), i_div_up(dim_input, BLOCKSIZE_y), i_div_up(dim_input, BLOCKSIZE_y));
dim3 gridSize3(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(input_width, BLOCKSIZE_y), i_div_up(input_width, BLOCKSIZE_y));
dim3 blockSize3(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
funcPtr d_function = get_activation_function_cuda(activation);
backward_convolution_propagate_kernel<<<gridSize3, blockSize3>>>(ker, input, input_z, output, depth_input, dim_input, depth_output, k_size, d_function);
backward_convolution_propagate_kernel<<<gridSize3, blockSize3>>>(ker, input, input_z, output, input_depth, input_width, output_depth, k_size, d_function);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
@ -563,29 +563,29 @@ void backward_convolution_device(Kernel_cnn* ker, float*** input, float*** input
#endif
void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first) {
void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first) {
funcPtr d_function = get_activation_function(activation);
// Bias
for (int i=0; i < depth_output; i++) {
for (int j=0; j < dim_output; j++) {
for (int k=0; k < dim_output; k++) {
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
ker->d_bias[i][j][k] += output[i][j][k];
}
}
}
// Weights
int k_size = dim_input - dim_output +1;
int k_size = input_width - output_width +1;
for (int h=0; h < depth_input; h++) {
for (int i=0; i < depth_output; i++) {
for (int h=0; h < input_depth; h++) {
for (int i=0; i < output_depth; i++) {
for (int j=0; j < k_size; j++) {
for (int k=0; k < k_size; k++) {
float tmp = 0;
for (int l=0; l < dim_output; l++) {
for (int m=0; m < dim_output; m++) {
for (int l=0; l < output_width; l++) {
for (int m=0; m < output_width; m++) {
tmp += input[h][l+j][m+k]*output[i][l][m];
}
}
@ -599,15 +599,15 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
if (is_first==1) // Pas besoin de backpropager dans l'input
return;
int min_m, max_m, min_n, max_n;
for (int i=0; i < depth_input; i++) {
for (int j=0; j < dim_input; j++) {
for (int k=0; k < dim_input; k++) {
for (int i=0; i < input_depth; i++) {
for (int j=0; j < input_width; j++) {
for (int k=0; k < input_width; k++) {
float tmp = 0;
for (int l=0; l < depth_output; l++) {
for (int l=0; l < output_depth; l++) {
min_m = max(0, k_size-1-j);
max_m = min(k_size, dim_input - j);
max_m = min(k_size, input_width - j);
min_n = max(0, k_size-1-k);
max_n = min(k_size, dim_input-k);
max_n = min(k_size, input_width-k);
for (int m=min_m; m < max_m; m++) {
for (int n=min_n; n < max_n; n++) {
tmp += output[l][j-k_size+m+1][k-k_size+n+1]*ker->weights[i][l][m][n];
@ -623,10 +623,10 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
#ifdef __CUDACC__
extern "C"
#endif
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first) {
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first) {
#ifndef __CUDACC__
backward_convolution_cpu(ker, input, input_z, output, depth_input, dim_input, depth_output, dim_output, activation, is_first);
backward_convolution_cpu(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first);
#else
backward_convolution_device(ker, input, input_z, output, depth_input, dim_input, depth_output, dim_output, activation, is_first);
backward_convolution_device(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first);
#endif
}
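Note that the backward pass above still recovers the kernel size as `k_size = input_width - output_width + 1`, which is the stride-1, zero-padding special case of the width relation; a tiny sanity check (illustrative, not from the diff):

```c
#include <assert.h>

int main(void) {
    // With stride 1 and no padding, output_width = input_width - k_size + 1,
    // so k_size = input_width - output_width + 1, as used in backward_convolution.
    int input_width = 32, output_width = 28;
    int k_size = input_width - output_width + 1;
    assert(k_size == 5);
    return 0;
}
```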

View File

@ -358,16 +358,16 @@ void backward_dense(Kernel_nn* ker, float* input, float* input_z, float* output,
* Backward linearisation
*/
#ifdef __CUDACC__
__global__ void backward_linearisation_kernel_1(Kernel_nn* ker, float*** input, float* output, int depth_input, int dim_input, int size_output) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < depth_input
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < dim_input
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < dim_input
__global__ void backward_linearisation_kernel_1(Kernel_nn* ker, float*** input, float* output, int input_depth, int input_width, int size_output) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < input_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < input_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < input_width
if (idx >= depth_input || idy >= dim_input || idz >= dim_input) {
if (idx >= input_depth || idy >= input_width || idz >= input_width) {
return;
}
int id = idx*dim_input*dim_input + idy*dim_input + idz;
int id = idx*input_width*input_width + idy*input_width + idz;
for (int j=0; j < size_output; j++) {
ker->d_weights[id][j] += input[idx][idy][idz]*output[j];
@ -379,15 +379,15 @@ __global__ void backward_linearisation_kernel_1(Kernel_nn* ker, float*** input,
}
}
__global__ void backward_linearisation_kernel_2(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, funcPtr d_f) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < depth_input
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < dim_input
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < dim_input
__global__ void backward_linearisation_kernel_2(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, funcPtr d_f) {
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < input_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < input_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < input_width
if (idx >= depth_input || idy >= dim_input || idz >= dim_input) {
if (idx >= input_depth || idy >= input_width || idz >= input_width) {
return;
}
int id = idx*dim_input*dim_input + idy*dim_input + idz;
int id = idx*input_width*input_width + idy*input_width + idz;
float tmp=0;
for (int j=0; j < size_output; j++) {
@ -396,12 +396,12 @@ __global__ void backward_linearisation_kernel_2(Kernel_nn* ker, float*** input,
input[idx][idy][idz] = tmp*( (*d_f)(input_z[idx][idy][idz]) );
}
void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation) {
void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
// Make computation
dim3 gridSize(i_div_up(depth_input, BLOCKSIZE_x), i_div_up(dim_input, BLOCKSIZE_y), i_div_up(dim_input, BLOCKSIZE_y));
dim3 gridSize(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(input_width, BLOCKSIZE_y), i_div_up(input_width, BLOCKSIZE_y));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
backward_linearisation_kernel_1<<<gridSize, blockSize>>>(ker, input, output, depth_input, dim_input, size_output);
backward_linearisation_kernel_1<<<gridSize, blockSize>>>(ker, input, output, input_depth, input_width, size_output);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
@ -409,14 +409,14 @@ void backward_linearisation_device(Kernel_nn* ker, float*** input, float*** inpu
// Second kernel
funcPtr d_function = get_activation_function_cuda(activation);
backward_linearisation_kernel_2<<<gridSize, blockSize>>>(ker, input, input_z, output, depth_input, dim_input, size_output, d_function);
backward_linearisation_kernel_2<<<gridSize, blockSize>>>(ker, input, input_z, output, input_depth, input_width, size_output, d_function);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation) {
void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
funcPtr d_function = get_activation_function(activation);
@ -427,9 +427,9 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
// Weights
int cpt = 0;
for (int i=0; i < depth_input; i++) {
for (int k=0; k < dim_input; k++) {
for (int l=0; l < dim_input; l++) {
for (int i=0; i < input_depth; i++) {
for (int k=0; k < input_width; k++) {
for (int l=0; l < input_width; l++) {
for (int j=0; j < size_output; j++) {
ker->d_weights[cpt][j] += input[i][k][l]*output[j];
}
@ -440,9 +440,9 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
// Input
cpt = 0;
for (int i=0; i < depth_input; i++) {
for (int k=0; k < dim_input; k++) {
for (int l=0; l < dim_input; l++) {
for (int i=0; i < input_depth; i++) {
for (int k=0; k < input_width; k++) {
for (int l=0; l < input_width; l++) {
float tmp=0;
for (int j=0; j < size_output; j++) {
tmp += output[j]*ker->weights[cpt][j];
@ -457,11 +457,11 @@ void backward_linearisation_cpu(Kernel_nn* ker, float*** input, float*** input_z
#ifdef __CUDACC__
extern "C"
#endif
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation) {
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation) {
#ifndef __CUDACC__
backward_linearisation_cpu(ker, input, input_z, output, depth_input, dim_input, size_output, activation);
backward_linearisation_cpu(ker, input, input_z, output, input_depth, input_width, size_output, activation);
#else
backward_linearisation_device(ker, input, input_z, output, depth_input, dim_input, size_output, activation);
backward_linearisation_device(ker, input, input_z, output, input_depth, input_width, size_output, activation);
#endif
}
@ -469,18 +469,18 @@ void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, fl
* Backward convolution
*/
#ifdef __CUDACC__
__global__ void backward_convolution_dbias_kernel(Kernel_cnn* ker, float*** output, int depth_output, int dim_output) {
__global__ void backward_convolution_dbias_kernel(Kernel_cnn* ker, float*** output, int output_depth, int output_width) {
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idz = threadIdx.z + blockDim.z*blockIdx.z;
if (idx >= depth_output || idy >= dim_output || idz >= dim_output) {
if (idx >= output_depth || idy >= output_width || idz >= output_width) {
return;
}
ker->d_bias[idx][idy][idz] += output[idx][idy][idz];
}
__global__ void backward_convolution_dweight_kernel(Kernel_cnn* ker, float*** input, float*** output, int depth_input, int depth_output, int dim_output, int k_size) {
__global__ void backward_convolution_dweight_kernel(Kernel_cnn* ker, float*** input, float*** output, int input_depth, int output_depth, int output_width, int k_size) {
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idz = threadIdx.z + blockDim.z*blockIdx.z;
@ -488,35 +488,35 @@ __global__ void backward_convolution_dweight_kernel(Kernel_cnn* ker, float*** in
int idz1 = idz / k_size;
int idz2 = idz % k_size;
if (idx >= depth_input || idy >= depth_output || idz1 >= k_size || idz2 >= k_size) {
if (idx >= input_depth || idy >= output_depth || idz1 >= k_size || idz2 >= k_size) {
return;
}
float tmp = 0;
for (int l=0; l < dim_output; l++) {
for (int m=0; m < dim_output; m++) {
for (int l=0; l < output_width; l++) {
for (int m=0; m < output_width; m++) {
tmp += input[idx][l+idz1][m+idz2]*output[idy][l][m];
}
}
ker->d_weights[idx][idy][idz1][idz2] += tmp;
}
__global__ void backward_convolution_propagate_kernel(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int k_size, funcPtr d_f) {
__global__ void backward_convolution_propagate_kernel(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int k_size, funcPtr d_f) {
int idx = threadIdx.x + blockDim.x*blockIdx.x;
int idy = threadIdx.y + blockDim.y*blockIdx.y;
int idz = threadIdx.z + blockDim.z*blockIdx.z;
if (idx >= depth_input || idy >= dim_input || idz >= dim_input) {
if (idx >= input_depth || idy >= input_width || idz >= input_width) {
return;
}
int min_m, max_m, min_n, max_n;
float tmp = 0;
for (int l=0; l < depth_output; l++) {
for (int l=0; l < output_depth; l++) {
min_m = max(0, k_size-1-idy);
max_m = min(k_size, dim_input - idy);
max_m = min(k_size, input_width - idy);
min_n = max(0, k_size-1-idz);
max_n = min(k_size, dim_input-idz);
max_n = min(k_size, input_width-idz);
for (int m=min_m; m < max_m; m++) {
for (int n=min_n; n < max_n; n++) {
tmp += output[l][idy-k_size+m+1][idz-k_size+n+1]*ker->weights[idx][l][m][n];
@ -526,35 +526,35 @@ __global__ void backward_convolution_propagate_kernel(Kernel_cnn* ker, float***
input[idx][idy][idz] = tmp*( (*d_f)(input_z[idx][idy][idz]) );
}
void backward_convolution_device(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first) {
void backward_convolution_device(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first) {
// Bias Kernel
dim3 gridSize1(i_div_up(depth_output, BLOCKSIZE_x), i_div_up(dim_output, BLOCKSIZE_y), i_div_up(dim_output, BLOCKSIZE_y));
dim3 gridSize1(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_y));
dim3 blockSize1(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(ker, output, depth_output, dim_output);
backward_convolution_dbias_kernel<<<gridSize1, blockSize1>>>(ker, output, output_depth, output_width);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Weights Kernel
int k_size = dim_input - dim_output +1;
int k_size = input_width - output_width +1;
dim3 gridSize2(i_div_up(depth_input, BLOCKSIZE_x), i_div_up(depth_output, BLOCKSIZE_y), i_div_up(k_size*k_size, BLOCKSIZE_y));
dim3 gridSize2(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(output_depth, BLOCKSIZE_y), i_div_up(k_size*k_size, BLOCKSIZE_y));
dim3 blockSize2(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(ker, input, output, depth_input, depth_output, dim_output, k_size);
backward_convolution_dweight_kernel<<<gridSize2, blockSize2>>>(ker, input, output, input_depth, output_depth, output_width, k_size);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// input propagation Kernel
if (is_first != 1) {
dim3 gridSize3(i_div_up(depth_input, BLOCKSIZE_x), i_div_up(dim_input, BLOCKSIZE_y), i_div_up(dim_input, BLOCKSIZE_y));
dim3 gridSize3(i_div_up(input_depth, BLOCKSIZE_x), i_div_up(input_width, BLOCKSIZE_y), i_div_up(input_width, BLOCKSIZE_y));
dim3 blockSize3(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
funcPtr d_function = get_activation_function_cuda(activation);
backward_convolution_propagate_kernel<<<gridSize3, blockSize3>>>(ker, input, input_z, output, depth_input, dim_input, depth_output, k_size, d_function);
backward_convolution_propagate_kernel<<<gridSize3, blockSize3>>>(ker, input, input_z, output, input_depth, input_width, output_depth, k_size, d_function);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
@ -563,29 +563,29 @@ void backward_convolution_device(Kernel_cnn* ker, float*** input, float*** input
#endif
void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first) {
void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first) {
funcPtr d_function = get_activation_function(activation);
// Bias
for (int i=0; i < depth_output; i++) {
for (int j=0; j < dim_output; j++) {
for (int k=0; k < dim_output; k++) {
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
ker->d_bias[i][j][k] += output[i][j][k];
}
}
}
// Weights
int k_size = dim_input - dim_output +1;
int k_size = input_width - output_width +1;
for (int h=0; h < depth_input; h++) {
for (int i=0; i < depth_output; i++) {
for (int h=0; h < input_depth; h++) {
for (int i=0; i < output_depth; i++) {
for (int j=0; j < k_size; j++) {
for (int k=0; k < k_size; k++) {
float tmp = 0;
for (int l=0; l < dim_output; l++) {
for (int m=0; m < dim_output; m++) {
for (int l=0; l < output_width; l++) {
for (int m=0; m < output_width; m++) {
tmp += input[h][l+j][m+k]*output[i][l][m];
}
}
@ -599,15 +599,15 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
if (is_first==1) // Pas besoin de backpropager dans l'input
return;
int min_m, max_m, min_n, max_n;
for (int i=0; i < depth_input; i++) {
for (int j=0; j < dim_input; j++) {
for (int k=0; k < dim_input; k++) {
for (int i=0; i < input_depth; i++) {
for (int j=0; j < input_width; j++) {
for (int k=0; k < input_width; k++) {
float tmp = 0;
for (int l=0; l < depth_output; l++) {
for (int l=0; l < output_depth; l++) {
min_m = max(0, k_size-1-j);
max_m = min(k_size, dim_input - j);
max_m = min(k_size, input_width - j);
min_n = max(0, k_size-1-k);
max_n = min(k_size, dim_input-k);
max_n = min(k_size, input_width-k);
for (int m=min_m; m < max_m; m++) {
for (int n=min_n; n < max_n; n++) {
tmp += output[l][j-k_size+m+1][k-k_size+n+1]*ker->weights[i][l][m][n];
@ -623,10 +623,10 @@ void backward_convolution_cpu(Kernel_cnn* ker, float*** input, float*** input_z,
#ifdef __CUDACC__
extern "C"
#endif
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first) {
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first) {
#ifndef __CUDACC__
backward_convolution_cpu(ker, input, input_z, output, depth_input, dim_input, depth_output, dim_output, activation, is_first);
backward_convolution_cpu(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first);
#else
backward_convolution_device(ker, input, input_z, output, depth_input, dim_input, depth_output, dim_output, activation, is_first);
backward_convolution_device(ker, input, input_z, output, input_depth, input_width, output_depth, output_width, activation, is_first);
#endif
}

View File

@ -1,7 +1,7 @@
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h> // Is it used ?
#include <float.h>
#include <math.h>
#include "../common/include/memory_management.h"
@ -9,6 +9,7 @@
#include "../common/include/utils.h"
#include "include/backpropagation.h"
#include "include/initialisation.h"
#include "include/convolution.h"
#include "include/function.h"
#include "include/creation.h"
#include "include/update.h"
@ -177,6 +178,8 @@ void forward_propagation(Network* network) {
int activation = k_i->activation;
int pooling = k_i->pooling;
int stride = k_i->stride;
int padding = k_i->padding;
if (k_i->nn) {
drop_neurones(input, 1, 1, input_width, network->dropout);
@ -189,29 +192,33 @@ void forward_propagation(Network* network) {
* On copie les valeurs de output dans output_z, puis on applique la fonction d'activation à output_z
*/
if (k_i->cnn) { // Convolution
make_convolution(k_i->cnn, input, output, output_width, 1);
make_convolution(k_i->cnn, input, output, output_width, stride, padding);
copy_3d_array(output, output_z, output_depth, output_width, output_width);
apply_function_to_matrix(activation, output, output_depth, output_width);
}
else if (k_i->nn) { // Full connection
if (k_i->linearisation == DOESNT_LINEARISE) { // Vecteur -> Vecteur
make_dense(k_i->nn, input[0][0], output[0][0], input_width, output_width);
} else { // Matrice -> Vecteur
}
else { // Matrice -> Vecteur
make_dense_linearized(k_i->nn, input, output[0][0], input_depth, input_width, output_width);
}
copy_3d_array(output, output_z, 1, 1, output_width);
apply_function_to_vector(activation, output, output_width);
}
else { // Pooling
int kernel_size = 2*padding + input_width + stride - output_width*stride;
if (i == n-2) {
printf_error("Le réseau ne peut pas finir par un pooling layer\n");
return;
} else { // Pooling sur une matrice
if (pooling == AVG_POOLING) {
make_average_pooling(input, output, input_width/output_width, output_depth, output_width, input_width/output_width);
} else if (pooling == MAX_POOLING) {
make_max_pooling(input, output, input_width/output_width, output_depth, output_width, input_width/output_width);
} else {
make_average_pooling(input, output, kernel_size, output_depth, output_width, stride, padding);
}
else if (pooling == MAX_POOLING) {
make_max_pooling(input, output, kernel_size, output_depth, output_width, stride, padding);
}
else {
printf_error("Impossible de reconnaître le type de couche de pooling: ");
printf("identifiant: %d, position: %d\n", pooling, i);
}
@ -249,14 +256,15 @@ void backward_propagation(Network* network, int wanted_number) {
int output_depth = network->depth[i+1];
int output_width = network->width[i+1];
int activation = i==0?SIGMOID:network->kernel[i-1]->activation;
int is_last_layer = i==0;
int activation = is_last_layer?SIGMOID:network->kernel[i-1]->activation;
if (k_i->cnn) { // Convolution
backward_convolution(k_i->cnn, input, input_z, output, input_depth, input_width, output_depth, output_width, -activation, i==0);
backward_convolution(k_i->cnn, input, input_z, output, input_depth, input_width, output_depth, output_width, -activation, is_last_layer);
} else if (k_i->nn) { // Full connection
if (k_i->linearisation == DOESNT_LINEARISE) { // Vecteur -> Vecteur
backward_dense(k_i->nn, input[0][0], input_z[0][0], output[0][0], input_width, output_width, -activation, i==0);
backward_dense(k_i->nn, input[0][0], input_z[0][0], output[0][0], input_width, output_width, -activation, is_last_layer);
} else { // Matrice -> vecteur
backward_linearisation(k_i->nn, input, input_z, output[0][0], input_depth, input_width, output_width, -activation);
}
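In `forward_propagation` above, the pooling kernel size is recovered from the layer widths rather than stored; a small check of that inversion (illustrative numbers, not from the diff):

```c
#include <assert.h>

int main(void) {
    // A 2x2 average pooling with stride 2 and no padding maps 28 -> 14.
    int input_width = 28, output_width = 14, stride = 2, padding = 0;
    // Same expression as in forward_propagation:
    int kernel_size = 2*padding + input_width + stride - output_width*stride;
    assert(kernel_size == 2);
    return 0;
}
```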

View File

@ -7,25 +7,36 @@
#include "include/config.h"
#ifdef __CUDACC__
__host__ __device__
#endif
int convolution_not_outside(int x, int y, int lower_bound, int upper_bound) {
return !(x < lower_bound || y < lower_bound || x >= upper_bound || y>= upper_bound);
}
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
// c'est le kernel de input
// input[kernel->rows][kernel_k_size + output_dim-1][kernel_k_size + output_dim-1]
// output[kernel->columns][output_dim][output_dim]
// input[kernel->rows][kernel_k_size + output_width-1][kernel_k_size + output_width-1]
// output[kernel->columns][output_width][output_width]
int k_size = kernel->k_size;
int k_columns = kernel->columns;
int k_rows = kernel->rows;
int max_move = kernel->k_size - padding;
int input_width = output_width*stride - 2*padding + kernel->k_size - stride;
float f;
for (int i=0; i < k_columns; i++) { // filtre
for (int j=0; j < output_dim; j++) { // ligne de sortie
for (int k=0; k < output_dim; k++) { // colonne de sortie
for (int j=0; j < output_width; j++) { // ligne de sortie
for (int k=0; k < output_width; k++) { // colonne de sortie
f = kernel->bias[i][j][k];
for (int a=0; a < k_rows; a++) { // Canal de couleur
for (int b=0; b < k_size; b++) { // ligne du filtre
for (int c=0; c < k_size; c++) { // colonne du filtre
f += kernel->weights[a][i][b][c]*input[a][stride*j+b][stride*k+c];
for (int b=-padding; b < max_move; b++) { // ligne du filtre
for (int c=-padding; c < max_move; c++) { // colonne du filtre
int x = (stride*j+b);
int y = (stride*k+c);
if (convolution_not_outside(x, y, 0, input_width)) {
f += kernel->weights[a][i][b][c]*input[a][stride*j+b][stride*k+c];
}
}
}
}
@ -37,22 +48,28 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
#ifdef __CUDACC__
__global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
__global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < kernel->columns
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < min(output_dim, k_size)
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < min(output_dim, k_size)
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < min(output_width, k_size)
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < min(output_width, k_size)
int max_move = kernel->k_size - padding;
int input_width = output_width*stride - 2*padding + kernel->k_size - stride;
if (idx >= kernel->columns || idy >= output_dim || idz >= output_dim) {
if (idx >= kernel->columns || idy >= output_width || idz >= output_width) {
return;
}
float f = kernel->bias[idx][idy][idz];
for (int a=0; a < kernel->rows; a++) {
for (int b=0; b < kernel->k_size; b++) {
for (int c=0; c < kernel->k_size; c++) {
f += kernel->weights[a][idx][b][c]*input[a][idy*stride+b][idz*stride+c];
for (int b=-padding; b < max_move; b++) {
for (int c=-padding; c < max_move; c++) {
int idy_2 = idy*stride+b;
int idz_2 = idz*stride+c;
if (convolution_not_outside(idy_2, idz_2, 0, input_width)) {
f += kernel->weights[a][idx][b][c]*input[a][idy_2][idz_2];
}
}
}
}
@ -60,21 +77,24 @@ __global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, floa
output[idx][idy][idz] = f;
}
void make_convolution_device(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
void make_convolution_device(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
// Make computation
dim3 gridSize(i_div_up(kernel->columns, BLOCKSIZE_x), i_div_up(output_dim, BLOCKSIZE_y), i_div_up(output_dim, BLOCKSIZE_z));
dim3 gridSize(i_div_up(kernel->columns, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
make_convolution_kernel<<<gridSize, blockSize>>>(kernel, input, output, output_dim, stride);
make_convolution_kernel<<<gridSize, blockSize>>>(kernel, input, output, output_width, stride, padding);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
#ifdef __CUDACC__
extern "C"
#endif
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
#ifndef __CUDACC__
make_convolution_cpu(kernel, input, output, output_dim, stride);
make_convolution_cpu(kernel, input, output, output_width, stride, padding);
#else
make_convolution_device(kernel, input, output, output_dim, stride);
make_convolution_device(kernel, input, output, output_width, stride, padding);
#endif
}

View File

@ -7,25 +7,40 @@
#include "include/config.h"
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
// c'est le kernel de input
// input[kernel->rows][kernel_k_size + output_dim-1][kernel_k_size + output_dim-1]
// output[kernel->columns][output_dim][output_dim]
#ifdef __CUDACC__
__host__ __device__
#endif
int convolution_not_outside(int x, int y, int lower_bound, int upper_bound) {
// On renvoie true si et seulement si _ et _:
// lower_bound <= x < upper_bound
// lower_bound <= y < upper_bound
return !(x < lower_bound || y < lower_bound || x >= upper_bound || y>= upper_bound);
}
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
// c'est le kernel de input
// input[kernel->rows][kernel_k_size + output_width-1][kernel_k_size + output_width-1]
// output[kernel->columns][output_width][output_width]
int k_size = kernel->k_size;
int k_columns = kernel->columns;
int k_rows = kernel->rows;
int max_move = kernel->k_size - padding;
int input_width = output_width*stride - 2*padding + kernel->k_size - stride;
float f;
for (int i=0; i < k_columns; i++) { // filtre
for (int j=0; j < output_dim; j++) { // ligne de sortie
for (int k=0; k < output_dim; k++) { // colonne de sortie
for (int j=0; j < output_width; j++) { // ligne de sortie
for (int k=0; k < output_width; k++) { // colonne de sortie
f = kernel->bias[i][j][k];
for (int a=0; a < k_rows; a++) { // Canal de couleur
for (int b=0; b < k_size; b++) { // ligne du filtre
for (int c=0; c < k_size; c++) { // colonne du filtre
f += kernel->weights[a][i][b][c]*input[a][stride*j+b][stride*k+c];
for (int b=-padding; b < max_move; b++) { // ligne du filtre
for (int c=-padding; c < max_move; c++) { // colonne du filtre
int x = (stride*j+b);
int y = (stride*k+c);
if (convolution_not_outside(x, y, 0, input_width)) {
f += kernel->weights[a][i][b][c]*input[a][stride*j+b][stride*k+c];
}
}
}
}
@ -37,22 +52,28 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
#ifdef __CUDACC__
__global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
__global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < kernel->columns
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < min(output_dim, k_size)
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < min(output_dim, k_size)
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < min(output_width, k_size)
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < min(output_width, k_size)
int max_move = kernel->k_size - padding;
int input_width = output_width*stride - 2*padding + kernel->k_size - stride;
if (idx >= kernel->columns || idy >= output_dim || idz >= output_dim) {
if (idx >= kernel->columns || idy >= output_width || idz >= output_width) {
return;
}
float f = kernel->bias[idx][idy][idz];
for (int a=0; a < kernel->rows; a++) {
for (int b=0; b < kernel->k_size; b++) {
for (int c=0; c < kernel->k_size; c++) {
f += kernel->weights[a][idx][b][c]*input[a][idy*stride+b][idz*stride+c];
for (int b=-padding; b < max_move; b++) {
for (int c=-padding; c < max_move; c++) {
int idy_2 = idy*stride+b;
int idz_2 = idz*stride+c;
if (convolution_not_outside(idy_2, idz_2, 0, input_width)) {
f += kernel->weights[a][idx][b][c]*input[a][idy_2][idz_2];
}
}
}
}
@ -60,21 +81,24 @@ __global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, floa
output[idx][idy][idz] = f;
}
void make_convolution_device(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
void make_convolution_device(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
// Make computation
dim3 gridSize(i_div_up(kernel->columns, BLOCKSIZE_x), i_div_up(output_dim, BLOCKSIZE_y), i_div_up(output_dim, BLOCKSIZE_z));
dim3 gridSize(i_div_up(kernel->columns, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
make_convolution_kernel<<<gridSize, blockSize>>>(kernel, input, output, output_dim, stride);
make_convolution_kernel<<<gridSize, blockSize>>>(kernel, input, output, output_width, stride, padding);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride) {
#ifdef __CUDACC__
extern "C"
#endif
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding) {
#ifndef __CUDACC__
make_convolution_cpu(kernel, input, output, output_dim, stride);
make_convolution_cpu(kernel, input, output, output_width, stride, padding);
#else
make_convolution_device(kernel, input, output, output_dim, stride);
make_convolution_device(kernel, input, output, output_width, stride, padding);
#endif
}
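The padding support in `make_convolution_cpu` above works by letting the filter indices run from `-padding` to `k_size - padding` and skipping taps that fall outside the input. A reduced standalone sketch of the same pattern (illustrative names, 1-D case for brevity, own weight indexing):

```c
#include <assert.h>

// 1-D analogue of the bounds check added as convolution_not_outside().
static int inside(int x, int lower_bound, int upper_bound) {
    return !(x < lower_bound || x >= upper_bound);
}

int main(void) {
    // Input of width 4, kernel of size 3, stride 1, padding 1 -> output width 4.
    float input[4] = {1, 2, 3, 4};
    float weights[3] = {1, 1, 1};
    int k_size = 3, stride = 1, padding = 1, input_width = 4;
    int output_width = (input_width + 2*padding - k_size)/stride + 1;  // = 4
    float output[4] = {0};
    for (int j = 0; j < output_width; j++) {
        for (int b = -padding; b < k_size - padding; b++) {  // same loop bounds as the diff
            int x = stride*j + b;
            if (inside(x, 0, input_width)) {                 // zero padding: skip taps outside
                output[j] += weights[b + padding] * input[x];
            }
        }
    }
    assert(output[0] == 3 && output[1] == 6 && output[2] == 9 && output[3] == 7);
    return 0;
}
```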

View File

@ -9,7 +9,7 @@
#include "include/creation.h"
Network* create_network(int max_size, float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth) {
Network* create_network(int max_size, float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
if (dropout < 0 || dropout > 100) {
printf_error("La probabilité de dropout n'est pas respecté, elle doit être comprise entre 0 et 100\n");
}
@ -29,29 +29,29 @@ Network* create_network(int max_size, float learning_rate, int dropout, int acti
}
network->kernel[0]->linearisation = DOESNT_LINEARISE;
network->kernel[0]->activation = activation;
network->width[0] = input_dim;
network->width[0] = input_width;
network->depth[0] = input_depth;
network->kernel[0]->nn = NULL;
network->kernel[0]->cnn = NULL;
create_a_cube_input_layer(network, 0, input_depth, input_dim);
create_a_cube_input_z_layer(network, 0, input_depth, input_dim);
create_a_cube_input_layer(network, 0, input_depth, input_width);
create_a_cube_input_z_layer(network, 0, input_depth, input_width);
return network;
}
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth) {
Network* network = create_network(8, learning_rate, dropout, activation, initialisation, input_dim, input_depth);
add_convolution(network, 6, 28, activation);
add_average_pooling(network, 14);
add_convolution(network, 16, 10, activation);
add_average_pooling(network, 5);
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(8, learning_rate, dropout, activation, initialisation, input_width, input_depth);
add_convolution(network, 5, 6, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_convolution(network, 5, 16, 1, 0, activation);
add_average_pooling(network, 2, 2, 0);
add_dense_linearisation(network, 120, activation);
add_dense(network, 84, activation);
add_dense(network, 10, SOFTMAX);
return network;
}
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth) {
Network* network = create_network(3, learning_rate, dropout, activation, initialisation, input_dim, input_depth);
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth) {
Network* network = create_network(3, learning_rate, dropout, activation, initialisation, input_width, input_depth);
add_dense_linearisation(network, 80, activation);
add_dense(network, 10, SOFTMAX);
return network;
@ -97,86 +97,92 @@ void create_a_line_input_z_layer(Network* network, int pos, int dim) {
network->depth[pos] = 1;
}
void add_average_pooling(Network* network, int dim_output) {
void add_average_pooling(Network* network, int kernel_size, int stride, int padding) {
int n = network->size;
int k_pos = n-1;
int dim_input = network->width[k_pos];
if (network->max_size == n) {
printf_error("Impossible de rajouter une couche d'average pooling, le réseau est déjà plein\n");
return;
}
if (dim_input%dim_output != 0) {
printf_error("Dimension de l'average pooling incorrecte\n");
return;
}
int input_width = network->width[k_pos];
int output_width = (2*padding + input_width - (kernel_size - stride))/stride;
network->kernel[k_pos]->cnn = NULL;
network->kernel[k_pos]->nn = NULL;
network->kernel[k_pos]->stride = stride;
network->kernel[k_pos]->padding = padding;
network->kernel[k_pos]->activation = IDENTITY; // Ne contient pas de fonction d'activation
network->kernel[k_pos]->linearisation = DOESNT_LINEARISE;
network->kernel[k_pos]->pooling = AVG_POOLING;
create_a_cube_input_layer(network, n, network->depth[n-1], network->width[n-1]/2);
create_a_cube_input_z_layer(network, n, network->depth[n-1], network->width[n-1]/2); // Will it be used ?
create_a_cube_input_layer(network, n, network->depth[n-1], output_width);
create_a_cube_input_z_layer(network, n, network->depth[n-1], output_width);
network->size++;
}
void add_max_pooling(Network* network, int dim_output) {
void add_max_pooling(Network* network, int kernel_size, int stride, int padding) {
int n = network->size;
int k_pos = n-1;
int dim_input = network->width[k_pos];
if (network->max_size == n) {
printf_error("Impossible de rajouter une couche de max pooling, le réseau est déjà plein\n");
return;
}
if (dim_input%dim_output != 0) {
printf_error("Dimension du max pooling incorrecte\n");
return;
}
int input_width = network->width[k_pos];
int output_width = (2*padding + input_width - (kernel_size - stride))/stride;
network->kernel[k_pos]->cnn = NULL;
network->kernel[k_pos]->nn = NULL;
network->kernel[k_pos]->activation = IDENTITY; // Ne contient pas de fonction d'activation
network->kernel[k_pos]->linearisation = DOESNT_LINEARISE;
network->kernel[k_pos]->pooling = MAX_POOLING;
create_a_cube_input_layer(network, n, network->depth[n-1], network->width[n-1]/2);
create_a_cube_input_z_layer(network, n, network->depth[n-1], network->width[n-1]/2); // Will it be used ?
create_a_cube_input_layer(network, n, network->depth[n-1], output_width);
create_a_cube_input_z_layer(network, n, network->depth[n-1], output_width);
network->size++;
}
void add_convolution(Network* network, int depth_output, int dim_output, int activation) {
void add_convolution(Network* network, int kernel_size, int number_of_kernels, int stride, int padding, int activation) {
int n = network->size;
int k_pos = n-1;
if (network->max_size == n) {
printf_error("Impossible de rajouter une couche de convolution, le réseau est déjà plein \n");
return;
}
int depth_input = network->depth[k_pos];
int dim_input = network->width[k_pos];
int input_depth = network->depth[k_pos];
int input_width = network->width[k_pos];
int output_width = (2*padding + input_width - (kernel_size - stride))/stride;
int output_depth = number_of_kernels;
int bias_size = output_width;
int bias_size = dim_output;
int kernel_size = dim_input - dim_output +1;
network->kernel[k_pos]->nn = NULL;
network->kernel[k_pos]->stride = stride;
network->kernel[k_pos]->padding = padding;
network->kernel[k_pos]->activation = activation;
network->kernel[k_pos]->linearisation = DOESNT_LINEARISE;
network->kernel[k_pos]->pooling = NO_POOLING;
network->kernel[k_pos]->cnn = (Kernel_cnn*)nalloc(1, sizeof(Kernel_cnn));
Kernel_cnn* cnn = network->kernel[k_pos]->cnn;
cnn->k_size = kernel_size;
cnn->rows = depth_input;
cnn->columns = depth_output;
cnn->weights = (float****)nalloc(depth_input, sizeof(float***));
cnn->d_weights = (float****)nalloc(depth_input, sizeof(float***));
cnn->rows = input_depth;
cnn->columns = output_depth;
cnn->weights = (float****)nalloc(input_depth, sizeof(float***));
cnn->d_weights = (float****)nalloc(input_depth, sizeof(float***));
#ifdef ADAM_CNN_WEIGHTS
cnn->s_d_weights = (float****)nalloc(depth_input, sizeof(float***));
cnn->v_d_weights = (float****)nalloc(depth_input, sizeof(float***));
cnn->s_d_weights = (float****)nalloc(input_depth, sizeof(float***));
cnn->v_d_weights = (float****)nalloc(input_depth, sizeof(float***));
#endif
for (int i=0; i < depth_input; i++) {
cnn->weights[i] = (float***)nalloc(depth_output, sizeof(float**));
cnn->d_weights[i] = (float***)nalloc(depth_output, sizeof(float**));
for (int i=0; i < input_depth; i++) {
cnn->weights[i] = (float***)nalloc(output_depth, sizeof(float**));
cnn->d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
#ifdef ADAM_CNN_WEIGHTS
cnn->s_d_weights[i] = (float***)nalloc(depth_output, sizeof(float**));
cnn->v_d_weights[i] = (float***)nalloc(depth_output, sizeof(float**));
cnn->s_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
cnn->v_d_weights[i] = (float***)nalloc(output_depth, sizeof(float**));
#endif
for (int j=0; j < depth_output; j++) {
for (int j=0; j < output_depth; j++) {
cnn->weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
cnn->d_weights[i][j] = (float**)nalloc(kernel_size, sizeof(float*));
#ifdef ADAM_CNN_WEIGHTS
@ -200,13 +206,14 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
}
}
}
cnn->bias = (float***)nalloc(depth_output, sizeof(float**));
cnn->d_bias = (float***)nalloc(depth_output, sizeof(float**));
cnn->bias = (float***)nalloc(output_depth, sizeof(float**));
cnn->d_bias = (float***)nalloc(output_depth, sizeof(float**));
#ifdef ADAM_CNN_BIAS
cnn->s_d_bias = (float***)nalloc(depth_output, sizeof(float**));
cnn->v_d_bias = (float***)nalloc(depth_output, sizeof(float**));
cnn->s_d_bias = (float***)nalloc(output_depth, sizeof(float**));
cnn->v_d_bias = (float***)nalloc(output_depth, sizeof(float**));
#endif
for (int i=0; i < depth_output; i++) {
for (int i=0; i < output_depth; i++) {
cnn->bias[i] = (float**)nalloc(bias_size, sizeof(float*));
cnn->d_bias[i] = (float**)nalloc(bias_size, sizeof(float*));
#ifdef ADAM_CNN_BIAS
@ -229,12 +236,13 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
}
}
}
int n_in = network->width[n-1]*network->width[n-1]*network->depth[n-1];
int n_out = network->width[n]*network->width[n]*network->depth[n];
initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, dim_output, dim_output, n_in, n_out);
initialisation_4d_matrix(network->initialisation, cnn->weights, depth_input, depth_output, kernel_size, kernel_size, n_in, n_out);
create_a_cube_input_layer(network, n, depth_output, bias_size);
create_a_cube_input_z_layer(network, n, depth_output, bias_size);
initialisation_3d_matrix(network->initialisation, cnn->bias, output_depth, output_width, output_width, n_in, n_out);
initialisation_4d_matrix(network->initialisation, cnn->weights, input_depth, output_depth, kernel_size, kernel_size, n_in, n_out);
create_a_cube_input_layer(network, n, output_depth, bias_size);
create_a_cube_input_z_layer(network, n, output_depth, bias_size);
network->size++;
}
@ -247,13 +255,17 @@ void add_dense(Network* network, int size_output, int activation) {
return;
}
network->kernel[k_pos]->cnn = NULL;
network->kernel[k_pos]->nn = (Kernel_nn*)nalloc(1, sizeof(Kernel_nn));
Kernel_nn* nn = network->kernel[k_pos]->nn;
network->kernel[k_pos]->stride = -1; // N'est pas utilisé dans une couche dense
network->kernel[k_pos]->padding = -1; // N'est pas utilisé dans une couche dense
network->kernel[k_pos]->activation = activation;
network->kernel[k_pos]->linearisation = DOESNT_LINEARISE;
network->kernel[k_pos]->pooling = NO_POOLING;
network->kernel[k_pos]->nn = (Kernel_nn*)nalloc(1, sizeof(Kernel_nn));
Kernel_nn* nn = network->kernel[k_pos]->nn;
nn->size_input = size_input;
nn->size_output = size_output;
nn->bias = (float*)nalloc(size_output, sizeof(float));
nn->d_bias = (float*)nalloc(size_output, sizeof(float));
#ifdef ADAM_DENSE_BIAS
@ -289,7 +301,7 @@ void add_dense(Network* network, int size_output, int activation) {
#endif
}
}
initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input, size_output);
initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
create_a_line_input_layer(network, n, size_output);
@ -308,11 +320,14 @@ void add_dense_linearisation(Network* network, int size_output, int activation)
return;
}
network->kernel[k_pos]->cnn = NULL;
network->kernel[k_pos]->nn = (Kernel_nn*)nalloc(1, sizeof(Kernel_nn));
Kernel_nn* nn = network->kernel[k_pos]->nn;
network->kernel[k_pos]->stride = -1; // N'est pas utilisé dans une couche dense
network->kernel[k_pos]->padding = -1; // N'est pas utilisé dans une couche dense
network->kernel[k_pos]->activation = activation;
network->kernel[k_pos]->linearisation = DO_LINEARISE;
network->kernel[k_pos]->pooling = NO_POOLING;
network->kernel[k_pos]->nn = (Kernel_nn*)nalloc(1, sizeof(Kernel_nn));
Kernel_nn* nn = network->kernel[k_pos]->nn;
nn->size_input = size_input;
nn->size_output = size_output;
@ -329,6 +344,7 @@ void add_dense_linearisation(Network* network, int size_output, int activation)
nn->v_d_bias[i] = 0.;
#endif
}
nn->weights = (float**)nalloc(size_input, sizeof(float*));
nn->d_weights = (float**)nalloc(size_input, sizeof(float*));
#ifdef ADAM_DENSE_WEIGHTS
@ -350,6 +366,7 @@ void add_dense_linearisation(Network* network, int size_output, int activation)
#endif
}
}
initialisation_1d_matrix(network->initialisation, nn->bias, size_output, size_input, size_output);
initialisation_2d_matrix(network->initialisation, nn->weights, size_input, size_output, size_input, size_output);
create_a_line_input_layer(network, n, size_output);

View File

@ -19,6 +19,8 @@ void free_a_cube_input_layer(Network* network, int pos, int depth, int dim) {
}
void free_a_line_input_layer(Network* network, int pos) {
// Libère l'espace mémoire de network->input[pos] et network->input_z[pos]
// lorsque ces couches sont denses (donc sont des matrices de dimension 1)
gree(network->input[pos][0][0]);
gree(network->input_z[pos][0][0]);
gree(network->input[pos][0]);
@ -28,6 +30,7 @@ void free_a_line_input_layer(Network* network, int pos) {
}
void free_pooling(Network* network, int pos) {
// Le pooling n'alloue rien d'autre que l'input
free_a_cube_input_layer(network, pos+1, network->depth[pos+1], network->width[pos+1]);
}
@ -36,7 +39,7 @@ void free_convolution(Network* network, int pos) {
int c = k_pos->columns;
int k_size = k_pos->k_size;
int r = k_pos->rows;
int bias_size = network->width[pos+1]; // Not sure of the value
int bias_size = network->width[pos+1];
free_a_cube_input_layer(network, pos+1, network->depth[pos+1], network->width[pos+1]);
for (int i=0; i < c; i++) {
for (int j=0; j < bias_size; j++) {
@ -154,7 +157,9 @@ void free_dense_linearisation(Network* network, int pos) {
}
void free_network_creation(Network* network) {
// On libère l'input correspondant à l'image: input[0] (car elle n'appartient à aucune couche)
free_a_cube_input_layer(network, 0, network->depth[0], network->width[0]);
for (int i=0; i < network->max_size-1; i++) {
gree(network->kernel[i]);
}
@ -169,15 +174,21 @@ void free_network_creation(Network* network) {
void free_network(Network* network) {
for (int i=network->size-2; i>=0; i--) {
if (network->kernel[i]->cnn != NULL) { // Convolution
if (network->kernel[i]->cnn != NULL) {
// Convolution
free_convolution(network, i);
} else if (network->kernel[i]->nn != NULL) {
if (network->kernel[i]->linearisation == DOESNT_LINEARISE) { // Dense non linearized
}
else if (network->kernel[i]->nn != NULL) {
// Dense
if (network->kernel[i]->linearisation == DOESNT_LINEARISE) {
// Dense normale
free_dense(network, i);
} else { // Dense linearisation
} else {
// Dense qui linéarise
free_dense_linearisation(network, i);
}
} else { // Pooling
} else {
// Pooling
free_pooling(network, i);
}
}

View File

@ -59,7 +59,7 @@ extern "C"
/*
* Transfert les informations d'erreur à travers une couche de linéarisation
*/
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int depth_input, int dim_input, int size_output, int activation);
void backward_linearisation(Kernel_nn* ker, float*** input, float*** input_z, float* output, int input_depth, int input_width, int size_output, int activation);
#ifdef __CUDACC__
@ -68,6 +68,6 @@ extern "C"
/*
* Transfert les informations d'erreur à travers une couche de convolution
*/
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int depth_input, int dim_input, int depth_output, int dim_output, int activation, int is_first);
void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, float*** output, int input_depth, int input_width, int output_depth, int output_width, int activation, int is_first);
#endif

View File

@ -1,23 +1,36 @@
#include "struct.h"
#ifdef __CUDACC__
__host__ __device__
#endif
/*
On renvoie true si et seulement si x et y vérifient :
lower_bound <= x < upper_bound
lower_bound <= y < upper_bound
*/
int convolution_not_outside(int x, int y, int lower_bound, int upper_bound);
/*
* Effectue la convolution naïvement sur le processeur
*/
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride);
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding);
#ifdef __CUDACC__
/*
* Kernel de la convolution sur carte graphique
*/
__global__ void make_convolution_kernel(int k_size, int columns, int rows, float* bias, size_t pitch_bias, float**** weights, size_t pitch_weights, float*** input, size_t pitch_input, float*** output, size_t pitch_output, int output_dim, int stride);
__global__ void make_convolution_kernel(int k_size, int columns, int rows, float* bias, size_t pitch_bias, float**** weights, size_t pitch_weights, float*** input, size_t pitch_input, float*** output, size_t pitch_output, int output_width, int stride, int padding);
/*
* Effectue la convolution naïvement sur la carte graphique
*/
void make_convolution_device(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride);
void make_convolution_device(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding);
#endif
#ifdef __CUDACC__
extern "C"
#endif
/*
* Détermine si la convolution peut être faite sur la carte graphique au moment de la compilation
*/
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride);
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding);
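
With stride and padding explicit in these prototypes, the output width of a convolution follows the usual relation output_width = (input_width + 2*padding - k_size) / stride + 1 (integer division). A small self-contained check of that relation; the helper name is mine and does not exist in the repository:

```c
#include <assert.h>

// Sketch (not repository code): output width of a convolution with stride and padding,
// assuming the usual formula output_width = (input_width + 2*padding - k_size) / stride + 1.
static int convolution_output_width(int input_width, int k_size, int stride, int padding) {
    return (input_width + 2*padding - k_size) / stride + 1;
}

int main(void) {
    assert(convolution_output_width(32, 5, 1, 0) == 28);  // 32x32 input, 5x5 kernel, stride 1
    assert(convolution_output_width(28, 5, 1, 2) == 28);  // padding 2 keeps the width ("same")
    assert(convolution_output_width(28, 2, 2, 0) == 14);  // stride 2 halves the width
    return 0;
}
```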

View File

@ -7,17 +7,17 @@
/*
* Créé un réseau qui peut contenir max_size couche (dont celle d'input et d'output)
*/
Network* create_network(int max_size, float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth);
Network* create_network(int max_size, float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
/*
* Renvoie un réseau suivant l'architecture LeNet5
*/
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth);
Network* create_network_lenet5(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
/*
* Renvoie un réseau sans convolution, similaire à celui utilisé dans src/dense
*/
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_dim, int input_depth);
Network* create_simple_one(float learning_rate, int dropout, int activation, int initialisation, int input_width, int input_depth);
/*
* Créé et alloue de la mémoire à une couche de type input cube
@ -35,19 +35,24 @@ void create_a_cube_input_z_layer(Network* network, int pos, int depth, int dim);
void create_a_line_input_layer(Network* network, int pos, int dim);
/*
* Ajoute au réseau une couche d'average pooling valide de dimension dim*dim
* Ajoute au réseau une couche d'average pooling avec la taille de noyau (kernel_size),
* le remplissage (padding) et le décalage (stride) choisis
*/
void add_average_pooling(Network* network, int dim_output);
void add_average_pooling(Network* network, int kernel_size, int stride, int padding);
/*
* Ajoute au réseau une couche de max pooling valide de dimension dim*dim
* Ajoute au réseau une couche de max pooling avec la taille de noyau (kernel_size),
* le remplissage (padding) et le décalage (stride) choisis
*/
void add_max_pooling(Network* network, int dim_output);
void add_max_pooling(Network* network, int kernel_size, int stride, int padding);
/*
* Ajoute au réseau une couche de convolution dim*dim et initialise les kernels
* Ajoute au réseau une couche de convolution avec la taille de noyau (kernel_size),
* le remplissage (padding) et le décalage (stride) choisis. Le choix de la profondeur de
* la couche suivante se fait avec number_of_kernels (= output_depth)
* Puis initialise les poids et les biais construits
*/
void add_convolution(Network* network, int depth_output, int dim_output, int activation);
void add_convolution(Network* network, int kernel_size, int number_of_kernels, int stride, int padding, int activation);
/*
* Ajoute au réseau une couche dense et initialise les poids et les biais
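
For orientation, a hedged usage sketch of the new construction API, assembled only from the prototypes above and from constants (RELU, NORMALIZED_XAVIER) that appear elsewhere in this diff; the layer sizes, the include path and the function name are illustrative assumptions, not code from the repository:

```c
#include "include/creation.h"  // assumed include path

// Sketch (not repository code): a small LeNet-5-like network built with the new
// stride/padding-aware prototypes above.
Network* create_small_network(float learning_rate, int input_width, int input_depth) {
    Network* network = create_network(8, learning_rate, 0, RELU, NORMALIZED_XAVIER, input_width, input_depth);

    add_convolution(network, 5, 6, 1, 0, RELU);    // 6 kernels of 5x5, stride 1, no padding
    add_average_pooling(network, 2, 2, 0);         // 2x2 window, stride 2, no padding
    add_convolution(network, 5, 16, 1, 0, RELU);   // 16 kernels of 5x5
    add_average_pooling(network, 2, 2, 0);
    add_dense_linearisation(network, 120, RELU);   // flatten the 3D volume into a dense layer
    add_dense(network, 10, RELU);                  // 10 output classes
    return network;
}
```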

View File

@ -4,14 +4,16 @@
#define DEF_FREE_H
/*
* Libère la mémoire allouée à une couche de type input cube
* Donc free networkt->input[pos][i][j]
* Libère l'espace mémoire de network->input[pos] et network->input_z[pos]
* lorsque ces couches sont non denses (donc sont des matrices de dimension 3)
* Libère donc l'espace mémoire alloué dans 'create_a_cube_input_layer' et 'create_a_cube_input_z_layer' (creation.c)
*/
void free_a_cube_input_layer(Network* network, int pos, int depth, int dim);
/*
* Libère la mémoire allouée à une couche de type input line
* Donc free networkt->input[pos][0][0]
* Libère l'espace mémoire de network->input[pos] et network->input_z[pos]
* lorsque ces couches sont denses (donc sont des matrices de dimension 1)
* Libère donc l'espace mémoire alloué dans 'create_a_line_input_layer' et 'create_a_line_input_z_layer' (creation.c)
*/
void free_a_line_input_layer(Network* network, int pos);
@ -41,7 +43,7 @@ void free_dense_linearisation(Network* network, int pos);
void free_network_creation(Network* network);
/*
* Libère l'espace mémoire alloué à un réseau quelconque
* Libère entièrement l'espace mémoire alloué à un réseau quelconque
*/
void free_network(Network* network);

View File

@ -3,37 +3,47 @@
#ifndef DEF_MAKE_H
#define DEF_MAKE_H
#ifdef __CUDACC__
__host__ __device__
#endif
/*
* Effectue une convolution sans stride sur le processeur
* On renvoie true si et seulement si x et y vérifient :
* lower_bound <= y < upper_bound
* lower_bound <= x < upper_bound
*/
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride);
int pooling_not_outside(int x, int y, int lower_bound, int upper_bound);
/*
* Effectue la convolution sur le CPU ou GPU
* Effectue la propagation d'une convolution avec stride et padding choisis sur le processeur
*/
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_dim, int stride);
void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding);
/*
* Effectue la propagation d'une convolution avec stride et padding choisis sur le CPU ou GPU
*/
void make_convolution(Kernel_cnn* kernel, float*** input, float*** output, int output_width, int stride, int padding);
#ifdef __CUDACC__
extern "C"
#endif
/*
* Effectue un average pooling avec stride=size
* Effectue la propagation d'un average pooling avec stride et padding choisis
*/
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_dim, int stride);
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding);
#ifdef __CUDACC__
extern "C"
#endif
/*
* Effectue un max pooling avec stride=size
* Effectue la propagation d'un max pooling avec stride et padding choisis
*/
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_dim, int stride);
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding);
#ifdef __CUDACC__
extern "C"
#endif
/*
* Effectue une full connection
* Effectue la propagation d'une couche dense
*/
void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input, int size_output);
@ -41,8 +51,8 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
extern "C"
#endif
/*
* Effectue une full connection qui passe d'une matrice à un vecteur
* Effectue la propagation d'une couche dense qui passe d'une matrice à un vecteur
*/
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output);
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output);
#endif

View File

@ -33,5 +33,5 @@ Network* read_network(char* filename);
/*
* Lit une kernel dans le fichier spécifié par le pointeur ptr
*/
Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr);
Kernel* read_kernel(int type_couche, int output_width, FILE* ptr);
#endif

View File

@ -6,7 +6,7 @@
/*
* Affiche le kernel d'une couche de convolution
*/
void print_kernel_cnn(Kernel_cnn* k, int depth_input, int dim_input, int depth_output, int dim_output);
void print_kernel_cnn(Kernel_cnn* k, int input_depth, int input_width, int output_depth, int output_width);
/*
* Affiche une couche de pooling

View File

@ -12,18 +12,18 @@
typedef struct Kernel_cnn {
// Noyau ayant une couche matricielle en sortie
int k_size; // k_size = dim_input - dim_output + 1
int k_size; // k_size = input_width - output_width + 1 (valable pour stride=1 et padding=0)
int rows; // Depth de l'input
int columns; // Depth de l'output
float*** bias; // bias[columns][dim_output][dim_output]
float*** d_bias; // d_bias[columns][dim_output][dim_output]
float*** bias; // bias[columns][output_width][output_width] <=> bias[output_depth][output_width][output_width]
float*** d_bias; // d_bias[columns][output_width][output_width]
#ifdef ADAM_CNN_BIAS
float*** s_d_bias; // s_d_bias[columns][dim_output][dim_output]
float*** v_d_bias; // v_d_bias[columns][dim_output][dim_output]
float*** s_d_bias; // s_d_bias[columns][output_width][output_width]
float*** v_d_bias; // v_d_bias[columns][output_width][output_width]
#endif
float**** weights; // weights[rows][columns][k_size][k_size]
float**** weights; // weights[rows][columns][k_size][k_size] <=> weights[input_depth][output_depth][k_size][k_size]
float**** d_weights; // d_weights[rows][columns][k_size][k_size]
#ifdef ADAM_CNN_WEIGHTS
float**** s_d_weights; // s_d_weights[rows][columns][k_size][k_size]
@ -58,6 +58,8 @@ typedef struct Kernel {
int activation; // Id de la fonction d'activation et -Id de sa dérivée
int linearisation; // 1 si c'est la linéarisation d'une couche, 0 sinon
int pooling; // 0 si pas pooling, 1 si average_pooling, 2 si max_pooling
int stride; // Valable uniquement pour une couche de pooling ou de convolution
int padding; // Valable uniquement pour une couche de pooling ou de convolution
} Kernel;
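
Putting the hunks together, the layer descriptor now reads roughly as follows. This is a consolidated sketch, not the verbatim header: the cnn/nn pointers are inferred from their use throughout the diff, and any field not visible here is omitted.

```c
// Sketch (not repository code): consolidated view of the Kernel struct after this change.
typedef struct Kernel {
    Kernel_cnn* cnn;    // NULL unless the layer is a convolution
    Kernel_nn* nn;      // NULL unless the layer is dense (possibly linearising)
    int activation;     // id of the activation function (-id for its derivative)
    int linearisation;  // 1 if the layer linearises the previous one, 0 otherwise
    int pooling;        // 0: none, 1: average pooling, 2: max pooling
    int stride;         // only meaningful for pooling and convolution layers (-1 for dense)
    int padding;        // only meaningful for pooling and convolution layers (-1 for dense)
} Kernel;
```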

View File

@ -10,59 +10,78 @@
#include "include/config.h"
#ifdef __CUDACC__
__host__ __device__
#endif
int pooling_not_outside(int x, int y, int lower_bound, int upper_bound) {
return !(x < lower_bound || y < lower_bound || x >= upper_bound || y>= upper_bound);
}
/*
* Average Pooling
*/
#ifdef __CUDACC__
__global__ void make_average_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
__global__ void make_average_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < output_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < output_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < output_width
int n = size*size;
int max_move = size - padding;
int input_width = output_width*stride - 2*padding + size - stride;
if (idx >= output_depth || idy >= output_width || idz >= output_width) {
return;
}
int nb_elements = 0;
float sum = 0;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
sum += input[idx][stride*idy +a][stride*idz +b];
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int idy_2 = stride*idy +a;
int idz_2 = stride*idz +b;
if (pooling_not_outside(idy_2, idz_2, 0, input_width)) {
sum += input[idx][idy_2][idz_2];
nb_elements++;
}
}
}
output[idx][idy][idz] = sum/(float)n;
output[idx][idy][idz] = sum/(float)nb_elements;
}
void make_average_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_average_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Make computation
dim3 gridSize(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
make_average_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride);
make_average_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride, padding);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_average_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_average_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// input[output_depth][input_width][input_width] avec input_width = output_width*stride - 2*padding + size - stride
// output[output_depth][output_width][output_width]
float sum;
int n = size*size;
int max_move = size - padding;
int input_width = output_width*stride - 2*padding + size - stride;
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
sum = 0;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
sum += input[i][stride*j +a][stride*k +b];
float sum = 0.;
int nb_elements = 0;
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int j_2 = stride*j +a;
int k_2 = stride*k +b;
if (pooling_not_outside(j_2, k_2, 0, input_width)) {
sum += input[i][j_2][k_2];
nb_elements++;
}
}
}
output[i][j][k] = sum/(float)n;
output[i][j][k] = sum/(float)nb_elements;
}
}
}
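
The input_width recovered above is the inverse of the usual pooling output-size formula. A minimal self-check of that relation, under the assumption that the stride divides the padded input exactly; the helper is mine, not repository code:

```c
#include <assert.h>

// Sketch (not repository code): the geometry used by make_average_pooling_cpu above.
// input_width = output_width*stride - 2*padding + size - stride is the inverse of
// output_width = (input_width + 2*padding - size) / stride + 1 when the division is exact.
static int pooling_input_width(int output_width, int size, int stride, int padding) {
    return output_width*stride - 2*padding + size - stride;
}

int main(void) {
    assert(pooling_input_width(14, 2, 2, 0) == 28);  // 2x2 pooling with stride 2: 28 -> 14
    assert(pooling_input_width(28, 2, 1, 0) == 29);  // stride 1 only shrinks by size-1
    assert(pooling_input_width(16, 3, 2, 1) == 31);  // 3x3 window, stride 2, padding 1: 31 -> 16
    return 0;
}
```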
@ -71,11 +90,11 @@ void make_average_pooling_cpu(float*** input, float*** output, int size, int out
#ifdef __CUDACC__
extern "C"
#endif
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
#ifndef __CUDACC__
make_average_pooling_cpu(input, output, size, output_depth, output_width, stride);
make_average_pooling_cpu(input, output, size, output_depth, output_width, stride, padding);
#else
make_average_pooling_device(input, output, size, output_depth, output_width, stride);
make_average_pooling_device(input, output, size, output_depth, output_width, stride, padding);
#endif
}
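
Because a padded window can extend past the borders of the input, the average divides by the number of in-bounds cells (nb_elements) rather than by size*size. A standalone sketch of that count, mirroring the loops above; the helper name is an assumption:

```c
#include <assert.h>

// Sketch (not repository code): number of in-bounds input cells seen by the pooling
// window of output cell (j, k) -- the nb_elements counted in the loops above.
static int pooling_window_count(int j, int k, int size, int stride, int padding, int input_width) {
    int count = 0;
    for (int a = -padding; a < size - padding; a++) {
        for (int b = -padding; b < size - padding; b++) {
            int j_2 = stride*j + a;
            int k_2 = stride*k + b;
            if (j_2 >= 0 && k_2 >= 0 && j_2 < input_width && k_2 < input_width) {
                count++;
            }
        }
    }
    return count;
}

int main(void) {
    assert(pooling_window_count(0, 0, 2, 2, 1, 4) == 1);  // corner window: only input[0][0] is valid
    assert(pooling_window_count(1, 1, 2, 2, 1, 4) == 4);  // interior window: all size*size cells valid
    return 0;
}
```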
@ -87,50 +106,62 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
* Max Pooling
*/
#ifdef __CUDACC__
__global__ void make_max_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
__global__ void make_max_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < output_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < output_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < output_width
int input_width = output_width*stride - 2*padding + size - stride;
if (idx >= output_depth || idy >= output_width || idz >= output_width) {
return;
}
int max_move = size - padding;
float m = -FLT_MAX;
float temp;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
temp = input[idx][stride*idy +a][stride*idz +b];
m = m > temp ? m : temp; // max(m, temp)
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int idy_2 = stride*idy +a;
int idz_2 = stride*idz +b;
if (pooling_not_outside(idy_2, idz_2, 0, input_width)) {
temp = input[idx][idy_2][idz_2];
m = m > temp ? m : temp; // max(m, temp)
}
}
}
output[idx][idy][idz] = m;
}
void make_max_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_max_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Make computation
dim3 gridSize(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
make_max_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride);
make_max_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride, padding);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_max_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_max_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// input[output_depth][input_width][input_width] avec input_width = output_width*stride - 2*padding + size - stride
// output[output_depth][output_width][output_width]
int max_move = size - padding;
int input_width = output_width*stride - 2*padding + size - stride;
float m;
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
m = -FLT_MAX;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
m = fmaxf(m, input[i][stride*j +a][stride*k +b]);
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int j_2 = stride*j +a;
int k_2 = stride*k +b;
if (pooling_not_outside(j_2, k_2, 0, input_width)) {
m = fmaxf(m, input[i][j_2][k_2]);
}
}
}
output[i][j][k] = m;
@ -142,11 +173,11 @@ void make_max_pooling_cpu(float*** input, float*** output, int size, int output_
#ifdef __CUDACC__
extern "C"
#endif
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
#ifndef __CUDACC__
make_max_pooling_cpu(input, output, size, output_depth, output_width, stride);
make_max_pooling_cpu(input, output, size, output_depth, output_width, stride, padding);
#else
make_max_pooling_device(input, output, size, output_depth, output_width, stride);
make_max_pooling_device(input, output, size, output_depth, output_width, stride, padding);
#endif
}
@ -220,7 +251,7 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
* Dense linearized
*/
#ifdef __CUDACC__
__global__ void make_dense_linearized_kernel(float** weights, float* bias, float*** input, float* output, int depth_input, int dim_input, int size_output) {
__global__ void make_dense_linearized_kernel(float** weights, float* bias, float*** input, float* output, int input_depth, int input_width, int size_output) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < size_output
@ -229,38 +260,38 @@ __global__ void make_dense_linearized_kernel(float** weights, float* bias, float
}
float f = bias[idx];
for (int i=0; i < depth_input; i++) {
for (int j=0; j < dim_input; j++) {
for (int k=0; k < dim_input; k++) {
f += input[i][j][k]*weights[k + j*dim_input + i*depth_input][idx];
for (int i=0; i < input_depth; i++) {
for (int j=0; j < input_width; j++) {
for (int k=0; k < input_width; k++) {
f += input[i][j][k]*weights[k + j*input_width + i*input_width*input_width][idx];
}
}
}
output[idx] = f;
}
void make_dense_linearized_device(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
void make_dense_linearized_device(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output) {
// Make computation
dim3 gridSize(i_div_up(size_output, BLOCKSIZE_x*BLOCKSIZE_y), 1, 1);
dim3 blockSize(BLOCKSIZE_x*BLOCKSIZE_y, 1, BLOCKSIZE_z);
make_dense_linearized_kernel<<<gridSize, blockSize>>>(kernel->weights, kernel->bias, input, output, depth_input, dim_input, size_output);
make_dense_linearized_kernel<<<gridSize, blockSize>>>(kernel->weights, kernel->bias, input, output, input_depth, input_width, size_output);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
// input[depth_input][dim_input][dim_input]
void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output) {
// input[input_depth][input_width][input_width]
// output[size_output]
float f;
for (int l=0; l < size_output; l++) {
f = kernel->bias[l];
for (int i=0; i < depth_input; i++) {
for (int j=0; j < dim_input; j++) {
for (int k=0; k < dim_input; k++) {
f += input[i][j][k]*kernel->weights[k + j*dim_input + i*depth_input][l];
for (int i=0; i < input_depth; i++) {
for (int j=0; j < input_width; j++) {
for (int k=0; k < input_width; k++) {
f += input[i][j][k]*kernel->weights[k + j*input_width + i*input_width*input_width][l];
}
}
}
@ -271,10 +302,10 @@ void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output,
#ifdef __CUDACC__
extern "C"
#endif
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output) {
#ifndef __CUDACC__
make_dense_linearized_cpu(kernel, input, output, depth_input, dim_input, size_output);
make_dense_linearized_cpu(kernel, input, output, input_depth, input_width, size_output);
#else
make_dense_linearized_device(kernel, input, output, depth_input, dim_input, size_output);
make_dense_linearized_device(kernel, input, output, input_depth, input_width, size_output);
#endif
}
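
The weights row index used by the linearisation above corresponds to the usual row-major flattening of an [input_depth][input_width][input_width] volume. A minimal sketch of that mapping, with a helper name of my own:

```c
// Sketch (not repository code): row-major flattening of an activation volume of shape
// [input_depth][input_width][input_width]. Each triple (i, j, k) maps to a distinct
// row of the dense weights matrix, in [0, input_depth*input_width*input_width).
static int flatten_index(int i, int j, int k, int input_width) {
    return i*input_width*input_width + j*input_width + k;
}
```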

View File

@ -10,59 +10,78 @@
#include "include/config.h"
#ifdef __CUDACC__
__host__ __device__
#endif
int pooling_not_outside(int x, int y, int lower_bound, int upper_bound) {
return !(x < lower_bound || y < lower_bound || x >= upper_bound || y>= upper_bound);
}
/*
* Average Pooling
*/
#ifdef __CUDACC__
__global__ void make_average_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
__global__ void make_average_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < output_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < output_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < output_width
int n = size*size;
int max_move = size - padding;
int input_width = output_width*stride - 2*padding + size - stride;
if (idx >= output_depth || idy >= output_width || idz >= output_width) {
return;
}
int nb_elements = 0;
float sum = 0;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
sum += input[idx][stride*idy +a][stride*idz +b];
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int idy_2 = stride*idy +a;
int idz_2 = stride*idz +b;
if (pooling_not_outside(idy_2, idz_2, 0, input_width)) {
sum += input[idx][idy_2][idz_2];
nb_elements++;
}
}
}
output[idx][idy][idz] = sum/(float)n;
output[idx][idy][idz] = sum/(float)nb_elements;
}
void make_average_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_average_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Make computation
dim3 gridSize(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
make_average_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride);
make_average_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride, padding);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_average_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_average_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// input[output_depth][input_width][input_width] avec input_width = output_width*stride - 2*padding + size - stride
// output[output_depth][output_width][output_width]
float sum;
int n = size*size;
int max_move = size - padding;
int input_width = output_width*stride - 2*padding + size - stride;
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
sum = 0;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
sum += input[i][stride*j +a][stride*k +b];
float sum = 0.;
int nb_elements = 0;
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int j_2 = stride*j +a;
int k_2 = stride*k +b;
if (pooling_not_outside(j_2, k_2, 0, input_width)) {
sum += input[i][j_2][k_2];
nb_elements++;
}
}
}
output[i][j][k] = sum/(float)n;
output[i][j][k] = sum/(float)nb_elements;
}
}
}
@ -71,11 +90,11 @@ void make_average_pooling_cpu(float*** input, float*** output, int size, int out
#ifdef __CUDACC__
extern "C"
#endif
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_average_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
#ifndef __CUDACC__
make_average_pooling_cpu(input, output, size, output_depth, output_width, stride);
make_average_pooling_cpu(input, output, size, output_depth, output_width, stride, padding);
#else
make_average_pooling_device(input, output, size, output_depth, output_width, stride);
make_average_pooling_device(input, output, size, output_depth, output_width, stride, padding);
#endif
}
@ -87,50 +106,62 @@ void make_average_pooling(float*** input, float*** output, int size, int output_
* Max Pooling
*/
#ifdef __CUDACC__
__global__ void make_max_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
__global__ void make_max_pooling_kernel(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < output_depth
int idy = threadIdx.y + blockDim.y*blockIdx.y; // < output_width
int idz = threadIdx.z + blockDim.z*blockIdx.z; // < output_width
int input_width = output_width*stride - 2*padding + size - stride;
if (idx >= output_depth || idy >= output_width || idz >= output_width) {
return;
}
int max_move = size - padding;
float m = -FLT_MAX;
float temp;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
temp = input[idx][stride*idy +a][stride*idz +b];
m = m > temp ? m : temp; // max(m, temp)
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int idy_2 = stride*idy +a;
int idz_2 = stride*idz +b;
if (pooling_not_outside(idy_2, idz_2, 0, input_width)) {
temp = input[idx][idy_2][idz_2];
m = m > temp ? m : temp; // max(m, temp)
}
}
}
output[idx][idy][idz] = m;
}
void make_max_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_max_pooling_device(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// Make computation
dim3 gridSize(i_div_up(output_depth, BLOCKSIZE_x), i_div_up(output_width, BLOCKSIZE_y), i_div_up(output_width, BLOCKSIZE_z));
dim3 blockSize(BLOCKSIZE_x, BLOCKSIZE_y, BLOCKSIZE_z);
make_max_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride);
make_max_pooling_kernel<<<gridSize, blockSize>>>(input, output, size, output_depth, output_width, stride, padding);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_max_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_max_pooling_cpu(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
// input[output_depth][input_width][input_width] avec input_width = output_width*stride - 2*padding + size - stride
// output[output_depth][output_width][output_width]
int max_move = size - padding;
int input_width = output_width*stride - 2*padding + size - stride;
float m;
for (int i=0; i < output_depth; i++) {
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
m = -FLT_MAX;
for (int a=0; a < size; a++) {
for (int b=0; b < size; b++) {
m = fmaxf(m, input[i][stride*j +a][stride*k +b]);
for (int a=-padding; a < max_move; a++) {
for (int b=-padding; b < max_move; b++) {
int j_2 = stride*j +a;
int k_2 = stride*k +b;
if (pooling_not_outside(j_2, k_2, 0, input_width)) {
m = fmaxf(m, input[i][j_2][k_2]);
}
}
}
output[i][j][k] = m;
@ -142,11 +173,11 @@ void make_max_pooling_cpu(float*** input, float*** output, int size, int output_
#ifdef __CUDACC__
extern "C"
#endif
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride) {
void make_max_pooling(float*** input, float*** output, int size, int output_depth, int output_width, int stride, int padding) {
#ifndef __CUDACC__
make_max_pooling_cpu(input, output, size, output_depth, output_width, stride);
make_max_pooling_cpu(input, output, size, output_depth, output_width, stride, padding);
#else
make_max_pooling_device(input, output, size, output_depth, output_width, stride);
make_max_pooling_device(input, output, size, output_depth, output_width, stride, padding);
#endif
}
@ -220,7 +251,7 @@ void make_dense(Kernel_nn* kernel, float* input, float* output, int size_input,
* Dense linearized
*/
#ifdef __CUDACC__
__global__ void make_dense_linearized_kernel(float** weights, float* bias, float*** input, float* output, int depth_input, int dim_input, int size_output) {
__global__ void make_dense_linearized_kernel(float** weights, float* bias, float*** input, float* output, int input_depth, int input_width, int size_output) {
// Équivalents respectifs de i, j et k dans la boucle effectuée par le cpu
int idx = threadIdx.x + blockDim.x*blockIdx.x; // < size_output
@ -229,38 +260,38 @@ __global__ void make_dense_linearized_kernel(float** weights, float* bias, float
}
float f = bias[idx];
for (int i=0; i < depth_input; i++) {
for (int j=0; j < dim_input; j++) {
for (int k=0; k < dim_input; k++) {
f += input[i][j][k]*weights[k + j*dim_input + i*depth_input][idx];
for (int i=0; i < input_depth; i++) {
for (int j=0; j < input_width; j++) {
for (int k=0; k < input_width; k++) {
f += input[i][j][k]*weights[k + j*input_width + i*input_width*input_width][idx];
}
}
}
output[idx] = f;
}
void make_dense_linearized_device(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
void make_dense_linearized_device(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output) {
// Make computation
dim3 gridSize(i_div_up(size_output, BLOCKSIZE_x*BLOCKSIZE_y), 1, 1);
dim3 blockSize(BLOCKSIZE_x*BLOCKSIZE_y, 1, BLOCKSIZE_z);
make_dense_linearized_kernel<<<gridSize, blockSize>>>(kernel->weights, kernel->bias, input, output, depth_input, dim_input, size_output);
make_dense_linearized_kernel<<<gridSize, blockSize>>>(kernel->weights, kernel->bias, input, output, input_depth, input_width, size_output);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
#endif
void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
// input[depth_input][dim_input][dim_input]
void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output) {
// input[input_depth][input_width][input_width]
// output[size_output]
float f;
for (int l=0; l < size_output; l++) {
f = kernel->bias[l];
for (int i=0; i < depth_input; i++) {
for (int j=0; j < dim_input; j++) {
for (int k=0; k < dim_input; k++) {
f += input[i][j][k]*kernel->weights[k + j*dim_input + i*depth_input][l];
for (int i=0; i < input_depth; i++) {
for (int j=0; j < input_width; j++) {
for (int k=0; k < input_width; k++) {
f += input[i][j][k]*kernel->weights[k + j*input_width + i*input_width*input_width][l];
}
}
}
@ -271,10 +302,10 @@ void make_dense_linearized_cpu(Kernel_nn* kernel, float*** input, float* output,
#ifdef __CUDACC__
extern "C"
#endif
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int depth_input, int dim_input, int size_output) {
void make_dense_linearized(Kernel_nn* kernel, float*** input, float* output, int input_depth, int input_width, int size_output) {
#ifndef __CUDACC__
make_dense_linearized_cpu(kernel, input, output, depth_input, dim_input, size_output);
make_dense_linearized_cpu(kernel, input, output, input_depth, input_width, size_output);
#else
make_dense_linearized_device(kernel, input, output, depth_input, dim_input, size_output);
make_dense_linearized_device(kernel, input, output, input_depth, input_width, size_output);
#endif
}

View File

@ -73,24 +73,26 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
int indice_buffer = 0;
if (type_couche == 0) { // Cas du CNN
Kernel_cnn* cnn = kernel->cnn;
int output_dim = network->width[indice_couche+1];
int output_width = network->width[indice_couche+1];
// Écriture du pré-corps
uint32_t pre_buffer[5];
uint32_t pre_buffer[7];
pre_buffer[0] = kernel->activation;
pre_buffer[1] = kernel->linearisation;
pre_buffer[2] = cnn->k_size;
pre_buffer[3] = cnn->rows;
pre_buffer[4] = cnn->columns;
pre_buffer[5] = kernel->stride;
pre_buffer[6] = kernel->padding;
fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
// Écriture du corps
// We need to split in small buffers to keep some free memory in the computer
for (int i=0; i < cnn->columns; i++) {
indice_buffer = 0;
float buffer[output_dim*output_dim];
for (int j=0; j < output_dim; j++) {
for (int k=0; k < output_dim; k++) {
float buffer[output_width*output_width];
for (int j=0; j < output_width; j++) {
for (int k=0; k < output_width; k++) {
bufferAdd(cnn->bias[i][j][k]);
}
}
@ -112,11 +114,13 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
Kernel_nn* nn = kernel->nn;
// Écriture du pré-corps
uint32_t pre_buffer[4];
uint32_t pre_buffer[6];
pre_buffer[0] = kernel->activation;
pre_buffer[1] = kernel->linearisation;
pre_buffer[2] = nn->size_input;
pre_buffer[3] = nn->size_output;
pre_buffer[4] = kernel->stride;
pre_buffer[5] = kernel->padding;
fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
// Écriture du corps
@ -135,9 +139,11 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
fwrite(buffer, sizeof(buffer), 1, ptr);
}
} else if (type_couche == 2) { // Cas du Pooling Layer
uint32_t pre_buffer[2];
uint32_t pre_buffer[4];
pre_buffer[0] = kernel->linearisation;
pre_buffer[1] = kernel->pooling;
pre_buffer[2] = kernel->stride;
pre_buffer[3] = kernel->padding;
fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
}
}
@ -228,13 +234,13 @@ Network* read_network(char* filename) {
return network;
}
Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
Kernel* read_kernel(int type_couche, int output_width, FILE* ptr) {
Kernel* kernel = (Kernel*)nalloc(1, sizeof(Kernel));
if (type_couche == CNN) { // Cas du CNN
// Lecture du "Pré-corps"
kernel->cnn = (Kernel_cnn*)nalloc(1, sizeof(Kernel_cnn));
kernel->nn = NULL;
uint32_t buffer[5];
uint32_t buffer[7];
(void) !fread(&buffer, sizeof(buffer), 1, ptr);
kernel->activation = buffer[0];
@ -242,6 +248,8 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
kernel->cnn->k_size = buffer[2];
kernel->cnn->rows = buffer[3];
kernel->cnn->columns = buffer[4];
kernel->stride = buffer[5];
kernel->padding = buffer[6];
// Lecture du corps
Kernel_cnn* cnn = kernel->cnn;
@ -254,20 +262,20 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
cnn->v_d_bias = (float***)nalloc(cnn->columns, sizeof(float**));
#endif
for (int i=0; i < cnn->columns; i++) {
cnn->bias[i] = (float**)nalloc(output_dim, sizeof(float*));
cnn->d_bias[i] = (float**)nalloc(output_dim, sizeof(float*));
cnn->bias[i] = (float**)nalloc(output_width, sizeof(float*));
cnn->d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
#ifdef ADAM_CNN_BIAS
cnn->s_d_bias[i] = (float**)nalloc(output_dim, sizeof(float*));
cnn->v_d_bias[i] = (float**)nalloc(output_dim, sizeof(float*));
cnn->s_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
cnn->v_d_bias[i] = (float**)nalloc(output_width, sizeof(float*));
#endif
for (int j=0; j < output_dim; j++) {
cnn->bias[i][j] = (float*)nalloc(output_dim, sizeof(float));
cnn->d_bias[i][j] = (float*)nalloc(output_dim, sizeof(float));
for (int j=0; j < output_width; j++) {
cnn->bias[i][j] = (float*)nalloc(output_width, sizeof(float));
cnn->d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
#ifdef ADAM_CNN_BIAS
cnn->s_d_bias[i][j] = (float*)nalloc(output_dim, sizeof(float));
cnn->v_d_bias[i][j] = (float*)nalloc(output_dim, sizeof(float));
cnn->s_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
cnn->v_d_bias[i][j] = (float*)nalloc(output_width, sizeof(float));
#endif
for (int k=0; k < output_dim; k++) {
for (int k=0; k < output_width; k++) {
(void) !fread(&tmp, sizeof(tmp), 1, ptr);
cnn->bias[i][j][k] = tmp;
cnn->d_bias[i][j][k] = 0.;
@ -322,13 +330,15 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
// Lecture du "Pré-corps"
kernel->nn = (Kernel_nn*)nalloc(1, sizeof(Kernel_nn));
kernel->cnn = NULL;
uint32_t buffer[4];
uint32_t buffer[6];
(void) !fread(&buffer, sizeof(buffer), 1, ptr);
kernel->activation = buffer[0];
kernel->linearisation = buffer[1];
kernel->nn->size_input = buffer[2];
kernel->nn->size_output = buffer[3];
kernel->stride = buffer[4];
kernel->padding = buffer[5];
// Lecture du corps
Kernel_nn* nn = kernel->nn;
@ -374,15 +384,19 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
}
}
} else if (type_couche == POOLING) { // Cas du Pooling Layer
uint32_t pooling, linearisation;
uint32_t pooling, linearisation, stride, padding;
(void) !fread(&linearisation, sizeof(linearisation), 1, ptr);
(void) !fread(&pooling, sizeof(pooling), 1, ptr);
(void) !fread(&stride, sizeof(stride), 1, ptr);
(void) !fread(&padding, sizeof(padding), 1, ptr);
kernel->cnn = NULL;
kernel->nn = NULL;
kernel->activation = IDENTITY;
kernel->pooling = pooling;
kernel->linearisation = linearisation;
kernel->stride = stride;
kernel->padding = padding;
}
return kernel;
}
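
After this change each layer's "pré-corps" carries two extra words (stride and padding): 7 uint32_t for a convolution, 6 for a dense layer and 4 for a pooling layer. A small sketch of the pooling case, mirroring write_couche above; the function is illustrative, not repository code. Files written with the previous 5/4/2-word layout presumably cannot be read back by read_network and would need to be regenerated.

```c
#include <stdint.h>
#include <stdio.h>

// Sketch (not repository code): the "pré-corps" of a pooling layer now holds 4 words,
// written in the same order as in write_couche above.
static void write_pooling_header(FILE* ptr, uint32_t linearisation, uint32_t pooling,
                                 uint32_t stride, uint32_t padding) {
    uint32_t pre_buffer[4] = {linearisation, pooling, stride, padding};
    fwrite(pre_buffer, sizeof(pre_buffer), 1, ptr);
}
```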

View File

@ -11,13 +11,13 @@
#define purple printf("\033[0;35m")
#define reset_color printf("\033[0m")
void print_kernel_cnn(Kernel_cnn* ker, int depth_input, int dim_input, int depth_output, int dim_output) {
int k_size = dim_input - dim_output + 1;
void print_kernel_cnn(Kernel_cnn* ker, int input_depth, int input_width, int output_depth, int output_width) {
int k_size = input_width - output_width + 1;
// print bias
green;
for (int i=0; i<depth_output; i++) {
for (int j=0; j<dim_output; j++) {
for (int k=0; k<dim_output; k++) {
for (int i=0; i<output_depth; i++) {
for (int j=0; j<output_width; j++) {
for (int k=0; k<output_width; k++) {
printf("%.2f", ker->bias[i][j][k]);
}
print_space;
@ -29,9 +29,9 @@ void print_kernel_cnn(Kernel_cnn* ker, int depth_input, int dim_input, int depth
//print weights
red;
for (int i=0; i<depth_input; i++) {
for (int i=0; i<input_depth; i++) {
printf("------Line %d-----\n", i);
for (int j=0; j<depth_output; j++) {
for (int j=0; j<output_depth; j++) {
for (int k=0; k<k_size; k++) {
for (int l=0; l<k_size; l++) {
printf("%.2f", ker->weights[i][j][k][l]);

View File

@ -63,9 +63,8 @@ void* train_thread(void* parameters) {
float loss = 0.;
pthread_t tid;
LoadImageParameters* load_image_param;
LoadImageParameters* load_image_param = (LoadImageParameters*)malloc(sizeof(LoadImageParameters));
if (dataset_type != 0) {
load_image_param = (LoadImageParameters*)malloc(sizeof(LoadImageParameters));
load_image_param->dataset = param->dataset;
load_image_param->index = index[start];
@ -118,9 +117,7 @@ void* train_thread(void* parameters) {
}
}
if (dataset_type != 0) {
free(load_image_param);
}
free(load_image_param);
param->accuracy = accuracy;
param->loss = loss;
@ -140,6 +137,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
float loss;
float batch_loss; // May be redundant with loss, but gives more information
float test_accuracy = 0.; // Used to decrease Learning rate
(void)test_accuracy; // To avoid warnings when not used
float accuracy;
float batch_accuracy;
float current_accuracy;
@ -155,7 +153,7 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
//* Chargement du dataset
int input_dim = -1;
int input_width = -1;
int input_depth = -1;
int nb_images_total; // Images au total
@ -174,11 +172,11 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
images = read_mnist_images(images_file);
labels = read_mnist_labels(labels_file);
input_dim = 32;
input_width = 32;
input_depth = 1;
} else { // Type JPG
dataset = loadJpegDataset(data_dir);
input_dim = dataset->height + 4; // image_size + padding
input_width = dataset->height + 4; // image_size + padding
input_depth = dataset->numComponents;
nb_images_total = dataset->numImages;
@ -187,8 +185,8 @@ void train(int dataset_type, char* images_file, char* labels_file, char* data_di
//* Création du réseau
Network* network;
if (!recover) {
network = create_network_lenet5(LEARNING_RATE, 0, RELU, NORMALIZED_XAVIER, input_dim, input_depth);
//network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_dim, input_depth);
network = create_network_lenet5(LEARNING_RATE, 0, RELU, NORMALIZED_XAVIER, input_width, input_depth);
//network = create_simple_one(LEARNING_RATE, 0, RELU, GLOROT, input_width, input_depth);
} else {
network = read_network(recover);
network->learning_rate = LEARNING_RATE;

View File

@ -33,7 +33,7 @@ void knuth_shuffle(int* tab, int n) {
}
bool equals_networks(Network* network1, Network* network2) {
int output_dim;
int output_width;
checkEquals(size, "size", -1);
checkEquals(initialisation, "initialisation", -1);
checkEquals(dropout, "dropout", -1);
@ -45,6 +45,8 @@ bool equals_networks(Network* network1, Network* network2) {
for (int i=0; i < network1->size-1; i++) {
checkEquals(kernel[i]->activation, "kernel[i]->activation", i);
checkEquals(kernel[i]->stride, "kernel[i]->stride", i);
checkEquals(kernel[i]->padding, "kernel[i]->padding", i);
if ((!network1->kernel[i]->cnn ^ !network2->kernel[i]->cnn) || (!network1->kernel[i]->nn ^ !network2->kernel[i]->nn)) {
printf(BOLDRED "[ ERROR ]" RESET "network1->kernel[%d] et network1->kernel[%d] diffèrent de type\n", i, i);
return false;
@ -68,13 +70,13 @@ bool equals_networks(Network* network1, Network* network2) {
}
} else {
// Type CNN
output_dim = network1->width[i+1];
output_width = network1->width[i+1];
checkEquals(kernel[i]->cnn->k_size, "kernel[i]->k_size", i);
checkEquals(kernel[i]->cnn->rows, "kernel[i]->rows", i);
checkEquals(kernel[i]->cnn->columns, "kernel[i]->columns", i);
for (int j=0; j < network1->kernel[i]->cnn->columns; j++) {
for (int k=0; k < output_dim; k++) {
for (int l=0; l < output_dim; l++) {
for (int k=0; k < output_width; k++) {
for (int l=0; l < output_width; l++) {
checkEquals(kernel[i]->cnn->bias[j][k][l], "kernel[i]->cnn->bias[j][k][l]", l);
}
}
@ -106,7 +108,7 @@ Network* copy_network(Network* network) {
int rows;
int k_size;
int columns;
int output_dim;
int output_width;
copyVar(dropout);
copyVar(learning_rate);
@ -129,6 +131,8 @@ Network* copy_network(Network* network) {
copyVar(kernel[i]->pooling);
copyVar(kernel[i]->activation);
copyVar(kernel[i]->linearisation); // 1
copyVar(kernel[i]->stride); // -1
copyVar(kernel[i]->padding); // -1
network_cp->kernel[i]->cnn = NULL;
network_cp->kernel[i]->nn = NULL;
}
@ -136,6 +140,8 @@ Network* copy_network(Network* network) {
copyVar(kernel[i]->pooling);
copyVar(kernel[i]->activation);
copyVar(kernel[i]->linearisation); // 0
copyVar(kernel[i]->stride); // -1
copyVar(kernel[i]->padding); // -1
size_input = network->kernel[i]->nn->size_input;
size_output = network->kernel[i]->nn->size_output;
@ -188,11 +194,13 @@ Network* copy_network(Network* network) {
copyVar(kernel[i]->pooling);
copyVar(kernel[i]->activation);
copyVar(kernel[i]->linearisation); // 0
copyVar(kernel[i]->stride);
copyVar(kernel[i]->padding);
rows = network->kernel[i]->cnn->rows;
k_size = network->kernel[i]->cnn->k_size;
columns = network->kernel[i]->cnn->columns;
output_dim = network->width[i+1];
output_width = network->width[i+1];
network_cp->kernel[i]->nn = NULL;
@ -209,20 +217,20 @@ Network* copy_network(Network* network) {
network_cp->kernel[i]->cnn->v_d_bias = (float***)nalloc(columns, sizeof(float**));
#endif
for (int j=0; j < columns; j++) {
network_cp->kernel[i]->cnn->bias[j] = (float**)nalloc(output_dim, sizeof(float*));
network_cp->kernel[i]->cnn->d_bias[j] = (float**)nalloc(output_dim, sizeof(float*));
network_cp->kernel[i]->cnn->bias[j] = (float**)nalloc(output_width, sizeof(float*));
network_cp->kernel[i]->cnn->d_bias[j] = (float**)nalloc(output_width, sizeof(float*));
#ifdef ADAM_CNN_BIAS
network_cp->kernel[i]->cnn->s_d_bias[j] = (float**)nalloc(output_dim, sizeof(float*));
network_cp->kernel[i]->cnn->v_d_bias[j] = (float**)nalloc(output_dim, sizeof(float*));
network_cp->kernel[i]->cnn->s_d_bias[j] = (float**)nalloc(output_width, sizeof(float*));
network_cp->kernel[i]->cnn->v_d_bias[j] = (float**)nalloc(output_width, sizeof(float*));
#endif
for (int k=0; k < output_dim; k++) {
network_cp->kernel[i]->cnn->bias[j][k] = (float*)nalloc(output_dim, sizeof(float));
network_cp->kernel[i]->cnn->d_bias[j][k] = (float*)nalloc(output_dim, sizeof(float));
for (int k=0; k < output_width; k++) {
network_cp->kernel[i]->cnn->bias[j][k] = (float*)nalloc(output_width, sizeof(float));
network_cp->kernel[i]->cnn->d_bias[j][k] = (float*)nalloc(output_width, sizeof(float));
#ifdef ADAM_CNN_BIAS
network_cp->kernel[i]->cnn->s_d_bias[j][k] = (float*)nalloc(output_dim, sizeof(float));
network_cp->kernel[i]->cnn->v_d_bias[j][k] = (float*)nalloc(output_dim, sizeof(float));
network_cp->kernel[i]->cnn->s_d_bias[j][k] = (float*)nalloc(output_width, sizeof(float));
network_cp->kernel[i]->cnn->v_d_bias[j][k] = (float*)nalloc(output_width, sizeof(float));
#endif
for (int l=0; l < output_dim; l++) {
for (int l=0; l < output_width; l++) {
copyVar(kernel[i]->cnn->bias[j][k][l]);
network_cp->kernel[i]->cnn->d_bias[j][k][l] = 0.;
#ifdef ADAM_CNN_BIAS
@ -316,7 +324,7 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
int rows;
int k_size;
int columns;
int output_dim;
int output_width;
copyVarParams(learning_rate);
@ -340,11 +348,11 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
rows = network_src->kernel[i]->cnn->rows;
k_size = network_src->kernel[i]->cnn->k_size;
columns = network_src->kernel[i]->cnn->columns;
output_dim = network_src->width[i+1];
output_width = network_src->width[i+1];
for (int j=0; j < columns; j++) {
for (int k=0; k < output_dim; k++) {
for (int l=0; l < output_dim; l++) {
for (int k=0; k < output_width; k++) {
for (int l=0; l < output_width; l++) {
copyVarParams(kernel[i]->cnn->bias[j][k][l]);
}
}
@ -377,7 +385,7 @@ int count_null_weights(Network* network) {
int rows;
int k_size;
int columns;
int output_dim;
int output_width;
for (int i=0; i < size-1; i++) {
if (!network->kernel[i]->cnn && network->kernel[i]->nn) { // Cas du NN
@ -399,11 +407,11 @@ int count_null_weights(Network* network) {
rows = network->kernel[i]->cnn->rows;
k_size = network->kernel[i]->cnn->k_size;
columns = network->kernel[i]->cnn->columns;
output_dim = network->width[i+1];
output_width = network->width[i+1];
for (int j=0; j < columns; j++) {
for (int k=0; k < output_dim; k++) {
for (int l=0; l < output_dim; l++) {
for (int k=0; k < output_width; k++) {
for (int l=0; l < output_width; l++) {
null_bias += fabs(network->kernel[i]->cnn->bias[j][k][l]) <= epsilon;
}
}

View File

@ -54,12 +54,12 @@ def generate_data_mul():
def generate_data_conv():
values = []
output_dim = 40
output_width = 40
rows = 40
columns = 40
for i in range(10):
values.append(avg([conv_matrix((i+1)*100, output_dim, rows, columns) for j in range(10)]))
print(f"Added ({(i+1)*100}, output_dim, rows, columns)")
values.append(avg([conv_matrix((i+1)*100, output_width, rows, columns) for j in range(10)]))
print(f"Added ({(i+1)*100}, output_width, rows, columns)")
with open("result_conv.json", "weights") as file:
json.dump(values, file, indent=4)

View File

@ -102,9 +102,9 @@ bool check_matrices_equality(float*** m1, float*** m2, int n, int p, int q, int
return true;
}
void run_convolution_test(int input_dim, int output_dim, int rows, int columns) {
assert(input_dim >= output_dim);
int k_size = input_dim - output_dim +1;
void run_convolution_test(int input_width, int output_width, int rows, int columns) {
assert(input_width >= output_width);
int k_size = input_width - output_width +1;
// Génération des données aléatoires
Kernel_cnn* kernel = (Kernel_cnn*)malloc(sizeof(Kernel_cnn));
@ -145,11 +145,11 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
#endif
}
float*** input = create_matrix(kernel->rows, input_dim, input_dim, 5.0f);
float*** output_cpu = create_empty_matrix(kernel->columns, output_dim, output_dim);
float*** output_gpu = create_empty_matrix(kernel->columns, output_dim, output_dim);
float*** input = create_matrix(kernel->rows, input_width, input_width, 5.0f);
float*** output_cpu = create_empty_matrix(kernel->columns, output_width, output_width);
float*** output_gpu = create_empty_matrix(kernel->columns, output_width, output_width);
//printf("(%d, %d, %d, %d) Data generation complete\n", rows, columns, input_dim, output_dim);
//printf("(%d, %d, %d, %d) Data generation complete\n", rows, columns, input_width, output_width);
// Lancement des calculs
@ -157,7 +157,7 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
double cpu_time_used, gpu_time_used;
start = clock();
make_convolution_device(kernel, input, output_gpu, output_dim, 1);
make_convolution_device(kernel, input, output_gpu, output_width, 1, 0);
end = clock();
gpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
@ -165,15 +165,15 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
start = clock();
make_convolution_cpu(kernel, input, output_cpu, output_dim, 1);
make_convolution_cpu(kernel, input, output_cpu, output_width, 1, 0);
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
printf("CPU: %lf\n", cpu_time_used);
// Vérification de l'égalité des matrices
//printf("(%d, %d, %d, %d) Checking equality.\n", rows, columns, input_dim, output_dim);
if (!check_matrices_equality(output_gpu, output_cpu, kernel->columns, output_dim, output_dim, kernel->k_size)) {// TODO: change acceptation
//printf("(%d, %d, %d, %d) Checking equality.\n", rows, columns, input_width, output_width);
if (!check_matrices_equality(output_gpu, output_cpu, kernel->columns, output_width, output_width, kernel->k_size)) {// TODO: change acceptation
//exit(1);
}
//printf(GREEN "OK\n" RESET);
@ -200,9 +200,9 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
free(kernel->v_d_weights);
#endif
free_matrix(input, kernel->rows, input_dim);
free_matrix(output_cpu, kernel->columns, output_dim);
free_matrix(output_gpu, kernel->columns, output_dim);
free_matrix(input, kernel->rows, input_width);
free_matrix(output_cpu, kernel->columns, output_width);
free_matrix(output_gpu, kernel->columns, output_width);
}
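
The test above derives the kernel size from `k_size = input_width - output_width + 1`, i.e. a valid convolution with stride 1 and no padding. With the `stride` and `padding` fields introduced in these commits, that relation generalizes as in the sketch below; the helper name is ours, not part of the repository.

```c
#include <assert.h>

// Width of a square convolution output, assuming the usual formula
// out = (in + 2*padding - k_size) / stride + 1.
// With stride == 1 and padding == 0 this reduces to the relation used in
// run_convolution_test: output_width = input_width - k_size + 1.
int convolution_output_width(int input_width, int k_size, int stride, int padding) {
    assert(stride > 0 && k_size <= input_width + 2*padding);
    return (input_width + 2*padding - k_size) / stride + 1;
}
```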

View File

@ -93,9 +93,9 @@ bool check_matrices_equality(float*** m1, float*** m2, int n, int p, int q, int
return true;
}
void run_convolution_test(int input_dim, int output_dim, int rows, int columns) {
assert(input_dim >= output_dim);
int k_size = input_dim - output_dim +1;
void run_convolution_test(int input_width, int output_width, int rows, int columns) {
assert(input_width >= output_width);
int k_size = input_width - output_width +1;
// Random data generation
Kernel_cnn* kernel = (Kernel_cnn*)nalloc(1, sizeof(Kernel_cnn));
@ -104,12 +104,12 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
kernel->rows = rows;
kernel->columns = columns;
// bias[kernel->columns][dim_output][dim_output]
kernel->bias = create_matrix(kernel->columns, output_dim, output_dim, 15.0f);
kernel->d_bias = create_matrix(kernel->columns, output_dim, output_dim, 1.5f);
// bias[kernel->columns][output_width][output_width]
kernel->bias = create_matrix(kernel->columns, output_width, output_width, 15.0f);
kernel->d_bias = create_matrix(kernel->columns, output_width, output_width, 1.5f);
#ifdef ADAM_CNN_BIAS
kernel->s_d_bias = create_matrix(kernel->columns, output_dim, output_dim, 1.5f);
kernel->v_d_bias = create_matrix(kernel->columns, output_dim, output_dim, 1.5f);
kernel->s_d_bias = create_matrix(kernel->columns, output_width, output_width, 1.5f);
kernel->v_d_bias = create_matrix(kernel->columns, output_width, output_width, 1.5f);
#endif
// weights[rows][columns][k_size][k_size]
@ -128,11 +128,11 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
#endif
}
float*** input = create_matrix(kernel->rows, input_dim, input_dim, 5.0f);
float*** output_cpu = create_empty_matrix(kernel->columns, output_dim, output_dim);
float*** output_gpu = create_empty_matrix(kernel->columns, output_dim, output_dim);
float*** input = create_matrix(kernel->rows, input_width, input_width, 5.0f);
float*** output_cpu = create_empty_matrix(kernel->columns, output_width, output_width);
float*** output_gpu = create_empty_matrix(kernel->columns, output_width, output_width);
printf("(%d, %d, %d, %d) Data generation complete\n", rows, columns, input_dim, output_dim);
printf("(%d, %d, %d, %d) Data generation complete\n", rows, columns, input_width, output_width);
// Run the computations
@ -140,33 +140,33 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
double cpu_time_used, gpu_time_used;
start_time = omp_get_wtime();
make_convolution_device(kernel, input, output_gpu, output_dim, 1);
make_convolution_device(kernel, input, output_gpu, output_width, 1);
end_time = omp_get_wtime();
gpu_time_used = end_time - start_time;
printf("(%d, %d, %d, %d) Time used for GPU: %lf seconds\n", rows, columns, input_dim, output_dim, gpu_time_used);
printf("(%d, %d, %d, %d) Time used for GPU: %lf seconds\n", rows, columns, input_width, output_width, gpu_time_used);
start_time = omp_get_wtime();
make_convolution_cpu(kernel, input, output_cpu, output_dim, 1);
make_convolution_cpu(kernel, input, output_cpu, output_width, 1);
end_time = omp_get_wtime();
cpu_time_used = end_time - start_time;
printf("(%d, %d, %d, %d) Time used for CPU: %lf seconds\n", rows, columns, input_dim, output_dim, cpu_time_used);
printf("(%d, %d, %d, %d) Time used for CPU: %lf seconds\n", rows, columns, input_width, output_width, cpu_time_used);
// Check that the matrices are equal
printf("(%d, %d, %d, %d) Checking equality.\n", rows, columns, input_dim, output_dim);
if (!check_matrices_equality(output_gpu, output_cpu, kernel->columns, output_dim, output_dim, kernel->k_size)) {// TODO: change the acceptance criterion
printf("(%d, %d, %d, %d) Checking equality.\n", rows, columns, input_width, output_width);
if (!check_matrices_equality(output_gpu, output_cpu, kernel->columns, output_width, output_width, kernel->k_size)) {// TODO: change the acceptance criterion
exit(1);
}
printf(GREEN "OK\n" RESET);
free_matrix(kernel->bias, kernel->columns, output_dim);
free_matrix(kernel->d_bias, kernel->columns, output_dim);
free_matrix(kernel->bias, kernel->columns, output_width);
free_matrix(kernel->d_bias, kernel->columns, output_width);
#ifdef ADAM_CNN_BIAS
free_matrix(kernel->s_d_bias, kernel->columns, output_dim);
free_matrix(kernel->v_d_bias, kernel->columns, output_dim);
free_matrix(kernel->s_d_bias, kernel->columns, output_width);
free_matrix(kernel->v_d_bias, kernel->columns, output_width);
#endif
for (int i=0; i < kernel->rows; i++) {
@ -184,9 +184,9 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
gree(kernel->v_d_weights);
#endif
free_matrix(input, kernel->rows, input_dim);
free_matrix(output_cpu, kernel->columns, output_dim);
free_matrix(output_gpu, kernel->columns, output_dim);
free_matrix(input, kernel->rows, input_width);
free_matrix(output_cpu, kernel->columns, output_width);
free_matrix(output_gpu, kernel->columns, output_width);
}
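
Both test files compare the GPU output against the CPU output element by element and carry a `TODO` about the acceptance criterion. A tolerance-based comparison is one plausible replacement; the sketch below is only an illustration (the function name and the relative threshold are assumptions, not the repository's `check_matrices_equality`).

```c
#include <math.h>
#include <stdbool.h>

// Element-wise comparison of two n x p x q tensors with a relative tolerance,
// so that small CPU/GPU floating-point discrepancies are not reported as failures.
bool matrices_close(float*** m1, float*** m2, int n, int p, int q, float rel_tol) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < p; j++) {
            for (int k = 0; k < q; k++) {
                float diff = fabsf(m1[i][j][k] - m2[i][j][k]);
                float scale = fmaxf(fabsf(m1[i][j][k]), fabsf(m2[i][j][k]));
                if (diff > rel_tol * fmaxf(scale, 1.0f)) {
                    return false;
                }
            }
        }
    }
    return true;
}
```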

View File

@ -40,6 +40,8 @@ int main() {
printf("width: %d\n", network->width[i]);
printf("depth: %d\n", network->depth[i]);
printf("activation: %d\n", kernel->activation);
printf("stride: %d\n", kernel->stride);
printf("padding: %d\n", kernel->padding);
}
printf("\n" GREEN "OK\n" RESET);