Change 'w' and 'd_w' to 'weights' and 'd_weights'

commit c13772c291
parent 3148dd56ad
author julienChemillier 2023-02-19 13:38:33 +01:00
16 changed files with 82 additions and 82 deletions

View File

@@ -66,9 +66,9 @@ type | variable name | comment
 float|bias[0][0][0]|bias
 float|...|
 float|bias[cnn->columns-1][cnn->k_size-1][cnn->k_size-1]|
-float|w[0][0][0][0]|weights
+float|weights[0][0][0][0]|weights
 float|...|
-float|w[cnn->rows][cnn->columns-1][cnn->k_size-1][cnn->k_size-1]|
+float|weights[cnn->rows][cnn->columns-1][cnn->k_size-1][cnn->k_size-1]|
 - If the layer is of type nn, the weights are added in increasing order of their indices:
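For orientation: the table above lists all biases first, then all weights, each flattened in increasing (row-major) index order, which is what `write_couche` later in this commit does via `bufferAdd`. A minimal sketch of the weight-emission order (the `emit_float` writer is a hypothetical stand-in for the repo's `bufferAdd`):

    /* Hypothetical sketch: weights are emitted in increasing index order,
     * matching the weights[rows][columns][k_size][k_size] layout in the table. */
    void write_weights(const Kernel_cnn* cnn, void (*emit_float)(float)) {
        for (int i = 0; i < cnn->rows; i++)
            for (int j = 0; j < cnn->columns; j++)
                for (int k = 0; k < cnn->k_size; k++)
                    for (int l = 0; l < cnn->k_size; l++)
                        emit_float(cnn->weights[i][j][k][l]);
    }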

View File

@@ -131,7 +131,7 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
                     tmp += input[h][l+j][m+k]*output[i][l][m];
                 }
             }
-            ker->d_w[h][i][j][k] += tmp;
+            ker->d_weights[h][i][j][k] += tmp;
         }
     }
 }
@@ -152,7 +152,7 @@ void backward_convolution(Kernel_cnn* ker, float*** input, float*** input_z, flo
             max_n = min(k_size, dim_input-k);
             for (int m=min_m; m < max_m; m++) {
                 for (int n=min_n; n < max_n; n++) {
-                    tmp += output[l][j-k_size+m+1][k-k_size+n+1]*ker->w[i][l][m][n];
+                    tmp += output[l][j-k_size+m+1][k-k_size+n+1]*ker->weights[i][l][m][n];
                 }
             }
         }
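In equation form (our notation, not the repo's; K = k_size, x the layer input, \delta the gradient arriving at the output): the first hunk accumulates the weight gradient, and the second propagates the error back to the input through the same filter:

    \frac{\partial L}{\partial w_{h,i,j,k}} \mathrel{+}= \sum_{l,m} x_{h,\,l+j,\,m+k}\,\delta_{i,l,m}
    \frac{\partial L}{\partial x_{i,j,k}} \mathrel{+}= \sum_{l,m,n} \delta_{l,\,j-K+m+1,\,k-K+n+1}\,w_{i,l,m,n}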

View File

@@ -24,7 +24,7 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
             for (int a=0; a < kernel->rows; a++) { // Color channel
                 for (int b=0; b < kernel->k_size; b++) { // Filter row
                     for (int c=0; c < kernel->k_size; c++) { // Filter column
-                        f += kernel->w[a][i][b][c]*input[a][j+b][k+c];
+                        f += kernel->weights[a][i][b][c]*input[a][j+b][k+c];
                     }
                 }
             }
@@ -51,7 +51,7 @@ __global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, floa
     for (int a=0; a < kernel->rows; a++) {
         for (int b=0; b < kernel->k_size; b++) {
             for (int c=0; c < kernel->k_size; c++) {
-                f += kernel->w[a][idx][b][c]*input[a][idy+b][idz+c];
+                f += kernel->weights[a][idx][b][c]*input[a][idy+b][idz+c];
             }
         }
     }
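In our notation (not the repo's), the inner loops above compute, for output channel i at position (j, k), with K = k_size:

    out_{i,j,k} = b_{i,j,k} + \sum_{a=0}^{rows-1}\sum_{b=0}^{K-1}\sum_{c=0}^{K-1} w_{a,i,b,c}\, x_{a,\,j+b,\,k+c}

where the bias term comes from the initialization of the accumulator f, outside the lines shown in these hunks.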

View File

@@ -24,7 +24,7 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
             for (int a=0; a < kernel->rows; a++) { // Color channel
                 for (int b=0; b < kernel->k_size; b++) { // Filter row
                     for (int c=0; c < kernel->k_size; c++) { // Filter column
-                        f += kernel->w[a][i][b][c]*input[a][j+b][k+c];
+                        f += kernel->weights[a][i][b][c]*input[a][j+b][k+c];
                     }
                 }
             }
@@ -51,7 +51,7 @@ __global__ void make_convolution_kernel(Kernel_cnn* kernel, float*** input, floa
     for (int a=0; a < kernel->rows; a++) {
         for (int b=0; b < kernel->k_size; b++) {
             for (int c=0; c < kernel->k_size; c++) {
-                f += kernel->w[a][idx][b][c]*input[a][idy+b][idz+c];
+                f += kernel->weights[a][idx][b][c]*input[a][idy+b][idz+c];
             }
         }
     }

View File

@@ -163,19 +163,19 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
     cnn->k_size = kernel_size;
     cnn->rows = depth_input;
     cnn->columns = depth_output;
-    cnn->w = (float****)nalloc(sizeof(float***)*depth_input);
-    cnn->d_w = (float****)nalloc(sizeof(float***)*depth_input);
+    cnn->weights = (float****)nalloc(sizeof(float***)*depth_input);
+    cnn->d_weights = (float****)nalloc(sizeof(float***)*depth_input);
     for (int i=0; i < depth_input; i++) {
-        cnn->w[i] = (float***)nalloc(sizeof(float**)*depth_output);
-        cnn->d_w[i] = (float***)nalloc(sizeof(float**)*depth_output);
+        cnn->weights[i] = (float***)nalloc(sizeof(float**)*depth_output);
+        cnn->d_weights[i] = (float***)nalloc(sizeof(float**)*depth_output);
         for (int j=0; j < depth_output; j++) {
-            cnn->w[i][j] = (float**)nalloc(sizeof(float*)*kernel_size);
-            cnn->d_w[i][j] = (float**)nalloc(sizeof(float*)*kernel_size);
+            cnn->weights[i][j] = (float**)nalloc(sizeof(float*)*kernel_size);
+            cnn->d_weights[i][j] = (float**)nalloc(sizeof(float*)*kernel_size);
             for (int k=0; k < kernel_size; k++) {
-                cnn->w[i][j][k] = (float*)nalloc(sizeof(float)*kernel_size);
-                cnn->d_w[i][j][k] = (float*)nalloc(sizeof(float)*kernel_size);
+                cnn->weights[i][j][k] = (float*)nalloc(sizeof(float)*kernel_size);
+                cnn->d_weights[i][j][k] = (float*)nalloc(sizeof(float)*kernel_size);
                 for (int l=0; l < kernel_size; l++) {
-                    cnn->d_w[i][j][k][l] = 0.;
+                    cnn->d_weights[i][j][k][l] = 0.;
                 }
             }
         }
@@ -196,7 +196,7 @@ void add_convolution(Network* network, int depth_output, int dim_output, int act
     int n_in = network->width[n-1]*network->width[n-1]*network->depth[n-1];
     int n_out = network->width[n]*network->width[n]*network->depth[n];
     initialisation_3d_matrix(network->initialisation, cnn->bias, depth_output, dim_output, dim_output, n_in, n_out);
-    initialisation_4d_matrix(network->initialisation, cnn->w, depth_input, depth_output, kernel_size, kernel_size, n_in, n_out);
+    initialisation_4d_matrix(network->initialisation, cnn->weights, depth_input, depth_output, kernel_size, kernel_size, n_in, n_out);
     create_a_cube_input_layer(network, n, depth_output, bias_size);
     create_a_cube_input_z_layer(network, n, depth_output, bias_size);
     network->size++;

View File

@@ -32,7 +32,7 @@ void print_poids_ker_cnn(char* modele) {
             for (int k=0; k < kernel_cnn->k_size; k++) {
                 printf("\t\t\t\t[");
                 for (int l=0; l < kernel_cnn->k_size; l++) {
-                    printf("%lf", kernel_cnn->w[i][j][k][l]);
+                    printf("%lf", kernel_cnn->weights[i][j][k][l]);
                     if (l != kernel_cnn->k_size-1) {
                         printf(", ");
                     }

View File

@@ -52,17 +52,17 @@ void free_convolution(Network* network, int pos) {
     for (int i=0; i < r; i++) {
         for (int j=0; j < c; j++) {
             for (int k=0; k < k_size; k++) {
-                gree(k_pos->w[i][j][k]);
-                gree(k_pos->d_w[i][j][k]);
+                gree(k_pos->weights[i][j][k]);
+                gree(k_pos->d_weights[i][j][k]);
             }
-            gree(k_pos->w[i][j]);
-            gree(k_pos->d_w[i][j]);
+            gree(k_pos->weights[i][j]);
+            gree(k_pos->d_weights[i][j]);
         }
-        gree(k_pos->w[i]);
-        gree(k_pos->d_w[i]);
+        gree(k_pos->weights[i]);
+        gree(k_pos->d_weights[i]);
     }
-    gree(k_pos->w);
-    gree(k_pos->d_w);
+    gree(k_pos->weights);
+    gree(k_pos->d_weights);
     gree(k_pos);
 }

View File

@@ -9,7 +9,7 @@ void make_convolution_cpu(Kernel_cnn* kernel, float*** input, float*** output, i
 /*
  * Convolution kernel on the GPU
  */
-__global__ void make_convolution_kernel(int k_size, int columns, int rows, float*** bias, size_t pitch_bias, float**** w, size_t pitch_w, float*** input, size_t pitch_input, float*** output, size_t pitch_output, int output_dim);
+__global__ void make_convolution_kernel(int k_size, int columns, int rows, float*** bias, size_t pitch_bias, float**** weights, size_t pitch_weights, float*** input, size_t pitch_input, float*** output, size_t pitch_output, int output_dim);
 /*
  * Performs the convolution naively on the GPU
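The `size_t pitch_*` parameters in the prototype above indicate row-pitched GPU buffers. As a reminder of the convention (a generic sketch, not code from this commit): each row is padded to `pitch` bytes, so element (row, col) is reached by byte-offsetting the base pointer.

    /* Hypothetical pitched-indexing helper, for illustration only. */
    static float get_pitched(const void* base, size_t pitch, int row, int col) {
        const char* row_start = (const char*)base + (size_t)row * pitch;  /* pitch is in bytes */
        return ((const float*)row_start)[col];
    }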

View File

@@ -7,8 +7,8 @@ typedef struct Kernel_cnn {
     int columns; // Depth of the output
     float*** bias; // bias[columns][dim_output][dim_output]
     float*** d_bias; // d_bias[columns][dim_output][dim_output]
-    float**** w; // w[rows][columns][k_size][k_size]
-    float**** d_w; // d_w[rows][columns][k_size][k_size]
+    float**** weights; // weights[rows][columns][k_size][k_size]
+    float**** d_weights; // d_weights[rows][columns][k_size][k_size]
 } Kernel_cnn;
 typedef struct Kernel_nn {
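Reading the layout comments above together with the convolution loops elsewhere in this commit (`kernel->weights[a][i][b][c]*input[a][j+b][k+c]`, and `cnn->rows = depth_input; cnn->columns = depth_output;` in add_convolution), `weights[r][c][i][j]` is the coefficient linking input channel r to output channel c at filter offset (i, j). A minimal accessor sketch (the helper name is ours, not the repo's):

    /* Hypothetical accessor illustrating the weights[rows][columns][k_size][k_size] layout:
     * rows = input depth, columns = output depth, k_size = filter side. */
    static float kernel_weight(const Kernel_cnn* k, int in_ch, int out_ch, int i, int j) {
        return k->weights[in_ch][out_ch][i][j];
    }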

View File

@@ -98,7 +98,7 @@ void write_couche(Network* network, int indice_couche, int type_couche, FILE* pt
     for (int j=0; j < cnn->columns; j++) {
         for (int k=0; k < cnn->k_size; k++) {
             for (int l=0; l < cnn->k_size; l++) {
-                bufferAdd(cnn->w[i][j][k][l]);
+                bufferAdd(cnn->weights[i][j][k][l]);
             }
         }
     }
@@ -259,21 +259,21 @@ Kernel* read_kernel(int type_couche, int output_dim, FILE* ptr) {
         }
     }
-    cnn->w = (float****)nalloc(sizeof(float***)*cnn->rows);
-    cnn->d_w = (float****)nalloc(sizeof(float***)*cnn->rows);
+    cnn->weights = (float****)nalloc(sizeof(float***)*cnn->rows);
+    cnn->d_weights = (float****)nalloc(sizeof(float***)*cnn->rows);
     for (int i=0; i < cnn->rows; i++) {
-        cnn->w[i] = (float***)nalloc(sizeof(float**)*cnn->columns);
-        cnn->d_w[i] = (float***)nalloc(sizeof(float**)*cnn->columns);
+        cnn->weights[i] = (float***)nalloc(sizeof(float**)*cnn->columns);
+        cnn->d_weights[i] = (float***)nalloc(sizeof(float**)*cnn->columns);
         for (int j=0; j < cnn->columns; j++) {
-            cnn->w[i][j] = (float**)nalloc(sizeof(float*)*cnn->k_size);
-            cnn->d_w[i][j] = (float**)nalloc(sizeof(float*)*cnn->k_size);
+            cnn->weights[i][j] = (float**)nalloc(sizeof(float*)*cnn->k_size);
+            cnn->d_weights[i][j] = (float**)nalloc(sizeof(float*)*cnn->k_size);
             for (int k=0; k < cnn->k_size; k++) {
-                cnn->w[i][j][k] = (float*)nalloc(sizeof(float)*cnn->k_size);
-                cnn->d_w[i][j][k] = (float*)nalloc(sizeof(float)*cnn->k_size);
+                cnn->weights[i][j][k] = (float*)nalloc(sizeof(float)*cnn->k_size);
+                cnn->d_weights[i][j][k] = (float*)nalloc(sizeof(float)*cnn->k_size);
                 for (int l=0; l < cnn->k_size; l++) {
                     fread(&tmp, sizeof(tmp), 1, ptr);
-                    cnn->w[i][j][k][l] = tmp;
-                    cnn->d_w[i][j][k][l] = 0.;
+                    cnn->weights[i][j][k][l] = tmp;
+                    cnn->d_weights[i][j][k][l] = 0.;
                 }
             }
         }

View File

@@ -34,7 +34,7 @@ void print_kernel_cnn(Kernel_cnn* ker, int depth_input, int dim_input, int depth
     for (int j=0; j<depth_output; j++) {
         for (int k=0; k<k_size; k++) {
             for (int l=0; l<k_size; l++) {
-                printf("%.2f", ker->w[i][j][k][l]);
+                printf("%.2f", ker->weights[i][j][k][l]);
             }
             print_space;
         }

View File

@@ -24,13 +24,13 @@ void update_weights(Network* network, Network* d_network) {
         for (int b=0; b<output_depth; b++) {
             for (int c=0; c<k_size; c++) {
                 for (int d=0; d<k_size; d++) {
-                    cnn->w[a][b][c][d] -= network->learning_rate * d_cnn->d_w[a][b][c][d];
-                    d_cnn->d_w[a][b][c][d] = 0;
-                    if (cnn->w[a][b][c][d] > MAX_RESEAU)
-                        cnn->w[a][b][c][d] = MAX_RESEAU;
-                    else if (cnn->w[a][b][c][d] < -MAX_RESEAU)
-                        cnn->w[a][b][c][d] = -MAX_RESEAU;
+                    cnn->weights[a][b][c][d] -= network->learning_rate * d_cnn->d_weights[a][b][c][d];
+                    d_cnn->d_weights[a][b][c][d] = 0;
+                    if (cnn->weights[a][b][c][d] > MAX_RESEAU)
+                        cnn->weights[a][b][c][d] = MAX_RESEAU;
+                    else if (cnn->weights[a][b][c][d] < -MAX_RESEAU)
+                        cnn->weights[a][b][c][d] = -MAX_RESEAU;
                 }
             }
         }
@@ -133,7 +133,7 @@ void reset_d_weights(Network* network) {
         for (int b=0; b<output_depth; b++) {
            for (int c=0; c<k_size; c++) {
                 for (int d=0; d<k_size; d++) {
-                    cnn->d_w[a][b][c][d] = 0;
+                    cnn->d_weights[a][b][c][d] = 0;
                 }
             }
         }
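Taken on its own, the rule in `update_weights` above is plain SGD with each weight hard-clipped to [-MAX_RESEAU, MAX_RESEAU] and the gradient reset immediately after use. A standalone sketch of the same step (the scalar helper and its signature are ours):

    /* Hypothetical scalar version of the update applied above. */
    static float sgd_step_clipped(float w, float d_w, float learning_rate, float max_w) {
        w -= learning_rate * d_w;        /* gradient-descent step */
        if (w > max_w) w = max_w;        /* clip into [-max_w, max_w] */
        else if (w < -max_w) w = -max_w;
        return w;                        /* the caller then zeroes d_w */
    }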

View File

@@ -83,7 +83,7 @@ bool equals_networks(Network* network1, Network* network2) {
     for (int k=0; k < network1->kernel[i]->cnn->columns; k++) {
         for (int l=0; l < network1->kernel[i]->cnn->k_size; l++) {
             for (int m=0; m < network1->kernel[i]->cnn->k_size; m++) {
-                checkEquals(kernel[i]->cnn->w[j][k][l][m], "kernel[i]->cnn->bias[j][k][l][m]", m);
+                checkEquals(kernel[i]->cnn->weights[j][k][l][m], "kernel[i]->cnn->bias[j][k][l][m]", m);
             }
         }
     }
@@ -197,20 +197,20 @@ Network* copy_network(Network* network) {
         }
     }
-    network_cp->kernel[i]->cnn->w = (float****)nalloc(sizeof(float***)*rows);
-    network_cp->kernel[i]->cnn->d_w = (float****)nalloc(sizeof(float***)*rows);
+    network_cp->kernel[i]->cnn->weights = (float****)nalloc(sizeof(float***)*rows);
+    network_cp->kernel[i]->cnn->d_weights = (float****)nalloc(sizeof(float***)*rows);
     for (int j=0; j < rows; j++) {
-        network_cp->kernel[i]->cnn->w[j] = (float***)nalloc(sizeof(float**)*columns);
-        network_cp->kernel[i]->cnn->d_w[j] = (float***)nalloc(sizeof(float**)*columns);
+        network_cp->kernel[i]->cnn->weights[j] = (float***)nalloc(sizeof(float**)*columns);
+        network_cp->kernel[i]->cnn->d_weights[j] = (float***)nalloc(sizeof(float**)*columns);
         for (int k=0; k < columns; k++) {
-            network_cp->kernel[i]->cnn->w[j][k] = (float**)nalloc(sizeof(float*)*k_size);
-            network_cp->kernel[i]->cnn->d_w[j][k] = (float**)nalloc(sizeof(float*)*k_size);
+            network_cp->kernel[i]->cnn->weights[j][k] = (float**)nalloc(sizeof(float*)*k_size);
+            network_cp->kernel[i]->cnn->d_weights[j][k] = (float**)nalloc(sizeof(float*)*k_size);
             for (int l=0; l < k_size; l++) {
-                network_cp->kernel[i]->cnn->w[j][k][l] = (float*)nalloc(sizeof(float)*k_size);
-                network_cp->kernel[i]->cnn->d_w[j][k][l] = (float*)nalloc(sizeof(float)*k_size);
+                network_cp->kernel[i]->cnn->weights[j][k][l] = (float*)nalloc(sizeof(float)*k_size);
+                network_cp->kernel[i]->cnn->d_weights[j][k][l] = (float*)nalloc(sizeof(float)*k_size);
                 for (int m=0; m < k_size; m++) {
-                    copyVar(kernel[i]->cnn->w[j][k][l][m]);
-                    network_cp->kernel[i]->cnn->d_w[j][k][l][m] = 0.;
+                    copyVar(kernel[i]->cnn->weights[j][k][l][m]);
+                    network_cp->kernel[i]->cnn->d_weights[j][k][l][m] = 0.;
                 }
             }
         }
@@ -297,7 +297,7 @@ void copy_network_parameters(Network* network_src, Network* network_dest) {
     for (int k=0; k < columns; k++) {
         for (int l=0; l < k_size; l++) {
             for (int m=0; m < k_size; m++) {
-                copyVarParams(kernel[i]->cnn->w[j][k][l][m]);
+                copyVarParams(kernel[i]->cnn->weights[j][k][l][m]);
             }
         }
     }
@@ -356,7 +356,7 @@ int count_null_weights(Network* network) {
     for (int k=0; k < columns; k++) {
         for (int l=0; l < k_size; l++) {
             for (int m=0; m < k_size; m++) {
-                null_weights = fabs(network->kernel[i]->cnn->w[j][k][l][m]) <= epsilon;
+                null_weights = fabs(network->kernel[i]->cnn->weights[j][k][l][m]) <= epsilon;
             }
         }
     }

View File

@@ -48,7 +48,7 @@ def generate_data_mul():
         values.append(avg([mul_matrix((i+1)*100, depth, (i+1)*100) for j in range(10)]))
         print(f"Added M({(i+1)*100}x{depth}) x M({depth}x{(i+1)*100})")
-    with open("result_mul.json", "w") as file:
+    with open("result_mul.json", "weights") as file:
         json.dump(values, file, indent=4)
@@ -61,7 +61,7 @@ def generate_data_conv():
         values.append(avg([conv_matrix((i+1)*100, output_dim, rows, columns) for j in range(10)]))
        print(f"Added ({(i+1)*100}, output_dim, rows, columns)")
-    with open("result_conv.json", "w") as file:
+    with open("result_conv.json", "weights") as file:
        json.dump(values, file, indent=4)
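Note for readers: in these two hunks the blanket rename also rewrote the file-open mode string, turning `open(..., "w")` into `open(..., "weights")`. "weights" is not a valid mode for Python's built-in `open()`, so both functions will raise ValueError when run; the mode argument should have stayed `"w"`.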

View File

@@ -118,12 +118,12 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
     kernel->bias = create_matrix(kernel->columns, output_dim, output_dim, 15.0f);
     kernel->d_bias = create_matrix(kernel->columns, output_dim, output_dim, 1.5f);
-    // w[rows][columns][k_size][k_size]
-    kernel->w = (float****)malloc(sizeof(float***)*kernel->rows);
-    kernel->d_w = (float****)malloc(sizeof(float***)*kernel->rows);
+    // weights[rows][columns][k_size][k_size]
+    kernel->weights = (float****)malloc(sizeof(float***)*kernel->rows);
+    kernel->d_weights = (float****)malloc(sizeof(float***)*kernel->rows);
     for (int i=0; i < kernel->rows; i++) {
-        kernel->w[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f);
-        kernel->d_w[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
+        kernel->weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f);
+        kernel->d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
     }
     float*** input = create_matrix(kernel->rows, input_dim, input_dim, 5.0f);
@@ -163,11 +163,11 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
     free_matrix(kernel->d_bias, kernel->columns, output_dim);
     for (int i=0; i < kernel->rows; i++) {
-        free_matrix(kernel->w[i], kernel->columns, kernel->k_size);
-        free_matrix(kernel->d_w[i], kernel->columns, kernel->k_size);
+        free_matrix(kernel->weights[i], kernel->columns, kernel->k_size);
+        free_matrix(kernel->d_weights[i], kernel->columns, kernel->k_size);
     }
-    free(kernel->w);
-    free(kernel->d_w);
+    free(kernel->weights);
+    free(kernel->d_weights);
     free_matrix(input, kernel->rows, input_dim);
     free_matrix(output_cpu, kernel->columns, output_dim);

View File

@@ -108,12 +108,12 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
     kernel->bias = create_matrix(kernel->columns, output_dim, output_dim, 15.0f);
     kernel->d_bias = create_matrix(kernel->columns, output_dim, output_dim, 1.5f);
-    // w[rows][columns][k_size][k_size]
-    kernel->w = (float****)nalloc(sizeof(float***)*kernel->rows);
-    kernel->d_w = (float****)nalloc(sizeof(float***)*kernel->rows);
+    // weights[rows][columns][k_size][k_size]
+    kernel->weights = (float****)nalloc(sizeof(float***)*kernel->rows);
+    kernel->d_weights = (float****)nalloc(sizeof(float***)*kernel->rows);
     for (int i=0; i < kernel->rows; i++) {
-        kernel->w[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f);
-        kernel->d_w[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
+        kernel->weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 15.0f);
+        kernel->d_weights[i] = create_matrix(kernel->columns, kernel->k_size, kernel->k_size, 1.5f);
     }
     float*** input = create_matrix(kernel->rows, input_dim, input_dim, 5.0f);
@@ -154,11 +154,11 @@ void run_convolution_test(int input_dim, int output_dim, int rows, int columns)
     free_matrix(kernel->d_bias, kernel->columns, output_dim);
     for (int i=0; i < kernel->rows; i++) {
-        free_matrix(kernel->w[i], kernel->columns, kernel->k_size);
-        free_matrix(kernel->d_w[i], kernel->columns, kernel->k_size);
+        free_matrix(kernel->weights[i], kernel->columns, kernel->k_size);
+        free_matrix(kernel->d_weights[i], kernel->columns, kernel->k_size);
     }
-    gree(kernel->w);
-    gree(kernel->d_w);
+    gree(kernel->weights);
+    gree(kernel->d_weights);
     free_matrix(input, kernel->rows, input_dim);
     free_matrix(output_cpu, kernel->columns, output_dim);