12-12-2019 12:08 PM - edited 12-12-2019 12:27 PM
Hello!
I got a problem when reshaping an array.
The array has the dimensions [4][3][3][3] and each element has 8 Bit. Dimensions 1 and 2 are accessed in parallel while 3 and 4 are accessed sequentially.
To reduce BRAM usage I tried to reshape the array in dimensions one and two as follows:
#pragma HLS ARRAY_RESHAPE variable=encoder_conv_00_weights dim=1 complete
That worked well with many other arrays but in this case Vivado HLS stops Synthesis with the following output:
INFO: [XFORM 203-131] Reshaping array 'encoder_conv_00_weights.V' in dimension 1 completely. Stack dump: 0. Running pass 'Array reshaping' on module '/home/jney/HLS_Projects/SegNet_2018/3blocks_4channels_test/.autopilot/db/a.o.1.bc'. Abnormal program termination (11) Please check '/home/jney/HLS_Projects/hs_err_pid10959.log' for details Finished C synthesis.
I also tried reshaping in dimension 2 only, reshaping all dimensions, and reshaping cyclically and block-wise.
In addition, I tested Vivado HLS Version 2017.2 and 2018.3. Everything results in the same error.
You can find the HLS log-file attached.
Thanks for any help!
Best regards
Jonas
12-25-2019 11:09 AM
Hi @j_ney ,
It would be great if you could share a test case with us that reproduces the issue, so that we can report it to development.
12-28-2019 04:53 AM
Hi!
With the following top.cpp file you should be able to reproduce the error:
#include <ap_fixed.h> #include <ap_int.h> typedef ap_fixed<8, 2, AP_RND_ZERO, AP_WRAP> encoder_conv_00_weight_dtype; encoder_conv_00_weight_dtype encoder_conv_00_weights[4][3][3][3] = {{{{-0.140625, 0.046875, -0.28125}, {-0.125, 0.03125, -0.0}, {0.03125, 0.265625, -0.109375}}, {{0.09375, -0.046875, 0.09375}, {-0.1875, 0.046875, -0.25}, {-0.015625, 0.640625, 0.25}}, {{-0.15625, -0.09375, -0.0625}, {0.125, 0.015625, 0.140625}, {0.171875, 0.328125, 0.0625}}}, {{{-0.03125, -0.03125, -0.09375}, {-0.15625, -0.09375, 0.03125}, {-0.109375, -0.140625, -0.015625}}, {{-0.109375, 0.0, 0.0625}, {-0.140625, 0.015625, -0.03125}, {-0.140625, -0.015625, 0.0}}, {{-0.0, 0.109375, 0.046875}, {-0.109375, 0.109375, 0.109375}, {-0.078125, 0.046875, 0.109375}}}, {{{-0.265625, -0.15625, -0.09375}, {-0.203125, -0.203125, 0.109375}, {0.109375, 0.046875, 0.234375}}, {{0.015625, 0.03125, -0.203125}, {-0.25, 0.25, -0.234375}, {-0.046875, 0.203125, 0.21875}}, {{-0.078125, 0.140625, -0.25}, {-0.015625, 0.03125, -0.046875}, {-0.078125, 0.25, -0.03125}}}, {{{-0.0, 0.0, -0.0}, {0.0, 0.0, 0.0}, {-0.0, 0.0, 0.0}}, {{0.0, -0.0, -0.0}, {-0.0, -0.0, 0.0}, {-0.0, 0.0, 0.0}}, {{-0.0, -0.0, -0.0}, {-0.0, 0.0, 0.0}, {-0.0, -0.0, 0.0}}}}; typedef ap_fixed<8, 2, AP_RND_ZERO, AP_WRAP> encoder_conv_01_weight_dtype; encoder_conv_01_weight_dtype encoder_conv_01_weights[4][4][3][3] = {{{{0.125, -0.015625, -0.3125}, {0.296875, -0.09375, 0.046875}, {0.3125, 0.140625, 0.109375}}, {{-0.125, -0.03125, -0.359375}, {0.109375, -0.015625, -0.140625}, {-0.015625, -0.234375, -0.25}}, {{-0.03125, 0.140625, 0.078125}, {-0.171875, 0.140625, -0.03125}, {-0.234375, 0.40625, 0.015625}}, {{0.0, 0.0, -0.0}, {0.0, 0.0, 0.0}, {0.0, -0.0, -0.0}}}, {{{-0.40625, -0.171875, 0.015625}, {-0.109375, -0.03125, -0.0}, {-0.09375, 0.078125, 0.03125}}, {{-0.09375, 0.09375, -0.25}, {0.015625, 0.03125, -0.171875}, {0.015625, -0.0, 0.0625}}, {{0.203125, 0.15625, 0.03125}, {0.140625, 0.171875, -0.046875}, {-0.0, 0.0625, -0.0625}}, {{0.0, 0.0, 0.0}, 
{-0.0, 0.0, 0.0}, {0.0, 0.0, -0.0}}}, {{{-0.0625, -0.375, 0.171875}, {-0.015625, -0.6875, 0.3125}, {0.21875, 0.015625, 0.109375}}, {{-0.046875, 0.15625, 0.0}, {0.359375, -0.09375, 0.328125}, {-0.09375, 0.171875, 0.265625}}, {{-0.046875, 0.046875, -0.0625}, {-0.015625, 0.015625, -0.15625}, {-0.21875, 0.28125, 0.0}}, {{-0.0, -0.0, 0.0}, {0.0, 0.0, -0.0}, {-0.0, 0.0, 0.0}}}, {{{0.0625, 0.03125, -0.0}, {-0.109375, -0.40625, -0.3125}, {0.078125, -0.21875, 0.015625}}, {{-0.1875, -0.0625, 0.0}, {-0.125, -0.09375, -0.328125}, {-0.234375, -0.234375, 0.109375}}, {{-0.078125, -0.015625, -0.0625}, {0.171875, 0.203125, 0.203125}, {0.015625, -0.0625, -0.171875}}, {{0.0, -0.0, -0.0}, {-0.0, 0.0, 0.0}, {0.0, -0.0, -0.0}}}}; template<typename weight_dtype, unsigned int size_1, unsigned int size_2, unsigned int size_3, unsigned int size_4> void mulacc(ap_int<64> &in, ap_int<64> &out, weight_dtype weight[size_1][size_2][size_3][size_4]){ for(int dim_4_iter = 0; dim_4_iter < size_4; dim_4_iter++){ for(int dim_3_iter = 0; dim_3_iter < size_3; dim_3_iter++){ #pragma HLS PIPELINE for(int dim_2_iter = 0; dim_2_iter < size_2; dim_2_iter++){ #pragma HLS UNROLL for(int dim_1_iter = 0; dim_1_iter < size_1; dim_1_iter++){ #pragma HLS UNROLL out += in*weight[dim_1_iter][dim_2_iter][dim_3_iter][dim_4_iter]; } } } } } void toplevel(ap_int<64> *in, ap_int<64> *out) { #pragma HLS INTERFACE s_axilite port=return bundle=control #pragma HLS INTERFACE m_axi offset=slave port=in bundle=input depth=1 #pragma HLS INTERFACE s_axilite port=in bundle=control #pragma HLS INTERFACE m_axi offset=slave port=out bundle=output depth=1 #pragma HLS INTERFACE s_axilite port=out bundle=control #pragma HLS ARRAY_RESHAPE variable=encoder_conv_00_weights dim=1 complete #pragma HLS ARRAY_RESHAPE variable=encoder_conv_00_weights dim=2 complete #pragma HLS ARRAY_RESHAPE variable=encoder_conv_01_weights dim=1 complete #pragma HLS ARRAY_RESHAPE variable=encoder_conv_01_weights dim=2 complete 
mulacc<encoder_conv_00_weight_dtype, 4, 3, 3, 3>(in[0], out[0], encoder_conv_00_weights); mulacc<encoder_conv_01_weight_dtype, 4, 4, 3, 3>(in[0], out[0], encoder_conv_01_weights); }
01-01-2020 09:17 PM
Hi @j_ney, I am able to reproduce the issue in 2018.3. But it looks like the issue got fixed in 2019.1 and later releases. Is it possible for you to use the latest release?
01-03-2020 01:11 AM
Hi. Nice to hear the issue is resolved in 2019.1; unfortunately, it's currently not possible for me to use that version.
But I found another way to resolve the issue.
Instead of reshaping the array I partition it and map it vertically to the storage instances. It's much more coding overhead, but in the end, the results should be the same.