cancel
Showing results for 
Show  only  | Search instead for 
Did you mean: 
j_ney
Contributor
Contributor
705 Views
Registered: ‎11-26-2017

ARRAY RESHAPE Abnormal Program Termination

Hello!

I got a problem when reshaping an array. 

The array has the dimensions [4][3][3][3] and each element has 8 Bit. Dimensions 1 and 2 are accessed in parallel while 3 and 4 are accessed sequentially. 

To reduce BRAM usage I tried to reshape the array in dimensions one and two like the following:

#pragma HLS ARRAY_RESHAPE variable=encoder_conv_00_weights dim=1 complete

That worked well with many other arrays but in this case Vivado HLS stops Synthesis with the following output: 

INFO: [XFORM 203-131] Reshaping array 'encoder_conv_00_weights.V'  in dimension 1 completely.
Stack dump:
0.	Running pass 'Array reshaping' on module '/home/jney/HLS_Projects/SegNet_2018/3blocks_4channels_test/.autopilot/db/a.o.1.bc'.
Abnormal program termination (11)
Please check '/home/jney/HLS_Projects/hs_err_pid10959.log' for details
Finished C synthesis.

I also tried reshaping in dimension 2 only, reshaping all dimensions, and reshaping cyclically and block-wise. 

In addition, I tested Vivado HLS Version 2017.2 and 2018.3. Everything results in the same error.

 

You can find the HLS log-file attached. 

Thanks for any help!

 

Best regards

Jonas

 

0 Kudos
4 Replies
shameera
Moderator
Moderator
606 Views
Registered: ‎05-31-2017

Hi @j_ney ,

It would be great if you could share a test case reproducing the issue with us, so that we can report it to development.

0 Kudos
j_ney
Contributor
Contributor
574 Views
Registered: ‎11-26-2017

Hi!

With the following top.cpp file you should be able to reproduce the error: 

 

#include <ap_fixed.h>
#include <ap_int.h>

// 8-bit fixed-point weight type: ap_fixed<8, 2> = 8 total bits with 2
// integer bits (incl. sign) and 6 fractional bits; rounds toward zero,
// wraps on overflow.
typedef ap_fixed<8, 2, AP_RND_ZERO, AP_WRAP> encoder_conv_00_weight_dtype;
// Constant convolution weights, dimensions [4][3][3][3].
// All initializer values are exact multiples of 1/64 (0.015625), i.e.
// representable in the 6 fractional bits of the type without rounding.
// NOTE(review): dims 1 and 2 of this array are the ones targeted by the
// ARRAY_RESHAPE pragmas in toplevel() that trigger the reported crash.
encoder_conv_00_weight_dtype encoder_conv_00_weights[4][3][3][3] =
	{{{{-0.140625, 0.046875, -0.28125}, {-0.125, 0.03125, -0.0}, {0.03125, 0.265625, -0.109375}},
	{{0.09375, -0.046875, 0.09375}, {-0.1875, 0.046875, -0.25}, {-0.015625, 0.640625, 0.25}},
	{{-0.15625, -0.09375, -0.0625}, {0.125, 0.015625, 0.140625}, {0.171875, 0.328125, 0.0625}}},
	{{{-0.03125, -0.03125, -0.09375}, {-0.15625, -0.09375, 0.03125}, {-0.109375, -0.140625, -0.015625}},
	{{-0.109375, 0.0, 0.0625}, {-0.140625, 0.015625, -0.03125}, {-0.140625, -0.015625, 0.0}},
	{{-0.0, 0.109375, 0.046875}, {-0.109375, 0.109375, 0.109375}, {-0.078125, 0.046875, 0.109375}}},
	{{{-0.265625, -0.15625, -0.09375}, {-0.203125, -0.203125, 0.109375}, {0.109375, 0.046875, 0.234375}},
	{{0.015625, 0.03125, -0.203125}, {-0.25, 0.25, -0.234375}, {-0.046875, 0.203125, 0.21875}},
	{{-0.078125, 0.140625, -0.25}, {-0.015625, 0.03125, -0.046875}, {-0.078125, 0.25, -0.03125}}},
	{{{-0.0, 0.0, -0.0}, {0.0, 0.0, 0.0}, {-0.0, 0.0, 0.0}},
	{{0.0, -0.0, -0.0}, {-0.0, -0.0, 0.0}, {-0.0, 0.0, 0.0}},
	{{-0.0, -0.0, -0.0}, {-0.0, 0.0, 0.0}, {-0.0, -0.0, 0.0}}}};


// 8-bit fixed-point weight type: ap_fixed<8, 2> = 8 total bits with 2
// integer bits (incl. sign) and 6 fractional bits; rounds toward zero,
// wraps on overflow. Same format as encoder_conv_00_weight_dtype.
typedef ap_fixed<8, 2, AP_RND_ZERO, AP_WRAP> encoder_conv_01_weight_dtype;
// Constant convolution weights, dimensions [4][4][3][3] (note: second
// dimension is 4 here, vs. 3 in the other weight array).
// All initializer values are exact multiples of 1/64 (0.015625), so they
// fit the 6 fractional bits of the type without rounding.
encoder_conv_01_weight_dtype encoder_conv_01_weights[4][4][3][3] =
	{{{{0.125, -0.015625, -0.3125}, {0.296875, -0.09375, 0.046875}, {0.3125, 0.140625, 0.109375}},
	{{-0.125, -0.03125, -0.359375}, {0.109375, -0.015625, -0.140625}, {-0.015625, -0.234375, -0.25}},
	{{-0.03125, 0.140625, 0.078125}, {-0.171875, 0.140625, -0.03125}, {-0.234375, 0.40625, 0.015625}},
	{{0.0, 0.0, -0.0}, {0.0, 0.0, 0.0}, {0.0, -0.0, -0.0}}},
	{{{-0.40625, -0.171875, 0.015625}, {-0.109375, -0.03125, -0.0}, {-0.09375, 0.078125, 0.03125}},
	{{-0.09375, 0.09375, -0.25}, {0.015625, 0.03125, -0.171875}, {0.015625, -0.0, 0.0625}},
	{{0.203125, 0.15625, 0.03125}, {0.140625, 0.171875, -0.046875}, {-0.0, 0.0625, -0.0625}},
	{{0.0, 0.0, 0.0}, {-0.0, 0.0, 0.0}, {0.0, 0.0, -0.0}}},
	{{{-0.0625, -0.375, 0.171875}, {-0.015625, -0.6875, 0.3125}, {0.21875, 0.015625, 0.109375}},
	{{-0.046875, 0.15625, 0.0}, {0.359375, -0.09375, 0.328125}, {-0.09375, 0.171875, 0.265625}},
	{{-0.046875, 0.046875, -0.0625}, {-0.015625, 0.015625, -0.15625}, {-0.21875, 0.28125, 0.0}},
	{{-0.0, -0.0, 0.0}, {0.0, 0.0, -0.0}, {-0.0, 0.0, 0.0}}},
	{{{0.0625, 0.03125, -0.0}, {-0.109375, -0.40625, -0.3125}, {0.078125, -0.21875, 0.015625}},
	{{-0.1875, -0.0625, 0.0}, {-0.125, -0.09375, -0.328125}, {-0.234375, -0.234375, 0.109375}},
	{{-0.078125, -0.015625, -0.0625}, {0.171875, 0.203125, 0.203125}, {0.015625, -0.0625, -0.171875}},
	{{0.0, -0.0, -0.0}, {-0.0, 0.0, 0.0}, {0.0, -0.0, -0.0}}}};

// Multiply-accumulate: for every element w of a 4-D weight array,
// adds in * w onto out.
//
// Template parameters:
//   weight_dtype     element type of the weight array (e.g. an ap_fixed)
//   size_1..size_4   extents of the four array dimensions
// Parameters:
//   in     scalar input, read only; the same value is multiplied by
//          every weight element
//   out    accumulator; each product is added onto its current value
//          (out is NOT cleared here — the caller owns its initial value)
//   weight 4-D weight array, indexed [dim1][dim2][dim3][dim4]
//
// Loop nest: dims 4 and 3 are the outer, sequential loops (PIPELINEd);
// dims 2 and 1 are fully UNROLLed, so dimensions 1 and 2 of the array
// are accessed in parallel — matching the ARRAY_RESHAPE of dims 1/2
// applied at the call site.
template<typename weight_dtype,
unsigned int size_1,
unsigned int size_2,
unsigned int size_3,
unsigned int size_4>
void mulacc(ap_int<64> &in, ap_int<64> &out, weight_dtype weight[size_1][size_2][size_3][size_4]){
	// Loop counters are unsigned to match the unsigned template extents;
	// the original 'int' counters caused signed/unsigned comparisons in
	// every loop condition.
	for(unsigned int dim_4_iter = 0; dim_4_iter < size_4; dim_4_iter++){
		for(unsigned int dim_3_iter = 0; dim_3_iter < size_3; dim_3_iter++){
			#pragma HLS PIPELINE
			for(unsigned int dim_2_iter = 0; dim_2_iter < size_2; dim_2_iter++){
			#pragma HLS UNROLL
				for(unsigned int dim_1_iter = 0; dim_1_iter < size_1; dim_1_iter++){
				#pragma HLS UNROLL
					out += in*weight[dim_1_iter][dim_2_iter][dim_3_iter][dim_4_iter];
				}
			}
		}
	}
}


// Minimal top-level reproducer for the ARRAY_RESHAPE crash: multiply-
// accumulates in[0] against the two constant weight arrays, summing the
// products onto out[0].
void toplevel(ap_int<64> *in,
	      ap_int<64> *out)
		{

// Control via AXI-Lite; data pointers via AXI master (depth=1: a single
// 64-bit word is read from 'in' and accumulated into 'out').
#pragma HLS INTERFACE s_axilite port=return bundle=control
#pragma HLS INTERFACE m_axi offset=slave port=in  bundle=input depth=1
#pragma HLS INTERFACE s_axilite port=in bundle=control
#pragma HLS INTERFACE m_axi offset=slave port=out  bundle=output depth=1
#pragma HLS INTERFACE s_axilite port=out bundle=control

// Reshape dims 1 and 2 (the dimensions mulacc fully unrolls) to cut
// BRAM usage. These pragmas are what trigger the reported "Abnormal
// program termination (11)" in the 'Array reshaping' pass of Vivado
// HLS 2017.2/2018.3 (fixed in 2019.1 per the thread below).
#pragma HLS ARRAY_RESHAPE variable=encoder_conv_00_weights dim=1 complete
#pragma HLS ARRAY_RESHAPE variable=encoder_conv_00_weights dim=2 complete

#pragma HLS ARRAY_RESHAPE variable=encoder_conv_01_weights dim=1 complete
#pragma HLS ARRAY_RESHAPE variable=encoder_conv_01_weights dim=2 complete


	// NOTE(review): out[0] is accumulated into but never initialized
	// here — presumably the caller pre-zeroes it; confirm.
	mulacc<encoder_conv_00_weight_dtype, 4, 3, 3, 3>(in[0], out[0], encoder_conv_00_weights);
	mulacc<encoder_conv_01_weight_dtype, 4, 4, 3, 3>(in[0], out[0], encoder_conv_01_weights);


}


0 Kudos
viswanad
Xilinx Employee
Xilinx Employee
516 Views
Registered: ‎05-16-2018

Hi @j_ney, I am able to reproduce the issue in 2018.3, but it looks like the issue was fixed in 2019.1 and later releases. Is it possible for you to use the latest release?

j_ney
Contributor
Contributor
487 Views
Registered: ‎11-26-2017

Hi. Nice to hear the issue is resolved in 2019.1, unfortunately, it's currently not possible for me to use this version. 

But I found another way to resolve the issue. 

Instead of reshaping the array I partition it and map it vertically to the storage instances. It's much more coding overhead, but in the end, the results should be the same. 

0 Kudos