I solved this by following what you guys have done: using a custom AXI stream type and custom Mat-to-stream conversion functions instead of
xf::cv::AXIvideo2xfMat(_src, src_mat);
xf::cv::xfMat2AXIvideo(src_mat, _dst);
The issue is indeed that the PYNQ DMA code is not compatible with the sideband signals (TUSER, etc.) that the stock conversion functions expect.
It gets stuck on the receive side.
// One AXI4-Stream beat carrying a single 8-bit pixel plus the TLAST sideband.
// Vitis HLS maps a struct member named `last` onto the stream's TLAST signal.
// `last` is declared ap_uint<1> (the UG1399 convention) rather than ap_int<1>:
// a 1-bit signed ap_int stores the value 1 as -1, so code comparing
// `pkt.last == 1` would silently never match. The wire is 1 bit either way,
// so this is layout-compatible with existing users of the struct.
// NOTE(review): deliberately no TKEEP/TSTRB/TUSER members — the minimal
// data+last pair is what the PYNQ DMA handles; confirm against your DMA config.
struct axis_t {
ap_uint<8> data; // pixel payload, one 8-bit pixel per beat
ap_uint<1> last; // TLAST: assert on the final beat of a transfer
};
/*
Unpack an AXI video stream into an xf::cv::Mat<> object
*input: AXI_video_strm
*output: img
*/
template <int TYPE, int ROWS, int COLS, int NPPC>
int AXIstream2xfMat(hls::stream<axis_t> & AXI_video_strm, xf::cv::Mat<TYPE, ROWS, COLS, NPPC>& img) {
    // Drain one stream beat per pixel and store it into `img` in raster order.
    // The TLAST sideband on incoming beats is ignored; the transfer size is
    // driven entirely by img.rows * img.cols. Returns 0 unconditionally
    // (kept for interface parity with the Vitis vision helpers).
    // NOTE(review): one 8-bit beat per write — assumes NPPC == 1; confirm.
    const int num_rows = img.rows;
    const int num_cols = img.cols;
    assert(img.rows <= ROWS);
    assert(img.cols <= COLS);

    axis_t beat;
    int write_idx = 0;
    // Loop labels, flatten-off, and II=1 pipeline are kept exactly as in the
    // original nest — HLS synthesis results depend on this structure.
    loop_row_axi2mat: for (int row = 0; row < num_rows; row++) {
        loop_col_axi2mat: for (int col = 0; col < num_cols; col++) {
            // clang-format off
            #pragma HLS loop_flatten off
            #pragma HLS pipeline II=1
            // clang-format on
            AXI_video_strm >> beat;           // blocking read: stalls until a beat arrives
            img.write(write_idx, beat.data);  // sequential pixel index
            write_idx++;
        }
    }
    return 0;
}
You can test this using a simple passthrough function like axiconv from the Vitis vision library.