Adventurer
4,523 Views
Registered: 01-24-2014

Moving data from DDR to PL using pl330 DMA

Following dmatest.c in the Xilinx Linux sources, I am trying to write a module that copies data between DDR and the PL (where a simple register is implemented, so the PL can be accessed directly as a memory address). However, only the PL-to-DDR direction works; the transfer from DDR to PL always fails. In my code it fails while waiting for the DMA completion: the wait on tx_chan times out.
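Since the PL side is just an AXI-mapped register block, it can be sanity-checked directly from the CPU before involving the DMA at all; here is a rough sketch of such a check (the physical address and size are placeholders, not my actual values):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>

#define PL_REG_PHYS 0x43C00000  /* placeholder: AXI base address of the PL register */
#define PL_REG_SIZE 0x1000      /* placeholder size */

static int pl_sanity_check(void)
{
    /* Map the PL register and poke it from the CPU to rule out an
     * AXI/addressing problem before blaming the DMA path.
     */
    void __iomem *regs = ioremap(PL_REG_PHYS, PL_REG_SIZE);

    if (!regs)
        return -ENOMEM;

    iowrite32(0xDEADBEEF, regs);
    pr_info("PL register reads back 0x%08x\n", ioread32(regs));

    iounmap(regs);
    return 0;
}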

 

Based on a recent post, I made some changes to the PL330 DMA driver (drivers/dma/pl330.c). I tried both single and burst mode, but neither succeeds in transferring data from DDR to the PL.
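For what it is worth, from the dmaengine client side the single/burst distinction maps onto the burst length passed through dma_slave_config (the PL330 driver derives its burst settings from dst_maxburst/src_maxburst and the bus width); a sketch of the two configurations, with placeholder names for the channel and the PL address:

#include <linux/dmaengine.h>

/* Sketch: maxburst = 1 roughly corresponds to single transfers,
 * maxburst > 1 to burst transfers. dev_dst_addr is the physical
 * address of the PL register (placeholder name).
 */
static int configure_tx(struct dma_chan *chan, dma_addr_t dev_dst_addr, bool burst)
{
    struct dma_slave_config cfg = {
        .direction      = DMA_MEM_TO_DEV,
        .dst_addr       = dev_dst_addr,
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst   = burst ? 4 : 1,
        .device_fc      = false,
    };

    return dmaengine_slave_config(chan, &cfg);
}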

 

Does anyone have successful experience with this?

 

Thanks a lot. 

 

Here is some of my sample code:


// set cap
dma_cap_zero(rx_mask);
dma_cap_set(DMA_SLAVE, rx_mask);
dma_cap_set(DMA_MEMCPY, rx_mask);
dma_cap_set(DMA_PRIVATE, rx_mask);
dma_cap_zero(tx_mask);
dma_cap_set(DMA_SLAVE, tx_mask);
dma_cap_set(DMA_MEMCPY, tx_mask);
dma_cap_set(DMA_PRIVATE, tx_mask);

 

// request channels
rx_chan = dma_request_channel(rx_mask, NULL, NULL);
if (!rx_chan) {
    printk("did not find Rx device\n");
    goto comp_err;
}
tx_chan = dma_request_channel(tx_mask, NULL, NULL);
if (!tx_chan) {
    printk("did not find Tx device\n");
    dma_release_channel(rx_chan);
    goto comp_err;
}
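As an aside, I request anonymous channels by capability mask above; on Zynq the channels can also be requested by name through the device tree, which binds the client to specific PL330 channels. A sketch of that variant (the "rx"/"tx" dma-names and the pdev pointer are assumptions, not part of my code):

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

/* Sketch: request the PL330 channels by device-tree name instead of by
 * capability mask. Assumes the client node carries something like
 *     dmas = <&dmac_s 0>, <&dmac_s 1>;
 *     dma-names = "rx", "tx";
 */
static int request_channels_dt(struct platform_device *pdev,
                               struct dma_chan **rx, struct dma_chan **tx)
{
    *rx = dma_request_slave_channel(&pdev->dev, "rx");
    if (!*rx)
        return -ENODEV;

    *tx = dma_request_slave_channel(&pdev->dev, "tx");
    if (!*tx) {
        dma_release_channel(*rx);
        return -ENODEV;
    }

    return 0;
}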


// define dma operations
rx_dev = rx_chan->device;
tx_dev = tx_chan->device;
rx_vir = dma_alloc_coherent(rx_dev->dev, get_buffersize(), &rx_phy, GFP_KERNEL);
tx_vir = dma_alloc_coherent(tx_dev->dev, get_buffersize(), &tx_phy, GFP_KERNEL);
// rx: memcpy descriptor, destination rx_phy (DDR buffer), source get_dst_addr()
rxd = rx_dev->device_prep_dma_memcpy(rx_chan, rx_phy, get_dst_addr(), get_buffersize(), DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxd) {
    printk("device_prep_dma_memcpy failed\n");
    goto alloc_err2;
}
// tx: slave transfer towards the PL address returned by get_src_addr()
tx_dma_conf.direction = DMA_MEM_TO_DEV;
tx_dma_conf.dst_addr = (unsigned long) get_src_addr();
tx_dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
tx_dma_conf.dst_maxburst = 4;
tx_dma_conf.device_fc = false;
if (dmaengine_slave_config(tx_chan, &tx_dma_conf)) {
    printk("DMA config for tx_chan failed\n");
    goto alloc_err2;
}
// sg_tx is set up elsewhere to describe the tx buffer
txd = dmaengine_prep_slave_sg(tx_chan, &sg_tx, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txd) {
    printk("dmaengine_prep_slave_sg failed\n");
    goto alloc_err2;
}
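Since the tx buffer comes from dma_alloc_coherent, its DMA handle tx_phy is already available, so one alternative to building a scatterlist is dmaengine_prep_slave_single; a sketch of preparing the tx descriptor that way (same variables as above):

/* Sketch: prepare the MEM_TO_DEV descriptor directly from the coherent
 * buffer's DMA handle instead of going through sg_tx.
 */
txd = dmaengine_prep_slave_single(tx_chan, tx_phy, get_buffersize(),
                                  DMA_MEM_TO_DEV,
                                  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txd) {
    printk("dmaengine_prep_slave_single failed\n");
    goto alloc_err2;
}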

 

// define callback functions
rx_done.done = false;
init_completion(&comp_rx);
rxd->callback = dma_complete_func_rx;
rxd->callback_param = &comp_rx;
tx_done.done = false;
init_completion(&comp_tx);
txd->callback = dma_complete_func_tx;
txd->callback_param = &comp_tx;
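The callback functions just signal the completions, following the usual dmatest pattern (sketched here; the bodies are paraphrased):

#include <linux/completion.h>

/* Completion callbacks: each one simply wakes up the submitting code,
 * which waits on the corresponding completion.
 */
static void dma_complete_func_rx(void *completion)
{
    complete(completion);
}

static void dma_complete_func_tx(void *completion)
{
    complete(completion);
}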

 

 

// submit dma operation: receiving
rx_cookie = dmaengine_submit(rxd); // read
if (dma_submit_error(rx_cookie)) {
    printk("Failed to do DMA rx_submit\n");
    goto alloc_err2;
}
reinit_completion(&comp_rx);
rx_timeout = msecs_to_jiffies(timeout);
dma_async_issue_pending(rx_chan);
printk("waiting for pending rx_chan\n");
wait_for_completion_timeout(&comp_rx, rx_timeout);
rx_status = rx_dev->device_tx_status(rx_chan, rx_cookie, &rx_state);
if (rx_status != DMA_COMPLETE) {
    printk("rx_chan dma timeout\n");
    goto alloc_err2;
}
pr_info("finish dma using %s\n", dma_chan_name(rx_chan));

// copy source to destination in virtual space
memcpy(tx_vir, rx_vir, get_buffersize());//copy
printk("memcpy from source to destination\n");



// submit dma operation: delivering
tx_cookie = dmaengine_submit(txd); // write
if (dma_submit_error(tx_cookie)) {
    printk("Failed to do DMA tx_submit\n");
    goto alloc_err2;
}
reinit_completion(&comp_tx);
tx_timeout = msecs_to_jiffies(timeout);
dma_async_issue_pending(tx_chan);
printk("waiting for pending tx_chan\n");
wait_for_completion_timeout(&comp_tx, tx_timeout);
tx_status = tx_dev->device_tx_status(tx_chan, tx_cookie, &tx_state);
if (tx_status != DMA_COMPLETE) {
    printk("tx_chan dma timeout\n");
    goto alloc_err2;
}
pr_info("finish dma using %s\n", dma_chan_name(tx_chan));

 


alloc_err2:
    // stop any outstanding transfers before freeing their buffers
    dmaengine_terminate_all(tx_chan);
    dmaengine_terminate_all(rx_chan);
    dma_free_coherent(rx_dev->dev, get_buffersize(), rx_vir, rx_phy);
    dma_free_coherent(tx_dev->dev, get_buffersize(), tx_vir, tx_phy);
    dma_release_channel(tx_chan);
    dma_release_channel(rx_chan);
    printk("freed DMA buffers and released channels\n");
