
dmaengine: xilinx_dma: Support descriptor setup from dma_vecs

The DMAEngine provides an interface for obtaining DMA transaction
descriptors from an array of scatter-gather buffers represented by
struct dma_vec. This interface is used by the DMABUF API of the IIO
framework [1][2].

To enable DMABUF support through the IIO framework for the Xilinx DMA,
implement the .device_prep_peripheral_dma_vec() callback of struct
dma_device in the driver.
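
For context (not part of this patch): a dmaengine client fills an array
of struct dma_vec entries, each an addr/len pair describing an already
DMA-mapped buffer, and hands it to dmaengine_prep_peripheral_dma_vec().
A minimal sketch, assuming chan is a previously requested channel and
buf0/len0, buf1/len1 are hypothetical pre-mapped buffers:

	struct dma_vec vecs[] = {
		{ .addr = buf0, .len = len0 },	/* hypothetical, already DMA-mapped */
		{ .addr = buf1, .len = len1 },
	};
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
					       DMA_MEM_TO_DEV,
					       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;	/* channel cannot prepare dma_vec transfers */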

[1]: 7a86d469983a ("iio: buffer-dmaengine: Support new DMABUF based userspace API")
[2]: 5878853fc938 ("dmaengine: Add API function dmaengine_prep_peripheral_dma_vec()")

Signed-off-by: Folker Schwesinger <dev@folker-schwesinger.de>
Reviewed-by: Suraj Gupta <suraj.gupta2@amd.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Link: https://lore.kernel.org/r/DCCKQLKOZC06.2H6LJ8RJQJNV2@folker-schwesinger.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>

@@ -2172,6 +2172,99 @@ error:
	return NULL;
}

/**
 * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
 * transaction from DMA vectors
 * @dchan: DMA channel
 * @vecs: Array of DMA vectors that should be transferred
 * @nb: number of entries in @vecs
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
	struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction) || direction != chan->direction)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information from DMA vectors */
	for (i = 0; i < nb; i++) {
		sg_used = 0;

		/* Loop until the entire dma_vec entry is used */
		while (sg_used < vecs[i].len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}
	head = list_first_entry(&desc->segments,
				struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head->phys;

	/* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
	if (chan->direction == DMA_MEM_TO_DEV) {
		head->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	if (chan->xdev->has_axistream_connected)
		desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}

/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
@@ -3180,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
	xdev->common.device_config = xilinx_dma_device_config;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_peripheral_dma_vec =
			xilinx_dma_prep_peripheral_dma_vec;
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
			xilinx_dma_prep_dma_cyclic;
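
With the callback registered, the descriptor returned above goes through
the usual dmaengine submit/issue sequence; dmaengine_submit() lands in
the xilinx_dma_tx_submit() hook installed by the new prep function. A
sketch of that flow, reusing tx and chan from the earlier example
(transfer_done and priv are hypothetical):

	dma_cookie_t cookie;

	tx->callback = transfer_done;	/* hypothetical completion handler */
	tx->callback_param = priv;	/* hypothetical driver context */

	cookie = dmaengine_submit(tx);	/* invokes xilinx_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	dma_async_issue_pending(chan);	/* start processing pending descriptors */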