dmaengine: xilinx: dpdma: Add support for cyclic dma mode
This patch adds support for DPDMA cyclic DMA mode. Cyclic DMA
transfers are required for audio streaming.
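
For context, a client driver (e.g. an audio driver) consumes this
through the standard dmaengine cyclic API. A minimal sketch follows;
the channel name, buffer/period sizes, and the dev, buf_dma and
period_elapsed symbols are hypothetical, not part of this patch:

	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;

	chan = dma_request_chan(dev, "aud0");	/* hypothetical DT name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* 96000-byte ring split into 10 periods of 9600 bytes */
	tx = dmaengine_prep_dma_cyclic(chan, buf_dma, 96000, 9600,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = period_elapsed;	/* invoked once per period */
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);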

Signed-off-by: Rohit Visavalia <rohit.visavalia@amd.com>
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
Signed-off-by: Vishal Sagar <vishal.sagar@amd.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
Link: https://lore.kernel.org/r/20240821134043.2885506-1-vishal.sagar@amd.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Rohit Visavalia authored and Vinod Koul committed Aug 28, 2024
1 parent 654beb7 commit 51c42ae
Showing 1 changed file with 97 additions and 0 deletions.
drivers/dma/xilinx/xilinx_dpdma.c
@@ -670,6 +670,84 @@ static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic DMA descriptor
 * @chan: DPDMA channel
 * @buf_addr: buffer address
 * @buf_len: buffer length
 * @period_len: length of one period in bytes
 * @flags: tx flags argument passed in to prepare function
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * for the given cyclic transaction.
 *
 * Return: A dma async tx descriptor on success, or NULL on failure.
 */
static struct dma_async_tx_descriptor *
xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
			      dma_addr_t buf_addr, size_t buf_len,
			      size_t period_len, unsigned long flags)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		struct xilinx_dpdma_hw_desc *hw_desc;

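		/*
		 * Every period gets its own hardware descriptor, so each
		 * period's start address must satisfy the alignment rule,
		 * not just the first one.
		 */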
		if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
			dev_err(chan->xdev->dev,
				"buffer should be aligned at %d B\n",
				XILINX_DPDMA_ALIGN_BYTES);
			goto error;
		}

		sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
		if (!sw_desc)
			goto error;

		xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, last,
						   &buf_addr, 1);
		hw_desc = &sw_desc->hw;
		hw_desc->xfer_size = period_len;
		hw_desc->hsize_stride =
			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK,
				   period_len) |
			FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
				   period_len);
		hw_desc->control = XILINX_DPDMA_DESC_CONTROL_PREEMBLE |
				   XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE |
				   XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;

		list_add_tail(&sw_desc->node, &tx_desc->descriptors);

		buf_addr += period_len;
		last = sw_desc;
	}

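	/*
	 * Close the ring: link the last descriptor back to the first and
	 * mark it as the end of frame so the engine wraps around.
	 */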
	sw_desc = list_first_entry(&tx_desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	last->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (chan->xdev->ext_addr)
		last->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));

	last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	return vchan_tx_prep(&chan->vchan, &tx_desc->vdesc, flags);

error:
	xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);

	return NULL;
}
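
The loop above emits one hardware descriptor per period, and the tail
of the function closes the chain into a ring. With the hypothetical
numbers from the sketch in the commit message (96000-byte buffer,
9600-byte periods) the result is:

	/*
	 * desc[0] -> desc[1] -> ... -> desc[9] --+
	 *    ^                                   |
	 *    +-----------------------------------+
	 *
	 * Every descriptor sets COMPLETE_INTR, so the client callback runs
	 * once per period; desc[9] additionally carries LAST_OF_FRAME and
	 * its next_desc field points back at desc[0].
	 */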

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 * descriptor
@@ -1189,6 +1267,23 @@ static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
/* -----------------------------------------------------------------------------
* DMA Engine Operations
*/
static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
			     size_t buf_len, size_t period_len,
			     enum dma_transfer_direction direction,
			     unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	if (direction != DMA_MEM_TO_DEV)
		return NULL;

	if (buf_len % period_len)
		return NULL;

	return xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
					     period_len, flags);
}
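
The wrapper only sanity-checks caller input before delegating; for
example (hypothetical values, buf_dma assumed already mapped):

	/* OK: 96000 is an exact multiple of 9600 (10 periods) */
	tx = dmaengine_prep_dma_cyclic(dchan, buf_dma, 96000, 9600,
				       DMA_MEM_TO_DEV, flags);

	/* NULL: 96000 % 7000 != 0, not a whole number of periods */
	tx = dmaengine_prep_dma_cyclic(dchan, buf_dma, 96000, 7000,
				       DMA_MEM_TO_DEV, flags);

	/* NULL: DPDMA only moves data from memory to the display */
	tx = dmaengine_prep_dma_cyclic(dchan, buf_dma, 96000, 9600,
				       DMA_DEV_TO_MEM, flags);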

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
@@ -1672,13 +1767,15 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)

	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	/* TODO: Can we achieve better granularity ? */
	ddev->device_tx_status = dma_cookie_status;

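With DMA_CYCLIC added to the capability mask in probe, a client can
verify the feature before depending on it; a minimal sketch, assuming
chan was obtained via dma_request_chan():

	if (!dma_has_cap(DMA_CYCLIC, chan->device->cap_mask))
		return -EOPNOTSUPP;	/* fall back to non-cyclic streaming */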