Skip to content

Commit

Permalink
dma: mv_xor: Simplify the DMA_MEMCPY operation
Browse files Browse the repository at this point in the history
A memory copy operation can be expressed as an XOR operation with one
source. This commit removes code duplication in the driver by reusing
the XOR operation for the MEMCPY.

As an added benefit, we can now put MEMCPY and XOR descriptors on the
same chain, which improves performance.

Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
  • Loading branch information
Lior Amsalem authored and Vinod Koul committed Sep 23, 2014
1 parent b8291dd commit 3e4f52e
Showing 1 changed file with 12 additions and 65 deletions.
77 changes: 12 additions & 65 deletions drivers/dma/mv_xor.c
Original file line number Diff line number Diff line change
Expand Up @@ -82,13 +82,6 @@ static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
hw_desc->phy_dest_addr = addr;
}

/*
 * Number of descriptor slots consumed by a memset operation.
 * Always one: the slot count does not depend on the transfer length.
 */
static int mv_chan_memset_slot_count(size_t len)
{
	(void)len;	/* length never changes the answer */
	return 1;
}

/* A memcpy occupies the same single slot as a memset. */
#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
Expand Down Expand Up @@ -144,17 +137,6 @@ static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

/*
 * Return 1 if @desc may be appended to the current descriptor chain,
 * 0 otherwise. A descriptor can only follow a chain tail of the same
 * transaction type.
 */
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *tail;

	tail = list_entry(desc->chain_node.prev,
			  struct mv_xor_desc_slot, chain_node);

	return tail->type == desc->type;
}

static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
Expand Down Expand Up @@ -236,8 +218,6 @@ static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
{
dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
if (sw_desc->type != mv_chan->current_type)
mv_set_mode(mv_chan, sw_desc->type);

/* set the hardware chain */
mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
Expand Down Expand Up @@ -492,9 +472,6 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
list_splice_init(&grp_start->tx_list,
&old_chain_tail->chain_node);

if (!mv_can_chain(grp_start))
goto submit_done;

dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
&old_chain_tail->async_tx.phys);

Expand All @@ -516,7 +493,6 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (new_hw_chain)
mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
spin_unlock_bh(&mv_chan->lock);

return cookie;
Expand Down Expand Up @@ -572,45 +548,6 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Prepare a descriptor for a single memory-to-memory copy.
 *
 * Returns the async_tx descriptor on success, or NULL when the length
 * is below the engine minimum or no free slot could be allocated.
 * The channel lock is held only around slot allocation and setup.
 */
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	struct mv_xor_desc_slot *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %pad src %pad len: %u flags: %ld\n",
		__func__, &dest, &src, len, flags);

	/* Transfers shorter than the hardware minimum are rejected. */
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(grp_start, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
Expand All @@ -636,7 +573,6 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
sw_desc->async_tx.flags = flags;
grp_start = sw_desc->group_head;
mv_desc_init(grp_start, flags);
/* the byte count field is the same as in memcpy desc*/
mv_desc_set_byte_count(grp_start, len);
mv_desc_set_dest_addr(sw_desc->group_head, dest);
sw_desc->unmap_src_cnt = src_cnt;
Expand All @@ -651,6 +587,17 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address, so delegate to the XOR prep path.
	 */
	dma_addr_t single_src = src;

	return mv_xor_prep_dma_xor(chan, dest, &single_src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
Expand Down Expand Up @@ -1071,7 +1018,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,

mv_chan_unmask_interrupts(mv_chan);

mv_set_mode(mv_chan, DMA_MEMCPY);
mv_set_mode(mv_chan, DMA_XOR);

spin_lock_init(&mv_chan->lock);
INIT_LIST_HEAD(&mv_chan->chain);
Expand Down

0 comments on commit 3e4f52e

Please sign in to comment.