dmaengine: shdma: protect against the IRQ handler
The IRQ handler of the shdma driver accesses common hardware registers that
are also accessed from other contexts. Therefore access to them has to be
performed with interrupts disabled, not only with bottom halves disabled.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
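
Editorial note: the problem this patch addresses is the classic deadlock between an IRQ handler and other contexts sharing a spinlock. spin_lock_bh() only disables bottom halves (softirqs/tasklets), so a hard IRQ can still fire on the same CPU while the lock is held; if the handler then tries to take the same lock, it spins forever. The sketch below is not taken from shdma — it is a minimal illustration with hypothetical names (struct my_chan, my_irq_handler, my_submit_*) of the before/after locking discipline, assuming the lock was set up with spin_lock_init().

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/* Hypothetical channel state shared between an IRQ handler and other contexts. */
struct my_chan {
	spinlock_t lock;	/* assumed initialized with spin_lock_init() */
	u32 pending;
};

/* Hard-IRQ context: takes the same lock that the submit paths take. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_chan *chan = data;

	spin_lock(&chan->lock);		/* interrupts are already off here */
	chan->pending--;
	spin_unlock(&chan->lock);

	return IRQ_HANDLED;
}

/* BROKEN variant: only bottom halves are disabled, so the hard IRQ can still
 * preempt this CPU while the lock is held and then deadlock spinning on it
 * inside my_irq_handler(). */
static void my_submit_broken(struct my_chan *chan)
{
	spin_lock_bh(&chan->lock);
	chan->pending++;
	spin_unlock_bh(&chan->lock);
}

/* Fixed variant, mirroring the patch: disable interrupts around the critical
 * section; irqsave/irqrestore keeps it safe regardless of calling context. */
static void my_submit_fixed(struct my_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->pending++;
	spin_unlock_irqrestore(&chan->lock, flags);
}

In the patch itself, the spin_lock_irq()/spin_unlock_irq() variants are used on paths known to run with interrupts enabled (free_chan_resources, sh_chan_xfer_ld_queue, the tasklet), while spin_lock_irqsave()/spin_unlock_irqrestore() are used where the calling context is not guaranteed.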
Guennadi Liakhovetski authored and Vinod Koul committed Sep 28, 2011
Parent: 0745c9a · Commit: b4dae6e
Showing 1 changed file with 28 additions and 27 deletions.
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -265,8 +265,9 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
+	unsigned long flags;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 
 	cookie = sh_chan->common.cookie;
 	cookie++;
@@ -302,7 +303,7 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 		tx->cookie, &last->async_tx, sh_chan->id,
 		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	return cookie;
 }
@@ -374,24 +375,18 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		dmae_init(sh_chan);
 	}
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_bh(&sh_chan->desc_lock);
 		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
-		if (!desc) {
-			spin_lock_bh(&sh_chan->desc_lock);
+		if (!desc)
 			break;
-		}
 		dma_async_tx_descriptor_init(&desc->async_tx,
 					&sh_chan->common);
 		desc->async_tx.tx_submit = sh_dmae_tx_submit;
 		desc->mark = DESC_IDLE;
 
-		spin_lock_bh(&sh_chan->desc_lock);
 		list_add(&desc->node, &sh_chan->ld_free);
 		sh_chan->descs_allocated++;
 	}
-	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (!sh_chan->descs_allocated) {
 		ret = -ENOMEM;
@@ -405,6 +400,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		clear_bit(param->slave_id, sh_dmae_slave_used);
 etestused:
 efindslave:
+	chan->private = NULL;
 	pm_runtime_put(sh_chan->dev);
 	return ret;
 }
@@ -437,12 +433,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 		chan->private = NULL;
 	}
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 
 	list_splice_init(&sh_chan->ld_free, &list);
 	sh_chan->descs_allocated = 0;
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	if (descs > 0)
 		pm_runtime_put(sh_chan->dev);
@@ -534,6 +530,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
 	LIST_HEAD(tx_list);
 	int chunks = 0;
+	unsigned long irq_flags;
 	int i;
 
 	if (!sg_len)
@@ -544,7 +541,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 			(SH_DMA_TCR_MAX + 1);
 
 	/* Have to lock the whole loop to protect against concurrent release */
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
 
 	/*
 	 * Chaining:
@@ -590,7 +587,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 	/* Put them back on the free list, so, they don't get lost */
 	list_splice_tail(&tx_list, &sh_chan->ld_free);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
 
 	return &first->async_tx;
 
@@ -599,7 +596,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
 		new->mark = DESC_IDLE;
 	list_splice(&tx_list, &sh_chan->ld_free);
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
 
 	return NULL;
 }
@@ -661,6 +658,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			  unsigned long arg)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	unsigned long flags;
 
 	/* Only supports DMA_TERMINATE_ALL */
 	if (cmd != DMA_TERMINATE_ALL)
@@ -669,7 +667,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (!chan)
 		return -EINVAL;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 	dmae_halt(sh_chan);
 
 	if (!list_empty(&sh_chan->ld_queue)) {
@@ -680,7 +678,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			sh_chan->xmit_shift;
 
 	}
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	sh_dmae_chan_ld_cleanup(sh_chan, true);
 
@@ -695,8 +693,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 	dma_cookie_t cookie = 0;
 	dma_async_tx_callback callback = NULL;
 	void *param = NULL;
+	unsigned long flags;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
 		struct dma_async_tx_descriptor *tx = &desc->async_tx;
 
@@ -773,7 +772,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 	 */
 	sh_chan->completed_cookie = sh_chan->common.cookie;
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	if (callback)
 		callback(param);
@@ -796,10 +795,12 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
 	struct sh_desc *desc;
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 	/* DMA work check */
-	if (dmae_is_busy(sh_chan))
-		goto sh_chan_xfer_ld_queue_end;
+	if (dmae_is_busy(sh_chan)) {
+		spin_unlock_irq(&sh_chan->desc_lock);
+		return;
+	}
 
 	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -813,8 +814,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 			break;
 		}
 
-sh_chan_xfer_ld_queue_end:
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
@@ -831,6 +831,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 	dma_cookie_t last_used;
 	dma_cookie_t last_complete;
 	enum dma_status status;
+	unsigned long flags;
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
@@ -841,7 +842,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
-	spin_lock_bh(&sh_chan->desc_lock);
+	spin_lock_irqsave(&sh_chan->desc_lock, flags);
 
 	status = dma_async_is_complete(cookie, last_complete, last_used);
 
@@ -859,7 +860,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 			}
 	}
 
-	spin_unlock_bh(&sh_chan->desc_lock);
+	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
 	return status;
 }
@@ -952,7 +953,7 @@ static void dmae_do_tasklet(unsigned long data)
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
 	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
 
-	spin_lock(&sh_chan->desc_lock);
+	spin_lock_irq(&sh_chan->desc_lock);
 	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
 		if (desc->mark == DESC_SUBMITTED &&
 		    ((desc->direction == DMA_FROM_DEVICE &&
@@ -965,7 +966,7 @@ static void dmae_do_tasklet(unsigned long data)
 			break;
 		}
 	}
-	spin_unlock(&sh_chan->desc_lock);
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
