dma: shdma: transfer based runtime PM
Currently the shdma dmaengine driver uses runtime PM to save power when no
channel on the specific controller is requested by a user. This patch
switches the driver to count individual DMA transfers instead, so that the
controller can be powered down between transfers, even if some of its
channels are in use.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Guennadi Liakhovetski authored and Vinod Koul committed Sep 28, 2011
1 parent b4dae6e commit 7a1cd9a
Showing 2 changed files with 75 additions and 26 deletions.
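
The scheme described above comes down to reference-counting active transfers: take one runtime PM reference when a channel's descriptor queue goes from empty to non-empty, and drop it when the queue drains. Below is a minimal illustrative sketch of that pattern, not the driver code itself; my_chan, my_desc and the two helpers are hypothetical stand-ins for the driver's sh_dmae_chan, sh_desc and queue handling (locking omitted).

#include <linux/device.h>
#include <linux/list.h>
#include <linux/pm_runtime.h>

/* Hypothetical stand-ins for the driver's sh_desc and sh_dmae_chan. */
struct my_desc {
	struct list_head node;
};

struct my_chan {
	struct list_head ld_queue;	/* descriptors queued for transfer */
	struct device *dev;
};

/* Queue a descriptor: the first one on an idle channel powers the controller up. */
static void my_queue_desc(struct my_chan *c, struct my_desc *d)
{
	if (list_empty(&c->ld_queue))
		pm_runtime_get(c->dev);
	list_add_tail(&d->node, &c->ld_queue);
}

/* Retire a descriptor: an emptied queue lets the controller power down again. */
static void my_retire_desc(struct my_chan *c, struct my_desc *d)
{
	list_del(&d->node);
	if (list_empty(&c->ld_queue))
		pm_runtime_put(c->dev);
}

The actual driver additionally has to cope with pm_runtime_get() completing asynchronously, which is what the pm_state machinery in the diff below is for.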
drivers/dma/shdma.c (94 changes: 68 additions, 26 deletions)
@@ -259,15 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 	return 0;
 }
 
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
 	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+	struct sh_dmae_slave *param = tx->chan->private;
 	dma_async_tx_callback callback = tx->callback;
 	dma_cookie_t cookie;
-	unsigned long flags;
+	bool power_up;
 
-	spin_lock_irqsave(&sh_chan->desc_lock, flags);
+	spin_lock_irq(&sh_chan->desc_lock);
+
+	if (list_empty(&sh_chan->ld_queue))
+		power_up = true;
+	else
+		power_up = false;
 
 	cookie = sh_chan->common.cookie;
 	cookie++;
@@ -303,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 		tx->cookie, &last->async_tx, sh_chan->id,
 		desc->hw.sar, desc->hw.tcr, desc->hw.dar);
 
-	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
+	if (power_up) {
+		sh_chan->pm_state = DMAE_PM_BUSY;
+
+		pm_runtime_get(sh_chan->dev);
+
+		spin_unlock_irq(&sh_chan->desc_lock);
+
+		pm_runtime_barrier(sh_chan->dev);
+
+		spin_lock_irq(&sh_chan->desc_lock);
+
+		/* Have we been reset, while waiting? */
+		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
+			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
+				sh_chan->id);
+			if (param) {
+				const struct sh_dmae_slave_config *cfg =
+					param->config;
+
+				dmae_set_dmars(sh_chan, cfg->mid_rid);
+				dmae_set_chcr(sh_chan, cfg->chcr);
+			} else {
+				dmae_init(sh_chan);
+			}
+
+			if (sh_chan->pm_state == DMAE_PM_PENDING)
+				sh_chan_xfer_ld_queue(sh_chan);
+			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+		}
+	}
+
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	return cookie;
 }
@@ -347,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 	struct sh_dmae_slave *param = chan->private;
 	int ret;
 
-	pm_runtime_get_sync(sh_chan->dev);
-
 	/*
 	 * This relies on the guarantee from dmaengine that alloc_chan_resources
 	 * never runs concurrently with itself or free_chan_resources.
@@ -368,11 +405,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 		}
 
 		param->config = cfg;
-
-		dmae_set_dmars(sh_chan, cfg->mid_rid);
-		dmae_set_chcr(sh_chan, cfg->chcr);
-	} else {
-		dmae_init(sh_chan);
 	}
 
 	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
@@ -401,7 +433,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 etestused:
 efindslave:
 	chan->private = NULL;
-	pm_runtime_put(sh_chan->dev);
 	return ret;
 }
 
@@ -413,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
 	struct sh_desc *desc, *_desc;
 	LIST_HEAD(list);
-	int descs = sh_chan->descs_allocated;
 
 	/* Protect against ISR */
 	spin_lock_irq(&sh_chan->desc_lock);
@@ -440,9 +470,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 
 	spin_unlock_irq(&sh_chan->desc_lock);
 
-	if (descs > 0)
-		pm_runtime_put(sh_chan->dev);
-
 	list_for_each_entry_safe(desc, _desc, &list, node)
 		kfree(desc);
 }
@@ -676,7 +703,6 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				  struct sh_desc, node);
 		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
 			sh_chan->xmit_shift;
-
 	}
 	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
 
@@ -761,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 		    async_tx_test_ack(&desc->async_tx)) || all) {
 			/* Remove from ld_queue list */
 			desc->mark = DESC_IDLE;
+
 			list_move(&desc->node, &sh_chan->ld_free);
+
+			if (list_empty(&sh_chan->ld_queue)) {
+				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+				pm_runtime_put(sh_chan->dev);
+			}
 		}
 	}
 
@@ -791,16 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 		;
 }
 
+/* Called under spin_lock_irq(&sh_chan->desc_lock) */
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 {
 	struct sh_desc *desc;
 
-	spin_lock_irq(&sh_chan->desc_lock);
 	/* DMA work check */
-	if (dmae_is_busy(sh_chan)) {
-		spin_unlock_irq(&sh_chan->desc_lock);
+	if (dmae_is_busy(sh_chan))
 		return;
-	}
 
 	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -813,14 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 			dmae_start(sh_chan);
 			break;
 		}
-
-	spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-	sh_chan_xfer_ld_queue(sh_chan);
+
+	spin_lock_irq(&sh_chan->desc_lock);
+	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
+		sh_chan_xfer_ld_queue(sh_chan);
+	else
+		sh_chan->pm_state = DMAE_PM_PENDING;
+	spin_unlock_irq(&sh_chan->desc_lock);
 }
 
 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -913,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 
 		list_splice_init(&sh_chan->ld_queue, &dl);
 
+		if (!list_empty(&dl)) {
+			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+			pm_runtime_put(sh_chan->dev);
+		}
+		sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
 		spin_unlock(&sh_chan->desc_lock);
 
 		/* Complete all */
@@ -966,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
 			break;
 		}
 	}
-	spin_unlock_irq(&sh_chan->desc_lock);
 
 	/* Next desc */
 	sh_chan_xfer_ld_queue(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
@@ -1037,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 		return -ENOMEM;
 	}
 
-	/* copy struct dma_device */
+	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
+	/* reference struct dma_device */
 	new_sh_chan->common.device = &shdev->common;
 
 	new_sh_chan->dev = shdev->common.dev;
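
From a dmaengine consumer's point of view, the visible change is when the controller draws power: no longer for the whole lifetime of an allocated channel, but only from submission of the first descriptor until the channel's queue drains. A hedged sketch of a client using the circa-2011 slave API follows; my_client_xfer is a hypothetical helper, not part of this patch.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_client_xfer(struct dma_chan *chan,
			  struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Preparing a descriptor does not touch the hardware: still powered down. */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	/* First descriptor on an idle channel: tx_submit() powers the controller up. */
	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Start the transfer, or mark it pending while power-up is still in flight. */
	dma_async_issue_pending(chan);

	return 0;
}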
drivers/dma/shdma.h (7 changes: 7 additions, 0 deletions)
@@ -23,6 +23,12 @@
 
 struct device;
 
+enum dmae_pm_state {
+	DMAE_PM_ESTABLISHED,
+	DMAE_PM_BUSY,
+	DMAE_PM_PENDING,
+};
+
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
 	spinlock_t desc_lock;		/* Descriptor operation lock */
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
 	u32 __iomem *base;
 	char dev_id[16];		/* unique name per DMAC of channel */
 	int pm_error;
+	enum dmae_pm_state pm_state;
 };
 
 struct sh_dmae_device {
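
One reading of the three states introduced above (an interpretation of the patch, not comments from the header itself):

/*
 * DMAE_PM_ESTABLISHED - the channel is powered and configured;
 *                       issue_pending() may start transfers directly.
 * DMAE_PM_BUSY        - tx_submit() has called pm_runtime_get() and is
 *                       waiting in pm_runtime_barrier() with the channel
 *                       lock dropped.
 * DMAE_PM_PENDING     - issue_pending() ran while the channel was BUSY;
 *                       tx_submit() starts the queued transfers itself
 *                       once the controller is powered and reconfigured.
 */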
