mmc: tmio: use PIO for short transfers
This patch allows some requests to be transferred in PIO mode and others
in DMA mode, and defaults to using DMA only for transfers longer than
8 bytes. This is especially useful with SDIO, which can issue many 2- and
4-byte transfers that create unnecessarily high overhead when executed in
DMA mode.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Guennadi Liakhovetski authored and Chris Ball committed Mar 25, 2011
1 parent 51fc7b2 commit 5f52c35
Showing 1 changed file with 23 additions and 10 deletions.
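
In essence, the DMA setup paths (tmio_mmc_start_dma_rx/tx) now hand any
scatterlist segment shorter than TMIO_MIN_DMA_LEN (8 bytes) back to the PIO
path by setting host->force_pio for the current request, and the interrupt
handlers honour that flag until it is cleared when the request finishes.
A minimal sketch of that decision follows; it is a simplified illustration,
not the driver code itself, and the helper name and struct are hypothetical.

/* Hypothetical, simplified sketch of the threshold decision this patch
 * introduces: segments shorter than TMIO_MIN_DMA_LEN bytes are done in
 * PIO mode for the current request instead of being set up for DMA.
 */
#define TMIO_MIN_DMA_LEN 8

struct example_host {
	int have_dma_channel;	/* stands in for host->chan_rx / host->chan_tx */
	int force_pio;		/* stands in for host->force_pio, cleared per request */
};

/* Returns nonzero if the transfer is worth setting up as DMA. */
static int use_dma_for_transfer(struct example_host *host, unsigned int sg_len)
{
	if (!host->have_dma_channel)
		return 0;			/* no DMA channel: always PIO */

	if (sg_len < TMIO_MIN_DMA_LEN) {
		host->force_pio = 1;		/* short transfer: fall back to PIO */
		return 0;
	}

	return 1;				/* long enough to amortize DMA setup */
}

The threshold trades the fixed cost of DMA setup and completion handling
against the few CPU cycles needed to push 2-8 bytes through PIO, which is
why small SDIO transfers benefit most.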
drivers/mmc/host/tmio_mmc.c
@@ -100,6 +100,8 @@
 		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
 #define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
 
+#define TMIO_MIN_DMA_LEN 8
+
 #define enable_mmc_irqs(host, i) \
 	do { \
 		u32 mask;\
@@ -147,6 +149,7 @@ struct tmio_mmc_host {
 	struct platform_device *pdev;
 
 	/* DMA support */
+	bool force_pio;
 	struct dma_chan *chan_rx;
 	struct dma_chan *chan_tx;
 	struct tasklet_struct dma_complete;
@@ -385,6 +388,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
 	host->cmd = NULL;
 	host->data = NULL;
 	host->mrq = NULL;
+	host->force_pio = false;
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
@@ -404,6 +408,7 @@ tmio_mmc_finish_request(struct tmio_mmc_host *host)
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
+	host->force_pio = false;
 
 	cancel_delayed_work(&host->delayed_reset_work);
 
@@ -485,7 +490,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	unsigned int count;
 	unsigned long flags;
 
-	if (host->chan_tx || host->chan_rx) {
+	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
 		pr_err("PIO IRQ in DMA mode!\n");
 		return;
 	} else if (!data) {
@@ -551,15 +556,11 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 	 */
 
 	if (data->flags & MMC_DATA_READ) {
-		if (!host->chan_rx)
-			disable_mmc_irqs(host, TMIO_MASK_READOP);
-		else
+		if (host->chan_rx && !host->force_pio)
 			tmio_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
 	} else {
-		if (!host->chan_tx)
-			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
 			host->mrq);
 	}
@@ -583,7 +584,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	if (!data)
 		goto out;
 
-	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
 		/*
 		 * Has all data been written out yet? Testing on SuperH showed,
 		 * that in most cases the first interrupt comes already with the
@@ -596,11 +597,12 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
 			tasklet_schedule(&host->dma_complete);
 		}
-	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
 		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
 		tasklet_schedule(&host->dma_complete);
 	} else {
 		tmio_mmc_do_data_irq(host);
+		disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
 	}
 out:
 	spin_unlock(&host->lock);
@@ -649,12 +651,12 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	 */
 	if (host->data && !cmd->error) {
 		if (host->data->flags & MMC_DATA_READ) {
-			if (!host->chan_rx)
+			if (host->force_pio || !host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
 			else
 				tasklet_schedule(&host->dma_issue);
 		} else {
-			if (!host->chan_tx)
+			if (host->force_pio || !host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 			else
 				tasklet_schedule(&host->dma_issue);
@@ -810,6 +812,11 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	if (sg->length < TMIO_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
 	disable_mmc_irqs(host, TMIO_STAT_RXRDY);
 
 	/* The only sg element can be unaligned, use our bounce buffer then */
@@ -878,6 +885,11 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		goto pio;
 	}
 
+	if (sg->length < TMIO_MIN_DMA_LEN) {
+		host->force_pio = true;
+		return;
+	}
+
 	disable_mmc_irqs(host, TMIO_STAT_TXRQ);
 
 	/* The only sg element can be unaligned, use our bounce buffer then */
@@ -1119,6 +1131,7 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 fail:
 	host->mrq = NULL;
+	host->force_pio = false;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }