From 32b6cfd860b1dd5d723ebdd57403da28e728a795 Mon Sep 17 00:00:00 2001
From: Sascha Hauer
Date: Wed, 12 Jan 2011 13:14:37 +0100
Subject: [PATCH]

--- yaml ---
r: 233286
b: refs/heads/master
c: 1e070a60997f5bbaadd498c34380e2aa110336cf
h: refs/heads/master
v: v3
---
 [refs]                            |  2 +-
 trunk/drivers/dma/amba-pl08x.c    | 53 ++++++++++++-------------
 trunk/drivers/dma/imx-dma.c       |  4 +++
 trunk/drivers/dma/imx-sdma.c      | 12 +++----
 trunk/drivers/dma/ipu/ipu_idmac.c | 50 +++++++++++++++++++++++++++++
 5 files changed, 81 insertions(+), 40 deletions(-)

diff --git a/[refs] b/[refs]
index a5df9f428728..da00f8ddcdd3 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a646bd7f0824d3e0f02ff8d7410704f965de01bc
+refs/heads/master: 1e070a60997f5bbaadd498c34380e2aa110336cf
diff --git a/trunk/drivers/dma/amba-pl08x.c b/trunk/drivers/dma/amba-pl08x.c
index 07bca4970e50..297f48b0cba9 100644
--- a/trunk/drivers/dma/amba-pl08x.c
+++ b/trunk/drivers/dma/amba-pl08x.c
@@ -79,7 +79,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -236,19 +235,16 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
 }
 
 /*
- * Pause the channel by setting the HALT bit.
+ * Overall DMAC remains enabled always.
  *
- * For M->P transfers, pause the DMAC first and then stop the peripheral -
- * the FIFO can only drain if the peripheral is still requesting data.
- * (note: this can still timeout if the DMAC FIFO never drains of data.)
+ * Disabling individual channels could lose data.
  *
- * For P->M transfers, disable the peripheral first to stop it filling
- * the DMAC FIFO, and then pause the DMAC.
+ * Disable the peripheral DMA after disabling the DMAC in order to allow
+ * the DMAC FIFO to drain, and hence allow the channel to show inactive
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
 	u32 val;
-	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
 	val = readl(ch->base + PL080_CH_CONFIG);
@@ -256,13 +252,8 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 
 	/* Wait for channel inactive */
-	for (timeout = 1000; timeout; timeout--) {
-		if (!pl08x_phy_channel_busy(ch))
-			break;
-		udelay(1);
-	}
-	if (pl08x_phy_channel_busy(ch))
-		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
+	while (pl08x_phy_channel_busy(ch))
+		cpu_relax();
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -276,24 +267,19 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
 	writel(val, ch->base + PL080_CH_CONFIG);
 }
 
-/*
- * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
- * clears any pending interrupt status. This should not be used for
- * an on-going transfer, but as a method of shutting down a channel
- * (eg, when it's no longer used) or terminating a transfer.
- */
-static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
-	struct pl08x_phy_chan *ch)
+/* Stops the channel */
+static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
 {
-	u32 val = readl(ch->base + PL080_CH_CONFIG);
+	u32 val;
 
-	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
-		 PL080_CONFIG_TC_IRQ_MASK);
-
+	pl08x_pause_phy_chan(ch);
+
+	/* Disable channel */
+	val = readl(ch->base + PL080_CH_CONFIG);
+	val &= ~PL080_CONFIG_ENABLE;
+	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
+	val &= ~PL080_CONFIG_TC_IRQ_MASK;
 	writel(val, ch->base + PL080_CH_CONFIG);
-
-	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
-	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -418,12 +404,13 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ch->lock, flags);
-
-	/* Stop the channel and clear its interrupts */
-	pl08x_terminate_phy_chan(pl08x, ch);
+	pl08x_stop_phy_chan(ch);
+	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
+	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
 
 	/* Mark it as free */
+	spin_lock_irqsave(&ch->lock, flags);
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -1462,7 +1449,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+			pl08x_stop_phy_chan(plchan->phychan);
 
 			/*
 			 * Mark physical channel as free and free any slave
diff --git a/trunk/drivers/dma/imx-dma.c b/trunk/drivers/dma/imx-dma.c
index e53d438142bb..a46e1d9fa3e4 100644
--- a/trunk/drivers/dma/imx-dma.c
+++ b/trunk/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@ struct imxdma_channel {
 
 struct imxdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
 };
@@ -370,6 +371,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, imxdma);
 
+	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
diff --git a/trunk/drivers/dma/imx-sdma.c b/trunk/drivers/dma/imx-sdma.c
index e89fd1033df9..d5a5d4d9c19b 100644
--- a/trunk/drivers/dma/imx-sdma.c
+++ b/trunk/drivers/dma/imx-sdma.c
@@ -770,15 +770,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
 {
-	dma_cookie_t cookie = sdmac->chan.cookie;
+	dma_cookie_t cookie = sdma->chan.cookie;
 
 	if (++cookie < 0)
 		cookie = 1;
 
-	sdmac->chan.cookie = cookie;
-	sdmac->desc.cookie = cookie;
+	sdma->chan.cookie = cookie;
+	sdma->desc.cookie = cookie;
 
 	return cookie;
 }
@@ -1135,7 +1135,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			header->ram_code_size,
-			addr->ram_code_start_addr);
+			sdma->script_addrs->ram_code_start_addr);
 	clk_disable(sdma->clk);
 
 	sdma_add_scripts(sdma, addr);
@@ -1348,7 +1348,7 @@ static int __init sdma_probe(struct platform_device *pdev)
 err_request_region:
 err_irq:
 	kfree(sdma);
-	return ret;
+	return 0;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
diff --git a/trunk/drivers/dma/ipu/ipu_idmac.c b/trunk/drivers/dma/ipu/ipu_idmac.c
index c1a125e7d1df..cb26ee9773d6 100644
--- a/trunk/drivers/dma/ipu/ipu_idmac.c
+++ b/trunk/drivers/dma/ipu/ipu_idmac.c
@@ -1145,6 +1145,29 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
 	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
 	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
+	/*
+	 * Problem (observed with channel DMAIC_7): after enabling the channel
+	 * and initialising buffers, there comes an interrupt with current still
+	 * pointing at buffer 0, whereas it should use buffer 0 first and only
+	 * generate an interrupt when it is done, then current should already
+	 * point to buffer 1. This spurious interrupt also comes on channel
+	 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
+	 * first interrupt, there comes the second with current correctly
+	 * pointing to buffer 1 this time. But sometimes this second interrupt
+	 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
+	 * the channel seems to prevent the channel from hanging, but it doesn't
+	 * prevent the spurious interrupt. This might also be unsafe. Think
+	 * about the IDMAC controller trying to switch to a buffer, when we
+	 * clear the ready bit, and re-enable it a moment later.
+	 */
+	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
+	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
+	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
+
+	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
+	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
+	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
+
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	return 0;
@@ -1223,6 +1246,33 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
 
+	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
+		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
+		     !list_is_last(ichan->queue.next, &ichan->queue))) {
+		int i = 100;
+
+		/* This doesn't help. See comment in ipu_disable_channel() */
+		while (--i) {
+			curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
+			if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
+				break;
+			cpu_relax();
+		}
+
+		if (!i) {
+			spin_unlock(&ichan->lock);
+			dev_dbg(dev,
+				"IRQ on active buffer on channel %x, active "
+				"%d, ready %x, %x, current %x!\n", chan_id,
+				ichan->active_buffer, ready0, ready1, curbuf);
+			return IRQ_NONE;
+		} else
+			dev_dbg(dev,
+				"Buffer deactivated on channel %x, active "
+				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
+				ichan->active_buffer, ready0, ready1, curbuf, i);
+	}
+
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {
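
Note (not part of the patch itself): the amba-pl08x.c hunks above trade a bounded pause wait (poll the channel-busy flag for up to roughly 1 ms, then warn) for the older unbounded busy-wait. Below is a minimal C sketch of the two variants for comparison only. It reuses struct pl08x_phy_chan, pl08x_phy_channel_busy(), PL080_CH_CONFIG and PL080_CONFIG_HALT as they appear in the driver; the _bounded/_unbounded function names are hypothetical, and the HALT-bit setup is assumed from the "Set the HALT bit" comment because the hunks cut off before those lines.

/*
 * Sketch only: the two pause strategies that the @@ -256,13 +252,8 @@ hunk
 * switches between.  struct pl08x_phy_chan, pl08x_phy_channel_busy(),
 * PL080_CH_CONFIG and PL080_CONFIG_HALT come from the driver; the
 * _bounded/_unbounded names are hypothetical.
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>

/* Variant removed by this patch: poll for ~1 ms, then report and return */
static void pl08x_pause_phy_chan_bounded(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

/* Variant restored by this patch: spin until the channel reports idle */
static void pl08x_pause_phy_chan_unbounded(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Set the HALT bit, then busy-wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	while (pl08x_phy_channel_busy(ch))
		cpu_relax();
}

The bounded form cannot stall the caller indefinitely if the peripheral never lets the FIFO drain, which is what the removed comment about M->P transfers warns about; the unbounded form is simpler but relies on the FIFO always draining.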