diff --git a/[refs] b/[refs] index fda6deff33fb..230736d982c0 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: aa4d72ae946a4fa40486b871717778734184fa29 +refs/heads/master: 848ad121240f539e14a59eddd69e164aea9560b2 diff --git a/trunk/Documentation/powerpc/dts-bindings/fsl/dma.txt b/trunk/Documentation/powerpc/dts-bindings/fsl/dma.txt index 2a4b4bce6110..0732cdd05ba1 100644 --- a/trunk/Documentation/powerpc/dts-bindings/fsl/dma.txt +++ b/trunk/Documentation/powerpc/dts-bindings/fsl/dma.txt @@ -44,29 +44,21 @@ Example: compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <0>; reg = <0 0x80>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; }; dma-channel@80 { compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <1>; reg = <0x80 0x80>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; }; dma-channel@100 { compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <2>; reg = <0x100 0x80>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; }; dma-channel@180 { compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel"; cell-index = <3>; reg = <0x180 0x80>; - interrupt-parent = <&ipic>; - interrupts = <71 8>; }; }; diff --git a/trunk/arch/arm/include/asm/hardware/iop3xx-adma.h b/trunk/arch/arm/include/asm/hardware/iop3xx-adma.h index 9b28f1243bdc..1a8c7279a28b 100644 --- a/trunk/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/trunk/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -366,7 +366,8 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - slot_cnt += *slots_per_op; + if (len) + slot_cnt += *slots_per_op; return slot_cnt; } @@ -388,7 +389,8 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, slot_cnt += *slots_per_op; } - slot_cnt += *slots_per_op; + if (len) + slot_cnt += *slots_per_op; return slot_cnt; } @@ -735,8 +737,10 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) i += slots_per_op; } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT); - iter = iop_hw_desc_slot_idx(hw_desc, i); - iter->byte_count = len; + if (len) { + iter = iop_hw_desc_slot_idx(hw_desc, i); + iter->byte_count = len; + } } } diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index c27f80e5d531..e02d74b1e892 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -13,22 +13,6 @@ menuconfig DMADEVICES DMA Device drivers supported by the configured arch, it may be empty in some cases. -config DMADEVICES_DEBUG - bool "DMA Engine debugging" - depends on DMADEVICES != n - help - This is an option for use by developers; most people should - say N here. This enables DMA engine core and driver debugging. - -config DMADEVICES_VDEBUG - bool "DMA Engine verbose debugging" - depends on DMADEVICES_DEBUG != n - help - This is an option for use by developers; most people should - say N here. This enables deeper (more verbose) debugging of - the DMA engine core and drivers. - - if DMADEVICES comment "DMA Devices" @@ -85,13 +69,6 @@ config FSL_DMA The Elo is the DMA controller on some 82xx and 83xx parts, and the Elo Plus is the DMA controller on 85xx and 86xx parts. -config MPC512X_DMA - tristate "Freescale MPC512x built-in DMA engine support" - depends on PPC_MPC512x - select DMA_ENGINE - ---help--- - Enable support for the Freescale MPC512x built-in DMA engine. 
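
Editorial aside on the iop3xx-adma.h hunks above: the added "if (len)" guard keeps a zero-length operation from reserving a descriptor-slot group that the hardware will never consume. The following minimal, self-contained C sketch illustrates that pattern; it is not driver code, and MAX_BYTE_COUNT, slot_count() and slots_per_op are hypothetical stand-ins for the driver's IOP_ADMA_*_MAX_BYTE_COUNT limits and per-operation slot tables.

/*
 * Editorial sketch, not part of the patch: slot counting for a transfer
 * split into fixed-size hardware descriptors.  MAX_BYTE_COUNT and the
 * function name are hypothetical.
 */
#include <stddef.h>

#define MAX_BYTE_COUNT 4096	/* assumed per-descriptor transfer limit */

static int slot_count(size_t len, int slots_per_op)
{
	int slot_cnt = 0;

	/* reserve one group of slots per full chunk */
	while (len > MAX_BYTE_COUNT) {
		len -= MAX_BYTE_COUNT;
		slot_cnt += slots_per_op;
	}

	/*
	 * The trailing reservation covers the remainder.  Guarding it with
	 * "if (len)" keeps a zero-length request from reserving descriptor
	 * slots that the hardware will never use.
	 */
	if (len)
		slot_cnt += slots_per_op;

	return slot_cnt;
}

With the guard, slot_count(0, 4) returns 0; without it, a zero-length request would still claim a full group of 4 slots.
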
- config MV_XOR bool "Marvell XOR engine support" depends on PLAT_ORION diff --git a/trunk/drivers/dma/Makefile b/trunk/drivers/dma/Makefile index 22bba3d5e2b6..807053d48232 100644 --- a/trunk/drivers/dma/Makefile +++ b/trunk/drivers/dma/Makefile @@ -1,17 +1,9 @@ -ifeq ($(CONFIG_DMADEVICES_DEBUG),y) - EXTRA_CFLAGS += -DDEBUG -endif -ifeq ($(CONFIG_DMADEVICES_VDEBUG),y) - EXTRA_CFLAGS += -DVERBOSE_DEBUG -endif - obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o -obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o diff --git a/trunk/drivers/dma/coh901318.c b/trunk/drivers/dma/coh901318.c index b5f2ee0f8e2c..f1bf4f74ad8f 100644 --- a/trunk/drivers/dma/coh901318.c +++ b/trunk/drivers/dma/coh901318.c @@ -80,18 +80,16 @@ struct coh901318_chan { static void coh901318_list_print(struct coh901318_chan *cohc, struct coh901318_lli *lli) { - struct coh901318_lli *l; - dma_addr_t addr = virt_to_phys(lli); + struct coh901318_lli *l = lli; int i = 0; - while (addr) { - l = phys_to_virt(addr); + while (l) { dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" - ", dst 0x%x, link 0x%x link_virt 0x%p\n", + ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", i, l, l->control, l->src_addr, l->dst_addr, - l->link_addr, phys_to_virt(l->link_addr)); + l->link_addr, l->virt_link_addr); i++; - addr = l->link_addr; + l = l->virt_link_addr; } } @@ -125,7 +123,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf, goto err_kmalloc; tmp = dev_buf; - tmp += sprintf(tmp, "DMA -- enable dma channels\n"); + tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) if (started_channels & (1 << i)) @@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) return cohd_que; } +/* + * This tasklet is called from the interrupt handler to + * handle each descriptor (DMA job) that is sent to a channel. + */ static void dma_tasklet(unsigned long data) { struct coh901318_chan *cohc = (struct coh901318_chan *) data; @@ -600,9 +602,13 @@ static void dma_tasklet(unsigned long data) dma_async_tx_callback callback; void *callback_param; + dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" + " nbr_active_done %ld\n", __func__, + cohc->id, cohc->nbr_active_done); + spin_lock_irqsave(&cohc->lock, flags); - /* get first active entry from list */ + /* get first active descriptor entry from list */ cohd_fin = coh901318_first_active_get(cohc); BUG_ON(cohd_fin->pending_irqs == 0); @@ -613,8 +619,6 @@ static void dma_tasklet(unsigned long data) cohd_fin->pending_irqs--; cohc->completed = cohd_fin->desc.cookie; - BUG_ON(cohc->nbr_active_done && cohd_fin == NULL); - if (cohc->nbr_active_done == 0) return; @@ -638,10 +642,19 @@ static void dma_tasklet(unsigned long data) coh901318_desc_free(cohc, cohd_fin); } + /* + * If another interrupt fired while the tasklet was scheduling, + * we don't get called twice, so we have this number of active + * counter that keep track of the number of IRQs expected to + * be handled for this channel. If there happen to be more than + * one IRQ to be ack:ed, we simply schedule this tasklet again. 
+ */ if (cohc->nbr_active_done) cohc->nbr_active_done--; if (cohc->nbr_active_done) { + dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " + "came in while we were scheduling this tasklet\n"); if (cohc_chan_conf(cohc)->priority_high) tasklet_hi_schedule(&cohc->tasklet); else @@ -996,6 +1009,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, len += factor; } + pr_debug("Allocate %d lli:s for this transfer\n", len); data = coh901318_lli_alloc(&cohc->base->pool, len); if (data == NULL) @@ -1094,9 +1108,8 @@ coh901318_terminate_all(struct dma_chan *chan) /* release the lli allocation*/ coh901318_lli_free(&cohc->base->pool, &cohd->data); - coh901318_desc_remove(cohd); - /* return desc to free-list */ + coh901318_desc_remove(cohd); coh901318_desc_free(cohc, cohd); } @@ -1104,9 +1117,8 @@ coh901318_terminate_all(struct dma_chan *chan) /* release the lli allocation*/ coh901318_lli_free(&cohc->base->pool, &cohd->data); - coh901318_desc_remove(cohd); - /* return desc to free-list */ + coh901318_desc_remove(cohd); coh901318_desc_free(cohc, cohd); } @@ -1261,7 +1273,7 @@ static int __init coh901318_probe(struct platform_device *pdev) if (err) goto err_register_memcpy; - dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", + dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", (u32) base->virtbase); return err; diff --git a/trunk/drivers/dma/dmaengine.c b/trunk/drivers/dma/dmaengine.c index 6f51a0a7a8bb..e7a3230fb7d5 100644 --- a/trunk/drivers/dma/dmaengine.c +++ b/trunk/drivers/dma/dmaengine.c @@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device) chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); device_unregister(&chan->dev->device); + free_percpu(chan->local); } } EXPORT_SYMBOL(dma_async_device_unregister); diff --git a/trunk/drivers/dma/dmatest.c b/trunk/drivers/dma/dmatest.c index 8e409fb50fc0..948d563941c9 100644 --- a/trunk/drivers/dma/dmatest.c +++ b/trunk/drivers/dma/dmatest.c @@ -237,7 +237,7 @@ static int dmatest_func(void *data) dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; - u8 pq_coefs[pq_sources + 1]; + u8 pq_coefs[pq_sources]; int ret; int src_cnt; int dst_cnt; @@ -257,7 +257,7 @@ static int dmatest_func(void *data) } else if (thread->type == DMA_PQ) { src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ dst_cnt = 2; - for (i = 0; i < src_cnt; i++) + for (i = 0; i < pq_sources; i++) pq_coefs[i] = 1; } else goto err_srcs; @@ -347,7 +347,7 @@ static int dmatest_func(void *data) else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dma_dsts[0] + dst_off, - dma_srcs, src_cnt, + dma_srcs, xor_sources, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; @@ -355,7 +355,7 @@ static int dmatest_func(void *data) for (i = 0; i < dst_cnt; i++) dma_pq[i] = dma_dsts[i] + dst_off; tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, - src_cnt, pq_coefs, + pq_sources, pq_coefs, len, flags); } @@ -467,7 +467,7 @@ static int dmatest_func(void *data) if (iterations > 0) while (!kthread_should_stop()) { - DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit); + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); } diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c index bbb4be5a3ff4..296f9e747fac 100644 --- a/trunk/drivers/dma/fsldma.c +++ b/trunk/drivers/dma/fsldma.c @@ -37,19 +37,19 @@ #include #include "fsldma.h" -static void dma_init(struct fsldma_chan *chan) +static 
void dma_init(struct fsl_dma_chan *fsl_chan) { /* Reset the channel */ - DMA_OUT(chan, &chan->regs->mr, 0, 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); - switch (chan->feature & FSL_DMA_IP_MASK) { + switch (fsl_chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: /* Set the channel to below modes: * EIE - Error interrupt enable * EOSIE - End of segments interrupt enable (basic mode) * EOLNIE - End of links interrupt enable */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); break; case FSL_DMA_IP_83XX: @@ -57,146 +57,170 @@ static void dma_init(struct fsldma_chan *chan) * EOTIE - End-of-transfer interrupt enable * PRC_RM - PCI read multiple */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM, 32); break; } + } -static void set_sr(struct fsldma_chan *chan, u32 val) +static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) { - DMA_OUT(chan, &chan->regs->sr, val, 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); } -static u32 get_sr(struct fsldma_chan *chan) +static u32 get_sr(struct fsl_dma_chan *fsl_chan) { - return DMA_IN(chan, &chan->regs->sr, 32); + return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); } -static void set_desc_cnt(struct fsldma_chan *chan, +static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, u32 count) { - hw->count = CPU_TO_DMA(chan, count, 32); + hw->count = CPU_TO_DMA(fsl_chan, count, 32); } -static void set_desc_src(struct fsldma_chan *chan, +static void set_desc_src(struct fsl_dma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { u64 snoop_bits; - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; - hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); + hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); } -static void set_desc_dst(struct fsldma_chan *chan, - struct fsl_dma_ld_hw *hw, dma_addr_t dst) +static void set_desc_dest(struct fsl_dma_chan *fsl_chan, + struct fsl_dma_ld_hw *hw, dma_addr_t dest) { u64 snoop_bits; - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); + hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); } -static void set_desc_next(struct fsldma_chan *chan, +static void set_desc_next(struct fsl_dma_chan *fsl_chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { u64 snoop_bits; - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? 
FSL_DMA_SNEN : 0; - hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); + hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); } -static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) +static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) { - DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); } -static dma_addr_t get_cdar(struct fsldma_chan *chan) +static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) { - return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; + return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; } -static dma_addr_t get_ndar(struct fsldma_chan *chan) +static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) { - return DMA_IN(chan, &chan->regs->ndar, 64); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); } -static u32 get_bcr(struct fsldma_chan *chan) +static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) { - return DMA_IN(chan, &chan->regs->bcr, 32); + return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); } -static int dma_is_idle(struct fsldma_chan *chan) +static u32 get_bcr(struct fsl_dma_chan *fsl_chan) { - u32 sr = get_sr(chan); - return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); + return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); } -static void dma_start(struct fsldma_chan *chan) +static int dma_is_idle(struct fsl_dma_chan *fsl_chan) { - u32 mode; - - mode = DMA_IN(chan, &chan->regs->mr, 32); + u32 sr = get_sr(fsl_chan); + return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); +} - if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { - if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(chan, &chan->regs->bcr, 0, 32); - mode |= FSL_DMA_MR_EMP_EN; - } else { - mode &= ~FSL_DMA_MR_EMP_EN; - } +static void dma_start(struct fsl_dma_chan *fsl_chan) +{ + u32 mr_set = 0; + + if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { + DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); + mr_set |= FSL_DMA_MR_EMP_EN; + } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + & ~FSL_DMA_MR_EMP_EN, 32); } - if (chan->feature & FSL_DMA_CHAN_START_EXT) - mode |= FSL_DMA_MR_EMS_EN; + if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) + mr_set |= FSL_DMA_MR_EMS_EN; else - mode |= FSL_DMA_MR_CS; + mr_set |= FSL_DMA_MR_CS; - DMA_OUT(chan, &chan->regs->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + | mr_set, 32); } -static void dma_halt(struct fsldma_chan *chan) +static void dma_halt(struct fsl_dma_chan *fsl_chan) { - u32 mode; int i; - mode = DMA_IN(chan, &chan->regs->mr, 32); - mode |= FSL_DMA_MR_CA; - DMA_OUT(chan, &chan->regs->mr, mode, 32); - - mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); - DMA_OUT(chan, &chan->regs->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, + 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS + | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); for (i = 0; i < 100; i++) { - if (dma_is_idle(chan)) - return; - + if (dma_is_idle(fsl_chan)) + break; udelay(10); } - - if (!dma_is_idle(chan)) - dev_err(chan->dev, "DMA halt timeout!\n"); + if (i >= 100 && !dma_is_idle(fsl_chan)) + dev_err(fsl_chan->dev, "DMA halt timeout!\n"); } -static void set_ld_eol(struct 
fsldma_chan *chan, +static void set_ld_eol(struct fsl_dma_chan *fsl_chan, struct fsl_desc_sw *desc) { u64 snoop_bits; - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0; - desc->hw.next_ln_addr = CPU_TO_DMA(chan, - DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL + desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, + DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | snoop_bits, 64); } +static void append_ld_queue(struct fsl_dma_chan *fsl_chan, + struct fsl_desc_sw *new_desc) +{ + struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); + + if (list_empty(&fsl_chan->ld_queue)) + return; + + /* Link to the new descriptor physical address and + * Enable End-of-segment interrupt for + * the last link descriptor. + * (the previous node's next link descriptor) + * + * For FSL_DMA_IP_83xx, the snoop enable bit need be set. + */ + queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, + new_desc->async_tx.phys | FSL_DMA_EOSIE | + (((fsl_chan->feature & FSL_DMA_IP_MASK) + == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); +} + /** * fsl_chan_set_src_loop_size - Set source address hold transfer size - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set source address hold transfer size. The source @@ -205,30 +229,29 @@ static void set_ld_eol(struct fsldma_chan *chan, * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, * SA + 1 ... and so on. */ -static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) +static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) { - u32 mode; - - mode = DMA_IN(chan, &chan->regs->mr, 32); - switch (size) { case 0: - mode &= ~FSL_DMA_MR_SAHE; + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & + (~FSL_DMA_MR_SAHE), 32); break; case 1: case 2: case 4: case 8: - mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | + FSL_DMA_MR_SAHE | (__ilog2(size) << 14), + 32); break; } - - DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** - * fsl_chan_set_dst_loop_size - Set destination address hold transfer size - * @chan : Freescale DMA channel + * fsl_chan_set_dest_loop_size - Set destination address hold transfer size + * @fsl_chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set destination address hold transfer size. The destination @@ -237,30 +260,29 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * TA + 1 ... and so on. 
*/ -static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) +static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) { - u32 mode; - - mode = DMA_IN(chan, &chan->regs->mr, 32); - switch (size) { case 0: - mode &= ~FSL_DMA_MR_DAHE; + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & + (~FSL_DMA_MR_DAHE), 32); break; case 1: case 2: case 4: case 8: - mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | + FSL_DMA_MR_DAHE | (__ilog2(size) << 16), + 32); break; } - - DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** * fsl_chan_set_request_count - Set DMA Request Count for external control - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * @size : Number of bytes to transfer in a single request * * The Freescale DMA channel can be controlled by the external signal DREQ#. @@ -270,38 +292,35 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) * * A size of 0 disables external pause control. The maximum size is 1024. */ -static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) +static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) { - u32 mode; - BUG_ON(size > 1024); - - mode = DMA_IN(chan, &chan->regs->mr, 32); - mode |= (__ilog2(size) << 24) & 0x0f000000; - - DMA_OUT(chan, &chan->regs->mr, mode, 32); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + | ((__ilog2(size) << 24) & 0x0f000000), + 32); } /** * fsl_chan_toggle_ext_pause - Toggle channel external pause status - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * The Freescale DMA channel can be controlled by the external signal DREQ#. * The DMA Request Count feature should be used in addition to this feature * to set the number of bytes to transfer before pausing the channel. */ -static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) +static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) { if (enable) - chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; + fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; else - chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; + fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; } /** * fsl_chan_toggle_ext_start - Toggle channel external start status - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * If enable the external start, the channel can be started by an @@ -309,196 +328,141 @@ static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) * transfer immediately. The DMA channel will wait for the * control pin asserted. */ -static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) +static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) { if (enable) - chan->feature |= FSL_DMA_CHAN_START_EXT; + fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; else - chan->feature &= ~FSL_DMA_CHAN_START_EXT; -} - -static void append_ld_queue(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); - - if (list_empty(&chan->ld_pending)) - goto out_splice; - - /* - * Add the hardware descriptor to the chain of hardware descriptors - * that already exists in memory. 
- * - * This will un-set the EOL bit of the existing transaction, and the - * last link in this transaction will become the EOL descriptor. - */ - set_desc_next(chan, &tail->hw, desc->async_tx.phys); - - /* - * Add the software descriptor and all children to the list - * of pending transactions - */ -out_splice: - list_splice_tail_init(&desc->tx_list, &chan->ld_pending); + fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; } static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { - struct fsldma_chan *chan = to_fsl_chan(tx->chan); + struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; dma_cookie_t cookie; - spin_lock_irqsave(&chan->desc_lock, flags); + /* cookie increment and adding to ld_queue must be atomic */ + spin_lock_irqsave(&fsl_chan->desc_lock, flags); - /* - * assign cookies to all of the software descriptors - * that make up this transaction - */ - cookie = chan->common.cookie; + cookie = fsl_chan->common.cookie; list_for_each_entry(child, &desc->tx_list, node) { cookie++; if (cookie < 0) cookie = 1; - child->async_tx.cookie = cookie; + desc->async_tx.cookie = cookie; } - chan->common.cookie = cookie; - - /* put this transaction onto the tail of the pending queue */ - append_ld_queue(chan, desc); + fsl_chan->common.cookie = cookie; + append_ld_queue(fsl_chan, desc); + list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); return cookie; } /** * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * * Return - The descriptor allocated. NULL for failed. */ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( - struct fsldma_chan *chan) + struct fsl_dma_chan *fsl_chan) { - struct fsl_desc_sw *desc; dma_addr_t pdesc; - - desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); - if (!desc) { - dev_dbg(chan->dev, "out of memory for link desc\n"); - return NULL; + struct fsl_desc_sw *desc_sw; + + desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); + if (desc_sw) { + memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); + INIT_LIST_HEAD(&desc_sw->tx_list); + dma_async_tx_descriptor_init(&desc_sw->async_tx, + &fsl_chan->common); + desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; + desc_sw->async_tx.phys = pdesc; } - memset(desc, 0, sizeof(*desc)); - INIT_LIST_HEAD(&desc->tx_list); - dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); - desc->async_tx.tx_submit = fsl_dma_tx_submit; - desc->async_tx.phys = pdesc; - - return desc; + return desc_sw; } /** * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * * This function will create a dma pool for descriptor allocation. * * Return - The number of descriptors allocated. */ -static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) +static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) { - struct fsldma_chan *chan = to_fsl_chan(dchan); + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); /* Has this channel already been allocated? */ - if (chan->desc_pool) + if (fsl_chan->desc_pool) return 1; - /* - * We need the descriptor to be aligned to 32bytes + /* We need the descriptor to be aligned to 32bytes * for meeting FSL DMA specification requirement. 
*/ - chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", - chan->dev, - sizeof(struct fsl_desc_sw), - __alignof__(struct fsl_desc_sw), 0); - if (!chan->desc_pool) { - dev_err(chan->dev, "unable to allocate channel %d " - "descriptor pool\n", chan->id); - return -ENOMEM; + fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", + fsl_chan->dev, sizeof(struct fsl_desc_sw), + 32, 0); + if (!fsl_chan->desc_pool) { + dev_err(fsl_chan->dev, "No memory for channel %d " + "descriptor dma pool.\n", fsl_chan->id); + return 0; } - /* there is at least one descriptor free to be allocated */ return 1; } /** - * fsldma_free_desc_list - Free all descriptors in a queue - * @chan: Freescae DMA channel - * @list: the list to free - * - * LOCKING: must hold chan->desc_lock + * fsl_dma_free_chan_resources - Free all resources of the channel. + * @fsl_chan : Freescale DMA channel */ -static void fsldma_free_desc_list(struct fsldma_chan *chan, - struct list_head *list) -{ - struct fsl_desc_sw *desc, *_desc; - - list_for_each_entry_safe(desc, _desc, list, node) { - list_del(&desc->node); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); - } -} - -static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, - struct list_head *list) +static void fsl_dma_free_chan_resources(struct dma_chan *chan) { + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); struct fsl_desc_sw *desc, *_desc; + unsigned long flags; - list_for_each_entry_safe_reverse(desc, _desc, list, node) { + dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { +#ifdef FSL_DMA_LD_DEBUG + dev_dbg(fsl_chan->dev, + "LD %p will be released.\n", desc); +#endif list_del(&desc->node); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); + /* free link descriptor */ + dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); } -} - -/** - * fsl_dma_free_chan_resources - Free all resources of the channel. 
- * @chan : Freescale DMA channel - */ -static void fsl_dma_free_chan_resources(struct dma_chan *dchan) -{ - struct fsldma_chan *chan = to_fsl_chan(dchan); - unsigned long flags; - - dev_dbg(chan->dev, "Free all channel resources.\n"); - spin_lock_irqsave(&chan->desc_lock, flags); - fsldma_free_desc_list(chan, &chan->ld_pending); - fsldma_free_desc_list(chan, &chan->ld_running); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + dma_pool_destroy(fsl_chan->desc_pool); - dma_pool_destroy(chan->desc_pool); - chan->desc_pool = NULL; + fsl_chan->desc_pool = NULL; } static struct dma_async_tx_descriptor * -fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) +fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) { - struct fsldma_chan *chan; + struct fsl_dma_chan *fsl_chan; struct fsl_desc_sw *new; - if (!dchan) + if (!chan) return NULL; - chan = to_fsl_chan(dchan); + fsl_chan = to_fsl_chan(chan); - new = fsl_dma_alloc_descriptor(chan); + new = fsl_dma_alloc_descriptor(fsl_chan); if (!new) { - dev_err(chan->dev, "No free memory for link descriptor\n"); + dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); return NULL; } @@ -509,50 +473,51 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) list_add_tail(&new->node, &new->tx_list); /* Set End-of-link to the last link descriptor of new list*/ - set_ld_eol(chan, new); + set_ld_eol(fsl_chan, new); return &new->async_tx; } static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( - struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, + struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct fsldma_chan *chan; + struct fsl_dma_chan *fsl_chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; + struct list_head *list; size_t copy; - if (!dchan) + if (!chan) return NULL; if (!len) return NULL; - chan = to_fsl_chan(dchan); + fsl_chan = to_fsl_chan(chan); do { /* Allocate the link descriptor from DMA pool */ - new = fsl_dma_alloc_descriptor(chan); + new = fsl_dma_alloc_descriptor(fsl_chan); if (!new) { - dev_err(chan->dev, + dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(chan->dev, "new link desc alloc %p\n", new); + dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); #endif copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); - set_desc_cnt(chan, &new->hw, copy); - set_desc_src(chan, &new->hw, dma_src); - set_desc_dst(chan, &new->hw, dma_dst); + set_desc_cnt(fsl_chan, &new->hw, copy); + set_desc_src(fsl_chan, &new->hw, dma_src); + set_desc_dest(fsl_chan, &new->hw, dma_dest); if (!first) first = new; else - set_desc_next(chan, &prev->hw, new->async_tx.phys); + set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); new->async_tx.cookie = 0; async_tx_ack(&new->async_tx); @@ -560,7 +525,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( prev = new; len -= copy; dma_src += copy; - dma_dst += copy; + dma_dest += copy; /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &first->tx_list); @@ -570,7 +535,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list*/ - set_ld_eol(chan, new); + set_ld_eol(fsl_chan, new); return &first->async_tx; @@ -578,7 +543,12 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( if (!first) return NULL; - fsldma_free_desc_list_reverse(chan, 
&first->tx_list); + list = &first->tx_list; + list_for_each_entry_safe_reverse(new, prev, list, node) { + list_del(&new->node); + dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); + } + return NULL; } @@ -595,12 +565,13 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( * chan->private variable. */ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( - struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long flags) { - struct fsldma_chan *chan; + struct fsl_dma_chan *fsl_chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsl_dma_slave *slave; + struct list_head *tx_list; size_t copy; int i; @@ -610,14 +581,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct fsl_dma_hw_addr *hw; dma_addr_t dma_dst, dma_src; - if (!dchan) + if (!chan) return NULL; - if (!dchan->private) + if (!chan->private) return NULL; - chan = to_fsl_chan(dchan); - slave = dchan->private; + fsl_chan = to_fsl_chan(chan); + slave = chan->private; if (list_empty(&slave->addresses)) return NULL; @@ -666,14 +637,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( } /* Allocate the link descriptor from DMA pool */ - new = fsl_dma_alloc_descriptor(chan); + new = fsl_dma_alloc_descriptor(fsl_chan); if (!new) { - dev_err(chan->dev, "No free memory for " + dev_err(fsl_chan->dev, "No free memory for " "link descriptor\n"); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(chan->dev, "new link desc alloc %p\n", new); + dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); #endif /* @@ -700,9 +671,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( } /* Fill in the descriptor */ - set_desc_cnt(chan, &new->hw, copy); - set_desc_src(chan, &new->hw, dma_src); - set_desc_dst(chan, &new->hw, dma_dst); + set_desc_cnt(fsl_chan, &new->hw, copy); + set_desc_src(fsl_chan, &new->hw, dma_src); + set_desc_dest(fsl_chan, &new->hw, dma_dst); /* * If this is not the first descriptor, chain the @@ -711,7 +682,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( if (!first) { first = new; } else { - set_desc_next(chan, &prev->hw, + set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); } @@ -737,23 +708,23 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list */ - set_ld_eol(chan, new); + set_ld_eol(fsl_chan, new); /* Enable extra controller features */ - if (chan->set_src_loop_size) - chan->set_src_loop_size(chan, slave->src_loop_size); + if (fsl_chan->set_src_loop_size) + fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); - if (chan->set_dst_loop_size) - chan->set_dst_loop_size(chan, slave->dst_loop_size); + if (fsl_chan->set_dest_loop_size) + fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); - if (chan->toggle_ext_start) - chan->toggle_ext_start(chan, slave->external_start); + if (fsl_chan->toggle_ext_start) + fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); - if (chan->toggle_ext_pause) - chan->toggle_ext_pause(chan, slave->external_pause); + if (fsl_chan->toggle_ext_pause) + fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); - if (chan->set_request_count) - chan->set_request_count(chan, slave->request_count); + if (fsl_chan->set_request_count) + fsl_chan->set_request_count(fsl_chan, slave->request_count); return &first->async_tx; @@ -770,216 +741,215 @@ 
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( * * We're re-using variables for the loop, oh well */ - fsldma_free_desc_list_reverse(chan, &first->tx_list); + tx_list = &first->tx_list; + list_for_each_entry_safe_reverse(new, prev, tx_list, node) { + list_del_init(&new->node); + dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); + } + return NULL; } -static void fsl_dma_device_terminate_all(struct dma_chan *dchan) +static void fsl_dma_device_terminate_all(struct dma_chan *chan) { - struct fsldma_chan *chan; + struct fsl_dma_chan *fsl_chan; + struct fsl_desc_sw *desc, *tmp; unsigned long flags; - if (!dchan) + if (!chan) return; - chan = to_fsl_chan(dchan); + fsl_chan = to_fsl_chan(chan); /* Halt the DMA engine */ - dma_halt(chan); + dma_halt(fsl_chan); - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); /* Remove and free all of the descriptors in the LD queue */ - fsldma_free_desc_list(chan, &chan->ld_pending); - fsldma_free_desc_list(chan, &chan->ld_running); + list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { + list_del(&desc->node); + dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + } - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); } /** * fsl_dma_update_completed_cookie - Update the completed cookie. - * @chan : Freescale DMA channel - * - * CONTEXT: hardirq + * @fsl_chan : Freescale DMA channel */ -static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) +static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) { - struct fsl_desc_sw *desc; - unsigned long flags; - dma_cookie_t cookie; + struct fsl_desc_sw *cur_desc, *desc; + dma_addr_t ld_phy; - spin_lock_irqsave(&chan->desc_lock, flags); + ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; - if (list_empty(&chan->ld_running)) { - dev_dbg(chan->dev, "no running descriptors\n"); - goto out_unlock; - } + if (ld_phy) { + cur_desc = NULL; + list_for_each_entry(desc, &fsl_chan->ld_queue, node) + if (desc->async_tx.phys == ld_phy) { + cur_desc = desc; + break; + } - /* Get the last descriptor, update the cookie to that */ - desc = to_fsl_desc(chan->ld_running.prev); - if (dma_is_idle(chan)) - cookie = desc->async_tx.cookie; - else { - cookie = desc->async_tx.cookie - 1; - if (unlikely(cookie < DMA_MIN_COOKIE)) - cookie = DMA_MAX_COOKIE; + if (cur_desc && cur_desc->async_tx.cookie) { + if (dma_is_idle(fsl_chan)) + fsl_chan->completed_cookie = + cur_desc->async_tx.cookie; + else + fsl_chan->completed_cookie = + cur_desc->async_tx.cookie - 1; + } } - - chan->completed_cookie = cookie; - -out_unlock: - spin_unlock_irqrestore(&chan->desc_lock, flags); -} - -/** - * fsldma_desc_status - Check the status of a descriptor - * @chan: Freescale DMA channel - * @desc: DMA SW descriptor - * - * This function will return the status of the given descriptor - */ -static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) -{ - return dma_async_is_complete(desc->async_tx.cookie, - chan->completed_cookie, - chan->common.cookie); } /** * fsl_chan_ld_cleanup - Clean up link descriptors - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel * * This function clean up the ld_queue of DMA channel. + * If 'in_intr' is set, the function will move the link descriptor to + * the recycle list. Otherwise, free it directly. 
*/ -static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) +static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) { struct fsl_desc_sw *desc, *_desc; unsigned long flags; - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); - dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); - list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { + dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", + fsl_chan->completed_cookie); + list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { dma_async_tx_callback callback; void *callback_param; - if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) + if (dma_async_is_complete(desc->async_tx.cookie, + fsl_chan->completed_cookie, fsl_chan->common.cookie) + == DMA_IN_PROGRESS) break; - /* Remove from the list of running transactions */ + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + + /* Remove from ld_queue list */ list_del(&desc->node); + dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", + desc); + dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + /* Run the link descriptor callback function */ - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; if (callback) { - spin_unlock_irqrestore(&chan->desc_lock, flags); - dev_dbg(chan->dev, "LD %p callback\n", desc); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", + desc); callback(callback_param); - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); } - - /* Run any dependencies, then free the descriptor */ - dma_run_dependencies(&desc->async_tx); - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } - - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); } /** - * fsl_chan_xfer_ld_queue - transfer any pending transactions - * @chan : Freescale DMA channel - * - * This will make sure that any pending transactions will be run. - * If the DMA controller is idle, it will be started. Otherwise, - * the DMA controller's interrupt handler will start any pending - * transactions when it becomes idle. + * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. + * @fsl_chan : Freescale DMA channel */ -static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) +static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) { - struct fsl_desc_sw *desc; + struct list_head *ld_node; + dma_addr_t next_dest_addr; unsigned long flags; - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_irqsave(&fsl_chan->desc_lock, flags); - /* - * If the list of pending descriptors is empty, then we - * don't need to do any work at all - */ - if (list_empty(&chan->ld_pending)) { - dev_dbg(chan->dev, "no pending LDs\n"); + if (!dma_is_idle(fsl_chan)) goto out_unlock; - } - /* - * The DMA controller is not idle, which means the interrupt - * handler will start any queued transactions when it runs - * at the end of the current transaction - */ - if (!dma_is_idle(chan)) { - dev_dbg(chan->dev, "DMA controller still busy\n"); - goto out_unlock; - } + dma_halt(fsl_chan); - /* - * TODO: - * make sure the dma_halt() function really un-wedges the - * controller as much as possible + /* If there are some link descriptors + * not transfered in queue. We need to start it. 
*/ - dma_halt(chan); - /* - * If there are some link descriptors which have not been - * transferred, we need to start the controller - */ - - /* - * Move all elements from the queue of pending transactions - * onto the list of running transactions - */ - desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); - list_splice_tail_init(&chan->ld_pending, &chan->ld_running); - - /* - * Program the descriptor's address into the DMA controller, - * then start the DMA transaction - */ - set_cdar(chan, desc->async_tx.phys); - dma_start(chan); + /* Find the first un-transfer desciptor */ + for (ld_node = fsl_chan->ld_queue.next; + (ld_node != &fsl_chan->ld_queue) + && (dma_async_is_complete( + to_fsl_desc(ld_node)->async_tx.cookie, + fsl_chan->completed_cookie, + fsl_chan->common.cookie) == DMA_SUCCESS); + ld_node = ld_node->next); + + if (ld_node != &fsl_chan->ld_queue) { + /* Get the ld start address from ld_queue */ + next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; + dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", + (unsigned long long)next_dest_addr); + set_cdar(fsl_chan, next_dest_addr); + dma_start(fsl_chan); + } else { + set_cdar(fsl_chan, 0); + set_ndar(fsl_chan, 0); + } out_unlock: - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); } /** * fsl_dma_memcpy_issue_pending - Issue the DMA start command - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel */ -static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) +static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) { - struct fsldma_chan *chan = to_fsl_chan(dchan); - fsl_chan_xfer_ld_queue(chan); + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + +#ifdef FSL_DMA_LD_DEBUG + struct fsl_desc_sw *ld; + unsigned long flags; + + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + if (list_empty(&fsl_chan->ld_queue)) { + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + return; + } + + dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); + list_for_each_entry(ld, &fsl_chan->ld_queue, node) { + int i; + dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", + fsl_chan->id, ld->async_tx.phys); + for (i = 0; i < 8; i++) + dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", + i, *(((u32 *)&ld->hw) + i)); + } + dev_dbg(fsl_chan->dev, "----------------\n"); + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); +#endif + + fsl_chan_xfer_ld_queue(fsl_chan); } /** * fsl_dma_is_complete - Determine the DMA status - * @chan : Freescale DMA channel + * @fsl_chan : Freescale DMA channel */ -static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, +static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct fsldma_chan *chan = to_fsl_chan(dchan); + struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); dma_cookie_t last_used; dma_cookie_t last_complete; - fsl_chan_ld_cleanup(chan); + fsl_chan_ld_cleanup(fsl_chan); - last_used = dchan->cookie; - last_complete = chan->completed_cookie; + last_used = chan->cookie; + last_complete = fsl_chan->completed_cookie; if (done) *done = last_complete; @@ -990,37 +960,32 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, return dma_async_is_complete(cookie, last_complete, last_used); } -/*----------------------------------------------------------------------------*/ -/* Interrupt Handling */ -/*----------------------------------------------------------------------------*/ - -static irqreturn_t fsldma_chan_irq(int irq, 
void *data) +static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) { - struct fsldma_chan *chan = data; + struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; + u32 stat; int update_cookie = 0; int xfer_ld_q = 0; - u32 stat; - /* save and clear the status register */ - stat = get_sr(chan); - set_sr(chan, stat); - dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); + stat = get_sr(fsl_chan); + dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", + fsl_chan->id, stat); + set_sr(fsl_chan, stat); /* Clear the event register */ stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) return IRQ_NONE; if (stat & FSL_DMA_SR_TE) - dev_err(chan->dev, "Transfer Error!\n"); + dev_err(fsl_chan->dev, "Transfer Error!\n"); - /* - * Programming Error + /* Programming Error * The DMA_INTERRUPT async_tx is a NULL transfer, which will * triger a PE interrupt. */ if (stat & FSL_DMA_SR_PE) { - dev_dbg(chan->dev, "irq: Programming Error INT\n"); - if (get_bcr(chan) == 0) { + dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); + if (get_bcr(fsl_chan) == 0) { /* BCR register is 0, this is a DMA_INTERRUPT async_tx. * Now, update the completed cookie, and continue the * next uncompleted transfer. @@ -1031,296 +996,208 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) stat &= ~FSL_DMA_SR_PE; } - /* - * If the link descriptor segment transfer finishes, + /* If the link descriptor segment transfer finishes, * we will recycle the used descriptor. */ if (stat & FSL_DMA_SR_EOSI) { - dev_dbg(chan->dev, "irq: End-of-segments INT\n"); - dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", - (unsigned long long)get_cdar(chan), - (unsigned long long)get_ndar(chan)); + dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); + dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", + (unsigned long long)get_cdar(fsl_chan), + (unsigned long long)get_ndar(fsl_chan)); stat &= ~FSL_DMA_SR_EOSI; update_cookie = 1; } - /* - * For MPC8349, EOCDI event need to update cookie + /* For MPC8349, EOCDI event need to update cookie * and start the next transfer if it exist. */ if (stat & FSL_DMA_SR_EOCDI) { - dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); + dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; update_cookie = 1; xfer_ld_q = 1; } - /* - * If it current transfer is the end-of-transfer, + /* If it current transfer is the end-of-transfer, * we should clear the Channel Start bit for * prepare next transfer. 
*/ if (stat & FSL_DMA_SR_EOLNI) { - dev_dbg(chan->dev, "irq: End-of-link INT\n"); + dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; xfer_ld_q = 1; } if (update_cookie) - fsl_dma_update_completed_cookie(chan); + fsl_dma_update_completed_cookie(fsl_chan); if (xfer_ld_q) - fsl_chan_xfer_ld_queue(chan); + fsl_chan_xfer_ld_queue(fsl_chan); if (stat) - dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); + dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", + stat); - dev_dbg(chan->dev, "irq: Exit\n"); - tasklet_schedule(&chan->tasklet); + dev_dbg(fsl_chan->dev, "event: Exit\n"); + tasklet_schedule(&fsl_chan->tasklet); return IRQ_HANDLED; } -static void dma_do_tasklet(unsigned long data) +static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) { - struct fsldma_chan *chan = (struct fsldma_chan *)data; - fsl_chan_ld_cleanup(chan); -} - -static irqreturn_t fsldma_ctrl_irq(int irq, void *data) -{ - struct fsldma_device *fdev = data; - struct fsldma_chan *chan; - unsigned int handled = 0; - u32 gsr, mask; - int i; + struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; + u32 gsr; + int ch_nr; - gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) - : in_le32(fdev->regs); - mask = 0xff000000; - dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); - - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { - chan = fdev->chan[i]; - if (!chan) - continue; - - if (gsr & mask) { - dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); - fsldma_chan_irq(irq, chan); - handled++; - } + gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) + : in_le32(fdev->reg_base); + ch_nr = (32 - ffs(gsr)) / 8; - gsr &= ~mask; - mask >>= 8; - } - - return IRQ_RETVAL(handled); + return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, + fdev->chan[ch_nr]) : IRQ_NONE; } -static void fsldma_free_irqs(struct fsldma_device *fdev) +static void dma_do_tasklet(unsigned long data) { - struct fsldma_chan *chan; - int i; - - if (fdev->irq != NO_IRQ) { - dev_dbg(fdev->dev, "free per-controller IRQ\n"); - free_irq(fdev->irq, fdev); - return; - } - - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { - chan = fdev->chan[i]; - if (chan && chan->irq != NO_IRQ) { - dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); - free_irq(chan->irq, chan); - } - } + struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; + fsl_chan_ld_cleanup(fsl_chan); } -static int fsldma_request_irqs(struct fsldma_device *fdev) -{ - struct fsldma_chan *chan; - int ret; - int i; - - /* if we have a per-controller IRQ, use that */ - if (fdev->irq != NO_IRQ) { - dev_dbg(fdev->dev, "request per-controller IRQ\n"); - ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, - "fsldma-controller", fdev); - return ret; - } - - /* no per-controller IRQ, use the per-channel IRQs */ - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { - chan = fdev->chan[i]; - if (!chan) - continue; - - if (chan->irq == NO_IRQ) { - dev_err(fdev->dev, "no interrupts property defined for " - "DMA channel %d. 
Please fix your " - "device tree\n", chan->id); - ret = -ENODEV; - goto out_unwind; - } - - dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); - ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, - "fsldma-chan", chan); - if (ret) { - dev_err(fdev->dev, "unable to request IRQ for DMA " - "channel %d\n", chan->id); - goto out_unwind; - } - } - - return 0; - -out_unwind: - for (/* none */; i >= 0; i--) { - chan = fdev->chan[i]; - if (!chan) - continue; - - if (chan->irq == NO_IRQ) - continue; - - free_irq(chan->irq, chan); - } - - return ret; -} - -/*----------------------------------------------------------------------------*/ -/* OpenFirmware Subsystem */ -/*----------------------------------------------------------------------------*/ - -static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, +static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { - struct fsldma_chan *chan; - struct resource res; + struct fsl_dma_chan *new_fsl_chan; int err; /* alloc channel */ - chan = kzalloc(sizeof(*chan), GFP_KERNEL); - if (!chan) { - dev_err(fdev->dev, "no free memory for DMA channels!\n"); - err = -ENOMEM; - goto out_return; - } - - /* ioremap registers for use */ - chan->regs = of_iomap(node, 0); - if (!chan->regs) { - dev_err(fdev->dev, "unable to ioremap registers\n"); - err = -ENOMEM; - goto out_free_chan; + new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); + if (!new_fsl_chan) { + dev_err(fdev->dev, "No free memory for allocating " + "dma channels!\n"); + return -ENOMEM; } - err = of_address_to_resource(node, 0, &res); + /* get dma channel register base */ + err = of_address_to_resource(node, 0, &new_fsl_chan->reg); if (err) { - dev_err(fdev->dev, "unable to find 'reg' property\n"); - goto out_iounmap_regs; + dev_err(fdev->dev, "Can't get %s property 'reg'\n", + node->full_name); + goto err_no_reg; } - chan->feature = feature; + new_fsl_chan->feature = feature; + if (!fdev->feature) - fdev->feature = chan->feature; + fdev->feature = new_fsl_chan->feature; - /* - * If the DMA device's feature is different than the feature - * of its channels, report the bug + /* If the DMA device's feature is different than its channels', + * report the bug. 
*/ - WARN_ON(fdev->feature != chan->feature); + WARN_ON(fdev->feature != new_fsl_chan->feature); - chan->dev = fdev->dev; - chan->id = ((res.start - 0x100) & 0xfff) >> 7; - if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { - dev_err(fdev->dev, "too many channels for device\n"); + new_fsl_chan->dev = fdev->dev; + new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, + new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); + + new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; + if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { + dev_err(fdev->dev, "There is no %d channel!\n", + new_fsl_chan->id); err = -EINVAL; - goto out_iounmap_regs; + goto err_no_chan; } + fdev->chan[new_fsl_chan->id] = new_fsl_chan; + tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, + (unsigned long)new_fsl_chan); - fdev->chan[chan->id] = chan; - tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); - - /* Initialize the channel */ - dma_init(chan); + /* Init the channel */ + dma_init(new_fsl_chan); /* Clear cdar registers */ - set_cdar(chan, 0); + set_cdar(new_fsl_chan, 0); - switch (chan->feature & FSL_DMA_IP_MASK) { + switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: - chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: - chan->toggle_ext_start = fsl_chan_toggle_ext_start; - chan->set_src_loop_size = fsl_chan_set_src_loop_size; - chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; - chan->set_request_count = fsl_chan_set_request_count; + new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; + new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; + new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; + new_fsl_chan->set_request_count = fsl_chan_set_request_count; } - spin_lock_init(&chan->desc_lock); - INIT_LIST_HEAD(&chan->ld_pending); - INIT_LIST_HEAD(&chan->ld_running); - - chan->common.device = &fdev->common; + spin_lock_init(&new_fsl_chan->desc_lock); + INIT_LIST_HEAD(&new_fsl_chan->ld_queue); - /* find the IRQ line, if it exists in the device tree */ - chan->irq = irq_of_parse_and_map(node, 0); + new_fsl_chan->common.device = &fdev->common; /* Add the channel to DMA device channel list */ - list_add_tail(&chan->common.device_node, &fdev->common.channels); + list_add_tail(&new_fsl_chan->common.device_node, + &fdev->common.channels); fdev->common.chancnt++; - dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, - chan->irq != NO_IRQ ? chan->irq : fdev->irq); + new_fsl_chan->irq = irq_of_parse_and_map(node, 0); + if (new_fsl_chan->irq != NO_IRQ) { + err = request_irq(new_fsl_chan->irq, + &fsl_dma_chan_do_interrupt, IRQF_SHARED, + "fsldma-channel", new_fsl_chan); + if (err) { + dev_err(fdev->dev, "DMA channel %s request_irq error " + "with return %d\n", node->full_name, err); + goto err_no_irq; + } + } + + dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, + compatible, + new_fsl_chan->irq != NO_IRQ ? 
new_fsl_chan->irq : fdev->irq); return 0; -out_iounmap_regs: - iounmap(chan->regs); -out_free_chan: - kfree(chan); -out_return: +err_no_irq: + list_del(&new_fsl_chan->common.device_node); +err_no_chan: + iounmap(new_fsl_chan->reg_base); +err_no_reg: + kfree(new_fsl_chan); return err; } -static void fsl_dma_chan_remove(struct fsldma_chan *chan) +static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) { - irq_dispose_mapping(chan->irq); - list_del(&chan->common.device_node); - iounmap(chan->regs); - kfree(chan); + if (fchan->irq != NO_IRQ) + free_irq(fchan->irq, fchan); + list_del(&fchan->common.device_node); + iounmap(fchan->reg_base); + kfree(fchan); } -static int __devinit fsldma_of_probe(struct of_device *op, +static int __devinit of_fsl_dma_probe(struct of_device *dev, const struct of_device_id *match) { - struct fsldma_device *fdev; - struct device_node *child; int err; + struct fsl_dma_device *fdev; + struct device_node *child; - fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); + fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); if (!fdev) { - dev_err(&op->dev, "No enough memory for 'priv'\n"); - err = -ENOMEM; - goto out_return; + dev_err(&dev->dev, "No enough memory for 'priv'\n"); + return -ENOMEM; } - - fdev->dev = &op->dev; + fdev->dev = &dev->dev; INIT_LIST_HEAD(&fdev->common.channels); - /* ioremap the registers for use */ - fdev->regs = of_iomap(op->node, 0); - if (!fdev->regs) { - dev_err(&op->dev, "unable to ioremap registers\n"); - err = -ENOMEM; - goto out_free_fdev; + /* get DMA controller register base */ + err = of_address_to_resource(dev->node, 0, &fdev->reg); + if (err) { + dev_err(&dev->dev, "Can't get %s property 'reg'\n", + dev->node->full_name); + goto err_no_reg; } - /* map the channel IRQ if it exists, but don't hookup the handler yet */ - fdev->irq = irq_of_parse_and_map(op->node, 0); + dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " + "controller at 0x%llx...\n", + match->compatible, (unsigned long long)fdev->reg.start); + fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end + - fdev->reg.start + 1); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); @@ -1333,111 +1210,103 @@ static int __devinit fsldma_of_probe(struct of_device *op, fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; fdev->common.device_terminate_all = fsl_dma_device_terminate_all; - fdev->common.dev = &op->dev; + fdev->common.dev = &dev->dev; - dev_set_drvdata(&op->dev, fdev); + fdev->irq = irq_of_parse_and_map(dev->node, 0); + if (fdev->irq != NO_IRQ) { + err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED, + "fsldma-device", fdev); + if (err) { + dev_err(&dev->dev, "DMA device request_irq error " + "with return %d\n", err); + goto err; + } + } - /* - * We cannot use of_platform_bus_probe() because there is no - * of_platform_bus_remove(). Instead, we manually instantiate every DMA + dev_set_drvdata(&(dev->dev), fdev); + + /* We cannot use of_platform_bus_probe() because there is no + * of_platform_bus_remove. Instead, we manually instantiate every DMA * channel object. 
*/ - for_each_child_of_node(op->node, child) { - if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { + for_each_child_of_node(dev->node, child) { + if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, "fsl,eloplus-dma-channel"); - } - - if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { + if (of_device_is_compatible(child, "fsl,elo-dma-channel")) fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, "fsl,elo-dma-channel"); - } - } - - /* - * Hookup the IRQ handler(s) - * - * If we have a per-controller interrupt, we prefer that to the - * per-channel interrupts to reduce the number of shared interrupt - * handlers on the same IRQ line - */ - err = fsldma_request_irqs(fdev); - if (err) { - dev_err(fdev->dev, "unable to request IRQs\n"); - goto out_free_fdev; } dma_async_device_register(&fdev->common); return 0; -out_free_fdev: - irq_dispose_mapping(fdev->irq); +err: + iounmap(fdev->reg_base); +err_no_reg: kfree(fdev); -out_return: return err; } -static int fsldma_of_remove(struct of_device *op) +static int of_fsl_dma_remove(struct of_device *of_dev) { - struct fsldma_device *fdev; + struct fsl_dma_device *fdev; unsigned int i; - fdev = dev_get_drvdata(&op->dev); - dma_async_device_unregister(&fdev->common); + fdev = dev_get_drvdata(&of_dev->dev); - fsldma_free_irqs(fdev); + dma_async_device_unregister(&fdev->common); - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); - } - iounmap(fdev->regs); - dev_set_drvdata(&op->dev, NULL); + if (fdev->irq != NO_IRQ) + free_irq(fdev->irq, fdev); + + iounmap(fdev->reg_base); + kfree(fdev); + dev_set_drvdata(&of_dev->dev, NULL); return 0; } -static const struct of_device_id fsldma_of_ids[] = { +static struct of_device_id of_fsl_dma_ids[] = { { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} }; -static struct of_platform_driver fsldma_of_driver = { - .name = "fsl-elo-dma", - .match_table = fsldma_of_ids, - .probe = fsldma_of_probe, - .remove = fsldma_of_remove, +static struct of_platform_driver of_fsl_dma_driver = { + .name = "fsl-elo-dma", + .match_table = of_fsl_dma_ids, + .probe = of_fsl_dma_probe, + .remove = of_fsl_dma_remove, }; -/*----------------------------------------------------------------------------*/ -/* Module Init / Exit */ -/*----------------------------------------------------------------------------*/ - -static __init int fsldma_init(void) +static __init int of_fsl_dma_init(void) { int ret; pr_info("Freescale Elo / Elo Plus DMA driver\n"); - ret = of_register_platform_driver(&fsldma_of_driver); + ret = of_register_platform_driver(&of_fsl_dma_driver); if (ret) pr_err("fsldma: failed to register platform driver\n"); return ret; } -static void __exit fsldma_exit(void) +static void __exit of_fsl_dma_exit(void) { - of_unregister_platform_driver(&fsldma_of_driver); + of_unregister_platform_driver(&of_fsl_dma_driver); } -subsys_initcall(fsldma_init); -module_exit(fsldma_exit); +subsys_initcall(of_fsl_dma_init); +module_exit(of_fsl_dma_exit); MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/dma/fsldma.h b/trunk/drivers/dma/fsldma.h index cb4d6ff51597..0df14cbb8ca3 100644 --- a/trunk/drivers/dma/fsldma.h +++ b/trunk/drivers/dma/fsldma.h @@ -92,9 +92,11 @@ struct fsl_desc_sw { struct list_head node; struct list_head tx_list; struct 
dma_async_tx_descriptor async_tx; + struct list_head *ld; + void *priv; } __attribute__((aligned(32))); -struct fsldma_chan_regs { +struct fsl_dma_chan_regs { u32 mr; /* 0x00 - Mode Register */ u32 sr; /* 0x04 - Status Register */ u64 cdar; /* 0x08 - Current descriptor address register */ @@ -104,19 +106,20 @@ struct fsldma_chan_regs { u64 ndar; /* 0x24 - Next Descriptor Address Register */ }; -struct fsldma_chan; +struct fsl_dma_chan; #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 -struct fsldma_device { - void __iomem *regs; /* DGSR register base */ +struct fsl_dma_device { + void __iomem *reg_base; /* DGSR register base */ + struct resource reg; /* Resource for register */ struct device *dev; struct dma_device common; - struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; + struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; u32 feature; /* The same as DMA channels */ int irq; /* Channel IRQ */ }; -/* Define macros for fsldma_chan->feature property */ +/* Define macros for fsl_dma_chan->feature property */ #define FSL_DMA_LITTLE_ENDIAN 0x00000000 #define FSL_DMA_BIG_ENDIAN 0x00000001 @@ -127,28 +130,28 @@ struct fsldma_device { #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_START_EXT 0x00002000 -struct fsldma_chan { - struct fsldma_chan_regs __iomem *regs; +struct fsl_dma_chan { + struct fsl_dma_chan_regs __iomem *reg_base; dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ - struct list_head ld_pending; /* Link descriptors queue */ - struct list_head ld_running; /* Link descriptors queue */ + struct list_head ld_queue; /* Link descriptors queue */ struct dma_chan common; /* DMA common channel */ struct dma_pool *desc_pool; /* Descriptors pool */ struct device *dev; /* Channel device */ + struct resource reg; /* Resource for register */ int irq; /* Channel IRQ */ int id; /* Raw id of this channel */ struct tasklet_struct tasklet; u32 feature; - void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); - void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); - void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size); - void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size); - void (*set_request_count)(struct fsldma_chan *fsl_chan, int size); + void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); + void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); + void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); + void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); + void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); }; -#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common) +#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) diff --git a/trunk/drivers/dma/ioat/dma.c b/trunk/drivers/dma/ioat/dma.c index 5d0e42b263df..dcc4ab78b32b 100644 --- a/trunk/drivers/dma/ioat/dma.c +++ b/trunk/drivers/dma/ioat/dma.c @@ -94,12 +94,16 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) return IRQ_HANDLED; } +static void ioat1_cleanup_tasklet(unsigned long data); + /* common channel initialization */ -void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) +void ioat_init_channel(struct ioatdma_device *device, + struct ioat_chan_common *chan, int idx, + void 
(*timer_fn)(unsigned long), + void (*tasklet)(unsigned long), + unsigned long ioat) { struct dma_device *dma = &device->common; - struct dma_chan *c = &chan->common; - unsigned long data = (unsigned long) c; chan->device = device; chan->reg_base = device->reg_base + (0x80 * (idx + 1)); @@ -108,12 +112,14 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c list_add_tail(&chan->common.device_node, &dma->channels); device->idx[idx] = chan; init_timer(&chan->timer); - chan->timer.function = device->timer_fn; - chan->timer.data = data; - tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); + chan->timer.function = timer_fn; + chan->timer.data = ioat; + tasklet_init(&chan->cleanup_task, tasklet, ioat); tasklet_disable(&chan->cleanup_task); } +static void ioat1_timer_event(unsigned long data); + /** * ioat1_dma_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated @@ -149,7 +155,10 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) if (!ioat) break; - ioat_init_channel(device, &ioat->base, i); + ioat_init_channel(device, &ioat->base, i, + ioat1_timer_event, + ioat1_cleanup_tasklet, + (unsigned long) ioat); ioat->xfercap = xfercap; spin_lock_init(&ioat->desc_lock); INIT_LIST_HEAD(&ioat->free_desc); @@ -523,12 +532,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, return &desc->txd; } -static void ioat1_cleanup_event(unsigned long data) +static void ioat1_cleanup_tasklet(unsigned long data) { - struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); + struct ioat_dma_chan *chan = (void *)data; - ioat1_cleanup(ioat); - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + ioat1_cleanup(chan); + writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, @@ -678,7 +687,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) static void ioat1_timer_event(unsigned long data) { - struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); + struct ioat_dma_chan *ioat = (void *) data; struct ioat_chan_common *chan = &ioat->base; dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); @@ -725,17 +734,16 @@ static void ioat1_timer_event(unsigned long data) spin_unlock_bh(&chan->cleanup_lock); } -enum dma_status -ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, +static enum dma_status +ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct ioat_chan_common *chan = to_chan_common(c); - struct ioatdma_device *device = chan->device; + struct ioat_dma_chan *ioat = to_ioat_chan(c); if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - device->cleanup_fn((unsigned long) c); + ioat1_cleanup(ioat); return ioat_is_complete(c, cookie, done, used); } @@ -1191,14 +1199,12 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) device->intr_quirk = ioat1_intr_quirk; device->enumerate_channels = ioat1_enumerate_channels; device->self_test = ioat_dma_self_test; - device->timer_fn = ioat1_timer_event; - device->cleanup_fn = ioat1_cleanup_event; dma = &device->common; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_is_tx_complete = ioat_is_dma_complete; + 
dma->device_is_tx_complete = ioat1_dma_is_complete; err = ioat_probe(device); if (err) diff --git a/trunk/drivers/dma/ioat/dma.h b/trunk/drivers/dma/ioat/dma.h index 4f747a254074..bbc3e78ef333 100644 --- a/trunk/drivers/dma/ioat/dma.h +++ b/trunk/drivers/dma/ioat/dma.h @@ -61,7 +61,7 @@ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) * @enumerate_channels: hw version specific channel enumeration * @reset_hw: hw version specific channel (re)initialization - * @cleanup_fn: select between the v2 and v3 cleanup routines + * @cleanup_tasklet: select between the v2 and v3 cleanup routines * @timer_fn: select between the v2 and v3 timer watchdog routines * @self_test: hardware version specific self test for each supported op type * @@ -80,7 +80,7 @@ struct ioatdma_device { void (*intr_quirk)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device); int (*reset_hw)(struct ioat_chan_common *chan); - void (*cleanup_fn)(unsigned long data); + void (*cleanup_tasklet)(unsigned long data); void (*timer_fn)(unsigned long data); int (*self_test)(struct ioatdma_device *device); }; @@ -337,9 +337,10 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx); -enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used); + struct ioat_chan_common *chan, int idx, + void (*timer_fn)(unsigned long), + void (*tasklet)(unsigned long), + unsigned long ioat); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, diff --git a/trunk/drivers/dma/ioat/dma_v2.c b/trunk/drivers/dma/ioat/dma_v2.c index 25a3c72b2941..5cc37afe2bc1 100644 --- a/trunk/drivers/dma/ioat/dma_v2.c +++ b/trunk/drivers/dma/ioat/dma_v2.c @@ -51,40 +51,48 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order, void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) { - struct ioat_chan_common *chan = &ioat->base; + void * __iomem reg_base = ioat->base.reg_base; + ioat->pending = 0; ioat->dmacount += ioat2_ring_pending(ioat); ioat->issued = ioat->head; /* make descriptor updates globally visible before notifying channel */ wmb(); - writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - dev_dbg(to_dev(chan), + writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x count: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); } -void ioat2_issue_pending(struct dma_chan *c) +void ioat2_issue_pending(struct dma_chan *chan) { - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); - if (ioat2_ring_pending(ioat)) { - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&ioat->ring_lock); + if (ioat->pending == 1) __ioat2_issue_pending(ioat); - spin_unlock_bh(&ioat->ring_lock); - } + spin_unlock_bh(&ioat->ring_lock); } /** * ioat2_update_pending - log pending descriptors * @ioat: ioat2+ channel * - * Check if the number of unsubmitted descriptors has exceeded the - * watermark. Called with ring_lock held + * set pending to '1' unless pending is already set to '2', pending == 2 + * indicates that submission is temporarily blocked due to an in-flight + * reset. 
If we are already above the ioat_pending_level threshold then + * just issue pending. + * + * called with ring_lock held */ static void ioat2_update_pending(struct ioat2_dma_chan *ioat) { - if (ioat2_ring_pending(ioat) > ioat_pending_level) + if (unlikely(ioat->pending == 2)) + return; + else if (ioat2_ring_pending(ioat) > ioat_pending_level) __ioat2_issue_pending(ioat); + else + ioat->pending = 1; } static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) @@ -158,7 +166,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) seen_current = true; } ioat->tail += i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ + BUG_ON(!seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; if (ioat->head == ioat->tail) { @@ -199,9 +207,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -void ioat2_cleanup_event(unsigned long data) +void ioat2_cleanup_tasklet(unsigned long data) { - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat2_dma_chan *ioat = (void *) data; ioat2_cleanup(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); @@ -241,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo) if (is_ioat_active(status) || is_ioat_idle(status)) ioat_suspend(chan); while (is_ioat_active(status) || is_ioat_idle(status)) { - if (end && time_after(jiffies, end)) { + if (tmo && time_after(jiffies, end)) { err = -ETIMEDOUT; break; } @@ -283,7 +291,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) void ioat2_timer_event(unsigned long data) { - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat2_dma_chan *ioat = (void *) data; struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&chan->cleanup_lock); @@ -389,7 +397,10 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) if (!ioat) break; - ioat_init_channel(device, &ioat->base, i); + ioat_init_channel(device, &ioat->base, i, + device->timer_fn, + device->cleanup_tasklet, + (unsigned long) ioat); ioat->xfercap_log = xfercap_log; spin_lock_init(&ioat->ring_lock); if (device->reset_hw(&ioat->base)) { @@ -535,6 +546,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->head = 0; ioat->issued = 0; ioat->tail = 0; + ioat->pending = 0; ioat->alloc_order = order; spin_unlock_bh(&ioat->ring_lock); @@ -689,7 +701,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); spin_unlock_bh(&chan->cleanup_lock); - device->timer_fn((unsigned long) &chan->common); + device->timer_fn((unsigned long) ioat); } else spin_unlock_bh(&chan->cleanup_lock); return -ENOMEM; @@ -773,7 +785,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) tasklet_disable(&chan->cleanup_task); del_timer_sync(&chan->timer); - device->cleanup_fn((unsigned long) c); + device->cleanup_tasklet((unsigned long) ioat); device->reset_hw(chan); spin_lock_bh(&ioat->ring_lock); @@ -803,9 +815,25 @@ void ioat2_free_chan_resources(struct dma_chan *c) chan->last_completion = 0; chan->completion_dma = 0; + ioat->pending = 0; ioat->dmacount = 0; } +enum dma_status +ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioatdma_device *device = ioat->base.device; + + if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + return 
DMA_SUCCESS; + + device->cleanup_tasklet((unsigned long) ioat); + + return ioat_is_complete(c, cookie, done, used); +} + static ssize_t ring_size_show(struct dma_chan *c, char *page) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); @@ -846,7 +874,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) device->enumerate_channels = ioat2_enumerate_channels; device->reset_hw = ioat2_reset_hw; - device->cleanup_fn = ioat2_cleanup_event; + device->cleanup_tasklet = ioat2_cleanup_tasklet; device->timer_fn = ioat2_timer_event; device->self_test = ioat_dma_self_test; dma = &device->common; @@ -854,7 +882,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_is_tx_complete = ioat2_is_complete; err = ioat_probe(device); if (err) diff --git a/trunk/drivers/dma/ioat/dma_v2.h b/trunk/drivers/dma/ioat/dma_v2.h index ef2871fd7868..3afad8da43cc 100644 --- a/trunk/drivers/dma/ioat/dma_v2.h +++ b/trunk/drivers/dma/ioat/dma_v2.h @@ -47,6 +47,7 @@ extern int ioat_ring_alloc_order; * @head: allocated index * @issued: hardware notification point * @tail: cleanup index + * @pending: lock free indicator for issued != head * @dmacount: identical to 'head' except for occasionally resetting to zero * @alloc_order: log2 of the number of allocated descriptors * @ring: software ring buffer implementation of hardware ring @@ -60,6 +61,7 @@ struct ioat2_dma_chan { u16 tail; u16 dmacount; u16 alloc_order; + int pending; struct ioat_ring_ent **ring; spinlock_t ring_lock; }; @@ -176,10 +178,12 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, void ioat2_issue_pending(struct dma_chan *chan); int ioat2_alloc_chan_resources(struct dma_chan *c); void ioat2_free_chan_resources(struct dma_chan *c); +enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used); void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); bool reshape_ring(struct ioat2_dma_chan *ioat, int order); void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); -void ioat2_cleanup_event(unsigned long data); +void ioat2_cleanup_tasklet(unsigned long data); void ioat2_timer_event(unsigned long data); int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); diff --git a/trunk/drivers/dma/ioat/dma_v3.c b/trunk/drivers/dma/ioat/dma_v3.c index 26febc56dab1..9908c9e94b2d 100644 --- a/trunk/drivers/dma/ioat/dma_v3.c +++ b/trunk/drivers/dma/ioat/dma_v3.c @@ -293,25 +293,17 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) } } ioat->tail += i; - BUG_ON(active && !seen_current); /* no active descs have written a completion? */ + BUG_ON(!seen_current); /* no active descs have written a completion? 
*/ chan->last_completion = phys_complete; - - active = ioat2_ring_active(ioat); - if (active == 0) { + if (ioat->head == ioat->tail) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } - /* 5 microsecond delay per pending descriptor */ - writew(min((5 * active), IOAT_INTRDELAY_MASK), - chan->device->reg_base + IOAT_INTRDELAY_OFFSET); } -/* try to cleanup, but yield (via spin_trylock) to incoming submissions - * with the expectation that we will immediately poll again shortly - */ -static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) +static void ioat3_cleanup(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; @@ -337,41 +329,29 @@ static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -/* run cleanup now because we already delayed the interrupt via INTRDELAY */ -static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; - - prefetch(chan->completion); - - spin_lock_bh(&chan->cleanup_lock); - if (!ioat_cleanup_preamble(chan, &phys_complete)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - spin_lock_bh(&ioat->ring_lock); - - __cleanup(ioat, phys_complete); - - spin_unlock_bh(&ioat->ring_lock); - spin_unlock_bh(&chan->cleanup_lock); -} - -static void ioat3_cleanup_event(unsigned long data) +static void ioat3_cleanup_tasklet(unsigned long data) { - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat2_dma_chan *ioat = (void *) data; - ioat3_cleanup_sync(ioat); - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + ioat3_cleanup(ioat); + writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, + ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; + u32 status; + + status = ioat_chansts(chan); + if (is_ioat_active(status) || is_ioat_idle(status)) + ioat_suspend(chan); + while (is_ioat_active(status) || is_ioat_idle(status)) { + status = ioat_chansts(chan); + cpu_relax(); + } - ioat2_quiesce(chan, 0); if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); @@ -380,7 +360,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) static void ioat3_timer_event(unsigned long data) { - struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); + struct ioat2_dma_chan *ioat = (void *) data; struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&chan->cleanup_lock); @@ -446,7 +426,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - ioat3_cleanup_poll(ioat); + ioat3_cleanup(ioat); return ioat_is_complete(c, cookie, done, used); } @@ -1259,11 +1239,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (is_raid_device) { dma->device_is_tx_complete = ioat3_is_complete; - device->cleanup_fn = ioat3_cleanup_event; + device->cleanup_tasklet = ioat3_cleanup_tasklet; device->timer_fn = ioat3_timer_event; } else { - dma->device_is_tx_complete = ioat_is_dma_complete; - device->cleanup_fn = ioat2_cleanup_event; + dma->device_is_tx_complete = ioat2_is_complete; + device->cleanup_tasklet = ioat2_cleanup_tasklet; device->timer_fn = ioat2_timer_event; } diff --git 
a/trunk/drivers/dma/ioat/registers.h b/trunk/drivers/dma/ioat/registers.h index 1391798542b6..e8ae63baf588 100644 --- a/trunk/drivers/dma/ioat/registers.h +++ b/trunk/drivers/dma/ioat/registers.h @@ -60,7 +60,7 @@ #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ -#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */ +#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ #define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ diff --git a/trunk/drivers/dma/ipu/ipu_idmac.c b/trunk/drivers/dma/ipu/ipu_idmac.c index 1c518f1cc49b..e80bae1673fa 100644 --- a/trunk/drivers/dma/ipu/ipu_idmac.c +++ b/trunk/drivers/dma/ipu/ipu_idmac.c @@ -348,7 +348,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, break; case IPU_PIX_FMT_BGRA32: case IPU_PIX_FMT_BGR32: - case IPU_PIX_FMT_ABGR32: params->ip.bpp = 0; params->ip.pfs = 4; params->ip.npb = 7; @@ -377,6 +376,20 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, params->ip.wid2 = 7; /* Blue bit width - 1 */ params->ip.wid3 = 7; /* Alpha bit width - 1 */ break; + case IPU_PIX_FMT_ABGR32: + params->ip.bpp = 0; + params->ip.pfs = 4; + params->ip.npb = 7; + params->ip.sat = 2; /* SAT = 32-bit access */ + params->ip.ofs0 = 8; /* Red bit offset */ + params->ip.ofs1 = 16; /* Green bit offset */ + params->ip.ofs2 = 24; /* Blue bit offset */ + params->ip.ofs3 = 0; /* Alpha bit offset */ + params->ip.wid0 = 7; /* Red bit width - 1 */ + params->ip.wid1 = 7; /* Green bit width - 1 */ + params->ip.wid2 = 7; /* Blue bit width - 1 */ + params->ip.wid3 = 7; /* Alpha bit width - 1 */ + break; case IPU_PIX_FMT_UYVY: params->ip.bpp = 2; params->ip.pfs = 6; @@ -748,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) * @buffer_n: buffer number to update. * 0 or 1 are the only valid values. * @phyaddr: buffer physical address. - * @return: Returns 0 on success or negative error code on failure. This - * function will fail if the buffer is set to ready. */ /* Called under spin_lock(_irqsave)(&ichan->lock) */ -static int ipu_update_channel_buffer(struct idmac_channel *ichan, - int buffer_n, dma_addr_t phyaddr) +static void ipu_update_channel_buffer(struct idmac_channel *ichan, + int buffer_n, dma_addr_t phyaddr) { enum ipu_channel channel = ichan->dma_chan.chan_id; uint32_t reg; @@ -793,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, } spin_unlock_irqrestore(&ipu_data.lock, flags); - - return 0; } /* Called under spin_lock_irqsave(&ichan->lock) */ @@ -803,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, { unsigned int chan_id = ichan->dma_chan.chan_id; struct device *dev = &ichan->dma_chan.dev->device; - int ret; if (async_tx_test_ack(&desc->txd)) return -EINTR; @@ -814,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan, * could make it conditional on status >= IPU_CHANNEL_ENABLED, but * doing it again shouldn't hurt either. 
*/ - ret = ipu_update_channel_buffer(ichan, buf_idx, - sg_dma_address(sg)); - - if (ret < 0) { - dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", - sg, chan_id, buf_idx); - return ret; - } + ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); ipu_select_buffer(chan_id, buf_idx); dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", @@ -1366,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (likely(sgnew) && ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; + callback = descnew->txd.callback; + callback_param = descnew->txd.callback_param; spin_unlock(&ichan->lock); - callback(callback_param); + if (callback) + callback(callback_param); spin_lock(&ichan->lock); } diff --git a/trunk/drivers/dma/mpc512x_dma.c b/trunk/drivers/dma/mpc512x_dma.c deleted file mode 100644 index 3fdf1f46bd63..000000000000 --- a/trunk/drivers/dma/mpc512x_dma.c +++ /dev/null @@ -1,800 +0,0 @@ -/* - * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. - * Copyright (C) Semihalf 2009 - * - * Written by Piotr Ziecik . Hardware description - * (defines, structures and comments) was taken from MPC5121 DMA driver - * written by Hongjun Chen . - * - * Approved as OSADL project by a majority of OSADL members and funded - * by OSADL membership fees in 2009; for details see www.osadl.org. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 - * Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * The full GNU General Public License is included in this distribution in the - * file called COPYING. - */ - -/* - * This is initial version of MPC5121 DMA driver. Only memory to memory - * transfers are supported (tested using dmatest module). 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -/* Number of DMA Transfer descriptors allocated per channel */ -#define MPC_DMA_DESCRIPTORS 64 - -/* Macro definitions */ -#define MPC_DMA_CHANNELS 64 -#define MPC_DMA_TCD_OFFSET 0x1000 - -/* Arbitration mode of group and channel */ -#define MPC_DMA_DMACR_EDCG (1 << 31) -#define MPC_DMA_DMACR_ERGA (1 << 3) -#define MPC_DMA_DMACR_ERCA (1 << 2) - -/* Error codes */ -#define MPC_DMA_DMAES_VLD (1 << 31) -#define MPC_DMA_DMAES_GPE (1 << 15) -#define MPC_DMA_DMAES_CPE (1 << 14) -#define MPC_DMA_DMAES_ERRCHN(err) \ - (((err) >> 8) & 0x3f) -#define MPC_DMA_DMAES_SAE (1 << 7) -#define MPC_DMA_DMAES_SOE (1 << 6) -#define MPC_DMA_DMAES_DAE (1 << 5) -#define MPC_DMA_DMAES_DOE (1 << 4) -#define MPC_DMA_DMAES_NCE (1 << 3) -#define MPC_DMA_DMAES_SGE (1 << 2) -#define MPC_DMA_DMAES_SBE (1 << 1) -#define MPC_DMA_DMAES_DBE (1 << 0) - -#define MPC_DMA_TSIZE_1 0x00 -#define MPC_DMA_TSIZE_2 0x01 -#define MPC_DMA_TSIZE_4 0x02 -#define MPC_DMA_TSIZE_16 0x04 -#define MPC_DMA_TSIZE_32 0x05 - -/* MPC5121 DMA engine registers */ -struct __attribute__ ((__packed__)) mpc_dma_regs { - /* 0x00 */ - u32 dmacr; /* DMA control register */ - u32 dmaes; /* DMA error status */ - /* 0x08 */ - u32 dmaerqh; /* DMA enable request high(channels 63~32) */ - u32 dmaerql; /* DMA enable request low(channels 31~0) */ - u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */ - u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */ - /* 0x18 */ - u8 dmaserq; /* DMA set enable request */ - u8 dmacerq; /* DMA clear enable request */ - u8 dmaseei; /* DMA set enable error interrupt */ - u8 dmaceei; /* DMA clear enable error interrupt */ - /* 0x1c */ - u8 dmacint; /* DMA clear interrupt request */ - u8 dmacerr; /* DMA clear error */ - u8 dmassrt; /* DMA set start bit */ - u8 dmacdne; /* DMA clear DONE status bit */ - /* 0x20 */ - u32 dmainth; /* DMA interrupt request high(ch63~32) */ - u32 dmaintl; /* DMA interrupt request low(ch31~0) */ - u32 dmaerrh; /* DMA error high(ch63~32) */ - u32 dmaerrl; /* DMA error low(ch31~0) */ - /* 0x30 */ - u32 dmahrsh; /* DMA hw request status high(ch63~32) */ - u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ - u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ - u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ - /* 0x40 ~ 0xff */ - u32 reserve0[48]; /* Reserved */ - /* 0x100 */ - u8 dchpri[MPC_DMA_CHANNELS]; - /* DMA channels(0~63) priority */ -}; - -struct __attribute__ ((__packed__)) mpc_dma_tcd { - /* 0x00 */ - u32 saddr; /* Source address */ - - u32 smod:5; /* Source address modulo */ - u32 ssize:3; /* Source data transfer size */ - u32 dmod:5; /* Destination address modulo */ - u32 dsize:3; /* Destination data transfer size */ - u32 soff:16; /* Signed source address offset */ - - /* 0x08 */ - u32 nbytes; /* Inner "minor" byte count */ - u32 slast; /* Last source address adjustment */ - u32 daddr; /* Destination address */ - - /* 0x14 */ - u32 citer_elink:1; /* Enable channel-to-channel linking on - * minor loop complete - */ - u32 citer_linkch:6; /* Link channel for minor loop complete */ - u32 citer:9; /* Current "major" iteration count */ - u32 doff:16; /* Signed destination address offset */ - - /* 0x18 */ - u32 dlast_sga; /* Last Destination address adjustment/scatter - * gather address - */ - - /* 0x1c */ - u32 biter_elink:1; /* Enable channel-to-channel linking on major - * loop complete - */ - u32 biter_linkch:6; - u32 biter:9; /* Beginning "major" iteration count */ - u32 
bwc:2; /* Bandwidth control */ - u32 major_linkch:6; /* Link channel number */ - u32 done:1; /* Channel done */ - u32 active:1; /* Channel active */ - u32 major_elink:1; /* Enable channel-to-channel linking on major - * loop complete - */ - u32 e_sg:1; /* Enable scatter/gather processing */ - u32 d_req:1; /* Disable request */ - u32 int_half:1; /* Enable an interrupt when major counter is - * half complete - */ - u32 int_maj:1; /* Enable an interrupt when major iteration - * count completes - */ - u32 start:1; /* Channel start */ -}; - -struct mpc_dma_desc { - struct dma_async_tx_descriptor desc; - struct mpc_dma_tcd *tcd; - dma_addr_t tcd_paddr; - int error; - struct list_head node; -}; - -struct mpc_dma_chan { - struct dma_chan chan; - struct list_head free; - struct list_head prepared; - struct list_head queued; - struct list_head active; - struct list_head completed; - struct mpc_dma_tcd *tcd; - dma_addr_t tcd_paddr; - dma_cookie_t completed_cookie; - - /* Lock for this structure */ - spinlock_t lock; -}; - -struct mpc_dma { - struct dma_device dma; - struct tasklet_struct tasklet; - struct mpc_dma_chan channels[MPC_DMA_CHANNELS]; - struct mpc_dma_regs __iomem *regs; - struct mpc_dma_tcd __iomem *tcd; - int irq; - uint error_status; - - /* Lock for error_status field in this structure */ - spinlock_t error_status_lock; -}; - -#define DRV_NAME "mpc512x_dma" - -/* Convert struct dma_chan to struct mpc_dma_chan */ -static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) -{ - return container_of(c, struct mpc_dma_chan, chan); -} - -/* Convert struct dma_chan to struct mpc_dma */ -static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) -{ - struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); - return container_of(mchan, struct mpc_dma, channels[c->chan_id]); -} - -/* - * Execute all queued DMA descriptors. - * - * Following requirements must be met while calling mpc_dma_execute(): - * a) mchan->lock is acquired, - * b) mchan->active list is empty, - * c) mchan->queued list contains at least one entry. 
- */ -static void mpc_dma_execute(struct mpc_dma_chan *mchan) -{ - struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); - struct mpc_dma_desc *first = NULL; - struct mpc_dma_desc *prev = NULL; - struct mpc_dma_desc *mdesc; - int cid = mchan->chan.chan_id; - - /* Move all queued descriptors to active list */ - list_splice_tail_init(&mchan->queued, &mchan->active); - - /* Chain descriptors into one transaction */ - list_for_each_entry(mdesc, &mchan->active, node) { - if (!first) - first = mdesc; - - if (!prev) { - prev = mdesc; - continue; - } - - prev->tcd->dlast_sga = mdesc->tcd_paddr; - prev->tcd->e_sg = 1; - mdesc->tcd->start = 1; - - prev = mdesc; - } - - prev->tcd->start = 0; - prev->tcd->int_maj = 1; - - /* Send first descriptor in chain into hardware */ - memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); - out_8(&mdma->regs->dmassrt, cid); -} - -/* Handle interrupt on one half of DMA controller (32 channels) */ -static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) -{ - struct mpc_dma_chan *mchan; - struct mpc_dma_desc *mdesc; - u32 status = is | es; - int ch; - - while ((ch = fls(status) - 1) >= 0) { - status &= ~(1 << ch); - mchan = &mdma->channels[ch + off]; - - spin_lock(&mchan->lock); - - /* Check error status */ - if (es & (1 << ch)) - list_for_each_entry(mdesc, &mchan->active, node) - mdesc->error = -EIO; - - /* Execute queued descriptors */ - list_splice_tail_init(&mchan->active, &mchan->completed); - if (!list_empty(&mchan->queued)) - mpc_dma_execute(mchan); - - spin_unlock(&mchan->lock); - } -} - -/* Interrupt handler */ -static irqreturn_t mpc_dma_irq(int irq, void *data) -{ - struct mpc_dma *mdma = data; - uint es; - - /* Save error status register */ - es = in_be32(&mdma->regs->dmaes); - spin_lock(&mdma->error_status_lock); - if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0) - mdma->error_status = es; - spin_unlock(&mdma->error_status_lock); - - /* Handle interrupt on each channel */ - mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), - in_be32(&mdma->regs->dmaerrh), 32); - mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), - in_be32(&mdma->regs->dmaerrl), 0); - - /* Ack interrupt on all channels */ - out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); - out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); - out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); - out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); - - /* Schedule tasklet */ - tasklet_schedule(&mdma->tasklet); - - return IRQ_HANDLED; -} - -/* DMA Tasklet */ -static void mpc_dma_tasklet(unsigned long data) -{ - struct mpc_dma *mdma = (void *)data; - dma_cookie_t last_cookie = 0; - struct mpc_dma_chan *mchan; - struct mpc_dma_desc *mdesc; - struct dma_async_tx_descriptor *desc; - unsigned long flags; - LIST_HEAD(list); - uint es; - int i; - - spin_lock_irqsave(&mdma->error_status_lock, flags); - es = mdma->error_status; - mdma->error_status = 0; - spin_unlock_irqrestore(&mdma->error_status_lock, flags); - - /* Print nice error report */ - if (es) { - dev_err(mdma->dma.dev, - "Hardware reported following error(s) on channel %u:\n", - MPC_DMA_DMAES_ERRCHN(es)); - - if (es & MPC_DMA_DMAES_GPE) - dev_err(mdma->dma.dev, "- Group Priority Error\n"); - if (es & MPC_DMA_DMAES_CPE) - dev_err(mdma->dma.dev, "- Channel Priority Error\n"); - if (es & MPC_DMA_DMAES_SAE) - dev_err(mdma->dma.dev, "- Source Address Error\n"); - if (es & MPC_DMA_DMAES_SOE) - dev_err(mdma->dma.dev, "- Source Offset" - " Configuration Error\n"); - if (es & MPC_DMA_DMAES_DAE) - dev_err(mdma->dma.dev, "- 
Destination Address" - " Error\n"); - if (es & MPC_DMA_DMAES_DOE) - dev_err(mdma->dma.dev, "- Destination Offset" - " Configuration Error\n"); - if (es & MPC_DMA_DMAES_NCE) - dev_err(mdma->dma.dev, "- NBytes/Citter" - " Configuration Error\n"); - if (es & MPC_DMA_DMAES_SGE) - dev_err(mdma->dma.dev, "- Scatter/Gather" - " Configuration Error\n"); - if (es & MPC_DMA_DMAES_SBE) - dev_err(mdma->dma.dev, "- Source Bus Error\n"); - if (es & MPC_DMA_DMAES_DBE) - dev_err(mdma->dma.dev, "- Destination Bus Error\n"); - } - - for (i = 0; i < mdma->dma.chancnt; i++) { - mchan = &mdma->channels[i]; - - /* Get all completed descriptors */ - spin_lock_irqsave(&mchan->lock, flags); - if (!list_empty(&mchan->completed)) - list_splice_tail_init(&mchan->completed, &list); - spin_unlock_irqrestore(&mchan->lock, flags); - - if (list_empty(&list)) - continue; - - /* Execute callbacks and run dependencies */ - list_for_each_entry(mdesc, &list, node) { - desc = &mdesc->desc; - - if (desc->callback) - desc->callback(desc->callback_param); - - last_cookie = desc->cookie; - dma_run_dependencies(desc); - } - - /* Free descriptors */ - spin_lock_irqsave(&mchan->lock, flags); - list_splice_tail_init(&list, &mchan->free); - mchan->completed_cookie = last_cookie; - spin_unlock_irqrestore(&mchan->lock, flags); - } -} - -/* Submit descriptor to hardware */ -static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) -{ - struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan); - struct mpc_dma_desc *mdesc; - unsigned long flags; - dma_cookie_t cookie; - - mdesc = container_of(txd, struct mpc_dma_desc, desc); - - spin_lock_irqsave(&mchan->lock, flags); - - /* Move descriptor to queue */ - list_move_tail(&mdesc->node, &mchan->queued); - - /* If channel is idle, execute all queued descriptors */ - if (list_empty(&mchan->active)) - mpc_dma_execute(mchan); - - /* Update cookie */ - cookie = mchan->chan.cookie + 1; - if (cookie <= 0) - cookie = 1; - - mchan->chan.cookie = cookie; - mdesc->desc.cookie = cookie; - - spin_unlock_irqrestore(&mchan->lock, flags); - - return cookie; -} - -/* Alloc channel resources */ -static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) -{ - struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); - struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); - struct mpc_dma_desc *mdesc; - struct mpc_dma_tcd *tcd; - dma_addr_t tcd_paddr; - unsigned long flags; - LIST_HEAD(descs); - int i; - - /* Alloc DMA memory for Transfer Control Descriptors */ - tcd = dma_alloc_coherent(mdma->dma.dev, - MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), - &tcd_paddr, GFP_KERNEL); - if (!tcd) - return -ENOMEM; - - /* Alloc descriptors for this channel */ - for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { - mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); - if (!mdesc) { - dev_notice(mdma->dma.dev, "Memory allocation error. 
" - "Allocated only %u descriptors\n", i); - break; - } - - dma_async_tx_descriptor_init(&mdesc->desc, chan); - mdesc->desc.flags = DMA_CTRL_ACK; - mdesc->desc.tx_submit = mpc_dma_tx_submit; - - mdesc->tcd = &tcd[i]; - mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd)); - - list_add_tail(&mdesc->node, &descs); - } - - /* Return error only if no descriptors were allocated */ - if (i == 0) { - dma_free_coherent(mdma->dma.dev, - MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), - tcd, tcd_paddr); - return -ENOMEM; - } - - spin_lock_irqsave(&mchan->lock, flags); - mchan->tcd = tcd; - mchan->tcd_paddr = tcd_paddr; - list_splice_tail_init(&descs, &mchan->free); - spin_unlock_irqrestore(&mchan->lock, flags); - - /* Enable Error Interrupt */ - out_8(&mdma->regs->dmaseei, chan->chan_id); - - return 0; -} - -/* Free channel resources */ -static void mpc_dma_free_chan_resources(struct dma_chan *chan) -{ - struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); - struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); - struct mpc_dma_desc *mdesc, *tmp; - struct mpc_dma_tcd *tcd; - dma_addr_t tcd_paddr; - unsigned long flags; - LIST_HEAD(descs); - - spin_lock_irqsave(&mchan->lock, flags); - - /* Channel must be idle */ - BUG_ON(!list_empty(&mchan->prepared)); - BUG_ON(!list_empty(&mchan->queued)); - BUG_ON(!list_empty(&mchan->active)); - BUG_ON(!list_empty(&mchan->completed)); - - /* Move data */ - list_splice_tail_init(&mchan->free, &descs); - tcd = mchan->tcd; - tcd_paddr = mchan->tcd_paddr; - - spin_unlock_irqrestore(&mchan->lock, flags); - - /* Free DMA memory used by descriptors */ - dma_free_coherent(mdma->dma.dev, - MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), - tcd, tcd_paddr); - - /* Free descriptors */ - list_for_each_entry_safe(mdesc, tmp, &descs, node) - kfree(mdesc); - - /* Disable Error Interrupt */ - out_8(&mdma->regs->dmaceei, chan->chan_id); -} - -/* Send all pending descriptor to hardware */ -static void mpc_dma_issue_pending(struct dma_chan *chan) -{ - /* - * We are posting descriptors to the hardware as soon as - * they are ready, so this function does nothing. 
- */ -} - -/* Check request completion status */ -static enum dma_status -mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) -{ - struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); - unsigned long flags; - dma_cookie_t last_used; - dma_cookie_t last_complete; - - spin_lock_irqsave(&mchan->lock, flags); - last_used = mchan->chan.cookie; - last_complete = mchan->completed_cookie; - spin_unlock_irqrestore(&mchan->lock, flags); - - if (done) - *done = last_complete; - - if (used) - *used = last_used; - - return dma_async_is_complete(cookie, last_complete, last_used); -} - -/* Prepare descriptor for memory to memory copy */ -static struct dma_async_tx_descriptor * -mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, - size_t len, unsigned long flags) -{ - struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); - struct mpc_dma_desc *mdesc = NULL; - struct mpc_dma_tcd *tcd; - unsigned long iflags; - - /* Get free descriptor */ - spin_lock_irqsave(&mchan->lock, iflags); - if (!list_empty(&mchan->free)) { - mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc, - node); - list_del(&mdesc->node); - } - spin_unlock_irqrestore(&mchan->lock, iflags); - - if (!mdesc) - return NULL; - - mdesc->error = 0; - tcd = mdesc->tcd; - - /* Prepare Transfer Control Descriptor for this transaction */ - memset(tcd, 0, sizeof(struct mpc_dma_tcd)); - - if (IS_ALIGNED(src | dst | len, 32)) { - tcd->ssize = MPC_DMA_TSIZE_32; - tcd->dsize = MPC_DMA_TSIZE_32; - tcd->soff = 32; - tcd->doff = 32; - } else if (IS_ALIGNED(src | dst | len, 16)) { - tcd->ssize = MPC_DMA_TSIZE_16; - tcd->dsize = MPC_DMA_TSIZE_16; - tcd->soff = 16; - tcd->doff = 16; - } else if (IS_ALIGNED(src | dst | len, 4)) { - tcd->ssize = MPC_DMA_TSIZE_4; - tcd->dsize = MPC_DMA_TSIZE_4; - tcd->soff = 4; - tcd->doff = 4; - } else if (IS_ALIGNED(src | dst | len, 2)) { - tcd->ssize = MPC_DMA_TSIZE_2; - tcd->dsize = MPC_DMA_TSIZE_2; - tcd->soff = 2; - tcd->doff = 2; - } else { - tcd->ssize = MPC_DMA_TSIZE_1; - tcd->dsize = MPC_DMA_TSIZE_1; - tcd->soff = 1; - tcd->doff = 1; - } - - tcd->saddr = src; - tcd->daddr = dst; - tcd->nbytes = len; - tcd->biter = 1; - tcd->citer = 1; - - /* Place descriptor in prepared list */ - spin_lock_irqsave(&mchan->lock, iflags); - list_add_tail(&mdesc->node, &mchan->prepared); - spin_unlock_irqrestore(&mchan->lock, iflags); - - return &mdesc->desc; -} - -static int __devinit mpc_dma_probe(struct of_device *op, - const struct of_device_id *match) -{ - struct device_node *dn = op->node; - struct device *dev = &op->dev; - struct dma_device *dma; - struct mpc_dma *mdma; - struct mpc_dma_chan *mchan; - struct resource res; - ulong regs_start, regs_size; - int retval, i; - - mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); - if (!mdma) { - dev_err(dev, "Memory exhausted!\n"); - return -ENOMEM; - } - - mdma->irq = irq_of_parse_and_map(dn, 0); - if (mdma->irq == NO_IRQ) { - dev_err(dev, "Error mapping IRQ!\n"); - return -EINVAL; - } - - retval = of_address_to_resource(dn, 0, &res); - if (retval) { - dev_err(dev, "Error parsing memory region!\n"); - return retval; - } - - regs_start = res.start; - regs_size = res.end - res.start + 1; - - if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { - dev_err(dev, "Error requesting memory region!\n"); - return -EBUSY; - } - - mdma->regs = devm_ioremap(dev, regs_start, regs_size); - if (!mdma->regs) { - dev_err(dev, "Error mapping memory region!\n"); - return -ENOMEM; - } 
- - mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) - + MPC_DMA_TCD_OFFSET); - - retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, - mdma); - if (retval) { - dev_err(dev, "Error requesting IRQ!\n"); - return -EINVAL; - } - - spin_lock_init(&mdma->error_status_lock); - - dma = &mdma->dma; - dma->dev = dev; - dma->chancnt = MPC_DMA_CHANNELS; - dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; - dma->device_free_chan_resources = mpc_dma_free_chan_resources; - dma->device_issue_pending = mpc_dma_issue_pending; - dma->device_is_tx_complete = mpc_dma_is_tx_complete; - dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; - - INIT_LIST_HEAD(&dma->channels); - dma_cap_set(DMA_MEMCPY, dma->cap_mask); - - for (i = 0; i < dma->chancnt; i++) { - mchan = &mdma->channels[i]; - - mchan->chan.device = dma; - mchan->chan.chan_id = i; - mchan->chan.cookie = 1; - mchan->completed_cookie = mchan->chan.cookie; - - INIT_LIST_HEAD(&mchan->free); - INIT_LIST_HEAD(&mchan->prepared); - INIT_LIST_HEAD(&mchan->queued); - INIT_LIST_HEAD(&mchan->active); - INIT_LIST_HEAD(&mchan->completed); - - spin_lock_init(&mchan->lock); - list_add_tail(&mchan->chan.device_node, &dma->channels); - } - - tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma); - - /* - * Configure DMA Engine: - * - Dynamic clock, - * - Round-robin group arbitration, - * - Round-robin channel arbitration. - */ - out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | - MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); - - /* Disable hardware DMA requests */ - out_be32(&mdma->regs->dmaerqh, 0); - out_be32(&mdma->regs->dmaerql, 0); - - /* Disable error interrupts */ - out_be32(&mdma->regs->dmaeeih, 0); - out_be32(&mdma->regs->dmaeeil, 0); - - /* Clear interrupts status */ - out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); - out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); - out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); - out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); - - /* Route interrupts to IPIC */ - out_be32(&mdma->regs->dmaihsa, 0); - out_be32(&mdma->regs->dmailsa, 0); - - /* Register DMA engine */ - dev_set_drvdata(dev, mdma); - retval = dma_async_device_register(dma); - if (retval) { - devm_free_irq(dev, mdma->irq, mdma); - irq_dispose_mapping(mdma->irq); - } - - return retval; -} - -static int __devexit mpc_dma_remove(struct of_device *op) -{ - struct device *dev = &op->dev; - struct mpc_dma *mdma = dev_get_drvdata(dev); - - dma_async_device_unregister(&mdma->dma); - devm_free_irq(dev, mdma->irq, mdma); - irq_dispose_mapping(mdma->irq); - - return 0; -} - -static struct of_device_id mpc_dma_match[] = { - { .compatible = "fsl,mpc5121-dma", }, - {}, -}; - -static struct of_platform_driver mpc_dma_driver = { - .match_table = mpc_dma_match, - .probe = mpc_dma_probe, - .remove = __devexit_p(mpc_dma_remove), - .driver = { - .name = DRV_NAME, - .owner = THIS_MODULE, - }, -}; - -static int __init mpc_dma_init(void) -{ - return of_register_platform_driver(&mpc_dma_driver); -} -module_init(mpc_dma_init); - -static void __exit mpc_dma_exit(void) -{ - of_unregister_platform_driver(&mpc_dma_driver); -} -module_exit(mpc_dma_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Piotr Ziecik "); diff --git a/trunk/drivers/dma/ppc4xx/adma.c b/trunk/drivers/dma/ppc4xx/adma.c index e69d87f24a25..0a3478e910f0 100644 --- a/trunk/drivers/dma/ppc4xx/adma.c +++ b/trunk/drivers/dma/ppc4xx/adma.c @@ -4940,7 +4940,7 @@ static int ppc440spe_configure_raid_devices(void) return ret; } -static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = 
{ +static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = { { .compatible = "ibm,dma-440spe", }, { .compatible = "amcc,xor-accelerator", }, {}, diff --git a/trunk/include/linux/dmaengine.h b/trunk/include/linux/dmaengine.h index 4d8d619f28bc..78784982b33e 100644 --- a/trunk/include/linux/dmaengine.h +++ b/trunk/include/linux/dmaengine.h @@ -31,8 +31,6 @@ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code */ typedef s32 dma_cookie_t; -#define DMA_MIN_COOKIE 1 -#define DMA_MAX_COOKIE INT_MAX #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
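
A brief illustrative note, separate from the patch itself: the final hunk keeps the dma_cookie_t convention spelled out in the header comment (a value greater than zero is a DMA request cookie, a negative value is an error code) and the dma_submit_error() macro that encodes it. The sketch below shows how a dmaengine client of this era might submit a prepared descriptor and check the returned cookie; it is a minimal example under those assumptions, the helper name submit_and_kick() is made up for illustration, and it is not code from the patch.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch only (not from the patch): submit a descriptor that a
 * device_prep_* call returned, and interpret the cookie per the convention in
 * the hunk above -- >0 is a valid request cookie, <0 is an error code.
 */
static dma_cookie_t submit_and_kick(struct dma_chan *chan,
				    struct dma_async_tx_descriptor *tx)
{
	dma_cookie_t cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie))	/* cookie < 0: the driver rejected it */
		return cookie;

	dma_async_issue_pending(chan);	/* ask the engine to start processing */
	return cookie;
}

The caller would later poll completion with dma_async_is_tx_complete() (as the removed mpc512x driver does in its is_tx_complete hook) or rely on the descriptor's callback.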