diff --git a/[refs] b/[refs]
index 968996a18457..366417a89fec 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0d688662aab9d80078be82aa5aea561346643298
+refs/heads/master: 782bc950d84e404422ba21008fd51ee894c8d231
diff --git a/trunk/arch/powerpc/include/asm/fsldma.h b/trunk/arch/powerpc/include/asm/fsldma.h
new file mode 100644
index 000000000000..debc5ed96d6e
--- /dev/null
+++ b/trunk/arch/powerpc/include/asm/fsldma.h
@@ -0,0 +1,137 @@
+/*
+ * Freescale MPC83XX / MPC85XX DMA Controller
+ *
+ * Copyright (c) 2009 Ira W. Snyder
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
+#define __ARCH_POWERPC_ASM_FSLDMA_H__
+
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+
+/*
+ * Definitions for the Freescale DMA controller's DMA_SLAVE implementation
+ *
+ * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
+ * transfers. An example usage would be an accelerated copy between two
+ * scatterlists. Another example use would be an accelerated copy from
+ * multiple non-contiguous device buffers into a single scatterlist.
+ *
+ * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
+ * structure contains a list of hardware addresses that should be copied
+ * to/from the scatterlist passed into device_prep_slave_sg(). The structure
+ * also has some fields to enable hardware-specific features.
+ */
+
+/**
+ * struct fsl_dma_hw_addr
+ * @entry: linked list entry
+ * @address: the hardware address
+ * @length: length to transfer
+ *
+ * Holds a single physical hardware address / length pair for use
+ * with the DMAEngine DMA_SLAVE API.
+ */
+struct fsl_dma_hw_addr {
+	struct list_head entry;
+
+	dma_addr_t address;
+	size_t length;
+};
+
+/**
+ * struct fsl_dma_slave
+ * @addresses: a linked list of struct fsl_dma_hw_addr structures
+ * @request_count: value for DMA request count
+ * @src_loop_size: setup and enable constant source-address DMA transfers
+ * @dst_loop_size: setup and enable constant destination-address DMA transfers
+ * @external_start: enable externally started DMA transfers
+ * @external_pause: enable externally paused DMA transfers
+ *
+ * Holds a list of address / length pairs for use with the DMAEngine
+ * DMA_SLAVE API implementation for the Freescale DMA controller.
+ */
+struct fsl_dma_slave {
+
+	/* List of hardware address/length pairs */
+	struct list_head addresses;
+
+	/* Support for extra controller features */
+	unsigned int request_count;
+	unsigned int src_loop_size;
+	unsigned int dst_loop_size;
+	bool external_start;
+	bool external_pause;
+};
+
+/**
+ * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
+ * @slave: the &struct fsl_dma_slave to add to
+ * @address: the hardware address to add
+ * @length: the number of bytes to transfer from @address
+ *
+ * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
+ * success, -ERRNO otherwise.
+ */
+static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
+				       dma_addr_t address, size_t length)
+{
+	struct fsl_dma_hw_addr *addr;
+
+	addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+	if (!addr)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&addr->entry);
+	addr->address = address;
+	addr->length = length;
+
+	list_add_tail(&addr->entry, &slave->addresses);
+	return 0;
+}
+
+/**
+ * fsl_dma_slave_free - free a struct fsl_dma_slave
+ * @slave: the struct fsl_dma_slave to free
+ *
+ * Free a struct fsl_dma_slave and all associated address/length pairs
+ */
+static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
+{
+	struct fsl_dma_hw_addr *addr, *tmp;
+
+	if (slave) {
+		list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
+			list_del(&addr->entry);
+			kfree(addr);
+		}
+
+		kfree(slave);
+	}
+}
+
+/**
+ * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
+ * @gfp: the flags to pass to kzalloc when allocating this structure
+ *
+ * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
+ * struct fsl_dma_slave on success, or NULL on failure.
+ */
+static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
+{
+	struct fsl_dma_slave *slave;
+
+	slave = kzalloc(sizeof(*slave), gfp);
+	if (!slave)
+		return NULL;
+
+	INIT_LIST_HEAD(&slave->addresses);
+	return slave;
+}
+
+#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
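
For context, the client-side flow the header above is designed for: allocate a struct fsl_dma_slave, append one hardware address/length pair per device buffer, point chan->private at it, and call the channel's device_prep_slave_sg(). The sketch below is illustrative only; example_prep() is a hypothetical helper, and the channel, scatterlist, device address, and length are assumed to come from the caller.

/* Hypothetical helper: not part of this patch. */
static struct dma_async_tx_descriptor *
example_prep(struct dma_chan *chan, struct scatterlist *sgl,
	     unsigned int sg_len, dma_addr_t dev_addr, size_t dev_len)
{
	struct dma_async_tx_descriptor *tx;
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	/* one device-side address/length pair; append more for
	 * many-to-many transfers */
	if (fsl_dma_slave_append(slave, dev_addr, dev_len)) {
		fsl_dma_slave_free(slave);
		return NULL;
	}

	/* DMA_SLAVE convention: device-specific data rides in chan->private */
	chan->private = slave;

	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						DMA_FROM_DEVICE,
						DMA_PREP_INTERRUPT);
	if (!tx) {
		chan->private = NULL;
		fsl_dma_slave_free(slave);
	}

	return tx;
}
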
diff --git a/trunk/drivers/dma/dmaengine.c b/trunk/drivers/dma/dmaengine.c
index db403b8ccabd..e5e79ced4f4b 100644
--- a/trunk/drivers/dma/dmaengine.c
+++ b/trunk/drivers/dma/dmaengine.c
@@ -690,10 +690,10 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 		!device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
-		!device->device_prep_dma_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_prep_slave_sg);
+	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
+		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_control);
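
The hunk above replaces the DMA_SG sanity check with one for DMA_CYCLIC: any driver that advertises a capability must supply the matching prep callback before calling dma_async_device_register(). A minimal sketch of a registration that satisfies the new checks, with invented foo_* callbacks and dev standing in for the driver's struct dma_device:

/* foo_* names and dev are invented for illustration */
dma_cap_set(DMA_SLAVE, dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dev->cap_mask);

dev->device_prep_slave_sg = foo_prep_slave_sg;
dev->device_prep_dma_cyclic = foo_prep_dma_cyclic;	/* checked above */
dev->device_control = foo_control;			/* required for DMA_SLAVE */

err = dma_async_device_register(dev);
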
diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c
index 286c3ac6bdcc..cea08bed9cf9 100644
--- a/trunk/drivers/dma/fsldma.c
+++ b/trunk/drivers/dma/fsldma.c
@@ -35,10 +35,9 @@
 #include <linux/dmapool.h>
 #include <linux/of_platform.h>
 
+#include <asm/fsldma.h>
 #include "fsldma.h"
 
-static const char msg_ld_oom[] = "No free memory for link descriptor\n";
-
 static void dma_init(struct fsldma_chan *chan)
 {
 	/* Reset the channel */
@@ -500,7 +499,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 
 	new = fsl_dma_alloc_descriptor(chan);
 	if (!new) {
-		dev_err(chan->dev, msg_ld_oom);
+		dev_err(chan->dev, "No free memory for link descriptor\n");
 		return NULL;
 	}
 
@@ -537,7 +536,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 		/* Allocate the link descriptor from DMA pool */
 		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(chan->dev, msg_ld_oom);
+			dev_err(chan->dev,
+				"No free memory for link descriptor\n");
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
@@ -583,205 +583,223 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
 	return NULL;
 }
 
-static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
-	struct scatterlist *dst_sg, unsigned int dst_nents,
-	struct scatterlist *src_sg, unsigned int src_nents,
-	unsigned long flags)
+/**
+ * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @chan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
+ * @flags: DMAEngine flags
+ *
+ * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
+ * DMA_SLAVE API, this gets the device-specific information from the
+ * chan->private variable.
+ */
+static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
+	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+	enum dma_data_direction direction, unsigned long flags)
 {
+	struct fsldma_chan *chan;
 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
-	struct fsldma_chan *chan = to_fsl_chan(dchan);
-	size_t dst_avail, src_avail;
-	dma_addr_t dst, src;
-	size_t len;
+	struct fsl_dma_slave *slave;
+	size_t copy;
 
-	/* basic sanity checks */
-	if (dst_nents == 0 || src_nents == 0)
-		return NULL;
+	int i;
+	struct scatterlist *sg;
+	size_t sg_used;
+	size_t hw_used;
+	struct fsl_dma_hw_addr *hw;
+	dma_addr_t dma_dst, dma_src;
 
-	if (dst_sg == NULL || src_sg == NULL)
+	if (!dchan)
 		return NULL;
 
-	/*
-	 * TODO: should we check that both scatterlists have the same
-	 * TODO: number of bytes in total? Is that really an error?
-	 */
-
-	/* get prepared for the loop */
-	dst_avail = sg_dma_len(dst_sg);
-	src_avail = sg_dma_len(src_sg);
+	if (!dchan->private)
+		return NULL;
 
-	/* run until we are out of scatterlist entries */
-	while (true) {
+	chan = to_fsl_chan(dchan);
+	slave = dchan->private;
 
-		/* create the largest transaction possible */
-		len = min_t(size_t, src_avail, dst_avail);
-		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
-		if (len == 0)
-			goto fetch;
+	if (list_empty(&slave->addresses))
+		return NULL;
 
-		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
-		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
+	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
+	hw_used = 0;
 
-		/* allocate and populate the descriptor */
-		new = fsl_dma_alloc_descriptor(chan);
-		if (!new) {
-			dev_err(chan->dev, msg_ld_oom);
-			goto fail;
-		}
+	/*
+	 * Build the hardware transaction to copy from the scatterlist to
+	 * the hardware, or from the hardware to the scatterlist
+	 *
+	 * If you are copying from the hardware to the scatterlist and it
+	 * takes two hardware entries to fill an entire page, then both
+	 * hardware entries will be coalesced into the same page
+	 *
+	 * If you are copying from the scatterlist to the hardware and a
+	 * single page can fill two hardware entries, then the data will
+	 * be read out of the page into the first hardware entry, and so on
+	 */
+	for_each_sg(sgl, sg, sg_len, i) {
+		sg_used = 0;
+
+		/* Loop until the entire scatterlist entry is used */
+		while (sg_used < sg_dma_len(sg)) {
+
+			/*
+			 * If we've used up the current hardware address/length
+			 * pair, we need to load a new one
+			 *
+			 * This is done in a while loop so that descriptors with
+			 * length == 0 will be skipped
+			 */
+			while (hw_used >= hw->length) {
+
+				/*
+				 * If the current hardware entry is the last
+				 * entry in the list, we're finished
+				 */
+				if (list_is_last(&hw->entry, &slave->addresses))
+					goto finished;
+
+				/* Get the next hardware address/length pair */
+				hw = list_entry(hw->entry.next,
+						struct fsl_dma_hw_addr, entry);
+				hw_used = 0;
+			}
+
+			/* Allocate the link descriptor from DMA pool */
+			new = fsl_dma_alloc_descriptor(chan);
+			if (!new) {
+				dev_err(chan->dev, "No free memory for "
+						   "link descriptor\n");
+				goto fail;
+			}
 #ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
+			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
 #endif
-		set_desc_cnt(chan, &new->hw, len);
-		set_desc_src(chan, &new->hw, src);
-		set_desc_dst(chan, &new->hw, dst);
-
-		if (!first)
-			first = new;
-		else
-			set_desc_next(chan, &prev->hw, new->async_tx.phys);
-
-		new->async_tx.cookie = 0;
-		async_tx_ack(&new->async_tx);
-		prev = new;
-
-		/* Insert the link descriptor to the LD ring */
-		list_add_tail(&new->node, &first->tx_list);
-
-		/* update metadata */
-		dst_avail -= len;
-		src_avail -= len;
-
-fetch:
-		/* fetch the next dst scatterlist entry */
-		if (dst_avail == 0) {
+			/*
+			 * Calculate the maximum number of bytes to transfer,
+			 * making sure it is less than the DMA controller limit
+			 */
+			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+				     hw->length - hw_used);
+			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
+
+			/*
+			 * DMA_FROM_DEVICE
+			 * from the hardware to the scatterlist
+			 *
+			 * DMA_TO_DEVICE
+			 * from the scatterlist to the hardware
+			 */
+			if (direction == DMA_FROM_DEVICE) {
+				dma_src = hw->address + hw_used;
+				dma_dst = sg_dma_address(sg) + sg_used;
+			} else {
+				dma_src = sg_dma_address(sg) + sg_used;
+				dma_dst = hw->address + hw_used;
+			}
+
+			/* Fill in the descriptor */
+			set_desc_cnt(chan, &new->hw, copy);
+			set_desc_src(chan, &new->hw, dma_src);
+			set_desc_dst(chan, &new->hw, dma_dst);
+
+			/*
+			 * If this is not the first descriptor, chain the
+			 * current descriptor after the previous descriptor
+			 */
+			if (!first) {
+				first = new;
+			} else {
+				set_desc_next(chan, &prev->hw,
+					      new->async_tx.phys);
+			}
+
+			new->async_tx.cookie = 0;
+			async_tx_ack(&new->async_tx);
+
+			prev = new;
+			sg_used += copy;
+			hw_used += copy;
+
+			/* Insert the link descriptor into the LD ring */
+			list_add_tail(&new->node, &first->tx_list);
+		}
+	}
 
-			/* no more entries: we're done */
-			if (dst_nents == 0)
-				break;
+finished:
 
-			/* fetch the next entry: if there are no more: done */
-			dst_sg = sg_next(dst_sg);
-			if (dst_sg == NULL)
-				break;
+	/* All of the hardware address/length pairs had length == 0 */
+	if (!first || !new)
+		return NULL;
 
-			dst_nents--;
-			dst_avail = sg_dma_len(dst_sg);
-		}
+	new->async_tx.flags = flags;
+	new->async_tx.cookie = -EBUSY;
 
-		/* fetch the next src scatterlist entry */
-		if (src_avail == 0) {
+	/* Set End-of-link to the last link descriptor of new list */
+	set_ld_eol(chan, new);
 
-			/* no more entries: we're done */
-			if (src_nents == 0)
-				break;
+	/* Enable extra controller features */
+	if (chan->set_src_loop_size)
+		chan->set_src_loop_size(chan, slave->src_loop_size);
 
-			/* fetch the next entry: if there are no more: done */
-			src_sg = sg_next(src_sg);
-			if (src_sg == NULL)
-				break;
+	if (chan->set_dst_loop_size)
+		chan->set_dst_loop_size(chan, slave->dst_loop_size);
 
-			src_nents--;
-			src_avail = sg_dma_len(src_sg);
-		}
-	}
+	if (chan->toggle_ext_start)
+		chan->toggle_ext_start(chan, slave->external_start);
 
-	new->async_tx.flags = flags; /* client is in control of this ack */
-	new->async_tx.cookie = -EBUSY;
+	if (chan->toggle_ext_pause)
+		chan->toggle_ext_pause(chan, slave->external_pause);
 
-	/* Set End-of-link to the last link descriptor of new list */
-	set_ld_eol(chan, new);
+	if (chan->set_request_count)
+		chan->set_request_count(chan, slave->request_count);
 
 	return &first->async_tx;
 
 fail:
+	/* If first was not set, then we failed to allocate the very first
+	 * descriptor, and we're done */
 	if (!first)
 		return NULL;
 
-	fsldma_free_desc_list_reverse(chan, &first->tx_list);
-	return NULL;
-}
-
-/**
- * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @scatterlist
- * @direction: DMA direction
- * @flags: DMAEngine flags
- *
- * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
- * DMA_SLAVE API, this gets the device-specific information from the
- * chan->private variable.
- */
-static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_data_direction direction, unsigned long flags)
-{
 	/*
-	 * This operation is not supported on the Freescale DMA controller
+	 * First is set, so all of the descriptors we allocated have been added
+	 * to first->tx_list, INCLUDING "first" itself. Therefore we
+	 * must traverse the list backwards freeing each descriptor in turn
 	 *
-	 * However, we need to provide the function pointer to allow the
-	 * device_control() method to work.
+	 * We're re-using variables for the loop, oh well
 	 */
+	fsldma_free_desc_list_reverse(chan, &first->tx_list);
 	return NULL;
 }
 
 static int fsl_dma_device_control(struct dma_chan *dchan,
 				  enum dma_ctrl_cmd cmd, unsigned long arg)
 {
-	struct dma_slave_config *config;
 	struct fsldma_chan *chan;
 	unsigned long flags;
-	int size;
+
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 
 	if (!dchan)
 		return -EINVAL;
 
 	chan = to_fsl_chan(dchan);
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		/* Halt the DMA engine */
-		dma_halt(chan);
-
-		spin_lock_irqsave(&chan->desc_lock, flags);
-
-		/* Remove and free all of the descriptors in the LD queue */
-		fsldma_free_desc_list(chan, &chan->ld_pending);
-		fsldma_free_desc_list(chan, &chan->ld_running);
-
-		spin_unlock_irqrestore(&chan->desc_lock, flags);
-		return 0;
-
-	case DMA_SLAVE_CONFIG:
-		config = (struct dma_slave_config *)arg;
-
-		/* make sure the channel supports setting burst size */
-		if (!chan->set_request_count)
-			return -ENXIO;
-
-		/* we set the controller burst size depending on direction */
-		if (config->direction == DMA_TO_DEVICE)
-			size = config->dst_addr_width * config->dst_maxburst;
-		else
-			size = config->src_addr_width * config->src_maxburst;
-
-		chan->set_request_count(chan, size);
-		return 0;
-
-	case FSLDMA_EXTERNAL_START:
+	/* Halt the DMA engine */
+	dma_halt(chan);
 
-		/* make sure the channel supports external start */
-		if (!chan->toggle_ext_start)
-			return -ENXIO;
+	spin_lock_irqsave(&chan->desc_lock, flags);
 
-		chan->toggle_ext_start(chan, arg);
-		return 0;
+	/* Remove and free all of the descriptors in the LD queue */
+	fsldma_free_desc_list(chan, &chan->ld_pending);
+	fsldma_free_desc_list(chan, &chan->ld_running);
 
-	default:
-		return -ENXIO;
-	}
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
 	return 0;
 }
@@ -1309,13 +1327,11 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
 
 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
-	dma_cap_set(DMA_SG, fdev->common.cap_mask);
 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
-	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
 	fdev->common.device_tx_status = fsl_tx_status;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
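
The coalescing behavior documented in fsl_dma_prep_slave_sg() above is easiest to see with concrete numbers (invented for illustration). Two 2048-byte hardware windows feeding a single 4096-byte scatterlist entry in the DMA_FROM_DEVICE direction yield two link descriptors that land in the same page:

/* Invented addresses and sizes; slave built as in the header example. */
fsl_dma_slave_append(slave, 0xf0000000, 2048);	/* hw pair #1 */
fsl_dma_slave_append(slave, 0xf0010000, 2048);	/* hw pair #2 */

/*
 * With sg_dma_len(sg) == 4096, the inner loop emits:
 *   desc 1: src = 0xf0000000, dst = sg_dma_address(sg),        len = 2048
 *   desc 2: src = 0xf0010000, dst = sg_dma_address(sg) + 2048, len = 2048
 * i.e. both hardware entries are coalesced into the same page.
 */
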
diff --git a/trunk/drivers/dma/mv_xor.c b/trunk/drivers/dma/mv_xor.c
index 411d5bf50fc4..86c5ae9fde34 100644
--- a/trunk/drivers/dma/mv_xor.c
+++ b/trunk/drivers/dma/mv_xor.c
@@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-	u32 val = ~(1 << (chan->idx * 16));
+	u32 val = (1 << (1 + (chan->idx * 16)));
 	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
 	__raw_writel(val, XOR_INTR_CAUSE(chan));
 }
diff --git a/trunk/drivers/dma/shdma.c b/trunk/drivers/dma/shdma.c
index eb6b54dbb806..fb64cf36ba61 100644
--- a/trunk/drivers/dma/shdma.c
+++ b/trunk/drivers/dma/shdma.c
@@ -580,6 +580,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 
 	sh_chan = to_sh_chan(chan);
 	param = chan->private;
+	slave_addr = param->config->addr;
 
 	/* Someone calling slave DMA on a public channel? */
 	if (!param || !sg_len) {
@@ -588,8 +589,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 		return NULL;
 	}
 
-	slave_addr = param->config->addr;
-
 	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
diff --git a/trunk/drivers/dma/ste_dma40.c b/trunk/drivers/dma/ste_dma40.c
index d5fd098e22e8..17e2600a00cf 100644
--- a/trunk/drivers/dma/ste_dma40.c
+++ b/trunk/drivers/dma/ste_dma40.c
@@ -1857,18 +1857,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 	return NULL;
 }
 
-static struct dma_async_tx_descriptor *
-d40_prep_sg(struct dma_chan *chan,
-	    struct scatterlist *dst_sg, unsigned int dst_nents,
-	    struct scatterlist *src_sg, unsigned int src_nents,
-	    unsigned long dma_flags)
-{
-	if (dst_nents != src_nents)
-		return NULL;
-
-	return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags);
-}
-
 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 				 struct d40_chan *d40c,
 				 struct scatterlist *sgl,
@@ -2293,7 +2281,6 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
 	base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
 	base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
-	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
 	base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
 	base->dma_slave.device_tx_status = d40_tx_status;
 	base->dma_slave.device_issue_pending = d40_issue_pending;
@@ -2314,12 +2301,10 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 
 	dma_cap_zero(base->dma_memcpy.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
 
 	base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
 	base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
 	base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
-	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
 	base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
 	base->dma_memcpy.device_tx_status = d40_tx_status;
 	base->dma_memcpy.device_issue_pending = d40_issue_pending;
@@ -2346,12 +2331,10 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	dma_cap_zero(base->dma_both.cap_mask);
 	dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
-	dma_cap_set(DMA_SG, base->dma_slave.cap_mask);
 
 	base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
 	base->dma_both.device_free_chan_resources = d40_free_chan_resources;
 	base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
-	base->dma_slave.device_prep_dma_sg = d40_prep_sg;
 	base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
 	base->dma_both.device_tx_status = d40_tx_status;
 	base->dma_both.device_issue_pending = d40_issue_pending;
diff --git a/trunk/include/linux/dmaengine.h b/trunk/include/linux/dmaengine.h
index 885f35211675..32cd84b47478 100644
--- a/trunk/include/linux/dmaengine.h
+++ b/trunk/include/linux/dmaengine.h
@@ -64,14 +64,14 @@ enum dma_transaction_type {
 	DMA_PQ_VAL,
 	DMA_MEMSET,
 	DMA_INTERRUPT,
-	DMA_SG,
 	DMA_PRIVATE,
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
+	DMA_CYCLIC,
 };
 
 /* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_SLAVE + 1)
+#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
@@ -120,15 +120,12 @@ enum dma_ctrl_flags {
  * configuration data in statically from the platform). An additional
  * argument of struct dma_slave_config must be passed in with this
  * command.
- * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
- * into external start mode.
  */
 enum dma_ctrl_cmd {
 	DMA_TERMINATE_ALL,
 	DMA_PAUSE,
 	DMA_RESUME,
 	DMA_SLAVE_CONFIG,
-	FSLDMA_EXTERNAL_START,
 };
@@ -426,6 +423,9 @@ struct dma_tx_state {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ *	The function takes a buffer of size buf_len. The callback function will
+ *	be called after period_len bytes have been transferred.
  * @device_control: manipulate all pending operations on a channel, returns
  *	zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -477,16 +477,14 @@ struct dma_device {
 		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
-	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
-		struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_data_direction direction,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+		size_t period_len, enum dma_data_direction direction);
 
 	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		unsigned long arg);
@@ -557,7 +555,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
 	return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
 }
 
-static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+static unsigned short dma_dev_to_maxpq(struct dma_device *dma)
 {
 	return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
 }
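
Rounding out the new device_prep_dma_cyclic hook documented above: a client such as an audio driver hands it a DMA-mapped ring buffer and receives a callback once per period. A sketch under stated assumptions: example_start_cyclic() and example_period_elapsed() are hypothetical, and buf is assumed to be DMA-mapped already.

/* Hypothetical completion handler: runs once per elapsed period. */
static void example_period_elapsed(void *arg)
{
	/* e.g. advance the audio ring-buffer pointer */
}

static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* one descriptor covers the whole ring; it repeats until terminated */
	desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (!desc)
		return -EINVAL;

	/* called each time period_len bytes have been transferred */
	desc->callback = example_period_elapsed;
	desc->callback_param = chan;

	cookie = desc->tx_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	chan->device->device_issue_pending(chan);
	return 0;
}
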