Skip to content

Commit

Permalink
dmaengine: ack to flags: make use of the unused bits in the 'ack' field
Browse files Browse the repository at this point in the history
'ack' is currently a simple integer that flags whether a client is done
touching fields in the given descriptor.  It is effectively just a single bit
of information.  Converting it to a flags parameter frees the remaining bits to
control completion actions (such as dma-unmap) and to capture operation
results (such as whether an xor-zero-sum check yielded zero).

Each change is one of the following:
1/ convert all open-coded ->ack manipulations to use async_tx_ack
   and async_tx_test_ack.
2/ set the ack bit at prep time where possible.
3/ make drivers store the flags at prep time.
4/ add a flags argument to the device_prep_dma_interrupt prototype.

Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
  • Loading branch information
Dan Williams committed Apr 17, 2008
1 parent c4fe155 commit 636bdea
Show file tree
Hide file tree
Showing 8 changed files with 69 additions and 54 deletions.
2 changes: 1 addition & 1 deletion crypto/async_tx/async_memcpy.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(depend_tx->ack);
BUG_ON(async_tx_test_ack(depend_tx));
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__func__);
Expand Down
9 changes: 5 additions & 4 deletions crypto/async_tx/async_tx.c
Original file line number Diff line number Diff line change
Expand Up @@ -446,7 +446,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
* otherwise poll for completion
*/
if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
intr_tx = device->device_prep_dma_interrupt(chan);
intr_tx = device->device_prep_dma_interrupt(chan, 0);
else
intr_tx = NULL;

Expand Down Expand Up @@ -515,7 +515,8 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
* 2/ dependencies are 1:1 i.e. two transactions can
* not depend on the same parent
*/
BUG_ON(depend_tx->ack || depend_tx->next || tx->parent);
BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
tx->parent);

/* the lock prevents async_tx_run_dependencies from missing
* the setting of ->next when ->parent != NULL
Expand Down Expand Up @@ -594,7 +595,7 @@ async_trigger_callback(enum async_tx_flags flags,
if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
device = NULL;

tx = device ? device->device_prep_dma_interrupt(chan) : NULL;
tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
} else
tx = NULL;

Expand All @@ -610,7 +611,7 @@ async_trigger_callback(enum async_tx_flags flags,
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(depend_tx->ack);
BUG_ON(async_tx_test_ack(depend_tx));
if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
panic("%s: DMA_ERROR waiting for depend_tx\n",
__func__);
Expand Down
2 changes: 1 addition & 1 deletion crypto/async_tx/async_xor.c
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
/* if ack is already set then we cannot be sure
* we are referring to the correct operation
*/
BUG_ON(depend_tx->ack);
BUG_ON(async_tx_test_ack(depend_tx));
if (dma_wait_for_async_tx(depend_tx) ==
DMA_ERROR)
panic("%s: DMA_ERROR waiting for "
Expand Down
12 changes: 6 additions & 6 deletions drivers/dma/dmaengine.c
Original file line number Diff line number Diff line change
Expand Up @@ -478,15 +478,15 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,

dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
DMA_CTRL_ACK);

if (!tx) {
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
}

tx->ack = 1;
tx->callback = NULL;
cookie = tx->tx_submit(tx);

Expand Down Expand Up @@ -524,15 +524,15 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,

dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
DMA_CTRL_ACK);

if (!tx) {
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
}

tx->ack = 1;
tx->callback = NULL;
cookie = tx->tx_submit(tx);

Expand Down Expand Up @@ -573,15 +573,15 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
DMA_FROM_DEVICE);
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
DMA_CTRL_ACK);

if (!tx) {
dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
}

tx->ack = 1;
tx->callback = NULL;
cookie = tx->tx_submit(tx);

Expand Down
10 changes: 5 additions & 5 deletions drivers/dma/fsldma.c
Original file line number Diff line number Diff line change
Expand Up @@ -412,7 +412,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *chan)
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan)
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
{
struct fsl_dma_chan *fsl_chan;
struct fsl_desc_sw *new;
Expand All @@ -429,7 +429,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
}

new->async_tx.cookie = -EBUSY;
new->async_tx.ack = 0;
new->async_tx.flags = flags;

/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &new->async_tx.tx_list);
Expand Down Expand Up @@ -482,7 +482,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

new->async_tx.cookie = 0;
new->async_tx.ack = 1;
async_tx_ack(&new->async_tx);

prev = new;
len -= copy;
Expand All @@ -493,7 +493,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
list_add_tail(&new->node, &first->async_tx.tx_list);
} while (len);

new->async_tx.ack = 0; /* client is in control of this ack */
new->async_tx.flags = flags; /* client is in control of this ack */
new->async_tx.cookie = -EBUSY;

/* Set End-of-link to the last link descriptor of new list*/
Expand Down Expand Up @@ -874,7 +874,7 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
async_tx_ack(tx3);

/* Interrupt tx test */
tx1 = fsl_dma_prep_interrupt(chan);
tx1 = fsl_dma_prep_interrupt(chan, 0);
async_tx_ack(tx1);
cookie = fsl_dma_tx_submit(tx1);

Expand Down
24 changes: 12 additions & 12 deletions drivers/dma/ioat_dma.c
Original file line number Diff line number Diff line change
Expand Up @@ -212,14 +212,14 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
u32 copy;
size_t len;
dma_addr_t src, dst;
int orig_ack;
unsigned long orig_flags;
unsigned int desc_count = 0;

/* src and dest and len are stored in the initial descriptor */
len = first->len;
src = first->src;
dst = first->dst;
orig_ack = first->async_tx.ack;
orig_flags = first->async_tx.flags;
new = first;

spin_lock_bh(&ioat_chan->desc_lock);
Expand All @@ -228,7 +228,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
do {
copy = min_t(size_t, len, ioat_chan->xfercap);

new->async_tx.ack = 1;
async_tx_ack(&new->async_tx);

hw = new->hw;
hw->size = copy;
Expand Down Expand Up @@ -264,7 +264,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
}

new->tx_cnt = desc_count;
new->async_tx.ack = orig_ack; /* client is in control of this ack */
new->async_tx.flags = orig_flags; /* client is in control of this ack */

/* store the original values for use in later cleanup */
if (new != first) {
Expand Down Expand Up @@ -304,14 +304,14 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
u32 copy;
size_t len;
dma_addr_t src, dst;
int orig_ack;
unsigned long orig_flags;
unsigned int desc_count = 0;

/* src and dest and len are stored in the initial descriptor */
len = first->len;
src = first->src;
dst = first->dst;
orig_ack = first->async_tx.ack;
orig_flags = first->async_tx.flags;
new = first;

/*
Expand All @@ -321,7 +321,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
do {
copy = min_t(size_t, len, ioat_chan->xfercap);

new->async_tx.ack = 1;
async_tx_ack(&new->async_tx);

hw = new->hw;
hw->size = copy;
Expand Down Expand Up @@ -349,7 +349,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
}

new->tx_cnt = desc_count;
new->async_tx.ack = orig_ack; /* client is in control of this ack */
new->async_tx.flags = orig_flags; /* client is in control of this ack */

/* store the original values for use in later cleanup */
if (new != first) {
Expand Down Expand Up @@ -714,7 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
new->len = len;
new->dst = dma_dest;
new->src = dma_src;
new->async_tx.ack = 0;
new->async_tx.flags = flags;
return &new->async_tx;
} else
return NULL;
Expand Down Expand Up @@ -742,7 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
new->len = len;
new->dst = dma_dest;
new->src = dma_src;
new->async_tx.ack = 0;
new->async_tx.flags = flags;
return &new->async_tx;
} else
return NULL;
Expand Down Expand Up @@ -842,7 +842,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
* a completed entry, but not the last, so clean
* up if the client is done with the descriptor
*/
if (desc->async_tx.ack) {
if (async_tx_test_ack(&desc->async_tx)) {
list_del(&desc->node);
list_add_tail(&desc->node,
&ioat_chan->free_desc);
Expand Down Expand Up @@ -979,7 +979,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
desc->hw->size = 0;
desc->hw->src_addr = 0;
desc->hw->dst_addr = 0;
desc->async_tx.ack = 1;
async_tx_ack(&desc->async_tx);
switch (ioat_chan->device->version) {
case IOAT_VER_1_2:
desc->hw->next = 0;
Expand Down
Loading

0 comments on commit 636bdea

Please sign in to comment.