
Commit

---
r: 74986
b: refs/heads/master
c: 711924b
h: refs/heads/master
v: v3
Shannon Nelson authored and Linus Torvalds committed Dec 18, 2007
1 parent ea2a1fe commit dabef3f
Showing 3 changed files with 79 additions and 67 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 7c9e70efbfc3186674d93451e0fbf18365347b4d
refs/heads/master: 711924b1052a280bd2452c3babb9816e4a77c723
142 changes: 77 additions & 65 deletions trunk/drivers/dma/ioat_dma.c
@@ -173,10 +173,47 @@ static void ioat_set_dest(dma_addr_t addr,
tx_to_ioat_desc(tx)->dst = addr;
}

/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
* @chan: DMA channel handle
*/
static inline void __ioat1_dma_memcpy_issue_pending(
struct ioat_dma_chan *ioat_chan);
struct ioat_dma_chan *ioat_chan)
{
ioat_chan->pending = 0;
writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

if (ioat_chan->pending != 0) {
spin_lock_bh(&ioat_chan->desc_lock);
__ioat1_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
}
}

static inline void __ioat2_dma_memcpy_issue_pending(
struct ioat_dma_chan *ioat_chan);
struct ioat_dma_chan *ioat_chan)
{
ioat_chan->pending = 0;
writew(ioat_chan->dmacount,
ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

if (ioat_chan->pending != 0) {
spin_lock_bh(&ioat_chan->desc_lock);
__ioat2_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
}
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
@@ -203,7 +240,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
prev = to_ioat_desc(ioat_chan->used_desc.prev);
prefetch(prev->hw);
do {
copy = min((u32) len, ioat_chan->xfercap);
copy = min_t(size_t, len, ioat_chan->xfercap);

new->async_tx.ack = 1;

@@ -291,10 +328,12 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
orig_ack = first->async_tx.ack;
new = first;

/* ioat_chan->desc_lock is still in force in version 2 path */

/*
* ioat_chan->desc_lock is still in force in version 2 path
* it gets unlocked at end of this function
*/
do {
copy = min((u32) len, ioat_chan->xfercap);
copy = min_t(size_t, len, ioat_chan->xfercap);

new->async_tx.ack = 1;

@@ -432,7 +471,7 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *desc = NULL;
struct ioat_desc_sw *desc;
u16 chanctrl;
u32 chanerr;
int i;
@@ -575,17 +614,19 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *new = NULL;
struct ioat_desc_sw *new;

if (!list_empty(&ioat_chan->free_desc)) {
new = to_ioat_desc(ioat_chan->free_desc.next);
list_del(&new->node);
} else {
/* try to get another desc */
new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
/* will this ever happen? */
/* TODO add upper limit on these */
BUG_ON(!new);
if (!new) {
dev_err(&ioat_chan->device->pdev->dev,
"alloc failed\n");
return NULL;
}
}

prefetch(new->hw);
@@ -595,7 +636,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *new = NULL;
struct ioat_desc_sw *new;

/*
* used.prev points to where to start processing
@@ -609,8 +650,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
if (ioat_chan->used_desc.prev &&
ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

struct ioat_desc_sw *desc = NULL;
struct ioat_desc_sw *noop_desc = NULL;
struct ioat_desc_sw *desc;
struct ioat_desc_sw *noop_desc;
int i;

/* set up the noop descriptor */
@@ -624,10 +665,14 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
ioat_chan->pending++;
ioat_chan->dmacount++;

/* get a few more descriptors */
/* try to get a few more descriptors */
for (i = 16; i; i--) {
desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
BUG_ON(!desc);
if (!desc) {
dev_err(&ioat_chan->device->pdev->dev,
"alloc failed\n");
break;
}
list_add_tail(&desc->node, ioat_chan->used_desc.next);

desc->hw->next
@@ -677,10 +722,13 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(

spin_lock_bh(&ioat_chan->desc_lock);
new = ioat_dma_get_next_descriptor(ioat_chan);
new->len = len;
spin_unlock_bh(&ioat_chan->desc_lock);

return new ? &new->async_tx : NULL;
if (new) {
new->len = len;
return &new->async_tx;
} else
return NULL;
}

static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
@@ -693,53 +741,17 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(

spin_lock_bh(&ioat_chan->desc_lock);
new = ioat2_dma_get_next_descriptor(ioat_chan);
new->len = len;

/* leave ioat_chan->desc_lock set in version 2 path */
return new ? &new->async_tx : NULL;
}


/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
* @chan: DMA channel handle
*/
static inline void __ioat1_dma_memcpy_issue_pending(
struct ioat_dma_chan *ioat_chan)
{
ioat_chan->pending = 0;
writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

if (ioat_chan->pending != 0) {
spin_lock_bh(&ioat_chan->desc_lock);
__ioat1_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
}
}

static inline void __ioat2_dma_memcpy_issue_pending(
struct ioat_dma_chan *ioat_chan)
{
ioat_chan->pending = 0;
writew(ioat_chan->dmacount,
ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
/*
* leave ioat_chan->desc_lock set in ioat 2 path
* it will get unlocked at end of tx_submit
*/

if (ioat_chan->pending != 0) {
spin_lock_bh(&ioat_chan->desc_lock);
__ioat2_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
}
if (new) {
new->len = len;
return &new->async_tx;
} else
return NULL;
}

static void ioat_dma_cleanup_tasklet(unsigned long data)
@@ -1019,7 +1031,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
static void ioat_dma_test_callback(void *dma_async_param)
{
printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
dma_async_param);
dma_async_param);
}

/**
@@ -1032,7 +1044,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
u8 *src;
u8 *dest;
struct dma_chan *dma_chan;
struct dma_async_tx_descriptor *tx = NULL;
struct dma_async_tx_descriptor *tx;
dma_addr_t addr;
dma_cookie_t cookie;
int err = 0;
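
The comments added in the hunks above spell out an asymmetric locking convention in the version-2 path: ioat2_dma_prep_memcpy() returns with ioat_chan->desc_lock still held, and the lock is only released at the end of ioat2_tx_submit(). The sketch below is a minimal user-space illustration of that hand-off pattern, using a pthread mutex in place of the kernel spinlock; the names and the simplified error handling are illustrative and are not taken from ioat_dma.c.

/* Minimal sketch (not driver code) of a prep/submit pair that deliberately
 * spans one critical section across two functions: prep() returns with the
 * lock held, submit() releases it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;

struct desc { int len; };

/* analogous role to ioat2_dma_prep_memcpy(): take desc_lock and keep it */
static struct desc *prep(struct desc *d, int len)
{
	pthread_mutex_lock(&desc_lock);
	if (!d) {
		pthread_mutex_unlock(&desc_lock);  /* only drop it on failure */
		return NULL;
	}
	d->len = len;
	return d;  /* lock intentionally still held on success */
}

/* analogous role to ioat2_tx_submit(): runs under desc_lock and releases it */
static void submit(struct desc *d)
{
	printf("submitting %d bytes\n", d->len);
	pthread_mutex_unlock(&desc_lock);
}

int main(void)
{
	struct desc d;
	struct desc *p = prep(&d, 4096);

	if (p)
		submit(p);
	return 0;
}

The point of the convention is that descriptor setup and submission form a single critical section even though they live in two functions, so neither function on its own is a balanced lock/unlock pair.
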
2 changes: 1 addition & 1 deletion trunk/drivers/dma/ioatdma.h
@@ -76,7 +76,7 @@ struct ioat_dma_chan {
dma_cookie_t completed_cookie;
unsigned long last_completion;

u32 xfercap; /* XFERCAP register value expanded out */
size_t xfercap; /* XFERCAP register value expanded out */

spinlock_t cleanup_lock;
spinlock_t desc_lock;
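
Alongside the ioat_dma.c hunks that replace min((u32) len, ioat_chan->xfercap) with min_t(size_t, len, ioat_chan->xfercap), widening xfercap to size_t lets the length comparison happen entirely in size_t. The kernel's min() warns on mismatched operand types, which is presumably why the old code cast len down to u32; that cast silently drops the high bits of a large length on 64-bit builds. The snippet below is a stand-alone user-space illustration of the pitfall, with a MIN_T macro that mimics the kernel's min_t() by casting both operands to the named type (unlike the real macro, this simple version evaluates its arguments twice); the values and the macro are illustrative only.

/* Stand-alone illustration (not kernel code) of why comparing a size_t
 * length through a u32 cast is fragile on a 64-bit build. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* mimics min_t(): cast both operands to the named type before comparing */
#define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	size_t len = ((size_t)1 << 32) + 512;  /* 4 GiB + 512 B; assumes 64-bit size_t */
	size_t xfercap = 1 << 20;              /* 1 MiB per-descriptor limit */

	/* old style: the u32 cast drops the high bits, so the "remaining
	 * length" looks like 512 bytes instead of just over 4 GiB */
	uint32_t old_copy = MIN_T(uint32_t, len, xfercap);

	/* new style: compare in size_t, as the diff now does */
	size_t new_copy = MIN_T(size_t, len, xfercap);

	printf("u32 compare picks %u bytes, size_t compare picks %zu bytes\n",
	       (unsigned)old_copy, new_copy);
	return 0;
}
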
