Commit 740d91f

---
r: 140289
b: refs/heads/master
c: 87d3d3f
h: refs/heads/master
i:
  140287: 5cc369f
v: v3
Sunil Mushran authored and Mark Fasheh committed Apr 3, 2009
1 parent 65bd943 commit 740d91f
Showing 20 changed files with 346 additions and 930 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 133e2a3164771454aa326859c2b293687189b553
refs/heads/master: 87d3d3f3931f3e0fca44fbb5c06ad45fc4dca9bc
6 changes: 3 additions & 3 deletions trunk/crypto/async_tx/async_tx.c
@@ -30,7 +30,7 @@
#ifdef CONFIG_DMA_ENGINE
static int __init async_tx_init(void)
{
async_dmaengine_get();
dmaengine_get();

printk(KERN_INFO "async_tx: api initialized (async)\n");

@@ -39,7 +39,7 @@ static int __init async_tx_init(void)

static void __exit async_tx_exit(void)
{
async_dmaengine_put();
dmaengine_put();
}

/**
@@ -56,7 +56,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
if (depend_tx &&
dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
return depend_tx->chan;
return async_dma_find_channel(tx_type);
return dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
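
For reference, a rough sketch (not part of this commit) of how a dmaengine client might use the interfaces the async_tx hunks above switch to: dmaengine_get(), dma_find_channel() and dmaengine_put() come from the diff, while the helper names and the DMA_MEMCPY capability are illustrative only.

#include <linux/dmaengine.h>

/* Hypothetical helper, for illustration only. */
static struct dma_chan *example_grab_memcpy_chan(void)
{
        struct dma_chan *chan;

        dmaengine_get();                        /* register as a dmaengine client */
        chan = dma_find_channel(DMA_MEMCPY);    /* may be NULL if no engine is present */
        if (!chan)
                dmaengine_put();                /* nothing usable, drop the reference */
        return chan;
}

/* The matching teardown mirrors async_tx_exit() above. */
static void example_release(void)
{
        dmaengine_put();
}
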
7 changes: 5 additions & 2 deletions trunk/crypto/async_tx/async_xor.c
@@ -30,8 +30,11 @@
#include <linux/raid/xor.h>
#include <linux/async_tx.h>

/* do_async_xor - dma map the pages and perform the xor with an engine */
static __async_inline struct dma_async_tx_descriptor *
/* do_async_xor - dma map the pages and perform the xor with an engine.
* This routine is marked __always_inline so it can be compiled away
* when CONFIG_DMA_ENGINE=n
*/
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, int src_cnt, size_t len,
enum async_tx_flags flags,
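
A minimal, purely illustrative sketch of the pattern the new comment describes: an __always_inline helper behind a compile-time stub, so that when the engine support is configured out the call site is dead code and no out-of-line body referencing the DMA API is emitted. CONFIG_EXAMPLE_ENGINE, struct fake_chan and the example_* names are hypothetical.

#include <linux/compiler.h>
#include <linux/errno.h>

struct fake_chan;                               /* stand-in for struct dma_chan */

#ifdef CONFIG_EXAMPLE_ENGINE
struct fake_chan *example_find_chan(void);      /* real lookup when built in */
#else
static inline struct fake_chan *example_find_chan(void) { return NULL; }
#endif

/* Never emitted out of line; with the NULL stub above the body is dead code. */
static __always_inline int example_run_on_engine(struct fake_chan *chan, void *buf)
{
        /* would build and submit a DMA descriptor here */
        return 0;
}

static int example_run(void *buf)
{
        struct fake_chan *chan = example_find_chan();

        if (chan)
                return example_run_on_engine(chan, buf);        /* vanishes if stubbed */
        return -ENODEV;                         /* fall back to the software path */
}
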
11 changes: 0 additions & 11 deletions trunk/drivers/dma/Kconfig
@@ -98,17 +98,6 @@ config NET_DMA
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
say N.

config ASYNC_TX_DMA
bool "Async_tx: Offload support for the async_tx api"
depends on DMA_ENGINE
help
This allows the async_tx api to take advantage of offload engines for
memcpy, memset, xor, and raid6 p+q operations. If your platform has
a dma engine that can perform raid operations and you have enabled
MD_RAID456 say Y.

If unsure, say N.

config DMATEST
tristate "DMA Test client"
depends on DMA_ENGINE
60 changes: 12 additions & 48 deletions trunk/drivers/dma/dmaengine.c
@@ -507,7 +507,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
* published in the general-purpose allocator
*/
dma_cap_set(DMA_PRIVATE, device->cap_mask);
device->privatecnt++;
err = dma_chan_get(chan);

if (err == -ENODEV) {
@@ -519,8 +518,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
chan->private = NULL;
chan = NULL;
}
@@ -540,9 +537,6 @@ void dma_release_channel(struct dma_chan *chan)
WARN_ONCE(chan->client_count != 1,
"chan reference count %d != 1\n", chan->client_count);
dma_chan_put(chan);
/* drop PRIVATE cap enabled by __dma_request_channel() */
if (--chan->device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
chan->private = NULL;
mutex_unlock(&dma_list_mutex);
}
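
For context, a hedged sketch (not from this commit) of the consumer side of the two hunks above: a driver takes an exclusive channel through dma_request_channel(), which wraps __dma_request_channel() and uses the DMA_PRIVATE capability to keep the channel out of the general-purpose pool, and returns it with dma_release_channel(). The example_* helpers and the DMA_MEMCPY mask are illustrative.

#include <linux/dmaengine.h>

static struct dma_chan *example_get_private_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        /* NULL filter: accept the first channel advertising the capability */
        return dma_request_channel(mask, NULL, NULL);
}

static void example_put_private_chan(struct dma_chan *chan)
{
        if (chan)
                dma_release_channel(chan);
}
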
@@ -608,24 +602,6 @@ void dmaengine_put(void)
}
EXPORT_SYMBOL(dmaengine_put);

static int get_dma_id(struct dma_device *device)
{
int rc;

idr_retry:
if (!idr_pre_get(&dma_idr, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&dma_list_mutex);
rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
mutex_unlock(&dma_list_mutex);
if (rc == -EAGAIN)
goto idr_retry;
else if (rc != 0)
return rc;

return 0;
}

/**
* dma_async_device_register - registers DMA devices found
* @device: &dma_device
@@ -664,25 +640,27 @@ int dma_async_device_register(struct dma_device *device)
idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
if (!idr_ref)
return -ENOMEM;
rc = get_dma_id(device);
if (rc != 0) {
kfree(idr_ref);
return rc;
}

atomic_set(idr_ref, 0);
idr_retry:
if (!idr_pre_get(&dma_idr, GFP_KERNEL))
return -ENOMEM;
mutex_lock(&dma_list_mutex);
rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
mutex_unlock(&dma_list_mutex);
if (rc == -EAGAIN)
goto idr_retry;
else if (rc != 0)
return rc;

/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
rc = -ENOMEM;
chan->local = alloc_percpu(typeof(*chan->local));
if (chan->local == NULL)
goto err_out;
continue;
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
if (chan->dev == NULL) {
free_percpu(chan->local);
chan->local = NULL;
goto err_out;
continue;
}

chan->chan_id = chancnt++;
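
The idr calls reinstated in the hunk above follow the old two-step idr idiom (later replaced by idr_alloc() in newer kernels): idr_pre_get() preloads memory and returns 0 on failure, and idr_get_new() can still return -EAGAIN if the preloaded node was consumed, in which case the caller loops. A self-contained sketch of that idiom, with hypothetical names (example_idr, example_lock, example_get_id):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_lock);

static int example_get_id(void *ptr, int *id)
{
        int rc;

retry:
        if (!idr_pre_get(&example_idr, GFP_KERNEL))
                return -ENOMEM;                 /* preload failed */
        mutex_lock(&example_lock);
        rc = idr_get_new(&example_idr, ptr, id);
        mutex_unlock(&example_lock);
        if (rc == -EAGAIN)
                goto retry;                     /* preloaded node raced away, retry */
        return rc;
}
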
@@ -699,8 +677,6 @@ int dma_async_device_register(struct dma_device *device)
if (rc) {
free_percpu(chan->local);
chan->local = NULL;
kfree(chan->dev);
atomic_dec(idr_ref);
goto err_out;
}
chan->client_count = 0;
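
The registration loop above pairs alloc_percpu() with free_percpu() on its unwind path. A small, purely illustrative sketch of that pairing under hypothetical names (struct example_stats and the example_* helpers); per-cpu data is touched through the per-cpu accessors and released with free_percpu():

#include <linux/percpu.h>

struct example_stats {
        unsigned long count;
};

static struct example_stats __percpu *example_alloc_stats(void)
{
        /* returns a per-cpu pointer, one instance of the struct per CPU */
        return alloc_percpu(struct example_stats);
}

static void example_bump(struct example_stats __percpu *stats)
{
        this_cpu_inc(stats->count);             /* increment this CPU's instance */
}

static void example_free_stats(struct example_stats __percpu *stats)
{
        free_percpu(stats);                     /* mirrors the error path above */
}
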
@@ -725,23 +701,12 @@ int dma_async_device_register(struct dma_device *device)
}
}
list_add_tail_rcu(&device->global_node, &dma_device_list);
if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
device->privatecnt++; /* Always private */
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);

return 0;

err_out:
/* if we never registered a channel just release the idr */
if (atomic_read(idr_ref) == 0) {
mutex_lock(&dma_list_mutex);
idr_remove(&dma_idr, device->dev_id);
mutex_unlock(&dma_list_mutex);
kfree(idr_ref);
return rc;
}

list_for_each_entry(chan, &device->channels, device_node) {
if (chan->local == NULL)
continue;
@@ -928,7 +893,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
{
tx->chan = chan;
spin_lock_init(&tx->lock);
INIT_LIST_HEAD(&tx->tx_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

(diffs for the remaining changed files not loaded)
