diff --git a/[refs] b/[refs] index f90ad2f39cf8..ec473c76b54c 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 133e2a3164771454aa326859c2b293687189b553 +refs/heads/master: 87d3d3f3931f3e0fca44fbb5c06ad45fc4dca9bc diff --git a/trunk/crypto/async_tx/async_tx.c b/trunk/crypto/async_tx/async_tx.c index 06eb6cc09fef..f21147f3626a 100644 --- a/trunk/crypto/async_tx/async_tx.c +++ b/trunk/crypto/async_tx/async_tx.c @@ -30,7 +30,7 @@ #ifdef CONFIG_DMA_ENGINE static int __init async_tx_init(void) { - async_dmaengine_get(); + dmaengine_get(); printk(KERN_INFO "async_tx: api initialized (async)\n"); @@ -39,7 +39,7 @@ static int __init async_tx_init(void) static void __exit async_tx_exit(void) { - async_dmaengine_put(); + dmaengine_put(); } /** @@ -56,7 +56,7 @@ __async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx, if (depend_tx && dma_has_cap(tx_type, depend_tx->chan->device->cap_mask)) return depend_tx->chan; - return async_dma_find_channel(tx_type); + return dma_find_channel(tx_type); } EXPORT_SYMBOL_GPL(__async_tx_find_channel); #else diff --git a/trunk/crypto/async_tx/async_xor.c b/trunk/crypto/async_tx/async_xor.c index 95fe2c8d6c51..595b78672b36 100644 --- a/trunk/crypto/async_tx/async_xor.c +++ b/trunk/crypto/async_tx/async_xor.c @@ -30,8 +30,11 @@ #include #include -/* do_async_xor - dma map the pages and perform the xor with an engine */ -static __async_inline struct dma_async_tx_descriptor * +/* do_async_xor - dma map the pages and perform the xor with an engine. + * This routine is marked __always_inline so it can be compiled away + * when CONFIG_DMA_ENGINE=n + */ +static __always_inline struct dma_async_tx_descriptor * do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum async_tx_flags flags, diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index 3b3c01b6f1ee..48ea59e79672 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -98,17 +98,6 @@ config NET_DMA Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise say N. -config ASYNC_TX_DMA - bool "Async_tx: Offload support for the async_tx api" - depends on DMA_ENGINE - help - This allows the async_tx api to take advantage of offload engines for - memcpy, memset, xor, and raid6 p+q operations. If your platform has - a dma engine that can perform raid operations and you have enabled - MD_RAID456 say Y. - - If unsure, say N. 
- config DMATEST tristate "DMA Test client" depends on DMA_ENGINE diff --git a/trunk/drivers/dma/dmaengine.c b/trunk/drivers/dma/dmaengine.c index 92438e9dacc3..280a9d263eb3 100644 --- a/trunk/drivers/dma/dmaengine.c +++ b/trunk/drivers/dma/dmaengine.c @@ -507,7 +507,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v * published in the general-purpose allocator */ dma_cap_set(DMA_PRIVATE, device->cap_mask); - device->privatecnt++; err = dma_chan_get(chan); if (err == -ENODEV) { @@ -519,8 +518,6 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v dma_chan_name(chan), err); else break; - if (--device->privatecnt == 0) - dma_cap_clear(DMA_PRIVATE, device->cap_mask); chan->private = NULL; chan = NULL; } @@ -540,9 +537,6 @@ void dma_release_channel(struct dma_chan *chan) WARN_ONCE(chan->client_count != 1, "chan reference count %d != 1\n", chan->client_count); dma_chan_put(chan); - /* drop PRIVATE cap enabled by __dma_request_channel() */ - if (--chan->device->privatecnt == 0) - dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); chan->private = NULL; mutex_unlock(&dma_list_mutex); } @@ -608,24 +602,6 @@ void dmaengine_put(void) } EXPORT_SYMBOL(dmaengine_put); -static int get_dma_id(struct dma_device *device) -{ - int rc; - - idr_retry: - if (!idr_pre_get(&dma_idr, GFP_KERNEL)) - return -ENOMEM; - mutex_lock(&dma_list_mutex); - rc = idr_get_new(&dma_idr, NULL, &device->dev_id); - mutex_unlock(&dma_list_mutex); - if (rc == -EAGAIN) - goto idr_retry; - else if (rc != 0) - return rc; - - return 0; -} - /** * dma_async_device_register - registers DMA devices found * @device: &dma_device @@ -664,25 +640,27 @@ int dma_async_device_register(struct dma_device *device) idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); if (!idr_ref) return -ENOMEM; - rc = get_dma_id(device); - if (rc != 0) { - kfree(idr_ref); - return rc; - } - atomic_set(idr_ref, 0); + idr_retry: + if (!idr_pre_get(&dma_idr, GFP_KERNEL)) + return -ENOMEM; + mutex_lock(&dma_list_mutex); + rc = idr_get_new(&dma_idr, NULL, &device->dev_id); + mutex_unlock(&dma_list_mutex); + if (rc == -EAGAIN) + goto idr_retry; + else if (rc != 0) + return rc; /* represent channels in sysfs. 
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { - rc = -ENOMEM; chan->local = alloc_percpu(typeof(*chan->local)); if (chan->local == NULL) - goto err_out; + continue; chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); if (chan->dev == NULL) { free_percpu(chan->local); - chan->local = NULL; - goto err_out; + continue; } chan->chan_id = chancnt++; @@ -699,8 +677,6 @@ int dma_async_device_register(struct dma_device *device) if (rc) { free_percpu(chan->local); chan->local = NULL; - kfree(chan->dev); - atomic_dec(idr_ref); goto err_out; } chan->client_count = 0; @@ -725,23 +701,12 @@ int dma_async_device_register(struct dma_device *device) } } list_add_tail_rcu(&device->global_node, &dma_device_list); - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - device->privatecnt++; /* Always private */ dma_channel_rebalance(); mutex_unlock(&dma_list_mutex); return 0; err_out: - /* if we never registered a channel just release the idr */ - if (atomic_read(idr_ref) == 0) { - mutex_lock(&dma_list_mutex); - idr_remove(&dma_idr, device->dev_id); - mutex_unlock(&dma_list_mutex); - kfree(idr_ref); - return rc; - } - list_for_each_entry(chan, &device->channels, device_node) { if (chan->local == NULL) continue; @@ -928,7 +893,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, { tx->chan = chan; spin_lock_init(&tx->lock); - INIT_LIST_HEAD(&tx->tx_list); } EXPORT_SYMBOL(dma_async_tx_descriptor_init); diff --git a/trunk/drivers/dma/dmatest.c b/trunk/drivers/dma/dmatest.c index a27c0fb1bc11..e190d8b30700 100644 --- a/trunk/drivers/dma/dmatest.c +++ b/trunk/drivers/dma/dmatest.c @@ -38,11 +38,6 @@ module_param(max_channels, uint, S_IRUGO); MODULE_PARM_DESC(max_channels, "Maximum number of channels to use (default: all)"); -static unsigned int xor_sources = 3; -module_param(xor_sources, uint, S_IRUGO); -MODULE_PARM_DESC(xor_sources, - "Number of xor source buffers (default: 3)"); - /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. 
@@ -64,9 +59,8 @@ struct dmatest_thread { struct list_head node; struct task_struct *task; struct dma_chan *chan; - u8 **srcs; - u8 **dsts; - enum dma_transaction_type type; + u8 *srcbuf; + u8 *dstbuf; }; struct dmatest_chan { @@ -104,37 +98,30 @@ static unsigned long dmatest_random(void) return buf; } -static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_srcbuf(u8 *buf, unsigned int start, unsigned int len) { unsigned int i; - u8 *buf; - - for (; (buf = *bufs); bufs++) { - for (i = 0; i < start; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); - for ( ; i < start + len; i++) - buf[i] = PATTERN_SRC | PATTERN_COPY - | (~i & PATTERN_COUNT_MASK);; - for ( ; i < test_buf_size; i++) - buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); - buf++; - } + + for (i = 0; i < start; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_SRC | PATTERN_COPY + | (~i & PATTERN_COUNT_MASK);; + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); } -static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_dstbuf(u8 *buf, unsigned int start, unsigned int len) { unsigned int i; - u8 *buf; - - for (; (buf = *bufs); bufs++) { - for (i = 0; i < start; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); - for ( ; i < start + len; i++) - buf[i] = PATTERN_DST | PATTERN_OVERWRITE - | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) - buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); - } + + for (i = 0; i < start; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); + for ( ; i < start + len; i++) + buf[i] = PATTERN_DST | PATTERN_OVERWRITE + | (~i & PATTERN_COUNT_MASK); + for ( ; i < test_buf_size; i++) + buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); } static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, @@ -163,30 +150,23 @@ static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, thread_name, index, expected, actual); } -static unsigned int dmatest_verify(u8 **bufs, unsigned int start, +static unsigned int dmatest_verify(u8 *buf, unsigned int start, unsigned int end, unsigned int counter, u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; u8 actual; - u8 expected; - u8 *buf; - unsigned int counter_orig = counter; - - for (; (buf = *bufs); bufs++) { - counter = counter_orig; - for (i = start; i < end; i++) { - actual = buf[i]; - expected = pattern | (~counter & PATTERN_COUNT_MASK); - if (actual != expected) { - if (error_count < 32) - dmatest_mismatch(actual, pattern, i, - counter, is_srcbuf); - error_count++; - } - counter++; + + for (i = start; i < end; i++) { + actual = buf[i]; + if (actual != (pattern | (~counter & PATTERN_COUNT_MASK))) { + if (error_count < 32) + dmatest_mismatch(actual, pattern, i, counter, + is_srcbuf); + error_count++; } + counter++; } if (error_count > 32) @@ -196,17 +176,12 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, return error_count; } -static void dmatest_callback(void *completion) -{ - complete(completion); -} - /* * This function repeatedly tests DMA transfers of various lengths and - * offsets for a given operation type until it is told to exit by - * kthread_stop(). There may be multiple threads running this function - * in parallel for a single channel, and there may be multiple channels - * being tested in parallel. + * offsets until it is told to exit by kthread_stop(). 
There may be + * multiple threads running this function in parallel for a single + * channel, and there may be multiple channels being tested in + * parallel. * * Before each test, the source and destination buffer is initialized * with a known pattern. This pattern is different depending on @@ -226,57 +201,25 @@ static int dmatest_func(void *data) unsigned int total_tests = 0; dma_cookie_t cookie; enum dma_status status; - enum dma_ctrl_flags flags; int ret; - int src_cnt; - int dst_cnt; - int i; thread_name = current->comm; ret = -ENOMEM; + thread->srcbuf = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->srcbuf) + goto err_srcbuf; + thread->dstbuf = kmalloc(test_buf_size, GFP_KERNEL); + if (!thread->dstbuf) + goto err_dstbuf; smp_rmb(); chan = thread->chan; - if (thread->type == DMA_MEMCPY) - src_cnt = dst_cnt = 1; - else if (thread->type == DMA_XOR) { - src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ - dst_cnt = 1; - } else - goto err_srcs; - - thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); - if (!thread->srcs) - goto err_srcs; - for (i = 0; i < src_cnt; i++) { - thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); - if (!thread->srcs[i]) - goto err_srcbuf; - } - thread->srcs[i] = NULL; - - thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL); - if (!thread->dsts) - goto err_dsts; - for (i = 0; i < dst_cnt; i++) { - thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); - if (!thread->dsts[i]) - goto err_dstbuf; - } - thread->dsts[i] = NULL; - - set_user_nice(current, 10); - - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT; while (!kthread_should_stop()) { struct dma_device *dev = chan->device; - struct dma_async_tx_descriptor *tx = NULL; - dma_addr_t dma_srcs[src_cnt]; - dma_addr_t dma_dsts[dst_cnt]; - struct completion cmp; - unsigned long tmo = msecs_to_jiffies(3000); + struct dma_async_tx_descriptor *tx; + dma_addr_t dma_src, dma_dest; total_tests++; @@ -284,41 +227,22 @@ static int dmatest_func(void *data) src_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1); - dmatest_init_srcs(thread->srcs, src_off, len); - dmatest_init_dsts(thread->dsts, dst_off, len); + dmatest_init_srcbuf(thread->srcbuf, src_off, len); + dmatest_init_dstbuf(thread->dstbuf, dst_off, len); - for (i = 0; i < src_cnt; i++) { - u8 *buf = thread->srcs[i] + src_off; - - dma_srcs[i] = dma_map_single(dev->dev, buf, len, - DMA_TO_DEVICE); - } + dma_src = dma_map_single(dev->dev, thread->srcbuf + src_off, + len, DMA_TO_DEVICE); /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ - for (i = 0; i < dst_cnt; i++) { - dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - test_buf_size, - DMA_BIDIRECTIONAL); - } - - if (thread->type == DMA_MEMCPY) - tx = dev->device_prep_dma_memcpy(chan, - dma_dsts[0] + dst_off, - dma_srcs[0], len, - flags); - else if (thread->type == DMA_XOR) - tx = dev->device_prep_dma_xor(chan, - dma_dsts[0] + dst_off, - dma_srcs, xor_sources, - len, flags); + dma_dest = dma_map_single(dev->dev, thread->dstbuf, + test_buf_size, DMA_BIDIRECTIONAL); + tx = dev->device_prep_dma_memcpy(chan, dma_dest + dst_off, + dma_src, len, + DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP); if (!tx) { - for (i = 0; i < src_cnt; i++) - dma_unmap_single(dev->dev, dma_srcs[i], len, - DMA_TO_DEVICE); - for (i = 0; i < dst_cnt; i++) - dma_unmap_single(dev->dev, dma_dsts[i], - test_buf_size, - DMA_BIDIRECTIONAL); + dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE); + dma_unmap_single(dev->dev, 
dma_dest, + test_buf_size, DMA_BIDIRECTIONAL); pr_warning("%s: #%u: prep error with src_off=0x%x " "dst_off=0x%x len=0x%x\n", thread_name, total_tests - 1, @@ -327,10 +251,7 @@ static int dmatest_func(void *data) failed_tests++; continue; } - - init_completion(&cmp); - tx->callback = dmatest_callback; - tx->callback_param = &cmp; + tx->callback = NULL; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { @@ -342,50 +263,44 @@ static int dmatest_func(void *data) failed_tests++; continue; } - dma_async_issue_pending(chan); + dma_async_memcpy_issue_pending(chan); - tmo = wait_for_completion_timeout(&cmp, tmo); - status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); + do { + msleep(1); + status = dma_async_memcpy_complete( + chan, cookie, NULL, NULL); + } while (status == DMA_IN_PROGRESS); - if (tmo == 0) { - pr_warning("%s: #%u: test timed out\n", - thread_name, total_tests - 1); - failed_tests++; - continue; - } else if (status != DMA_SUCCESS) { - pr_warning("%s: #%u: got completion callback," - " but status is \'%s\'\n", - thread_name, total_tests - 1, - status == DMA_ERROR ? "error" : "in progress"); + if (status == DMA_ERROR) { + pr_warning("%s: #%u: error during copy\n", + thread_name, total_tests - 1); failed_tests++; continue; } - /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - for (i = 0; i < dst_cnt; i++) - dma_unmap_single(dev->dev, dma_dsts[i], test_buf_size, - DMA_BIDIRECTIONAL); + dma_unmap_single(dev->dev, dma_dest, + test_buf_size, DMA_BIDIRECTIONAL); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += dmatest_verify(thread->srcs, 0, src_off, + error_count += dmatest_verify(thread->srcbuf, 0, src_off, 0, PATTERN_SRC, true); - error_count += dmatest_verify(thread->srcs, src_off, + error_count += dmatest_verify(thread->srcbuf, src_off, src_off + len, src_off, PATTERN_SRC | PATTERN_COPY, true); - error_count += dmatest_verify(thread->srcs, src_off + len, + error_count += dmatest_verify(thread->srcbuf, src_off + len, test_buf_size, src_off + len, PATTERN_SRC, true); pr_debug("%s: verifying dest buffer...\n", thread->task->comm); - error_count += dmatest_verify(thread->dsts, 0, dst_off, + error_count += dmatest_verify(thread->dstbuf, 0, dst_off, 0, PATTERN_DST, false); - error_count += dmatest_verify(thread->dsts, dst_off, + error_count += dmatest_verify(thread->dstbuf, dst_off, dst_off + len, src_off, PATTERN_SRC | PATTERN_COPY, false); - error_count += dmatest_verify(thread->dsts, dst_off + len, + error_count += dmatest_verify(thread->dstbuf, dst_off + len, test_buf_size, dst_off + len, PATTERN_DST, false); @@ -404,16 +319,10 @@ static int dmatest_func(void *data) } ret = 0; - for (i = 0; thread->dsts[i]; i++) - kfree(thread->dsts[i]); + kfree(thread->dstbuf); err_dstbuf: - kfree(thread->dsts); -err_dsts: - for (i = 0; thread->srcs[i]; i++) - kfree(thread->srcs[i]); + kfree(thread->srcbuf); err_srcbuf: - kfree(thread->srcs); -err_srcs: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); return ret; @@ -435,36 +344,35 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) kfree(dtc); } -static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) +static int dmatest_add_channel(struct dma_chan *chan) { - struct dmatest_thread *thread; - struct dma_chan *chan = dtc->chan; - char *op; - unsigned int i; + struct dmatest_chan *dtc; + struct dmatest_thread *thread; + unsigned int i; - if (type == DMA_MEMCPY) - op = 
"copy"; - else if (type == DMA_XOR) - op = "xor"; - else - return -EINVAL; + dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); + if (!dtc) { + pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); + return -ENOMEM; + } + + dtc->chan = chan; + INIT_LIST_HEAD(&dtc->threads); for (i = 0; i < threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { - pr_warning("dmatest: No memory for %s-%s%u\n", - dma_chan_name(chan), op, i); - + pr_warning("dmatest: No memory for %s-test%u\n", + dma_chan_name(chan), i); break; } thread->chan = dtc->chan; - thread->type = type; smp_wmb(); - thread->task = kthread_run(dmatest_func, thread, "%s-%s%u", - dma_chan_name(chan), op, i); + thread->task = kthread_run(dmatest_func, thread, "%s-test%u", + dma_chan_name(chan), i); if (IS_ERR(thread->task)) { - pr_warning("dmatest: Failed to run thread %s-%s%u\n", - dma_chan_name(chan), op, i); + pr_warning("dmatest: Failed to run thread %s-test%u\n", + dma_chan_name(chan), i); kfree(thread); break; } @@ -474,36 +382,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty list_add_tail(&thread->node, &dtc->threads); } - return i; -} - -static int dmatest_add_channel(struct dma_chan *chan) -{ - struct dmatest_chan *dtc; - struct dma_device *dma_dev = chan->device; - unsigned int thread_count = 0; - unsigned int cnt; - - dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL); - if (!dtc) { - pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan)); - return -ENOMEM; - } - - dtc->chan = chan; - INIT_LIST_HEAD(&dtc->threads); - - if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_MEMCPY); - thread_count += cnt > 0 ?: 0; - } - if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_XOR); - thread_count += cnt > 0 ?: 0; - } - - pr_info("dmatest: Started %u threads using %s\n", - thread_count, dma_chan_name(chan)); + pr_info("dmatest: Started %u threads using %s\n", i, dma_chan_name(chan)); list_add_tail(&dtc->node, &dmatest_channels); nr_channels++; diff --git a/trunk/drivers/dma/dw_dmac.c b/trunk/drivers/dma/dw_dmac.c index 98c9a847bf51..20ad3d26bec2 100644 --- a/trunk/drivers/dma/dw_dmac.c +++ b/trunk/drivers/dma/dw_dmac.c @@ -363,82 +363,6 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) dwc_descriptor_complete(dwc, bad_desc); } -/* --------------------- Cyclic DMA API extensions -------------------- */ - -inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - return channel_readl(dwc, SAR); -} -EXPORT_SYMBOL(dw_dma_get_src_addr); - -inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - return channel_readl(dwc, DAR); -} -EXPORT_SYMBOL(dw_dma_get_dst_addr); - -/* called with dwc->lock held and all DMAC interrupts disabled */ -static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, - u32 status_block, u32 status_err, u32 status_xfer) -{ - if (status_block & dwc->mask) { - void (*callback)(void *param); - void *callback_param; - - dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", - channel_readl(dwc, LLP)); - dma_writel(dw, CLEAR.BLOCK, dwc->mask); - - callback = dwc->cdesc->period_callback; - callback_param = dwc->cdesc->period_callback_param; - if (callback) { - spin_unlock(&dwc->lock); - callback(callback_param); - spin_lock(&dwc->lock); - } - } - - /* - * Error and transfer complete are 
highly unlikely, and will most - * likely be due to a configuration error by the user. - */ - if (unlikely(status_err & dwc->mask) || - unlikely(status_xfer & dwc->mask)) { - int i; - - dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " - "interrupt, stopping DMA transfer\n", - status_xfer ? "xfer" : "error"); - dev_err(chan2dev(&dwc->chan), - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", - channel_readl(dwc, SAR), - channel_readl(dwc, DAR), - channel_readl(dwc, LLP), - channel_readl(dwc, CTL_HI), - channel_readl(dwc, CTL_LO)); - - channel_clear_bit(dw, CH_EN, dwc->mask); - while (dma_readl(dw, CH_EN) & dwc->mask) - cpu_relax(); - - /* make sure DMA does not restart by loading a new list */ - channel_writel(dwc, LLP, 0); - channel_writel(dwc, CTL_LO, 0); - channel_writel(dwc, CTL_HI, 0); - - dma_writel(dw, CLEAR.BLOCK, dwc->mask); - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - for (i = 0; i < dwc->cdesc->periods; i++) - dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); - } -} - -/* ------------------------------------------------------------------------- */ - static void dw_dma_tasklet(unsigned long data) { struct dw_dma *dw = (struct dw_dma *)data; @@ -458,10 +382,7 @@ static void dw_dma_tasklet(unsigned long data) for (i = 0; i < dw->dma.chancnt; i++) { dwc = &dw->chan[i]; spin_lock(&dwc->lock); - if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) - dwc_handle_cyclic(dw, dwc, status_block, status_err, - status_xfer); - else if (status_err & (1 << i)) + if (status_err & (1 << i)) dwc_handle_error(dw, dwc); else if ((status_block | status_xfer) & (1 << i)) dwc_scan_descriptors(dw, dwc); @@ -905,6 +826,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; + INIT_LIST_HEAD(&desc->txd.tx_list); desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, sizeof(desc->lli), DMA_TO_DEVICE); dwc_desc_put(dwc, desc); @@ -962,257 +884,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); } -/* --------------------- Cyclic DMA API extensions -------------------- */ - -/** - * dw_dma_cyclic_start - start the cyclic DMA transfer - * @chan: the DMA channel to start - * - * Must be called with soft interrupts disabled. Returns zero on success or - * -errno on failure. 
- */ -int dw_dma_cyclic_start(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - - if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { - dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); - return -ENODEV; - } - - spin_lock(&dwc->lock); - - /* assert channel is idle */ - if (dma_readl(dw, CH_EN) & dwc->mask) { - dev_err(chan2dev(&dwc->chan), - "BUG: Attempted to start non-idle channel\n"); - dev_err(chan2dev(&dwc->chan), - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", - channel_readl(dwc, SAR), - channel_readl(dwc, DAR), - channel_readl(dwc, LLP), - channel_readl(dwc, CTL_HI), - channel_readl(dwc, CTL_LO)); - spin_unlock(&dwc->lock); - return -EBUSY; - } - - dma_writel(dw, CLEAR.BLOCK, dwc->mask); - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - /* setup DMAC channel registers */ - channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); - channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); - channel_writel(dwc, CTL_HI, 0); - - channel_set_bit(dw, CH_EN, dwc->mask); - - spin_unlock(&dwc->lock); - - return 0; -} -EXPORT_SYMBOL(dw_dma_cyclic_start); - -/** - * dw_dma_cyclic_stop - stop the cyclic DMA transfer - * @chan: the DMA channel to stop - * - * Must be called with soft interrupts disabled. - */ -void dw_dma_cyclic_stop(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - - spin_lock(&dwc->lock); - - channel_clear_bit(dw, CH_EN, dwc->mask); - while (dma_readl(dw, CH_EN) & dwc->mask) - cpu_relax(); - - spin_unlock(&dwc->lock); -} -EXPORT_SYMBOL(dw_dma_cyclic_stop); - -/** - * dw_dma_cyclic_prep - prepare the cyclic DMA transfer - * @chan: the DMA channel to prepare - * @buf_addr: physical DMA address where the buffer starts - * @buf_len: total number of bytes for the entire buffer - * @period_len: number of bytes for each period - * @direction: transfer direction, to or from device - * - * Must be called before trying to start the transfer. Returns a valid struct - * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. - */ -struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, - dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_data_direction direction) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_cyclic_desc *cdesc; - struct dw_cyclic_desc *retval = NULL; - struct dw_desc *desc; - struct dw_desc *last = NULL; - struct dw_dma_slave *dws = chan->private; - unsigned long was_cyclic; - unsigned int reg_width; - unsigned int periods; - unsigned int i; - - spin_lock_bh(&dwc->lock); - if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { - spin_unlock_bh(&dwc->lock); - dev_dbg(chan2dev(&dwc->chan), - "queue and/or active list are not empty\n"); - return ERR_PTR(-EBUSY); - } - - was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - spin_unlock_bh(&dwc->lock); - if (was_cyclic) { - dev_dbg(chan2dev(&dwc->chan), - "channel already prepared for cyclic DMA\n"); - return ERR_PTR(-EBUSY); - } - - retval = ERR_PTR(-EINVAL); - reg_width = dws->reg_width; - periods = buf_len / period_len; - - /* Check for too big/unaligned periods and unaligned DMA buffer. 
*/ - if (period_len > (DWC_MAX_COUNT << reg_width)) - goto out_err; - if (unlikely(period_len & ((1 << reg_width) - 1))) - goto out_err; - if (unlikely(buf_addr & ((1 << reg_width) - 1))) - goto out_err; - if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) - goto out_err; - - retval = ERR_PTR(-ENOMEM); - - if (periods > NR_DESCS_PER_CHANNEL) - goto out_err; - - cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); - if (!cdesc) - goto out_err; - - cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); - if (!cdesc->desc) - goto out_err_alloc; - - for (i = 0; i < periods; i++) { - desc = dwc_desc_get(dwc); - if (!desc) - goto out_err_desc_get; - - switch (direction) { - case DMA_TO_DEVICE: - desc->lli.dar = dws->tx_reg; - desc->lli.sar = buf_addr + (period_len * i); - desc->lli.ctllo = (DWC_DEFAULT_CTLLO - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_FIX - | DWC_CTLL_SRC_INC - | DWC_CTLL_FC_M2P - | DWC_CTLL_INT_EN); - break; - case DMA_FROM_DEVICE: - desc->lli.dar = buf_addr + (period_len * i); - desc->lli.sar = dws->rx_reg; - desc->lli.ctllo = (DWC_DEFAULT_CTLLO - | DWC_CTLL_SRC_WIDTH(reg_width) - | DWC_CTLL_DST_WIDTH(reg_width) - | DWC_CTLL_DST_INC - | DWC_CTLL_SRC_FIX - | DWC_CTLL_FC_P2M - | DWC_CTLL_INT_EN); - break; - default: - break; - } - - desc->lli.ctlhi = (period_len >> reg_width); - cdesc->desc[i] = desc; - - if (last) { - last->lli.llp = desc->txd.phys; - dma_sync_single_for_device(chan2parent(chan), - last->txd.phys, sizeof(last->lli), - DMA_TO_DEVICE); - } - - last = desc; - } - - /* lets make a cyclic list */ - last->lli.llp = cdesc->desc[0]->txd.phys; - dma_sync_single_for_device(chan2parent(chan), last->txd.phys, - sizeof(last->lli), DMA_TO_DEVICE); - - dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " - "period %zu periods %d\n", buf_addr, buf_len, - period_len, periods); - - cdesc->periods = periods; - dwc->cdesc = cdesc; - - return cdesc; - -out_err_desc_get: - while (i--) - dwc_desc_put(dwc, cdesc->desc[i]); -out_err_alloc: - kfree(cdesc); -out_err: - clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); - return (struct dw_cyclic_desc *)retval; -} -EXPORT_SYMBOL(dw_dma_cyclic_prep); - -/** - * dw_dma_cyclic_free - free a prepared cyclic DMA transfer - * @chan: the DMA channel to free - */ -void dw_dma_cyclic_free(struct dma_chan *chan) -{ - struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - struct dw_cyclic_desc *cdesc = dwc->cdesc; - int i; - - dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); - - if (!cdesc) - return; - - spin_lock_bh(&dwc->lock); - - channel_clear_bit(dw, CH_EN, dwc->mask); - while (dma_readl(dw, CH_EN) & dwc->mask) - cpu_relax(); - - dma_writel(dw, CLEAR.BLOCK, dwc->mask); - dma_writel(dw, CLEAR.ERROR, dwc->mask); - dma_writel(dw, CLEAR.XFER, dwc->mask); - - spin_unlock_bh(&dwc->lock); - - for (i = 0; i < cdesc->periods; i++) - dwc_desc_put(dwc, cdesc->desc[i]); - - kfree(cdesc->desc); - kfree(cdesc); - - clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); -} -EXPORT_SYMBOL(dw_dma_cyclic_free); - /*----------------------------------------------------------------------*/ static void dw_dma_off(struct dw_dma *dw) diff --git a/trunk/drivers/dma/dw_dmac_regs.h b/trunk/drivers/dma/dw_dmac_regs.h index 13a580767031..b252b202c5cf 100644 --- a/trunk/drivers/dma/dw_dmac_regs.h +++ b/trunk/drivers/dma/dw_dmac_regs.h @@ -126,10 +126,6 @@ struct dw_dma_regs { #define DW_REGLEN 0x400 -enum dw_dmac_flags { - DW_DMA_IS_CYCLIC = 0, -}; - struct 
dw_dma_chan { struct dma_chan chan; void __iomem *ch_regs; @@ -138,12 +134,10 @@ struct dw_dma_chan { spinlock_t lock; /* these other elements are all protected by lock */ - unsigned long flags; dma_cookie_t completed; struct list_head active_list; struct list_head queue; struct list_head free_list; - struct dw_cyclic_desc *cdesc; unsigned int descs_allocated; }; @@ -164,6 +158,7 @@ static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan) return container_of(chan, struct dw_dma_chan, chan); } + struct dw_dma { struct dma_device dma; void __iomem *regs; diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c index da8a8ed9e411..86d6da47f558 100644 --- a/trunk/drivers/dma/fsldma.c +++ b/trunk/drivers/dma/fsldma.c @@ -354,6 +354,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( dma_async_tx_descriptor_init(&desc_sw->async_tx, &fsl_chan->common); desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; + INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->async_tx.phys = pdesc; } diff --git a/trunk/drivers/dma/ioat_dma.c b/trunk/drivers/dma/ioat_dma.c index e4fc33c1c32f..5905cd36bcd2 100644 --- a/trunk/drivers/dma/ioat_dma.c +++ b/trunk/drivers/dma/ioat_dma.c @@ -693,6 +693,7 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( desc_sw->async_tx.tx_submit = ioat2_tx_submit; break; } + INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); desc_sw->hw = desc; desc_sw->async_tx.phys = phys; diff --git a/trunk/drivers/dma/iop-adma.c b/trunk/drivers/dma/iop-adma.c index 2f052265122f..16adbe61cfb2 100644 --- a/trunk/drivers/dma/iop-adma.c +++ b/trunk/drivers/dma/iop-adma.c @@ -498,6 +498,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = iop_adma_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->async_tx.tx_list); hw_desc = (char *) iop_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE]; diff --git a/trunk/drivers/dma/ipu/ipu_idmac.c b/trunk/drivers/dma/ipu/ipu_idmac.c index e202a6ce5573..da781d107895 100644 --- a/trunk/drivers/dma/ipu/ipu_idmac.c +++ b/trunk/drivers/dma/ipu/ipu_idmac.c @@ -28,9 +28,6 @@ #define FS_VF_IN_VALID 0x00000002 #define FS_ENC_IN_VALID 0x00000001 -static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, - bool wait_for_stop); - /* * There can be only one, we could allocate it dynamically, but then we'd have * to add an extra parameter to some functions, and use something as ugly as @@ -110,7 +107,7 @@ static uint32_t bytes_per_pixel(enum pixel_fmt fmt) } } -/* Enable direct write to memory by the Camera Sensor Interface */ +/* Enable / disable direct write to memory by the Camera Sensor Interface */ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) { uint32_t ic_conf, mask; @@ -129,7 +126,6 @@ static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) idmac_write_icreg(ipu, ic_conf, IC_CONF); } -/* Called under spin_lock_irqsave(&ipu_data.lock) */ static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) { uint32_t ic_conf, mask; @@ -426,7 +422,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, break; default: dev_err(ipu_data.dev, - "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt); + "mxc ipu: unimplemented pixel format %d\n", pixel_fmt); break; } @@ -437,20 +433,20 @@ static void ipu_ch_param_set_burst_size(union chan_param_mem *params, uint16_t burst_pixels) { params->pp.npb = burst_pixels - 1; -} +}; 
static void ipu_ch_param_set_buffer(union chan_param_mem *params, dma_addr_t buf0, dma_addr_t buf1) { params->pp.eba0 = buf0; params->pp.eba1 = buf1; -} +}; static void ipu_ch_param_set_rotation(union chan_param_mem *params, enum ipu_rotate_mode rotate) { params->pp.bam = rotate; -} +}; static void ipu_write_param_mem(uint32_t addr, uint32_t *data, uint32_t num_words) @@ -575,7 +571,7 @@ static uint32_t dma_param_addr(uint32_t dma_ch) { /* Channel Parameter Memory */ return 0x10000 | (dma_ch << 4); -} +}; static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, bool prio) @@ -615,8 +611,7 @@ static uint32_t ipu_channel_conf_mask(enum ipu_channel channel) /** * ipu_enable_channel() - enable an IPU channel. - * @idmac: IPU DMAC context. - * @ichan: IDMAC channel. + * @channel: channel ID. * @return: 0 on success or negative error code on failure. */ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) @@ -654,7 +649,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) /** * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. - * @ichan: IDMAC channel. + * @channel: channel ID. * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. * @width: width of buffer in pixels. * @height: height of buffer in pixels. @@ -692,7 +687,7 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, } /* IC channel's stride must be a multiple of 8 pixels */ - if ((channel <= IDMAC_IC_13) && (stride % 8)) { + if ((channel <= 13) && (stride % 8)) { dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); return -EINVAL; } @@ -757,7 +752,7 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) /** * ipu_update_channel_buffer() - update physical address of a channel buffer. - * @ichan: IDMAC channel. + * @channel: channel ID. * @buffer_n: buffer number to update. * 0 or 1 are the only valid values. * @phyaddr: buffer physical address. @@ -765,10 +760,9 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) * function will fail if the buffer is set to ready. 
*/ /* Called under spin_lock(_irqsave)(&ichan->lock) */ -static int ipu_update_channel_buffer(struct idmac_channel *ichan, +static int ipu_update_channel_buffer(enum ipu_channel channel, int buffer_n, dma_addr_t phyaddr) { - enum ipu_channel channel = ichan->dma_chan.chan_id; uint32_t reg; unsigned long flags; @@ -777,8 +771,8 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, if (buffer_n == 0) { reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); if (reg & (1UL << channel)) { - ipu_ic_disable_task(&ipu_data, channel); - ichan->status = IPU_CHANNEL_READY; + spin_unlock_irqrestore(&ipu_data.lock, flags); + return -EACCES; } /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ @@ -788,8 +782,8 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, } else { reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); if (reg & (1UL << channel)) { - ipu_ic_disable_task(&ipu_data, channel); - ichan->status = IPU_CHANNEL_READY; + spin_unlock_irqrestore(&ipu_data.lock, flags); + return -EACCES; } /* Check if double-buffering is already enabled */ @@ -810,39 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan, return 0; } -/* Called under spin_lock_irqsave(&ichan->lock) */ -static int ipu_submit_buffer(struct idmac_channel *ichan, - struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) -{ - unsigned int chan_id = ichan->dma_chan.chan_id; - struct device *dev = &ichan->dma_chan.dev->device; - int ret; - - if (async_tx_test_ack(&desc->txd)) - return -EINTR; - - /* - * On first invocation this shouldn't be necessary, the call to - * ipu_init_channel_buffer() above will set addresses for us, so we - * could make it conditional on status >= IPU_CHANNEL_ENABLED, but - * doing it again shouldn't hurt either. - */ - ret = ipu_update_channel_buffer(ichan, buf_idx, - sg_dma_address(sg)); - - if (ret < 0) { - dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n", - sg, chan_id, buf_idx); - return ret; - } - - ipu_select_buffer(chan_id, buf_idx); - dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", - sg, chan_id, buf_idx); - - return 0; -} - /* Called under spin_lock_irqsave(&ichan->lock) */ static int ipu_submit_channel_buffers(struct idmac_channel *ichan, struct idmac_tx_desc *desc) @@ -854,10 +815,20 @@ static int ipu_submit_channel_buffers(struct idmac_channel *ichan, if (!ichan->sg[i]) { ichan->sg[i] = sg; - ret = ipu_submit_buffer(ichan, desc, sg, i); + /* + * On first invocation this shouldn't be necessary, the + * call to ipu_init_channel_buffer() above will set + * addresses for us, so we could make it conditional + * on status >= IPU_CHANNEL_ENABLED, but doing it again + * shouldn't hurt either. 
+ */ + ret = ipu_update_channel_buffer(ichan->dma_chan.chan_id, i, + sg_dma_address(sg)); if (ret < 0) return ret; + ipu_select_buffer(ichan->dma_chan.chan_id, i); + sg = sg_next(sg); } } @@ -871,22 +842,19 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) struct idmac_channel *ichan = to_idmac_chan(tx->chan); struct idmac *idmac = to_idmac(tx->chan->device); struct ipu *ipu = to_ipu(idmac); - struct device *dev = &ichan->dma_chan.dev->device; dma_cookie_t cookie; unsigned long flags; - int ret; /* Sanity check */ if (!list_empty(&desc->list)) { /* The descriptor doesn't belong to client */ - dev_err(dev, "Descriptor %p not prepared!\n", tx); + dev_err(&ichan->dma_chan.dev->device, + "Descriptor %p not prepared!\n", tx); return -EBUSY; } mutex_lock(&ichan->chan_mutex); - async_tx_clear_ack(tx); - if (ichan->status < IPU_CHANNEL_READY) { struct idmac_video_param *video = &ichan->params.video; /* @@ -910,7 +878,16 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) goto out; } - dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); + /* ipu->lock can be taken under ichan->lock, but not v.v. */ + spin_lock_irqsave(&ichan->lock, flags); + + /* submit_buffers() atomically verifies and fills empty sg slots */ + cookie = ipu_submit_channel_buffers(ichan, desc); + + spin_unlock_irqrestore(&ichan->lock, flags); + + if (cookie < 0) + goto out; cookie = ichan->dma_chan.cookie; @@ -920,40 +897,24 @@ static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) /* from dmaengine.h: "last cookie value returned to client" */ ichan->dma_chan.cookie = cookie; tx->cookie = cookie; - - /* ipu->lock can be taken under ichan->lock, but not v.v. */ spin_lock_irqsave(&ichan->lock, flags); - list_add_tail(&desc->list, &ichan->queue); - /* submit_buffers() atomically verifies and fills empty sg slots */ - ret = ipu_submit_channel_buffers(ichan, desc); - spin_unlock_irqrestore(&ichan->lock, flags); - if (ret < 0) { - cookie = ret; - goto dequeue; - } - if (ichan->status < IPU_CHANNEL_ENABLED) { - ret = ipu_enable_channel(idmac, ichan); + int ret = ipu_enable_channel(idmac, ichan); if (ret < 0) { cookie = ret; - goto dequeue; + spin_lock_irqsave(&ichan->lock, flags); + list_del_init(&desc->list); + spin_unlock_irqrestore(&ichan->lock, flags); + tx->cookie = cookie; + ichan->dma_chan.cookie = cookie; } } dump_idmac_reg(ipu); -dequeue: - if (cookie < 0) { - spin_lock_irqsave(&ichan->lock, flags); - list_del_init(&desc->list); - spin_unlock_irqrestore(&ichan->lock, flags); - tx->cookie = cookie; - ichan->dma_chan.cookie = cookie; - } - out: mutex_unlock(&ichan->chan_mutex); @@ -983,6 +944,8 @@ static int idmac_desc_alloc(struct idmac_channel *ichan, int n) memset(txd, 0, sizeof(*txd)); dma_async_tx_descriptor_init(txd, &ichan->dma_chan); txd->tx_submit = idmac_tx_submit; + txd->chan = &ichan->dma_chan; + INIT_LIST_HEAD(&txd->tx_list); list_add(&desc->list, &ichan->free_list); @@ -1198,24 +1161,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, return 0; } -static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan, - struct idmac_tx_desc **desc, struct scatterlist *sg) -{ - struct scatterlist *sgnew = sg ? 
sg_next(sg) : NULL; - - if (sgnew) - /* next sg-element in this list */ - return sgnew; - - if ((*desc)->list.next == &ichan->queue) - /* No more descriptors on the queue */ - return NULL; - - /* Fetch next descriptor */ - *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list); - return (*desc)->sg; -} - /* * We have several possibilities here: * current BUF next BUF @@ -1231,46 +1176,23 @@ static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan, static irqreturn_t idmac_interrupt(int irq, void *dev_id) { struct idmac_channel *ichan = dev_id; - struct device *dev = &ichan->dma_chan.dev->device; unsigned int chan_id = ichan->dma_chan.chan_id; struct scatterlist **sg, *sgnext, *sgnew = NULL; /* Next transfer descriptor */ - struct idmac_tx_desc *desc, *descnew; + struct idmac_tx_desc *desc = NULL, *descnew; dma_async_tx_callback callback; void *callback_param; bool done = false; - u32 ready0, ready1, curbuf, err; - unsigned long flags; + u32 ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY), + ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY), + curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ - dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer); - - spin_lock_irqsave(&ipu_data.lock, flags); - - ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); - ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); - curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); - err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4); - - if (err & (1 << chan_id)) { - idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4); - spin_unlock_irqrestore(&ipu_data.lock, flags); - /* - * Doing this - * ichan->sg[0] = ichan->sg[1] = NULL; - * you can force channel re-enable on the next tx_submit(), but - * this is dirty - think about descriptors with multiple - * sg elements. 
- */ - dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n", - chan_id, ready0, ready1, curbuf); - return IRQ_HANDLED; - } - spin_unlock_irqrestore(&ipu_data.lock, flags); - + pr_debug("IDMAC irq %d\n", irq); /* Other interrupts do not interfere with this channel */ spin_lock(&ichan->lock); + if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && ((curbuf >> chan_id) & 1) == ichan->active_buffer)) { int i = 100; @@ -1285,23 +1207,19 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) if (!i) { spin_unlock(&ichan->lock); - dev_dbg(dev, + dev_dbg(ichan->dma_chan.device->dev, "IRQ on active buffer on channel %x, active " "%d, ready %x, %x, current %x!\n", chan_id, ichan->active_buffer, ready0, ready1, curbuf); return IRQ_NONE; - } else - dev_dbg(dev, - "Buffer deactivated on channel %x, active " - "%d, ready %x, %x, current %x, rest %d!\n", chan_id, - ichan->active_buffer, ready0, ready1, curbuf, i); + } } if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || (!ichan->active_buffer && (ready0 >> chan_id) & 1) )) { spin_unlock(&ichan->lock); - dev_dbg(dev, + dev_dbg(ichan->dma_chan.device->dev, "IRQ with active buffer still ready on channel %x, " "active %d, ready %x, %x!\n", chan_id, ichan->active_buffer, ready0, ready1); @@ -1309,9 +1227,8 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) } if (unlikely(list_empty(&ichan->queue))) { - ichan->sg[ichan->active_buffer] = NULL; spin_unlock(&ichan->lock); - dev_err(dev, + dev_err(ichan->dma_chan.device->dev, "IRQ without queued buffers on channel %x, active %d, " "ready %x, %x!\n", chan_id, ichan->active_buffer, ready0, ready1); @@ -1326,44 +1243,40 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) sg = &ichan->sg[ichan->active_buffer]; sgnext = ichan->sg[!ichan->active_buffer]; - if (!*sg) { - spin_unlock(&ichan->lock); - return IRQ_HANDLED; - } - - desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); - descnew = desc; - - dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", - irq, sg_dma_address(*sg), sgnext ? 
sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); - - /* Find the descriptor of sgnext */ - sgnew = idmac_sg_next(ichan, &descnew, *sg); - if (sgnext != sgnew) - dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew); - /* * if sgnext == NULL sg must be the last element in a scatterlist and * queue must be empty */ if (unlikely(!sgnext)) { - if (!WARN_ON(sg_next(*sg))) - dev_dbg(dev, "Underrun on channel %x\n", chan_id); - ichan->sg[!ichan->active_buffer] = sgnew; - - if (unlikely(sgnew)) { - ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer); + if (unlikely(sg_next(*sg))) { + dev_err(ichan->dma_chan.device->dev, + "Broken buffer-update locking on channel %x!\n", + chan_id); + /* We'll let the user catch up */ } else { - spin_lock_irqsave(&ipu_data.lock, flags); + /* Underrun */ ipu_ic_disable_task(&ipu_data, chan_id); - spin_unlock_irqrestore(&ipu_data.lock, flags); + dev_dbg(ichan->dma_chan.device->dev, + "Underrun on channel %x\n", chan_id); ichan->status = IPU_CHANNEL_READY; /* Continue to check for complete descriptor */ } } - /* Calculate and submit the next sg element */ - sgnew = idmac_sg_next(ichan, &descnew, sgnew); + desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); + + /* First calculate and submit the next sg element */ + if (likely(sgnext)) + sgnew = sg_next(sgnext); + + if (unlikely(!sgnew)) { + /* Start a new scatterlist, if any queued */ + if (likely(desc->list.next != &ichan->queue)) { + descnew = list_entry(desc->list.next, + struct idmac_tx_desc, list); + sgnew = &descnew->sg[0]; + } + } if (unlikely(!sg_next(*sg)) || !sgnext) { /* @@ -1376,13 +1289,17 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) *sg = sgnew; - if (likely(sgnew) && - ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { - callback = desc->txd.callback; - callback_param = desc->txd.callback_param; - spin_unlock(&ichan->lock); - callback(callback_param); - spin_lock(&ichan->lock); + if (likely(sgnew)) { + int ret; + + ret = ipu_update_channel_buffer(chan_id, ichan->active_buffer, + sg_dma_address(*sg)); + if (ret < 0) + dev_err(ichan->dma_chan.device->dev, + "Failed to update buffer on channel %x buffer %d!\n", + chan_id, ichan->active_buffer); + else + ipu_select_buffer(chan_id, ichan->active_buffer); } /* Flip the active buffer - even if update above failed */ @@ -1410,20 +1327,13 @@ static void ipu_gc_tasklet(unsigned long arg) struct idmac_channel *ichan = ipu->channel + i; struct idmac_tx_desc *desc; unsigned long flags; - struct scatterlist *sg; - int j, k; + int j; for (j = 0; j < ichan->n_tx_desc; j++) { desc = ichan->desc + j; spin_lock_irqsave(&ichan->lock, flags); if (async_tx_test_ack(&desc->txd)) { list_move(&desc->list, &ichan->free_list); - for_each_sg(desc->sg, sg, desc->sg_len, k) { - if (ichan->sg[0] == sg) - ichan->sg[0] = NULL; - else if (ichan->sg[1] == sg) - ichan->sg[1] = NULL; - } async_tx_clear_ack(&desc->txd); } spin_unlock_irqrestore(&ichan->lock, flags); @@ -1431,7 +1341,13 @@ static void ipu_gc_tasklet(unsigned long arg) } } -/* Allocate and initialise a transfer descriptor. */ +/* + * At the time .device_alloc_chan_resources() method is called, we cannot know, + * whether the client will accept the channel. Thus we must only check, if we + * can satisfy client's request but the only real criterion to verify, whether + * the client has accepted our offer is the client_count. That's why we have to + * perform the rest of our allocation tasks on the first call to this function. 
+ */ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long tx_flags) @@ -1442,8 +1358,8 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan unsigned long flags; /* We only can handle these three channels so far */ - if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 && - chan->chan_id != IDMAC_IC_7) + if (ichan->dma_chan.chan_id != IDMAC_SDC_0 && ichan->dma_chan.chan_id != IDMAC_SDC_1 && + ichan->dma_chan.chan_id != IDMAC_IC_7) return NULL; if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { @@ -1484,7 +1400,7 @@ static void idmac_issue_pending(struct dma_chan *chan) /* This is not always needed, but doesn't hurt either */ spin_lock_irqsave(&ipu->lock, flags); - ipu_select_buffer(chan->chan_id, ichan->active_buffer); + ipu_select_buffer(ichan->dma_chan.chan_id, ichan->active_buffer); spin_unlock_irqrestore(&ipu->lock, flags); /* @@ -1516,7 +1432,8 @@ static void __idmac_terminate_all(struct dma_chan *chan) struct idmac_tx_desc *desc = ichan->desc + i; if (list_empty(&desc->list)) /* Descriptor was prepared, but not submitted */ - list_add(&desc->list, &ichan->free_list); + list_add(&desc->list, + &ichan->free_list); async_tx_clear_ack(&desc->txd); } @@ -1541,28 +1458,6 @@ static void idmac_terminate_all(struct dma_chan *chan) mutex_unlock(&ichan->chan_mutex); } -#ifdef DEBUG -static irqreturn_t ic_sof_irq(int irq, void *dev_id) -{ - struct idmac_channel *ichan = dev_id; - printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", - irq, ichan->dma_chan.chan_id); - disable_irq(irq); - return IRQ_HANDLED; -} - -static irqreturn_t ic_eof_irq(int irq, void *dev_id) -{ - struct idmac_channel *ichan = dev_id; - printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", - irq, ichan->dma_chan.chan_id); - disable_irq(irq); - return IRQ_HANDLED; -} - -static int ic_sof = -EINVAL, ic_eof = -EINVAL; -#endif - static int idmac_alloc_chan_resources(struct dma_chan *chan) { struct idmac_channel *ichan = to_idmac_chan(chan); @@ -1576,49 +1471,31 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) chan->cookie = 1; ichan->completed = -ENXIO; - ret = ipu_irq_map(chan->chan_id); + ret = ipu_irq_map(ichan->dma_chan.chan_id); if (ret < 0) goto eimap; ichan->eof_irq = ret; - - /* - * Important to first disable the channel, because maybe someone - * used it before us, e.g., the bootloader - */ - ipu_disable_channel(idmac, ichan, true); - - ret = ipu_init_channel(idmac, ichan); - if (ret < 0) - goto eichan; - ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, ichan->eof_name, ichan); if (ret < 0) goto erirq; -#ifdef DEBUG - if (chan->chan_id == IDMAC_IC_7) { - ic_sof = ipu_irq_map(69); - if (ic_sof > 0) - request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); - ic_eof = ipu_irq_map(70); - if (ic_eof > 0) - request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); - } -#endif + ret = ipu_init_channel(idmac, ichan); + if (ret < 0) + goto eichan; ichan->status = IPU_CHANNEL_INITIALIZED; - dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n", - chan->chan_id, ichan->eof_irq); + dev_dbg(&ichan->dma_chan.dev->device, "Found channel 0x%x, irq %d\n", + ichan->dma_chan.chan_id, ichan->eof_irq); return ret; -erirq: - ipu_uninit_channel(idmac, ichan); eichan: - ipu_irq_unmap(chan->chan_id); + free_irq(ichan->eof_irq, ichan); +erirq: + ipu_irq_unmap(ichan->dma_chan.chan_id); eimap: return ret; } @@ -1633,22 +1510,8 @@ static void 
idmac_free_chan_resources(struct dma_chan *chan) __idmac_terminate_all(chan); if (ichan->status > IPU_CHANNEL_FREE) { -#ifdef DEBUG - if (chan->chan_id == IDMAC_IC_7) { - if (ic_sof > 0) { - free_irq(ic_sof, ichan); - ipu_irq_unmap(69); - ic_sof = -EINVAL; - } - if (ic_eof > 0) { - free_irq(ic_eof, ichan); - ipu_irq_unmap(70); - ic_eof = -EINVAL; - } - } -#endif free_irq(ichan->eof_irq, ichan); - ipu_irq_unmap(chan->chan_id); + ipu_irq_unmap(ichan->dma_chan.chan_id); } ichan->status = IPU_CHANNEL_FREE; @@ -1710,7 +1573,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) dma_chan->device = &idmac->dma; dma_chan->cookie = 1; dma_chan->chan_id = i; - list_add_tail(&dma_chan->device_node, &dma->channels); + list_add_tail(&ichan->dma_chan.device_node, &dma->channels); } idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); @@ -1718,7 +1581,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) return dma_async_device_register(&idmac->dma); } -static void __exit ipu_idmac_exit(struct ipu *ipu) +static void ipu_idmac_exit(struct ipu *ipu) { int i; struct idmac *idmac = &ipu->idmac; @@ -1737,7 +1600,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu) * IPU common probe / remove */ -static int __init ipu_probe(struct platform_device *pdev) +static int ipu_probe(struct platform_device *pdev) { struct ipu_platform_data *pdata = pdev->dev.platform_data; struct resource *mem_ipu, *mem_ic; @@ -1837,7 +1700,7 @@ static int __init ipu_probe(struct platform_device *pdev) return ret; } -static int __exit ipu_remove(struct platform_device *pdev) +static int ipu_remove(struct platform_device *pdev) { struct ipu *ipu = platform_get_drvdata(pdev); @@ -1862,7 +1725,7 @@ static struct platform_driver ipu_platform_driver = { .name = "ipu-core", .owner = THIS_MODULE, }, - .remove = __exit_p(ipu_remove), + .remove = ipu_remove, }; static int __init ipu_init(void) diff --git a/trunk/drivers/dma/ipu/ipu_irq.c b/trunk/drivers/dma/ipu/ipu_irq.c index dd8ebc75b667..83f532cc767f 100644 --- a/trunk/drivers/dma/ipu/ipu_irq.c +++ b/trunk/drivers/dma/ipu/ipu_irq.c @@ -352,7 +352,7 @@ static struct irq_chip ipu_irq_chip = { }; /* Install the IRQ handler */ -int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) +int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev) { struct ipu_platform_data *pdata = dev->dev.platform_data; unsigned int irq, irq_base, i; diff --git a/trunk/drivers/dma/mv_xor.c b/trunk/drivers/dma/mv_xor.c index ddab94f51224..cb7f26fb9f18 100644 --- a/trunk/drivers/dma/mv_xor.c +++ b/trunk/drivers/dma/mv_xor.c @@ -632,6 +632,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->async_tx.tx_list); hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; diff --git a/trunk/fs/ocfs2/cluster/heartbeat.c b/trunk/fs/ocfs2/cluster/heartbeat.c index 04697ba7f73e..4f85eceab376 100644 --- a/trunk/fs/ocfs2/cluster/heartbeat.c +++ b/trunk/fs/ocfs2/cluster/heartbeat.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "heartbeat.h" #include "tcp.h" @@ -60,6 +61,11 @@ static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; static LIST_HEAD(o2hb_node_events); static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue); +#define O2HB_DEBUG_DIR "o2hb" +#define O2HB_DEBUG_LIVENODES "livenodes" +static struct dentry *o2hb_debug_dir; +static struct dentry 
*o2hb_debug_livenodes; + static LIST_HEAD(o2hb_all_regions); static struct o2hb_callback { @@ -905,7 +911,77 @@ static int o2hb_thread(void *data) return 0; } -void o2hb_init(void) +#ifdef CONFIG_DEBUG_FS +static int o2hb_debug_open(struct inode *inode, struct file *file) +{ + unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + char *buf = NULL; + int i = -1; + int out = 0; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + goto bail; + + o2hb_fill_node_map(map, sizeof(map)); + + while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) + out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); + out += snprintf(buf + out, PAGE_SIZE - out, "\n"); + + i_size_write(inode, out); + + file->private_data = buf; + + return 0; +bail: + return -ENOMEM; +} + +static int o2hb_debug_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static ssize_t o2hb_debug_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, + i_size_read(file->f_mapping->host)); +} +#else +static int o2hb_debug_open(struct inode *inode, struct file *file) +{ + return 0; +} +static int o2hb_debug_release(struct inode *inode, struct file *file) +{ + return 0; +} +static ssize_t o2hb_debug_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + return 0; +} +#endif /* CONFIG_DEBUG_FS */ + +static struct file_operations o2hb_debug_fops = { + .open = o2hb_debug_open, + .release = o2hb_debug_release, + .read = o2hb_debug_read, + .llseek = generic_file_llseek, +}; + +void o2hb_exit(void) +{ + if (o2hb_debug_livenodes) + debugfs_remove(o2hb_debug_livenodes); + if (o2hb_debug_dir) + debugfs_remove(o2hb_debug_dir); +} + +int o2hb_init(void) { int i; @@ -918,6 +994,24 @@ void o2hb_init(void) INIT_LIST_HEAD(&o2hb_node_events); memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap)); + + o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL); + if (!o2hb_debug_dir) { + mlog_errno(-ENOMEM); + return -ENOMEM; + } + + o2hb_debug_livenodes = debugfs_create_file(O2HB_DEBUG_LIVENODES, + S_IFREG|S_IRUSR, + o2hb_debug_dir, NULL, + &o2hb_debug_fops); + if (!o2hb_debug_livenodes) { + mlog_errno(-ENOMEM); + debugfs_remove(o2hb_debug_dir); + return -ENOMEM; + } + + return 0; } /* if we're already in a callback then we're already serialized by the sem */ diff --git a/trunk/fs/ocfs2/cluster/heartbeat.h b/trunk/fs/ocfs2/cluster/heartbeat.h index e511339886b3..2f1649253b49 100644 --- a/trunk/fs/ocfs2/cluster/heartbeat.h +++ b/trunk/fs/ocfs2/cluster/heartbeat.h @@ -75,7 +75,8 @@ void o2hb_unregister_callback(const char *region_uuid, struct o2hb_callback_func *hc); void o2hb_fill_node_map(unsigned long *map, unsigned bytes); -void o2hb_init(void); +void o2hb_exit(void); +int o2hb_init(void); int o2hb_check_node_heartbeating(u8 node_num); int o2hb_check_node_heartbeating_from_callback(u8 node_num); int o2hb_check_local_node_heartbeating(void); diff --git a/trunk/fs/ocfs2/cluster/nodemanager.c b/trunk/fs/ocfs2/cluster/nodemanager.c index 70e8fa9e2539..7ee6188bc79a 100644 --- a/trunk/fs/ocfs2/cluster/nodemanager.c +++ b/trunk/fs/ocfs2/cluster/nodemanager.c @@ -881,6 +881,7 @@ static void __exit exit_o2nm(void) o2cb_sys_shutdown(); o2net_exit(); + o2hb_exit(); } static int __init init_o2nm(void) @@ -889,11 +890,13 @@ static int __init init_o2nm(void) cluster_print_version(); - o2hb_init(); + ret = o2hb_init(); + if (ret) + goto out; ret = o2net_init(); if (ret) - goto out; + 
goto out_o2hb; ret = o2net_register_hb_callbacks(); if (ret) @@ -916,6 +919,8 @@ static int __init init_o2nm(void) o2net_unregister_hb_callbacks(); out_o2net: o2net_exit(); +out_o2hb: + o2hb_exit(); out: return ret; } diff --git a/trunk/include/linux/async_tx.h b/trunk/include/linux/async_tx.h index 5fc2ef8d97fa..45f6297821bd 100644 --- a/trunk/include/linux/async_tx.h +++ b/trunk/include/linux/async_tx.h @@ -21,15 +21,6 @@ #include #include -/* on architectures without dma-mapping capabilities we need to ensure - * that the asynchronous path compiles away - */ -#ifdef CONFIG_HAS_DMA -#define __async_inline -#else -#define __async_inline __always_inline -#endif - /** * dma_chan_ref - object used to manage dma channels received from the * dmaengine core. diff --git a/trunk/include/linux/dmaengine.h b/trunk/include/linux/dmaengine.h index 2e2aa3df170c..1956c8d46d32 100644 --- a/trunk/include/linux/dmaengine.h +++ b/trunk/include/linux/dmaengine.h @@ -23,6 +23,9 @@ #include #include +#include +#include +#include #include /** @@ -202,7 +205,6 @@ struct dma_async_tx_descriptor { /** * struct dma_device - info on the entity supplying DMA services * @chancnt: how many DMA channels are supported - * @privatecnt: how many DMA channels are requested by dma_request_channel * @channels: the list of struct dma_chan * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags @@ -225,7 +227,6 @@ struct dma_async_tx_descriptor { struct dma_device { unsigned int chancnt; - unsigned int privatecnt; struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; @@ -290,24 +291,6 @@ static inline void net_dmaengine_put(void) } #endif -#ifdef CONFIG_ASYNC_TX_DMA -#define async_dmaengine_get() dmaengine_get() -#define async_dmaengine_put() dmaengine_put() -#define async_dma_find_channel(type) dma_find_channel(type) -#else -static inline void async_dmaengine_get(void) -{ -} -static inline void async_dmaengine_put(void) -{ -} -static inline struct dma_chan * -async_dma_find_channel(enum dma_transaction_type type) -{ - return NULL; -} -#endif - dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, @@ -354,13 +337,6 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) set_bit(tx_type, dstp->bits); } -#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) -static inline void -__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) -{ - clear_bit(tx_type, dstp->bits); -} - #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) static inline void __dma_cap_zero(dma_cap_mask_t *dstp) { diff --git a/trunk/include/linux/dw_dmac.h b/trunk/include/linux/dw_dmac.h index c8aad713a046..d797dde247f7 100644 --- a/trunk/include/linux/dw_dmac.h +++ b/trunk/include/linux/dw_dmac.h @@ -74,23 +74,4 @@ struct dw_dma_slave { #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ -/* DMA API extensions */ -struct dw_cyclic_desc { - struct dw_desc **desc; - unsigned long periods; - void (*period_callback)(void *param); - void *period_callback_param; -}; - -struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, - dma_addr_t buf_addr, size_t buf_len, size_t period_len, - enum dma_data_direction direction); -void dw_dma_cyclic_free(struct dma_chan *chan); -int dw_dma_cyclic_start(struct dma_chan *chan); -void dw_dma_cyclic_stop(struct dma_chan 
*chan); - -dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); - -dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); - #endif /* DW_DMAC_H */
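---

Aside (not part of the patch above): a minimal sketch of how a client drives the polling-style memcpy path that this change reverts dmatest to, using only interfaces visible in the diff (dmaengine_get()/dma_find_channel(), dma_async_memcpy_buf_to_buf(), dma_async_memcpy_issue_pending(), dma_async_memcpy_complete()). Error handling is trimmed and the helper name is made up for illustration; it assumes a DMA_MEMCPY-capable channel has been registered.

/* illustrative only -- not taken from the patch */
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int example_dma_memcpy(void *dst, void *src, size_t len)
{
	struct dma_chan *chan;
	dma_cookie_t cookie;
	enum dma_status status;

	dmaengine_get();			/* hold a reference on the dmaengine core */
	chan = dma_find_channel(DMA_MEMCPY);	/* per-cpu channel, valid while the ref is held */
	if (!chan) {
		dmaengine_put();
		return -ENODEV;
	}

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (dma_submit_error(cookie)) {
		dmaengine_put();
		return -EIO;
	}

	dma_async_memcpy_issue_pending(chan);

	/* poll for completion, as the reverted dmatest loop does */
	do {
		msleep(1);
		status = dma_async_memcpy_complete(chan, cookie, NULL, NULL);
	} while (status == DMA_IN_PROGRESS);

	dmaengine_put();
	return status == DMA_SUCCESS ? 0 : -EIO;
}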