From ae215bdf716d4dcb9589e2533cffd6e5db8393bb Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Thu, 10 Jan 2008 11:30:36 -0600
Subject: [PATCH]

--- yaml ---
r: 77639
b: refs/heads/master
c: fa0ccd837e3dddb44c7db2f128a8bb7e4eabc21a
h: refs/heads/master
i:
  77637: b75c918eca4f2cbd8ee61f6bf65661628d4cea5f
  77635: 996ac0a6abfc04c0621323c8b5810df01bfa47a7
  77631: 8d3c7ac3e8b79e634978aaa00d36eeac41652e11
v: v3
---
 [refs]                       |  2 +-
 trunk/block/elevator.c       | 26 +++++++++++++++++++++++++-
 trunk/block/ll_rw_blk.c      | 49 ++++++++++++++++++++++++++++++++++++++++++++
 trunk/include/linux/blkdev.h |  4 ++++
 4 files changed, 79 insertions(+), 2 deletions(-)

diff --git a/[refs] b/[refs]
index d2825a448df0..f58482007187 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5d84070ee0a433620c57e85dac7f82faaec5fbb3
+refs/heads/master: fa0ccd837e3dddb44c7db2f128a8bb7e4eabc21a
diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c
index f9736fbdab03..8cd5775acd7a 100644
--- a/trunk/block/elevator.c
+++ b/trunk/block/elevator.c
@@ -741,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
 			q->boundary_rq = NULL;
 		}

-		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * Make sure space for the drain appears.  We
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle.
+			 */
+			rq->nr_phys_segments++;
+			rq->nr_hw_segments++;
+		}
+
+		if (!q->prep_rq_fn)
 			break;

 		ret = q->prep_rq_fn(q, rq);
@@ -754,6 +768,16 @@
 			 * avoid resource deadlock.  REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * Remove the space for the drain we added
+				 * so that we don't add it again.
+				 */
+				--rq->nr_phys_segments;
+				--rq->nr_hw_segments;
+			}
+
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
diff --git a/trunk/block/ll_rw_blk.c b/trunk/block/ll_rw_blk.c
index 3d0422f48453..768987dc2697 100644
--- a/trunk/block/ll_rw_blk.c
+++ b/trunk/block/ll_rw_blk.c
@@ -725,6 +725,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)

 EXPORT_SYMBOL(blk_queue_stack_limits);

+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess DMA
+ *
+ * @q: the request queue for the device
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the requested transfer size, some HBAs will lock up if
+ * there aren't DMA elements to contain the excess transfer.  What
+ * this API does is adjust the queue so that @buf is always silently
+ * appended to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+		unsigned int size)
+{
+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	--q->max_hw_segments;
+	--q->max_phys_segments;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
 /**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q: the request queue for the device
@@ -1379,6 +1418,16 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		bvprv = bvec;
 	} /* segments in rq */

+	if (q->dma_drain_size) {
+		sg->page_link &= ~0x02;	/* clear the end marker */
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+	}
+
 	if (sg)
 		sg_mark_end(sg);

diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h
index c7a3ab575c24..e542c8fd9215 100644
--- a/trunk/include/linux/blkdev.h
+++ b/trunk/include/linux/blkdev.h
@@ -429,6 +429,8 @@ struct request_queue
 	unsigned int		max_segment_size;

 	unsigned long		seg_boundary_mask;
+	void			*dma_drain_buffer;
+	unsigned int		dma_drain_size;
 	unsigned int		dma_alignment;

 	struct blk_queue_tag	*queue_tags;
@@ -760,6 +762,8 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+			       unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
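
Usage sketch (not part of the patch): with this API in place, a driver for
hardware that over-transfers would allocate one physically contiguous drain
area at queue setup time and register it before adjusting any other segment
limits.  The function name, drain size, and error handling below are
illustrative assumptions, not code from this commit:

	/* hypothetical driver init path, for illustration only */
	static int mydrv_setup_drain(struct request_queue *q)
	{
		void *buf;

		/* one page of physically contiguous memory to soak up the excess */
		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/*
		 * blk_queue_dma_drain() reserves one entry in
		 * max_hw_segments/max_phys_segments for the drain and
		 * returns -EINVAL if the queue can't spare a segment.
		 */
		return blk_queue_dma_drain(q, buf, PAGE_SIZE);
	}

After this call, blk_rq_map_sg() silently appends the drain buffer as a
final scatterlist element whenever q->dma_drain_size is set, and
elv_next_request() bumps nr_phys_segments/nr_hw_segments so the extra
element is accounted for before the prep function runs (and un-bumps them
again on BLKPREP_DEFER so the adjustment isn't applied twice).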