Commit ae215bd

---
r: 77639
b: refs/heads/master
c: fa0ccd8
h: refs/heads/master
i:
  77637: b75c918
  77635: 996ac0a
  77631: 8d3c7ac
v: v3

James Bottomley authored and Jens Axboe committed Jan 28, 2008
1 parent d77bbe4 commit ae215bd
Showing 4 changed files with 79 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 5d84070ee0a433620c57e85dac7f82faaec5fbb3
+refs/heads/master: fa0ccd837e3dddb44c7db2f128a8bb7e4eabc21a
26 changes: 25 additions & 1 deletion trunk/block/elevator.c
@@ -741,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if (rq->cmd_flags & REQ_DONTPREP)
 			break;
 
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears; we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+			rq->nr_hw_segments++;
+		}
+
+		if (!q->prep_rq_fn)
+			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -754,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
 			 * avoid resource deadlock. REQ_STARTED will
 			 * prevent other fs requests from passing this one.
 			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+				--rq->nr_hw_segments;
+			}
+
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL) {
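The two hunks above maintain a single invariant: the drain segment is counted exactly once per request, no matter how many times the request passes through elv_next_request(). A minimal standalone sketch of that accounting (plain C with simplified, hypothetical types; not kernel code):

#include <stdbool.h>

/* Simplified stand-ins for struct request and the prep return codes. */
struct req {
	bool dontprep;    /* models REQ_DONTPREP */
	int nr_segments;  /* models nr_phys_segments/nr_hw_segments */
};

enum prep_ret { PREP_OK, PREP_DEFER };

/* One pass of the prep logic on a queue with a drain buffer configured;
 * returns true once the request is ready to be issued. */
static bool prep_pass(struct req *rq, enum prep_ret (*prep)(struct req *))
{
	if (rq->dontprep)
		return true;          /* already prepped: no second bump */

	rq->nr_segments++;            /* reserve room for the drain element */

	if (prep(rq) == PREP_OK) {
		rq->dontprep = true;  /* later passes skip the bump above */
		return true;
	}

	/* Deferred: roll back so the retry doesn't count the drain twice,
	 * unless prep itself set dontprep and so kept the reservation. */
	if (!rq->dontprep)
		rq->nr_segments--;
	return false;
}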
49 changes: 49 additions & 0 deletions trunk/block/ll_rw_blk.c
@@ -725,6 +725,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)

 EXPORT_SYMBOL(blk_queue_stack_limits);
 
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess DMA.
+ *
+ * @q: the request queue for the device
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer.  They have to have a
+ * real area of memory to transfer it into.  The use case for this is
+ * ATAPI devices in DMA mode.  If the packet command causes a transfer
+ * bigger than the transfer size, some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer.  What this API
+ * does is adjust the queue so that @buf is always silently appended
+ * to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer.  If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q, void *buf,
+			unsigned int size)
+{
+	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+		return -EINVAL;
+	/* make room for appending the drain */
+	--q->max_hw_segments;
+	--q->max_phys_segments;
+	q->dma_drain_buffer = buf;
+	q->dma_drain_size = size;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
+
 /**
  * blk_queue_segment_boundary - set boundary rules for segment merging
  * @q: the request queue for the device
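As a usage sketch (hypothetical driver code, not part of this commit): a driver whose controller handles at most 16 scatter/gather entries could wire up the drain at queue-init time as below. Because the segment limits are set before blk_queue_dma_drain(), the full hardware limit is passed and the routine reserves the extra segment itself.

/* Hypothetical queue setup; mydev and drain_buf are illustrative only. */
static int mydev_init_queue(struct mydev *dev, struct request_queue *q)
{
	/* Full hardware limit here: blk_queue_dma_drain() decrements
	 * both limits by one to make room for the drain element.  If
	 * these setters were called after blk_queue_dma_drain(), 15
	 * would have to be passed instead, per the note above. */
	blk_queue_max_hw_segments(q, 16);
	blk_queue_max_phys_segments(q, 16);

	/* drain_buf: one physically contiguous page owned by the driver */
	return blk_queue_dma_drain(q, dev->drain_buf, PAGE_SIZE);
}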
@@ -1379,6 +1418,16 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		bvprv = bvec;
 	} /* segments in rq */
 
+	if (q->dma_drain_size) {
+		sg->page_link &= ~0x02;
+		sg = sg_next(sg);
+		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+			    q->dma_drain_size,
+			    ((unsigned long)q->dma_drain_buffer) &
+			    (PAGE_SIZE - 1));
+		nsegs++;
+	}
+
 	if (sg)
 		sg_mark_end(sg);
 
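A note on the hunk above: clearing bit 0x02 in sg->page_link drops the end-of-list mark from what had been the final scatterlist entry, so sg_next() can step onto the reserved extra slot before sg_mark_end() re-terminates the extended list. On the consumer side, a hedged sketch (hypothetical mydev_* names) of mapping a request on a drain-enabled queue:

/* Hypothetical low-level-driver mapping path; dev->sg_table is sized
 * for the advertised segment limit plus one drain entry. */
static int mydev_map_request(struct request_queue *q, struct request *rq,
			     struct mydev *dev)
{
	int nsegs;

	/* With q->dma_drain_size set, the returned count includes the
	 * drain element, matching the extra segment reserved for the
	 * request in elv_next_request(). */
	nsegs = blk_rq_map_sg(q, rq, dev->sg_table);

	/* Program every entry; excess ATAPI transfer beyond the real
	 * data lands harmlessly in the drain buffer. */
	return mydev_program_sglist(dev, dev->sg_table, nsegs);
}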
4 changes: 4 additions & 0 deletions trunk/include/linux/blkdev.h
@@ -429,6 +429,8 @@ struct request_queue
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
+	void			*dma_drain_buffer;
+	unsigned int		dma_drain_size;
 	unsigned int		dma_alignment;
 
 	struct blk_queue_tag	*queue_tags;
@@ -760,6 +762,8 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
+			       unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
