Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 322218
b: refs/heads/master
c: 963ab9e
h: refs/heads/master
v: v3
  • Loading branch information
Asias He authored and Jens Axboe committed Aug 2, 2012
1 parent 001e350 commit 8efe1d0
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 53362a05ae683e12a20d9ffdf58a88094a0bed9d
refs/heads/master: 963ab9e5da95c654bb3ab937cc478de4f7088a96
80 changes: 45 additions & 35 deletions trunk/block/blk-merge.c
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;
}

/*
 * Map one bio_vec into the scatterlist being built for a request.
 *
 * Either extends the current scatterlist entry (*sg) when the queue
 * allows clustering and this bvec can be merged with the previous one,
 * or starts a fresh entry and bumps *nsegs.  *bvprv is updated to point
 * at this bvec so the next call can test mergeability against it.
 */
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		struct scatterlist *sglist, struct bio_vec **bvprv,
		struct scatterlist **sg, int *nsegs, int *cluster)
{
	int len = bvec->bv_len;

	/*
	 * Merge into the current sg entry only if: clustering is on, a
	 * previous bvec exists, the combined length still fits the
	 * queue's max segment size, and the two bvecs are physically
	 * contiguous without crossing a segment boundary.  Short-circuit
	 * evaluation preserves the original check order.
	 */
	if (*bvprv && *cluster &&
	    (*sg)->length + len <= queue_max_segment_size(q) &&
	    BIOVEC_PHYS_MERGEABLE(*bvprv, bvec) &&
	    BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) {
		(*sg)->length += len;
	} else {
		if (!*sg) {
			/* First segment: start at the head of the list. */
			*sg = sglist;
		} else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			(*sg)->page_link &= ~0x02;
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, len, bvec->bv_offset);
		(*nsegs)++;
	}

	*bvprv = bvec;
}

/*
* map a request to scatterlist, return number of sg entries setup. Caller
* must make sure sg can hold rq->nr_phys_segments entries
Expand All @@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
bvprv = NULL;
sg = NULL;
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;

if (bvprv && cluster) {
if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;

if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;

sg->length += nbytes;
} else {
new_segment:
if (!sg)
sg = sglist;
else {
/*
* If the driver previously mapped a shorter
* list, we could see a termination bit
* prematurely unless it fully inits the sg
* table on each mapping. We KNOW that there
* must be more entries here or the driver
* would be buggy, so force clear the
* termination bit to avoid doing a full
* sg_init_table() in drivers for each command.
*/
sg->page_link &= ~0x02;
sg = sg_next(sg);
}

sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
nsegs++;
}
bvprv = bvec;
__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
&nsegs, &cluster);
} /* segments in rq */


Expand Down

0 comments on commit 8efe1d0

Please sign in to comment.