From 382333b44cc0f977f258d60a249d9fe9b32c29bb Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Sun, 17 May 2009 18:55:18 +0300
Subject: [PATCH]

--- yaml ---
r: 147047
b: refs/heads/master
c: 3a5a39276d2a32b05b1ee25b384516805b17cf87
h: refs/heads/master
i:
  147045: 189ae277b439b8a081cffe09aa3494565b81ef14
  147043: 28aaa5fd112ae3ccd7f5c35d7cb002cb36b71d38
  147039: 2e6e0b01e42796afdccc4c9c46d4649ce01c4dc7
v: v3
---
 [refs]                |  2 +-
 trunk/block/blk-map.c | 12 ++++++++++--
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/[refs] b/[refs]
index c0044ea4873b..8ec13f54ffb8 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: b2858d7d1639c04ca3c54988d76c5f7300b76f1c
+refs/heads/master: 3a5a39276d2a32b05b1ee25b384516805b17cf87
diff --git a/trunk/block/blk-map.c b/trunk/block/blk-map.c
index 56082bea4504..caa05a667743 100644
--- a/trunk/block/blk-map.c
+++ b/trunk/block/blk-map.c
@@ -282,7 +282,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  *
  * Description:
  *    Data will be mapped directly if possible. Otherwise a bounce
- *    buffer is used.
+ *    buffer is used. Can be called multiple times to append multiple
+ *    buffers.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
@@ -290,6 +291,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	int reading = rq_data_dir(rq) == READ;
 	int do_copy = 0;
 	struct bio *bio;
+	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
@@ -311,7 +313,13 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	blk_rq_bio_prep(q, rq, bio);
+	ret = blk_rq_append_bio(q, rq, bio);
+	if (unlikely(ret)) {
+		/* request is too big */
+		bio_put(bio);
+		return ret;
+	}
+
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = NULL;
 	return 0;
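
A short usage sketch of what the new append semantics permit (not part of
the patch; the helper name, the two buffers, and their setup are
hypothetical). Because blk_rq_map_kern() now goes through
blk_rq_append_bio() instead of blk_rq_bio_prep(), a second call on the
same request appends another kernel buffer rather than replacing the
first mapping:

	/*
	 * Hypothetical caller: map a header and a payload into one
	 * request.  Assumes q and rq were allocated and initialized
	 * elsewhere.
	 */
	static int map_two_buffers(struct request_queue *q, struct request *rq,
				   void *hdr, unsigned int hdr_len,
				   void *payload, unsigned int payload_len)
	{
		int ret;

		/* First call starts the request's bio chain. */
		ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
		if (ret)
			return ret;

		/*
		 * Second call appends to the existing chain; it fails
		 * if the combined size exceeds the queue's limits.
		 */
		return blk_rq_map_kern(q, rq, payload, payload_len, GFP_KERNEL);
	}

Per the hunk above, a failed append already puts the bio and returns the
error, so the caller only needs to check the return value.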