From 876471ac6748641afd1f3e7f4ad5e49f6044ffce Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 5 Aug 2005 13:28:11 -0700
Subject: [PATCH]

--- yaml ---
r: 5674
b: refs/heads/master
c: ba02508248e90a9d696aebd18b48a3290235b53c
h: refs/heads/master
v: v3
---
 [refs]                          |  2 +-
 trunk/drivers/block/ll_rw_blk.c | 18 +++++++++++++++---
 trunk/include/linux/blkdev.h    |  1 +
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/[refs] b/[refs]
index 40e29d263af3..7b494b27f86a 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c7546f8f03f5a4fa612605b6be930234d6026860
+refs/heads/master: ba02508248e90a9d696aebd18b48a3290235b53c
diff --git a/trunk/drivers/block/ll_rw_blk.c b/trunk/drivers/block/ll_rw_blk.c
index 692a5fced76e..3c818544475e 100644
--- a/trunk/drivers/block/ll_rw_blk.c
+++ b/trunk/drivers/block/ll_rw_blk.c
@@ -719,7 +719,7 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
 
-	if (unlikely(bqt == NULL || tag >= bqt->max_depth))
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
 		return NULL;
 
 	return bqt->tag_index[tag];
@@ -798,6 +798,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	memset(tag_index, 0, depth * sizeof(struct request *));
 	memset(tag_map, 0, nr_ulongs * sizeof(unsigned long));
 
+	tags->real_max_depth = depth;
 	tags->max_depth = depth;
 	tags->tag_index = tag_index;
 	tags->tag_map = tag_map;
@@ -871,12 +872,23 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 	if (!bqt)
 		return -ENXIO;
 
+	/*
+	 * if we already have large enough real_max_depth.  just
+	 * adjust max_depth.  *NOTE* as requests with tag value
+	 * between new_depth and real_max_depth can be in-flight, tag
+	 * map can not be shrunk blindly here.
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
 	/*
 	 * save the old state info, so we can copy it back
 	 */
 	tag_index = bqt->tag_index;
 	tag_map = bqt->tag_map;
-	max_depth = bqt->max_depth;
+	max_depth = bqt->real_max_depth;
 
 	if (init_tag_map(q, bqt, new_depth))
 		return -ENOMEM;
@@ -913,7 +925,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->max_depth))
+	if (unlikely(tag >= bqt->real_max_depth))
 		/*
 		 * This can happen after tag depth has been reduced.
 		 * FIXME: how about a warning or info message here?
diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h
index 0881b5cdee3d..19bd8e7e11bf 100644
--- a/trunk/include/linux/blkdev.h
+++ b/trunk/include/linux/blkdev.h
@@ -301,6 +301,7 @@ struct blk_queue_tag {
 	struct list_head busy_list;	/* fifo list of busy tags */
 	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
+	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
 };
 
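
For reference, the resize rule this patch introduces can be modelled outside the kernel. The sketch below is a minimal userspace illustration, not the block-layer API: tag_capacity stands in for real_max_depth, tag_limit for max_depth, and the struct, function names, and simplified error handling are assumptions for demonstration only.

/*
 * Userspace sketch (hypothetical names) of the two-field tag depth
 * bookkeeping: tag_limit is what is advertised to the device,
 * tag_capacity is how many slots are actually allocated.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tag_map {
	void **index;      /* one slot per possible tag, like tag_index */
	int tag_limit;     /* depth advertised to the device (max_depth) */
	int tag_capacity;  /* slots actually allocated (real_max_depth) */
};

static int tag_map_resize(struct tag_map *m, int new_depth)
{
	void **new_index;

	/*
	 * Shrinking, or growing within the existing allocation, only
	 * moves the advertised limit: tags between new_depth and
	 * tag_capacity may still be in flight, so the array is kept.
	 */
	if (new_depth <= m->tag_capacity) {
		m->tag_limit = new_depth;
		return 0;
	}

	/* Growing past the allocation: reallocate and copy the old slots. */
	new_index = calloc(new_depth, sizeof(*new_index));
	if (!new_index)
		return -1;
	memcpy(new_index, m->index, m->tag_capacity * sizeof(*new_index));
	free(m->index);
	m->index = new_index;
	m->tag_limit = new_depth;
	m->tag_capacity = new_depth;
	return 0;
}

int main(void)
{
	struct tag_map m = { calloc(64, sizeof(void *)), 64, 64 };

	tag_map_resize(&m, 16);   /* shrink: limit 16, capacity stays 64 */
	printf("limit=%d capacity=%d\n", m.tag_limit, m.tag_capacity);
	tag_map_resize(&m, 128);  /* grow: reallocates to 128 */
	printf("limit=%d capacity=%d\n", m.tag_limit, m.tag_capacity);
	free(m.index);
	return 0;
}

Keeping the allocation on shrink is the point of the patch: requests tagged above the new depth may still complete, which is why blk_queue_find_tag() and blk_queue_end_tag() are switched to check real_max_depth rather than max_depth.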