Skip to content

Commit

Permalink
blk-mq: handle failure path for initializing hctx
Browse files Browse the repository at this point in the history
Failure to initialize one hctx isn't handled, so this patch
introduces blk_mq_init_hctx() and its teardown counterpart to handle
it explicitly. This also makes the code cleaner.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
  • Loading branch information
Ming Lei authored and Jens Axboe committed Sep 25, 2014
1 parent fe05252 commit 08e98fc
Showing 1 changed file with 69 additions and 45 deletions.
114 changes: 69 additions & 45 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -1509,6 +1509,20 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
return NOTIFY_OK;
}

/*
 * Tear down a single hardware queue context.
 *
 * Releases, in reverse order of setup, everything the init path
 * allocated for @hctx: marks the tags idle, invokes the driver's
 * optional exit hook, unregisters the CPU hotplug notifier, and
 * frees the per-hctx software-context array and bitmap.
 */
static void blk_mq_exit_hctx(struct request_queue *q,
		struct blk_mq_tag_set *set,
		struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	blk_mq_tag_idle(hctx);

	/* Let the driver free any per-hctx private state it created */
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);

	blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
	kfree(hctx->ctxs);
	blk_mq_free_bitmap(&hctx->ctx_map);
}

static void blk_mq_exit_hw_queues(struct request_queue *q,
struct blk_mq_tag_set *set, int nr_queue)
{
Expand All @@ -1518,17 +1532,8 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
queue_for_each_hw_ctx(q, hctx, i) {
if (i == nr_queue)
break;

blk_mq_tag_idle(hctx);

if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, i);

blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
kfree(hctx->ctxs);
blk_mq_free_bitmap(&hctx->ctx_map);
blk_mq_exit_hctx(q, set, hctx, i);
}

}

static void blk_mq_free_hw_queues(struct request_queue *q,
Expand All @@ -1543,53 +1548,72 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
}
}

static int blk_mq_init_hw_queues(struct request_queue *q,
struct blk_mq_tag_set *set)
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;
int node;

node = hctx->numa_node;
if (node == NUMA_NO_NODE)
node = hctx->numa_node = set->numa_node;

INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
hctx->queue_num = hctx_idx;
hctx->flags = set->flags;
hctx->cmd_size = set->cmd_size;

blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
blk_mq_register_cpu_notifier(&hctx->cpu_notifier);

hctx->tags = set->tags[hctx_idx];

/*
* Initialize hardware queues
* Allocate space for all possible cpus to avoid allocation at
* runtime
*/
queue_for_each_hw_ctx(q, hctx, i) {
int node;
hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
GFP_KERNEL, node);
if (!hctx->ctxs)
goto unregister_cpu_notifier;

node = hctx->numa_node;
if (node == NUMA_NO_NODE)
node = hctx->numa_node = set->numa_node;
if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
goto free_ctxs;

INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
hctx->queue = q;
hctx->queue_num = i;
hctx->flags = set->flags;
hctx->cmd_size = set->cmd_size;
hctx->nr_ctx = 0;

blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
blk_mq_register_cpu_notifier(&hctx->cpu_notifier);
if (set->ops->init_hctx &&
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;

hctx->tags = set->tags[i];
return 0;

/*
* Allocate space for all possible cpus to avoid allocation at
* runtime
*/
hctx->ctxs = kmalloc_node(nr_cpu_ids * sizeof(void *),
GFP_KERNEL, node);
if (!hctx->ctxs)
break;
free_bitmap:
blk_mq_free_bitmap(&hctx->ctx_map);
free_ctxs:
kfree(hctx->ctxs);
unregister_cpu_notifier:
blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);

if (blk_mq_alloc_bitmap(&hctx->ctx_map, node))
break;
return -1;
}

hctx->nr_ctx = 0;
static int blk_mq_init_hw_queues(struct request_queue *q,
struct blk_mq_tag_set *set)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;

if (set->ops->init_hctx &&
set->ops->init_hctx(hctx, set->driver_data, i))
/*
* Initialize hardware queues
*/
queue_for_each_hw_ctx(q, hctx, i) {
if (blk_mq_init_hctx(q, set, hctx, i))
break;
}

Expand Down

0 comments on commit 08e98fc

Please sign in to comment.