Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "The major part is an update to the NVMe driver, fixing various issues
  around surprise removal and hung controllers.  Most of that is from
  Keith, and parts are simple blk-mq fixes or exports/additions of minor
  functions to aid this effort, and parts are changes directly to the
  NVMe driver.

  Apart from the above, this contains:

   - Small blk-mq change from me, killing an unused member of the
     hardware queue structure.

   - Small fix from Ming Lei, fixing up a few drivers that didn't
     properly check for ERR_PTR() returns from blk_mq_init_queue()"
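
For the blk_mq_init_queue() item above: the function reports failure as an
ERR_PTR(), never as NULL, so drivers that test for NULL silently miss every
error. A minimal sketch of the corrected calling pattern (the mydev names
are illustrative, not from these patches):

#include <linux/blk-mq.h>
#include <linux/err.h>

struct mydev {
	struct blk_mq_tag_set tag_set;	/* assumed already set up */
	struct request_queue *queue;
};

static int mydev_init_queue(struct mydev *dev)
{
	struct request_queue *q;

	q = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(q))			/* a NULL check would miss this */
		return PTR_ERR(q);	/* propagate the real errno */

	dev->queue = q;
	return 0;
}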

* 'for-linus' of git://git.kernel.dk/linux-block:
  NVMe: Fix locking on abort handling
  NVMe: Start and stop h/w queues on reset
  NVMe: Command abort handling fixes
  NVMe: Admin queue removal handling
  NVMe: Reference count admin queue usage
  NVMe: Start all requests
  blk-mq: End unstarted requests on a dying queue
  blk-mq: Allow requests to never expire
  blk-mq: Add helper to abort requeued requests
  blk-mq: Let drivers cancel requeue_work
  blk-mq: Export if requests were started
  blk-mq: Wake tasks entering queue on dying
  blk-mq: get rid of ->cmd_size in the hardware queue
  block: fix checking return value of blk_mq_init_queue
  block: wake up waiters when a queue is marked dying
  NVMe: Fix double free irq
  blk-mq: Export freeze/unfreeze functions
  blk-mq: Exit queue on alloc failure
Linus Torvalds committed Jan 14, 2015
2 parents 188c901 + 7a509a6, commit 31238e6
Showing 11 changed files with 240 additions and 64 deletions.
block/blk-core.c: 20 additions & 1 deletion

@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
+void blk_set_queue_dying(struct request_queue *q)
+{
+	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+	if (q->mq_ops)
+		blk_mq_wake_waiters(q);
+	else {
+		struct request_list *rl;
+
+		blk_queue_for_each_rl(rl, q) {
+			if (rl->rq_pool) {
+				wake_up(&rl->wait[BLK_RW_SYNC]);
+				wake_up(&rl->wait[BLK_RW_ASYNC]);
+			}
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
 /**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	blk_set_queue_dying(q);
 	spin_lock_irq(lock);
 
 	/*

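The new export gives drivers a way to fail a wedged queue before tearing it
down. A hedged sketch of a surprise-removal path (the mydev names are
hypothetical, not from this commit):

/*
 * blk_set_queue_dying() marks the queue DYING and wakes every task
 * sleeping on the legacy request_list or on blk-mq tags, so waiters
 * can observe the dying state and fail instead of sleeping forever.
 */
static void mydev_surprise_remove(struct mydev *dev)
{
	blk_set_queue_dying(dev->queue);	/* unblock sleepers now */
	blk_cleanup_queue(dev->queue);		/* then tear it down */
}
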
block/blk-mq-tag.c: 10 additions & 4 deletions

@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 }
 
 /*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
  */
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 {
 	struct blk_mq_bitmap_tags *bt;
 	int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
 
 		wake_index = bt_index_inc(wake_index);
 	}
+
+	if (include_reserve) {
+		bt = &tags->breserved_tags;
+		if (waitqueue_active(&bt->bs[0].wait))
+			wake_up(&bt->bs[0].wait);
+	}
 }
 
 /*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 
 	atomic_dec(&tags->active_queues);
 
-	blk_mq_tag_wakeup_all(tags);
+	blk_mq_tag_wakeup_all(tags, false);
 }
 
 /*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
 	 * static and should never need resizing.
 	 */
 	bt_update_count(&tags->bitmap_tags, tdepth);
-	blk_mq_tag_wakeup_all(tags);
+	blk_mq_tag_wakeup_all(tags, false);
 	return 0;
 }
 

block/blk-mq-tag.h: 1 addition & 0 deletions

@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 
 enum {
 	BLK_MQ_TAG_CACHE_MIN = 1,

block/blk-mq.c: 69 additions & 6 deletions

@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
 		blk_mq_run_queues(q, false);
 	}
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
 	blk_mq_freeze_queue_wait(q);
 }
 
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
 
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
 		wake_up_all(&q->mq_freeze_wq);
 	}
 }
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+	unsigned int i;
+
+	queue_for_each_hw_ctx(q, hctx, i)
+		if (blk_mq_hw_queue_mapped(hctx))
+			blk_mq_tag_wakeup_all(hctx->tags, true);
+
+	/*
+	 * If we are called because the queue has now been marked as
+	 * dying, we need to ensure that processes currently waiting on
+	 * the queue are notified as well.
+	 */
+	wake_up_all(&q->mq_freeze_wq);
+}
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 		ctx = alloc_data.ctx;
 	}
 	blk_mq_put_ctx(ctx);
-	if (!rq)
+	if (!rq) {
+		blk_mq_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
+	}
 	return rq;
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+int blk_mq_request_started(struct request *rq)
+{
+	return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
 void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
 }
 EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+	cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
 	kblockd_schedule_work(&q->requeue_work);
 }
 EXPORT_SYMBOL(blk_mq_kick_requeue_list);
 
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+	unsigned long flags;
+	LIST_HEAD(rq_list);
+
+	spin_lock_irqsave(&q->requeue_lock, flags);
+	list_splice_init(&q->requeue_list, &rq_list);
+	spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+	while (!list_empty(&rq_list)) {
+		struct request *rq;
+
+		rq = list_first_entry(&rq_list, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		rq->errors = -EIO;
+		blk_mq_end_request(rq, rq->errors);
+	}
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
 static inline bool is_flush_request(struct request *rq,
 		struct blk_flush_queue *fq, unsigned int tag)
 {
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		break;
 	}
 }
 
 static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
 	struct blk_mq_timeout_data *data = priv;
 
-	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+	if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+		/*
+		 * If a request wasn't started before the queue was
+		 * marked dying, kill it here or it'll go unnoticed.
+		 */
+		if (unlikely(blk_queue_dying(rq->q))) {
+			rq->errors = -EIO;
+			blk_mq_complete_request(rq);
+		}
 		return;
+	}
+	if (rq->cmd_flags & REQ_NO_TIMEOUT)
+		return;
 
 	if (time_after_eq(jiffies, rq->deadline)) {
@@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	hctx->queue = q;
 	hctx->queue_num = hctx_idx;
 	hctx->flags = set->flags;
-	hctx->cmd_size = set->cmd_size;
 
 	blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
 					blk_mq_hctx_notify, hctx);

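Taken together, the symbols exported above sketch the teardown and reset
sequences a driver such as NVMe needs for hung or removed controllers. A
rough illustration under assumed mydev naming (not the actual NVMe code):

/* Quiesce a queue whose hardware is gone: stop new entrants, then
 * fail everything still parked on the requeue list. */
static void mydev_kill_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);	/* no new requests get in */
	blk_mq_cancel_requeue_work(q);	/* requeue work cannot run again */
	blk_mq_abort_requeue_list(q);	/* end parked requests with -EIO */
}

/* Per-request cancel callback, e.g. for blk_mq_tag_busy_iter(); same
 * signature as blk_mq_check_expired() above. Only requests the driver
 * actually started own hardware state worth unwinding, hence the
 * blk_mq_request_started() check. */
static void mydev_cancel_rq(struct blk_mq_hw_ctx *hctx,
		struct request *rq, void *priv, bool reserved)
{
	if (!blk_mq_request_started(rq))
		return;
	rq->errors = -EIO;
	blk_mq_complete_request(rq);
}

/* Controller reset: block and drain I/O, reinitialize, resume. */
static void mydev_reset(struct mydev *dev)
{
	blk_mq_freeze_queue_start(dev->queue);
	/* ... wait for outstanding requests, reinit the hardware ... */
	blk_mq_unfreeze_queue(dev->queue);
}
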
block/blk-mq.h: 1 addition & 0 deletions

@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,
 		struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
 
 /*
  * CPU hotplug helpers

block/blk-timeout.c: 3 additions & 0 deletions

@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
 	struct request_queue *q = req->q;
 	unsigned long expiry;
 
+	if (req->cmd_flags & REQ_NO_TIMEOUT)
+		return;
+
 	/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
 	if (!q->mq_ops && !q->rq_timed_out_fn)
 		return;

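With this check, a driver can flag commands that legitimately never complete
within any deadline; blk_mq_check_expired() above honors the same flag on
the mq path. A hedged sketch (names illustrative; per the shortlog, NVMe
wants this for commands such as async event notifications):

/*
 * Set the flag before blk_mq_start_request() so that neither
 * blk_add_timer() nor blk_mq_check_expired() ever expires it.
 */
static void mydev_submit_async_event(struct request *req)
{
	req->cmd_flags |= REQ_NO_TIMEOUT;
	blk_mq_start_request(req);
	/* hand off to hardware; completion arrives whenever the
	 * device has an event to report */
}
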
drivers/block/null_blk.c: 1 addition & 1 deletion

@@ -530,7 +530,7 @@ static int null_add_dev(void)
 		goto out_cleanup_queues;
 
 	nullb->q = blk_mq_init_queue(&nullb->tag_set);
-	if (!nullb->q) {
+	if (IS_ERR(nullb->q)) {
 		rv = -ENOMEM;
 		goto out_cleanup_tags;
 	}