Merge tag 'block-5.6-2020-02-28' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:

 - Passthrough insertion fix (Ming)

 - Kill off some unused arguments (John)

 - blktrace RCU fix (Jan)

 - Dead fields removal for null_blk (Dongli)

 - NVMe polled IO fix (Bijan)

* tag 'block-5.6-2020-02-28' of git://git.kernel.dk/linux-block:
  nvme-pci: Hold cq_poll_lock while completing CQEs
  blk-mq: Remove some unused function arguments
  null_blk: remove unused fields in 'nullb_cmd'
  blktrace: Protect q->blk_trace with RCU
  blk-mq: insert passthrough request into hctx->dispatch directly
Linus Torvalds committed Feb 28, 2020
2 parents 74dea5d + 5b8ea58 commit 2edc78b
Showing 12 changed files with 136 additions and 70 deletions.
2 changes: 1 addition & 1 deletion block/blk-flush.c
@@ -412,7 +412,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, false, false);
 		return;
 	}

22 changes: 15 additions & 7 deletions block/blk-mq-sched.c
@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
 				       bool has_sched,
 				       struct request *rq)
 {
-	/* dispatch flush rq directly */
-	if (rq->rq_flags & RQF_FLUSH_SEQ) {
-		spin_lock(&hctx->lock);
-		list_add(&rq->queuelist, &hctx->dispatch);
-		spin_unlock(&hctx->lock);
+	/*
+	 * dispatch flush and passthrough rq directly
+	 *
+	 * passthrough request has to be added to hctx->dispatch directly.
+	 * For some reason, device may be in one situation which can't
+	 * handle FS request, so STS_RESOURCE is always returned and the
+	 * FS request will be added to hctx->dispatch. However passthrough
+	 * request may be required at that time for fixing the problem. If
+	 * passthrough request is added to scheduler queue, there isn't any
+	 * chance to dispatch it given we prioritize requests in hctx->dispatch.
+	 */
+	if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
 		return true;
-	}
 
 	if (has_sched)
 		rq->rq_flags |= RQF_SORTED;
@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 
 	WARN_ON(e && (rq->tag != -1));
 
-	if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+	if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+		blk_mq_request_bypass_insert(rq, at_head, false);
 		goto run;
+	}
 
 	if (e && e->type->ops.insert_requests) {
 		LIST_HEAD(list);
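A note on the hunk above: a "passthrough" request carries a driver- or
protocol-level command (REQ_OP_DRV_IN/REQ_OP_DRV_OUT) rather than
filesystem data, and is typically built with blk_get_request() and fired
with blk_execute_rq(). A minimal driver-side sketch against the 5.6-era
API (example_send_passthrough is a hypothetical name, not part of this
commit):

#include <linux/blkdev.h>

/* Hypothetical illustration: with the fix above,
 * blk_mq_sched_bypass_insert() sees blk_rq_is_passthrough(rq) == true,
 * so this request is queued straight to hctx->dispatch and can make
 * progress even while the device keeps failing FS requests with
 * BLK_STS_RESOURCE.
 */
static int example_send_passthrough(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Inserts through blk_mq_sched_insert_request() and waits. */
	blk_execute_rq(q, NULL, rq, 0 /* at_head */);

	blk_put_request(rq);
	return 0;
}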
4 changes: 2 additions & 2 deletions block/blk-mq-tag.c
@@ -183,8 +183,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	return tag + tag_offset;
 }
 
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
-		    struct blk_mq_ctx *ctx, unsigned int tag)
+void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+		    unsigned int tag)
 {
 	if (!blk_mq_tag_is_reserved(tags, tag)) {
 		const int real_tag = tag - tags->nr_reserved_tags;
4 changes: 2 additions & 2 deletions block/blk-mq-tag.h
@@ -26,8 +26,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
-			   struct blk_mq_ctx *ctx, unsigned int tag);
+extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+			   unsigned int tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 				   struct blk_mq_tags **tags,
 				   unsigned int depth, bool can_grow);
28 changes: 15 additions & 13 deletions block/blk-mq.c
@@ -477,9 +477,9 @@ static void __blk_mq_free_request(struct request *rq)
 	blk_pm_mark_last_busy(rq);
 	rq->mq_hctx = NULL;
 	if (rq->tag != -1)
-		blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
+		blk_mq_put_tag(hctx->tags, ctx, rq->tag);
 	if (sched_tag != -1)
-		blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
+		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
@@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		 * merge.
 		 */
 		if (rq->rq_flags & RQF_DONTPREP)
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, false, false);
 		else
 			blk_mq_sched_insert_request(rq, true, false, false);
 	}
@@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			q->mq_ops->commit_rqs(hctx);
 
 		spin_lock(&hctx->lock);
-		list_splice_init(list, &hctx->dispatch);
+		list_splice_tail_init(list, &hctx->dispatch);
 		spin_unlock(&hctx->lock);
 
 		/*
@@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	list_add_tail(&rq->queuelist, &hctx->dispatch);
+	if (at_head)
+		list_add(&rq->queuelist, &hctx->dispatch);
+	else
+		list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
 
 	if (run_queue)
@@ -1849,7 +1853,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	if (bypass_insert)
 		return BLK_STS_RESOURCE;
 
-	blk_mq_request_bypass_insert(rq, run_queue);
+	blk_mq_request_bypass_insert(rq, false, run_queue);
 	return BLK_STS_OK;
 }
 
@@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
 	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, false, true);
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
 
@@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		if (ret != BLK_STS_OK) {
 			if (ret == BLK_STS_RESOURCE ||
 					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq,
+				blk_mq_request_bypass_insert(rq, false,
 							list_empty(list));
 				break;
 			}
@@ -3398,7 +3402,6 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
 }
 
 static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
-				       struct blk_mq_hw_ctx *hctx,
 				       struct request *rq)
 {
 	unsigned long ret = 0;
@@ -3431,7 +3434,6 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 }
 
 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct blk_mq_hw_ctx *hctx,
 				     struct request *rq)
 {
 	struct hrtimer_sleeper hs;
@@ -3451,7 +3453,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	if (q->poll_nsec > 0)
 		nsecs = q->poll_nsec;
 	else
-		nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+		nsecs = blk_mq_poll_nsecs(q, rq);
 
 	if (!nsecs)
 		return false;
@@ -3506,7 +3508,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 		return false;
 	}
 
-	return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+	return blk_mq_poll_hybrid_sleep(q, rq);
 }
 
 /**
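A subtle hunk in this file is the switch from list_splice_init() to
list_splice_tail_init() in blk_mq_dispatch_rq_list(): requests the driver
bounced with BLK_STS_RESOURCE are now re-queued behind whatever already
sits in hctx->dispatch (for instance a passthrough request inserted at
the head), not in front of it. A small sketch of the ordering difference
(splice_order_example is hypothetical, not from the commit):

#include <linux/list.h>

/* Assume dispatch holds [P] (a passthrough request) and resubmit holds
 * [A, B] (FS requests the driver just failed with BLK_STS_RESOURCE).
 *
 * Old: list_splice_init(resubmit, dispatch) left dispatch as [A, B, P],
 * letting the re-queued FS requests cut in line ahead of P again.
 */
static void splice_order_example(struct list_head *dispatch,
				 struct list_head *resubmit)
{
	/* New behaviour: dispatch becomes [P, A, B]; P keeps its place. */
	list_splice_tail_init(resubmit, dispatch);
}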
5 changes: 3 additions & 2 deletions block/blk-mq.h
@@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
  */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+				  bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
 
@@ -199,7 +200,7 @@ static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 					   struct request *rq)
 {
-	blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
+	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
 	rq->tag = -1;
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT) {
3 changes: 0 additions & 3 deletions drivers/block/null_blk.h
@@ -14,9 +14,6 @@
 #include <linux/fault-inject.h>
 
 struct nullb_cmd {
-	struct list_head list;
-	struct llist_node ll_list;
-	struct __call_single_data csd;
 	struct request *rq;
 	struct bio *bio;
 	unsigned int tag;
2 changes: 0 additions & 2 deletions drivers/block/null_blk_main.c
@@ -1518,8 +1518,6 @@ static int setup_commands(struct nullb_queue *nq)
 
 	for (i = 0; i < nq->queue_depth; i++) {
 		cmd = &nq->cmds[i];
-		INIT_LIST_HEAD(&cmd->list);
-		cmd->ll_list.next = NULL;
 		cmd->tag = -1U;
 	}

2 changes: 1 addition & 1 deletion drivers/nvme/host/pci.c
@@ -1078,9 +1078,9 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
 
 	spin_lock(&nvmeq->cq_poll_lock);
 	found = nvme_process_cq(nvmeq, &start, &end, -1);
+	nvme_complete_cqes(nvmeq, start, end);
 	spin_unlock(&nvmeq->cq_poll_lock);
 
-	nvme_complete_cqes(nvmeq, start, end);
 	return found;
 }

2 changes: 1 addition & 1 deletion include/linux/blkdev.h
@@ -524,7 +524,7 @@ struct request_queue {
 	unsigned int		sg_reserved_size;
 	int			node;
 #ifdef CONFIG_BLK_DEV_IO_TRACE
-	struct blk_trace	*blk_trace;
+	struct blk_trace __rcu	*blk_trace;
 	struct mutex		blk_trace_mutex;
 #endif
 	/*
18 changes: 13 additions & 5 deletions include/linux/blktrace_api.h
@@ -51,20 +51,28 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
  **/
 #define blk_add_cgroup_trace_msg(q, cg, fmt, ...)			\
 	do {								\
-		struct blk_trace *bt = (q)->blk_trace;			\
+		struct blk_trace *bt;					\
+									\
+		rcu_read_lock();					\
+		bt = rcu_dereference((q)->blk_trace);			\
 		if (unlikely(bt))					\
 			__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+		rcu_read_unlock();					\
 	} while (0)
 #define blk_add_trace_msg(q, fmt, ...)					\
 	blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
 #define BLK_TN_MAX_MSG		128
 
 static inline bool blk_trace_note_message_enabled(struct request_queue *q)
 {
-	struct blk_trace *bt = q->blk_trace;
-	if (likely(!bt))
-		return false;
-	return bt->act_mask & BLK_TC_NOTIFY;
+	struct blk_trace *bt;
+	bool ret;
+
+	rcu_read_lock();
+	bt = rcu_dereference(q->blk_trace);
+	ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+	rcu_read_unlock();
+	return ret;
 }
 
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
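The rcu_read_lock()/rcu_dereference() readers above only close the race
if the writer publishes and retires q->blk_trace with the matching
primitives; that side of the fix lives in kernel/trace/blktrace.c, whose
hunk did not load on this page. A simplified sketch of the usual pattern
(example_blk_trace_remove is a hypothetical stand-in, and the final free
is abbreviated to kfree()):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Writer side, normally serialized by q->blk_trace_mutex. */
static void example_blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = rcu_dereference_protected(q->blk_trace,
			lockdep_is_held(&q->blk_trace_mutex));
	if (!bt)
		return;

	/* Unpublish the pointer first... */
	rcu_assign_pointer(q->blk_trace, NULL);

	/* ...then wait for every reader still inside rcu_read_lock()
	 * (e.g. blk_add_trace_msg()) before freeing the object.
	 */
	synchronize_rcu();
	kfree(bt);
}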
[Diff for the remaining file, kernel/trace/blktrace.c, did not load on this page.]
