Merge branch 'for-4.11/block' into for-4.11/rq-refactor
Signed-off-by: Jens Axboe <axboe@fb.com>
Jens Axboe committed Jan 27, 2017
2 parents 7a308bb + 400f73b commit f924ba7
Showing 38 changed files with 3,099 additions and 676 deletions.
4 changes: 2 additions & 2 deletions MAINTAINERS
@@ -8604,10 +8604,10 @@ S:	Maintained
 F:	drivers/net/ethernet/netronome/
 
 NETWORK BLOCK DEVICE (NBD)
-M:	Markus Pargmann <mpa@pengutronix.de>
+M:	Josef Bacik <jbacik@fb.com>
 S:	Maintained
+L:	linux-block@vger.kernel.org
 L:	nbd-general@lists.sourceforge.net
-T:	git git://git.pengutronix.de/git/mpa/linux-nbd.git
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/uapi/linux/nbd.h
12 changes: 12 additions & 0 deletions block/Kconfig
@@ -147,6 +147,18 @@ config BLK_WBT_MQ
 	Multiqueue currently doesn't have support for IO scheduling,
 	enabling this option is recommended.
 
+config BLK_DEBUG_FS
+	bool "Block layer debugging information in debugfs"
+	default y
+	depends on DEBUG_FS
+	---help---
+	Include block layer debugging information in debugfs. This information
+	is mostly useful for kernel developers, but it doesn't incur any cost
+	at runtime.
+
+	Unless you are building a kernel for a tiny system, you should
+	say Y here.
+
 menu "Partition Types"
 
 source "block/partitions/Kconfig"
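
For context, the new option is gated only on debugfs itself; a minimal .config fragment that enables it (assuming nothing else about the build):

# Minimal fragment: BLK_DEBUG_FS depends on DEBUG_FS and defaults to y.
CONFIG_DEBUG_FS=y
CONFIG_BLK_DEBUG_FS=y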
50 changes: 50 additions & 0 deletions block/Kconfig.iosched
@@ -63,6 +63,56 @@ config DEFAULT_IOSCHED
 	default "cfq" if DEFAULT_CFQ
 	default "noop" if DEFAULT_NOOP
 
+config MQ_IOSCHED_DEADLINE
+	tristate "MQ deadline I/O scheduler"
+	default y
+	---help---
+	MQ version of the deadline IO scheduler.
+
+config MQ_IOSCHED_NONE
+	bool
+	default y
+
+choice
+	prompt "Default single-queue blk-mq I/O scheduler"
+	default DEFAULT_SQ_NONE
+	help
+	  Select the I/O scheduler which will be used by default for blk-mq
+	  managed block devices with a single queue.
+
+config DEFAULT_SQ_DEADLINE
+	bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+config DEFAULT_SQ_NONE
+	bool "None"
+
+endchoice
+
+config DEFAULT_SQ_IOSCHED
+	string
+	default "mq-deadline" if DEFAULT_SQ_DEADLINE
+	default "none" if DEFAULT_SQ_NONE
+
+choice
+	prompt "Default multi-queue blk-mq I/O scheduler"
+	default DEFAULT_MQ_NONE
+	help
+	  Select the I/O scheduler which will be used by default for blk-mq
+	  managed block devices with multiple queues.
+
+config DEFAULT_MQ_DEADLINE
+	bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+config DEFAULT_MQ_NONE
+	bool "None"
+
+endchoice
+
+config DEFAULT_MQ_IOSCHED
+	string
+	default "mq-deadline" if DEFAULT_MQ_DEADLINE
+	default "none" if DEFAULT_MQ_NONE
+
 endmenu
 
 endif
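
The two choice blocks above feed the derived DEFAULT_SQ_IOSCHED and DEFAULT_MQ_IOSCHED strings, which the kernel consumes as elevator names. For example, defaulting single-queue blk-mq devices to mq-deadline while leaving multi-queue devices with no scheduler would produce a .config containing:

# Derived from the choice blocks above: "mq-deadline" for single-queue
# blk-mq devices, "none" for multi-queue ones.
CONFIG_MQ_IOSCHED_DEADLINE=y
CONFIG_DEFAULT_SQ_DEADLINE=y
CONFIG_DEFAULT_SQ_IOSCHED="mq-deadline"
CONFIG_DEFAULT_MQ_NONE=y
CONFIG_DEFAULT_MQ_IOSCHED="none"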
4 changes: 3 additions & 1 deletion block/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
-			blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
+			blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
 			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
 			badblocks.o partitions/
 
@@ -18,10 +18,12 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_MQ_IOSCHED_DEADLINE)	+= mq-deadline.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= bio-integrity.o blk-integrity.o t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
 obj-$(CONFIG_BLK_DEV_ZONED)	+= blk-zoned.o
 obj-$(CONFIG_BLK_WBT)	+= blk-wbt.o
+obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
22 changes: 18 additions & 4 deletions block/blk-cgroup.c
@@ -1223,7 +1223,10 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
-	blk_queue_bypass_start(q);
+	if (q->mq_ops)
+		blk_mq_freeze_queue(q);
+	else
+		blk_queue_bypass_start(q);
 pd_prealloc:
 	if (!pd_prealloc) {
 		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
@@ -1261,7 +1264,10 @@ int blkcg_activate_policy(struct request_queue *q,
 
 	spin_unlock_irq(q->queue_lock);
 out_bypass_end:
-	blk_queue_bypass_end(q);
+	if (q->mq_ops)
+		blk_mq_unfreeze_queue(q);
+	else
+		blk_queue_bypass_end(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
 	return ret;
@@ -1284,7 +1290,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
-	blk_queue_bypass_start(q);
+	if (q->mq_ops)
+		blk_mq_freeze_queue(q);
+	else
+		blk_queue_bypass_start(q);
+
 	spin_lock_irq(q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
@@ -1304,7 +1314,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	}
 
 	spin_unlock_irq(q->queue_lock);
-	blk_queue_bypass_end(q);
+
+	if (q->mq_ops)
+		blk_mq_unfreeze_queue(q);
+	else
+		blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
 
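
All four hunks above follow the same pattern: quiesce the queue with the mechanism appropriate to its dispatch model before touching policy state. A minimal sketch of that pattern as standalone helpers (the helper names are hypothetical, not part of the commit):

/* Hypothetical helpers illustrating the pattern used in the hunks
 * above: blk-mq queues are frozen (in-flight requests drained, new
 * ones blocked), while legacy queues enter bypass mode instead. */
static void blkcg_policy_quiesce(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
}

static void blkcg_policy_unquiesce(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}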
30 changes: 8 additions & 22 deletions block/blk-core.c
@@ -39,6 +39,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-sched.h"
 #include "blk-wbt.h"
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
@@ -134,6 +135,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->cmd = rq->__cmd;
 	rq->cmd_len = BLK_MAX_CDB;
 	rq->tag = -1;
+	rq->internal_tag = -1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
 	rq->part = NULL;
@@ -1033,28 +1035,12 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(bio->bi_opf))
 		return false;
 
 	return true;
 }
 
-/**
- * rq_ioc - determine io_context for request allocation
- * @bio: request being allocated is for this bio (can be %NULL)
- *
- * Determine io_context to use for request allocation for @bio. May return
- * %NULL if %current->io_context doesn't exist.
- */
-static struct io_context *rq_ioc(struct bio *bio)
-{
-#ifdef CONFIG_BLK_CGROUP
-	if (bio && bio->bi_ioc)
-		return bio->bi_ioc;
-#endif
-	return current->io_context;
-}
-
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
@@ -1655,7 +1641,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+	if (op_is_flush(bio->bi_opf)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1894,7 +1880,7 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+	if (op_is_flush(bio->bi_opf) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
@@ -2143,7 +2129,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	if (q->mq_ops) {
 		if (blk_queue_io_stat(q))
 			blk_account_io_start(rq, true);
-		blk_mq_insert_request(rq, false, true, false);
+		blk_mq_sched_insert_request(rq, false, true, false, false);
 		return 0;
 	}
 
@@ -2159,7 +2145,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(rq->cmd_flags))
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
@@ -3270,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+		if (op_is_flush(rq->cmd_flags))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
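
The repeated open-coded flag tests in this file are folded into op_is_flush(), a helper introduced elsewhere in this series; judging by the call sites, it amounts to a one-line mask test (sketch):

/* Sketch of the op_is_flush() helper used above: an operation needs
 * flush handling if it carries preflush or forced-unit-access (FUA)
 * semantics. */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

Note also that rq_ioc() is deleted here rather than dropped outright: later in this series it reappears in a shared block-layer header (block/blk.h) so the new blk-mq scheduling code can reuse it.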
3 changes: 2 additions & 1 deletion block/blk-exec.c
@@ -9,6 +9,7 @@
 #include <linux/sched/sysctl.h>
 
 #include "blk.h"
+#include "blk-mq-sched.h"
 
 /*
  * for max sense size
@@ -65,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be reused after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(rq, at_head, true, false);
+		blk_mq_sched_insert_request(rq, at_head, true, false, false);
 		return;
 	}
 
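
blk_mq_sched_insert_request() takes one more argument than the blk_mq_insert_request() call it replaces. Its shape, inferred from the call sites in this merge (parameter names are a best guess; the declaration lives in the new block/blk-mq-sched.h):

/* Prototype inferred from the call sites above: the extra trailing
 * flag, compared to blk_mq_insert_request(), indicates whether the
 * caller may block while inserting. */
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async,
				 bool can_block);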
12 changes: 7 additions & 5 deletions block/blk-flush.c
@@ -74,6 +74,7 @@
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
+#include "blk-mq-sched.h"
 
 /* FLUSH/FUA sequences */
 enum {
@@ -391,9 +392,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
-		blk_mq_run_hw_queue(hctx, true);
+	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+
+	blk_mq_run_hw_queue(hctx, true);
 }
 
 /**
@@ -453,9 +455,9 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		if (q->mq_ops) {
-			blk_mq_insert_request(rq, false, true, false);
-		} else
+		if (q->mq_ops)
+			blk_mq_sched_insert_request(rq, false, true, false, false);
+		else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}
12 changes: 8 additions & 4 deletions block/blk-ioc.c
@@ -43,8 +43,10 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (icq->flags & ICQ_EXITED)
 		return;
 
-	if (et->ops.elevator_exit_icq_fn)
-		et->ops.elevator_exit_icq_fn(icq);
+	if (et->uses_mq && et->ops.mq.exit_icq)
+		et->ops.mq.exit_icq(icq);
+	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
+		et->ops.sq.elevator_exit_icq_fn(icq);
 
 	icq->flags |= ICQ_EXITED;
 }
@@ -383,8 +385,10 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
 		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.elevator_init_icq_fn)
-			et->ops.elevator_init_icq_fn(icq);
+		if (et->uses_mq && et->ops.mq.init_icq)
+			et->ops.mq.init_icq(icq);
+		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
+			et->ops.sq.elevator_init_icq_fn(icq);
 	} else {
 		kmem_cache_free(et->icq_cache, icq);
 		icq = ioc_lookup_icq(ioc, q);
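
Both hunks, like the blk-merge.c change below, dispatch through a split ops table discriminated by uses_mq. The elevator_type layout these accessors imply looks roughly like this (a sketch; unrelated members of the real struct are elided):

/* Sketch of the split elevator ops implied by the accessors above:
 * the legacy single-queue (sq) and blk-mq (mq) callback tables share
 * a union, with uses_mq selecting the live side. */
struct elevator_type {
	union {
		struct elevator_ops sq;		/* legacy request_fn path */
		struct elevator_mq_ops mq;	/* blk-mq scheduler path */
	} ops;
	unsigned int uses_mq;
	/* ... */
};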
4 changes: 2 additions & 2 deletions block/blk-merge.c
@@ -763,8 +763,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (e->type->ops.elevator_allow_rq_merge_fn)
-		if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next))
+	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
+		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
 			return 0;
 
 	return attempt_merge(q, rq, next);
[Diff truncated: the remaining 28 of 38 changed files are not shown.]
