Merge branch 'for-4.11/block' into for-4.11/linus-merge
Signed-off-by: Jens Axboe <axboe@fb.com>
Jens Axboe committed Feb 17, 2017
2 parents 2fe1e8a + 8a9ae52 commit 6010720
Showing 68 changed files with 7,340 additions and 2,832 deletions.
9 changes: 1 addition & 8 deletions Documentation/cdrom/cdrom-standard.tex
@@ -249,7 +249,6 @@
unsigned\ long);\cr
\noalign{\medskip}
&const\ int& capability;& capability flags \cr
&int& n_minors;& number of active minor devices \cr
\};\cr
}
$$
@@ -258,13 +257,7 @@
function is not implemented, however, this $struct$ should contain a
NULL instead. The $capability$ flags specify the capabilities of the
\cdrom\ hardware and/or low-level \cdrom\ driver when a \cdrom\ drive
is registered with the \UCD. The value $n_minors$ should be a positive
value indicating the number of minor devices that are supported by
the low-level device driver, normally~1. Although these two variables
are `informative' rather than `operational,' they are included in
$cdrom_device_ops$ because they describe the capability of the {\em
driver\/} rather than the {\em drive}. Nomenclature has always been
difficult in computer programming.
is registered with the \UCD.

Note that most functions have fewer parameters than their
$blkdev_fops$ counterparts. This is because very little of the
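Editorial note: with n_minors gone, a low-level driver's ops table carries only operational callbacks plus the capability mask. A minimal sketch of a registration table after this change (the mycd_* names are hypothetical, not from the commit):

#include <linux/cdrom.h>

static int mycd_open(struct cdrom_device_info *cdi, int purpose)
{
	return 0;	/* accept every open in this sketch */
}

static void mycd_release(struct cdrom_device_info *cdi)
{
}

static struct cdrom_device_ops mycd_dops = {
	.open		= mycd_open,
	.release	= mycd_release,
	/* a real driver advertises only what its callbacks implement */
	.capability	= CDC_OPEN_TRAY,
	/* no .n_minors: the field was informative only and is removed
	 * by this series */
};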
15 changes: 13 additions & 2 deletions MAINTAINERS
@@ -8612,10 +8612,10 @@ S: Maintained
F: drivers/net/ethernet/netronome/

NETWORK BLOCK DEVICE (NBD)
M: Markus Pargmann <mpa@pengutronix.de>
M: Josef Bacik <jbacik@fb.com>
S: Maintained
L: linux-block@vger.kernel.org
L: nbd-general@lists.sourceforge.net
T: git git://git.pengutronix.de/git/mpa/linux-nbd.git
F: Documentation/blockdev/nbd.txt
F: drivers/block/nbd.c
F: include/uapi/linux/nbd.h
@@ -11089,6 +11089,17 @@ L: linux-mmc@vger.kernel.org
S: Maintained
F: drivers/mmc/host/sdhci-spear.c

SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
M: Scott Bauer <scott.bauer@intel.com>
M: Jonathan Derrick <jonathan.derrick@intel.com>
M: Rafael Antognolli <rafael.antognolli@intel.com>
L: linux-block@vger.kernel.org
S: Supported
F: block/sed*
F: block/opal_proto.h
F: include/linux/sed*
F: include/uapi/linux/sed*

SECURITY SUBSYSTEM
M: James Morris <james.l.morris@oracle.com>
M: "Serge E. Hallyn" <serge@hallyn.com>
19 changes: 19 additions & 0 deletions block/Kconfig
@@ -147,6 +147,25 @@ config BLK_WBT_MQ
Multiqueue currently doesn't have support for IO scheduling, so
enabling this option is recommended.

config BLK_DEBUG_FS
bool "Block layer debugging information in debugfs"
default y
depends on DEBUG_FS
---help---
Include block layer debugging information in debugfs. This information
is mostly useful for kernel developers, but it doesn't incur any cost
at runtime.

Unless you are building a kernel for a tiny system, you should
say Y here.

config BLK_SED_OPAL
bool "Logic for interfacing with Opal enabled SEDs"
---help---
Builds logic for interfacing with Opal-enabled controllers.
Enabling this option allows users to set up, unlock, and lock
locking ranges on SED devices using the Opal protocol.

menu "Partition Types"

source "block/partitions/Kconfig"
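Editorial note: the BLK_SED_OPAL option above is driven from userspace through the new ioctls in include/uapi/linux/sed-opal.h. A sketch of unlocking locking range 0 as Admin1, assuming the uapi layout this series adds (error handling trimmed; verify names against the header):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/sed-opal.h>

/* Unlock locking range 0 for read/write using the Admin1 credential. */
static int sed_unlock_lr0(const char *dev, const char *pw)
{
	struct opal_lock_unlock lu;
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&lu, 0, sizeof(lu));
	lu.l_state = OPAL_RW;			/* read/write, i.e. unlocked */
	lu.session.who = OPAL_ADMIN1;
	lu.session.opal_key.lr = 0;		/* locking range 0 */
	lu.session.opal_key.key_len = strlen(pw);
	memcpy(lu.session.opal_key.key, pw, lu.session.opal_key.key_len);

	if (ioctl(fd, IOC_OPAL_LOCK_UNLOCK, &lu) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}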
50 changes: 50 additions & 0 deletions block/Kconfig.iosched
@@ -63,6 +63,56 @@ config DEFAULT_IOSCHED
default "cfq" if DEFAULT_CFQ
default "noop" if DEFAULT_NOOP

config MQ_IOSCHED_DEADLINE
tristate "MQ deadline I/O scheduler"
default y
---help---
MQ version of the deadline IO scheduler.

config MQ_IOSCHED_NONE
bool
default y

choice
prompt "Default single-queue blk-mq I/O scheduler"
default DEFAULT_SQ_NONE
help
Select the I/O scheduler which will be used by default for blk-mq
managed block devices with a single queue.

config DEFAULT_SQ_DEADLINE
bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y

config DEFAULT_SQ_NONE
bool "None"

endchoice

config DEFAULT_SQ_IOSCHED
string
default "mq-deadline" if DEFAULT_SQ_DEADLINE
default "none" if DEFAULT_SQ_NONE

choice
prompt "Default multi-queue blk-mq I/O scheduler"
default DEFAULT_MQ_NONE
help
Select the I/O scheduler which will be used by default for blk-mq
managed block devices with multiple queues.

config DEFAULT_MQ_DEADLINE
bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y

config DEFAULT_MQ_NONE
bool "None"

endchoice

config DEFAULT_MQ_IOSCHED
string
default "mq-deadline" if DEFAULT_MQ_DEADLINE
default "none" if DEFAULT_MQ_NONE

endmenu

endif
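
Editorial note: the DEFAULT_SQ_IOSCHED and DEFAULT_MQ_IOSCHED strings above surface as CONFIG_DEFAULT_SQ_IOSCHED and CONFIG_DEFAULT_MQ_IOSCHED. A sketch of how a consumer could pick between them at queue setup, modeled on the existing CONFIG_DEFAULT_IOSCHED use in block/elevator.c (an assumption, not code quoted from this commit):

#include <linux/blkdev.h>

/* Single-queue blk-mq devices get the SQ default scheduler name,
 * multi-queue devices the MQ default ("mq-deadline" or "none"). */
static const char *blk_mq_default_iosched(struct request_queue *q)
{
	return q->nr_hw_queues == 1 ? CONFIG_DEFAULT_SQ_IOSCHED
				    : CONFIG_DEFAULT_MQ_IOSCHED;
}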
5 changes: 4 additions & 1 deletion block/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
badblocks.o partitions/

@@ -18,10 +18,13 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o

obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
6 changes: 3 additions & 3 deletions block/bio.c
@@ -1403,7 +1403,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
bio_set_flag(bio, BIO_USER_MAPPED);

/*
* subtle -- if __bio_map_user() ended up bouncing a bio,
* subtle -- if bio_map_user_iov() ended up bouncing a bio,
* it would normally disappear when its bi_end_io is run.
* however, we need it for the unmap, so grab an extra
* reference to it
@@ -1445,8 +1445,8 @@ static void __bio_unmap_user(struct bio *bio)
* bio_unmap_user - unmap a bio
* @bio: the bio being unmapped
*
* Unmap a bio previously mapped by bio_map_user(). Must be called with
* a process context.
* Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
* process context.
*
* bio_unmap_user() may sleep.
*/
22 changes: 18 additions & 4 deletions block/blk-cgroup.c
@@ -1223,7 +1223,10 @@ int blkcg_activate_policy(struct request_queue *q,
if (blkcg_policy_enabled(q, pol))
return 0;

blk_queue_bypass_start(q);
if (q->mq_ops)
blk_mq_freeze_queue(q);
else
blk_queue_bypass_start(q);
pd_prealloc:
if (!pd_prealloc) {
pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
@@ -1261,7 +1264,10 @@ int blkcg_activate_policy(struct request_queue *q,

spin_unlock_irq(q->queue_lock);
out_bypass_end:
blk_queue_bypass_end(q);
if (q->mq_ops)
blk_mq_unfreeze_queue(q);
else
blk_queue_bypass_end(q);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
@@ -1284,7 +1290,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
if (!blkcg_policy_enabled(q, pol))
return;

blk_queue_bypass_start(q);
if (q->mq_ops)
blk_mq_freeze_queue(q);
else
blk_queue_bypass_start(q);

spin_lock_irq(q->queue_lock);

__clear_bit(pol->plid, q->blkcg_pols);
@@ -1304,7 +1314,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
}

spin_unlock_irq(q->queue_lock);
blk_queue_bypass_end(q);

if (q->mq_ops)
blk_mq_unfreeze_queue(q);
else
blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

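Editorial note: the three hunks above repeat one quiesce pattern. Factored out, it could be read as the following pair of hypothetical helpers (not part of the commit): an mq queue is frozen so in-flight requests drain and new ones are blocked, while a legacy queue falls back to the older bypass mechanism.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void blkcg_policy_quiesce(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_freeze_queue(q);		/* drain, block new requests */
	else
		blk_queue_bypass_start(q);	/* legacy request_fn path */
}

static void blkcg_policy_unquiesce(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}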
32 changes: 10 additions & 22 deletions block/blk-core.c
@@ -39,6 +39,7 @@

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
@@ -134,6 +135,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->cmd = rq->__cmd;
rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1;
rq->internal_tag = -1;
rq->start_time = jiffies;
set_start_time_ns(rq);
rq->part = NULL;
@@ -525,12 +527,14 @@ void blk_set_queue_dying(struct request_queue *q)
else {
struct request_list *rl;

spin_lock_irq(q->queue_lock);
blk_queue_for_each_rl(rl, q) {
if (rl->rq_pool) {
wake_up(&rl->wait[BLK_RW_SYNC]);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
}
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
@@ -1033,28 +1037,12 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
* Flush requests do not use the elevator so skip initialization.
* This allows a request to share the flush and elevator data.
*/
if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
if (op_is_flush(bio->bi_opf))
return false;

return true;
}

/**
* rq_ioc - determine io_context for request allocation
* @bio: request being allocated is for this bio (can be %NULL)
*
* Determine io_context to use for request allocation for @bio. May return
* %NULL if %current->io_context doesn't exist.
*/
static struct io_context *rq_ioc(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
if (bio && bio->bi_ioc)
return bio->bi_ioc;
#endif
return current->io_context;
}

/**
* __get_request - get a free request
* @rl: request list to allocate from
@@ -1655,7 +1643,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}

if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
if (op_is_flush(bio->bi_opf)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
goto get_rq;
@@ -1894,7 +1882,7 @@ generic_make_request_checks(struct bio *bio)
* drivers without flush support don't have to worry
* about them.
*/
if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
if (op_is_flush(bio->bi_opf) &&
!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
if (!nr_sectors) {
@@ -2143,7 +2131,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
if (q->mq_ops) {
if (blk_queue_io_stat(q))
blk_account_io_start(rq, true);
blk_mq_insert_request(rq, false, true, false);
blk_mq_sched_insert_request(rq, false, true, false, false);
return 0;
}

@@ -2159,7 +2147,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
*/
BUG_ON(blk_queued_rq(rq));

if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
if (op_is_flush(rq->cmd_flags))
where = ELEVATOR_INSERT_FLUSH;

add_acct_request(q, rq, where);
@@ -3270,7 +3258,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* rq is already accounted, so use raw insert
*/
if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
if (op_is_flush(rq->cmd_flags))
__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
else
__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
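Editorial note: the repeated conversions above rely on the op_is_flush() helper introduced earlier in this series in include/linux/blk_types.h. As best it can be reconstructed here (verify against the tree), it is a one-liner that covers both the bio->bi_opf and rq->cmd_flags call sites:

static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}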
3 changes: 2 additions & 1 deletion block/blk-exec.c
@@ -9,6 +9,7 @@
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
* for max sense size
@@ -65,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
blk_mq_insert_request(rq, at_head, true, false);
blk_mq_sched_insert_request(rq, at_head, true, false, false);
return;
}

12 changes: 7 additions & 5 deletions block/blk-flush.c
@@ -74,6 +74,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* FLUSH/FUA sequences */
enum {
@@ -391,9 +392,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
* the comment in flush_end_io().
*/
spin_lock_irqsave(&fq->mq_flush_lock, flags);
if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
blk_mq_run_hw_queue(hctx, true);
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

blk_mq_run_hw_queue(hctx, true);
}

/**
@@ -453,9 +455,9 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
blk_mq_insert_request(rq, false, true, false);
} else
if (q->mq_ops)
blk_mq_sched_insert_request(rq, false, true, false, false);
else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
}
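Editorial note: blk_mq_sched_insert_request(), which replaces blk_mq_insert_request() in the blk-exec.c and blk-flush.c hunks above, takes two extra flags. Its assumed prototype from block/blk-mq-sched.h in this series (verify against the tree):

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block);

Under that reading, the call sites above, blk_mq_sched_insert_request(rq, false, true, false, false), insert at the tail and kick the hardware queue synchronously.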
[Diff truncated: the remaining 58 of the 68 changed files in this commit are not shown here.]
