Merge branch 'for-4.7/drivers' of git://git.kernel.dk/linux-block
Pull block driver updates from Jens Axboe:
 "On top of the core pull request, this is the drivers pull request for
  this merge window.  This contains:

   - Switch drivers to the new write back cache API, and kill off the
     flush flags.  From me.

   - Kill the discard support for the STEC pci-e flash driver.  It's
     trivially broken, and apparently unmaintained, so it's safer to
     just remove it.  From Jeff Moyer.

   - A set of lightnvm updates from the usual suspects (Matias/Javier,
     and Simon), and fixes from Arnd, Jeff Mahoney, Sagi, and Wenwei
     Tao.

   - A set of updates for NVMe:

        - Turn the controller state management into a proper state
          machine.  From Christoph.

        - Shuffling of code in preparation for NVMe-over-fabrics, also
          from Christoph.

        - Cleanup of the command prep part from Ming Lin.

        - Rewrite of the discard support from Ming Lin.

        - Deadlock fix for namespace removal from Ming Lin.

        - Use the now exported blk-mq tag helper for IO termination.
          From Sagi.

        - Various little fixes from Christoph, Guilherme, Keith, Ming
          Lin, and Wang Sheng-Hui.

   - Convert mtip32xx to use the now exported blk-mq tag iter function,
     from Keith"
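
The write-back cache API switch is the thread running through most of the
driver diffs below: instead of encoding cache behavior in REQ_FLUSH/REQ_FUA
"flush flags", a driver now passes two booleans. A minimal before/after
sketch (illustrative only; "q" stands for any driver's request queue):

	/* before: cache capability advertised via flush flags */
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

	/* after: wc = device has a volatile write cache (needs flushes),
	 * fua = device natively honors Forced Unit Access writes
	 */
	blk_queue_write_cache(q, true, true);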

* 'for-4.7/drivers' of git://git.kernel.dk/linux-block: (74 commits)
  lightnvm: reserved space calculation incorrect
  lightnvm: rename nr_pages to nr_ppas on nvm_rq
  lightnvm: add is_cached entry to struct ppa_addr
  lightnvm: expose gennvm_mark_blk to targets
  lightnvm: remove mgt targets on mgt removal
  lightnvm: pass dma address to hardware rather than pointer
  lightnvm: do not assume sequential lun alloc.
  nvme/lightnvm: Log using the ctrl named device
  lightnvm: rename dma helper functions
  lightnvm: enable metadata to be sent to device
  lightnvm: do not free unused metadata on rrpc
  lightnvm: fix out of bound ppa lun id on bb tbl
  lightnvm: refactor set_bb_tbl for accepting ppa list
  lightnvm: move responsibility for bad blk mgmt to target
  lightnvm: make nvm_set_rqd_ppalist() aware of vblks
  lightnvm: remove struct factory_blks
  lightnvm: refactor device ops->get_bb_tbl()
  lightnvm: introduce nvm_for_each_lun_ppa() macro
  lightnvm: refactor dev->online_target to global nvm_targets
  lightnvm: rename nvm_targets to nvm_tgt_type
  ...
Linus Torvalds committed May 17, 2016
2 parents a4d1dbe + 116f7d4 commit 24b9f0c
Showing 39 changed files with 966 additions and 798 deletions.
4 changes: 2 additions & 2 deletions Documentation/block/writeback_cache_control.txt
@@ -71,15 +71,15 @@ requests that have a payload.  For devices with volatile write caches the
 driver needs to tell the block layer that it supports flushing caches by
 doing:
 
-	blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
+	blk_queue_write_cache(sdkp->disk->queue, true, false);
 
 and handle empty REQ_FLUSH requests in its prep_fn/request_fn.  Note that
 REQ_FLUSH requests with a payload are automatically turned into a sequence
 of an empty REQ_FLUSH request followed by the actual write by the block
 layer.  For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
-	blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(sdkp->disk->queue, true, true);
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn.  If the FUA bit is not natively supported the block
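
The documentation above still requires the driver to recognize empty
REQ_FLUSH requests itself. A rough request_fn-side sketch of that dispatch
(hypothetical helpers mydrv_issue_cache_flush() and mydrv_issue_rw(); not
from this series):

	static void mydrv_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_fetch_request(q)) != NULL) {
			if (rq->cmd_flags & REQ_FLUSH) {
				/* empty flush, sequenced by the block layer */
				mydrv_issue_cache_flush(rq);
				continue;
			}
			/* REQ_FUA survives only if the queue was registered
			 * with fua == true */
			mydrv_issue_rw(rq, rq->cmd_flags & REQ_FUA);
		}
	}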
2 changes: 1 addition & 1 deletion arch/um/drivers/ubd_kern.c
@@ -862,7 +862,7 @@ static int ubd_add(int n, char **error_out)
 		goto out;
 	}
 	ubd_dev->queue->queuedata = ubd_dev;
-	blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
+	blk_queue_write_cache(ubd_dev->queue, true, false);
 
 	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
3 changes: 2 additions & 1 deletion block/blk-core.c
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
11 changes: 6 additions & 5 deletions block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
 static bool blk_kick_flush(struct request_queue *q,
 			   struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
 	unsigned int policy = 0;
 
 	if (blk_rq_sectors(rq))
 		policy |= REQ_FSEQ_DATA;
 
-	if (fflags & REQ_FLUSH) {
+	if (fflags & (1UL << QUEUE_FLAG_WC)) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+		    (rq->cmd_flags & REQ_FUA))
 			policy |= REQ_FSEQ_POSTFLUSH;
 	}
 	return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned int fflags = q->flush_flags;	/* may change, cache */
+	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
 	 * REQ_FLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_FLUSH;
-	if (!(fflags & REQ_FUA))
+	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
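
A worked example of the new flag math (not part of the patch): a data write
carrying REQ_FUA on a queue with QUEUE_FLAG_WC set but QUEUE_FLAG_FUA clear
gets policy REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH, i.e. the block layer
emulates FUA with a post-write flush; with QUEUE_FLAG_FUA also set, the
policy is just REQ_FSEQ_DATA and the REQ_FUA bit passes through to the
driver unchanged.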
5 changes: 2 additions & 3 deletions block/blk-mq-tag.c
@@ -464,15 +464,14 @@ static void bt_tags_for_each(struct blk_mq_tags *tags,
 	}
 }
 
-void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
-		void *priv)
+static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+		busy_tag_iter_fn *fn, void *priv)
 {
 	if (tags->nr_reserved_tags)
 		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
 	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
 			false);
 }
-EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
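
With blk_mq_all_tag_busy_iter() now private to blk-mq, drivers walk their
in-flight requests through the tag-set variant instead, as the mtip32xx
conversion below does. A hedged usage sketch (hypothetical mydrv_* names):

	static void mydrv_abort_rq(struct request *rq, void *data, bool reserved)
	{
		struct mydrv_dev *dev = data;

		/* invoked once per started request on the tag set */
		mydrv_fail_request(dev, rq);
	}

	/* iterate busy tags across every hardware queue of the set */
	blk_mq_tagset_busy_iter(&dev->tag_set, mydrv_abort_rq, dev);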
38 changes: 10 additions & 28 deletions block/blk-settings.c
@@ -820,29 +820,14 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q:		the request queue for the device
- * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q.  If it supports
- * flushing, REQ_FLUSH should be set.  If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-		flush &= ~REQ_FUA;
-
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	q->flush_not_queueable = !queueable;
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
@@ -857,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 	spin_lock_irq(q->queue_lock);
-	if (wc) {
+	if (wc)
 		queue_flag_set(QUEUE_FLAG_WC, q);
-		q->flush_flags = REQ_FLUSH;
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_WC, q);
-	if (fua) {
-		if (wc)
-			q->flush_flags |= REQ_FUA;
+	if (fua)
 		queue_flag_set(QUEUE_FLAG_FUA, q);
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_FUA, q);
 	spin_unlock_irq(q->queue_lock);
 }
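
With flush_flags gone, the cache state lives only in q->queue_flags, so code
that used to test q->flush_flags now checks the bits directly, as the
blk-core.c and xen-blkback hunks in this series do. For example (sketch, not
from the patch):

	bool has_write_cache = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
	bool has_native_fua = test_bit(QUEUE_FLAG_FUA, &q->queue_flags);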
2 changes: 1 addition & 1 deletion drivers/block/drbd/drbd_main.c
@@ -2761,7 +2761,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	q->backing_dev_info.congested_data = device;
 
 	blk_queue_make_request(q, drbd_make_request);
-	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(q, true, true);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2 changes: 1 addition & 1 deletion drivers/block/loop.c
@@ -943,7 +943,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-		blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+		blk_queue_write_cache(lo->lo_queue, true, false);
 
 	loop_update_dio(lo);
 	set_capacity(lo->lo_disk, size);
12 changes: 3 additions & 9 deletions drivers/block/mtip32xx/mtip32xx.c
@@ -3000,14 +3000,14 @@ static int mtip_service_thread(void *data)
 				"Completion workers still active!");
 
 			spin_lock(dd->queue->queue_lock);
-			blk_mq_all_tag_busy_iter(*dd->tags.tags,
+			blk_mq_tagset_busy_iter(&dd->tags,
 					mtip_queue_cmd, dd);
 			spin_unlock(dd->queue->queue_lock);
 
 			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
 
 			if (mtip_device_reset(dd))
-				blk_mq_all_tag_busy_iter(*dd->tags.tags,
+				blk_mq_tagset_busy_iter(&dd->tags,
 						mtip_abort_cmd, dd);
 
 			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
@@ -4023,12 +4023,6 @@ static int mtip_block_initialize(struct driver_data *dd)
 	blk_queue_io_min(dd->queue, 4096);
 	blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
 
-	/*
-	 * write back cache is not supported in the device. FUA depends on
-	 * write back cache support, hence setting flush support to zero.
-	 */
-	blk_queue_flush(dd->queue, 0);
-
 	/* Signal trim support */
 	if (dd->trim_supp == true) {
 		set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
@@ -4174,7 +4168,7 @@ static int mtip_block_remove(struct driver_data *dd)
 
 	blk_mq_freeze_queue_start(dd->queue);
 	blk_mq_stop_hw_queues(dd->queue);
-	blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
 
 	/*
 	 * Delete our gendisk structure. This also removes the device
4 changes: 2 additions & 2 deletions drivers/block/nbd.c
@@ -693,9 +693,9 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
 	if (nbd->flags & NBD_FLAG_SEND_TRIM)
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
-		blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+		blk_queue_write_cache(nbd->disk->queue, true, false);
 	else
-		blk_queue_flush(nbd->disk->queue, 0);
+		blk_queue_write_cache(nbd->disk->queue, false, false);
 }
 
 static int nbd_dev_dbg_init(struct nbd_device *nbd);
2 changes: 1 addition & 1 deletion drivers/block/osdblk.c
@@ -437,7 +437,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
 	blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
 	blk_queue_prep_rq(q, blk_queue_start_tag);
-	blk_queue_flush(q, REQ_FLUSH);
+	blk_queue_write_cache(q, true, false);
 
 	disk->queue = q;
 
2 changes: 1 addition & 1 deletion drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_logical_block_size(queue, dev->blk_size);
 
-	blk_queue_flush(queue, REQ_FLUSH);
+	blk_queue_write_cache(queue, true, false);
 
 	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);
61 changes: 2 additions & 59 deletions drivers/block/skd_main.c
@@ -133,7 +133,6 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
 #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
 
 #define INQ_STD_NBYTES 36
-#define SKD_DISCARD_CDB_LENGTH 24
 
 enum skd_drvr_state {
 	SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@ struct skd_request_context {
 
 	struct request *req;
 	u8 flush_cmd;
-	u8 discard_page;
 
 	u32 timeout_stamp;
 	u8 sg_data_dir;
@@ -230,7 +228,6 @@ };
 };
 #define SKD_DATA_DIR_HOST_TO_CARD	1
 #define SKD_DATA_DIR_CARD_TO_HOST	2
-#define SKD_DATA_DIR_NONE		3	/* especially for DISCARD requests. */
 
 struct skd_special_context {
 	struct skd_request_context req;
@@ -540,31 +537,6 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
 	scsi_req->cdb[9] = 0;
 }
 
-static void
-skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
-		     struct skd_request_context *skreq,
-		     struct page *page,
-		     u32 lba, u32 count)
-{
-	char *buf;
-	unsigned long len;
-	struct request *req;
-
-	buf = page_address(page);
-	len = SKD_DISCARD_CDB_LENGTH;
-
-	scsi_req->cdb[0] = UNMAP;
-	scsi_req->cdb[8] = len;
-
-	put_unaligned_be16(6 + 16, &buf[0]);
-	put_unaligned_be16(16, &buf[2]);
-	put_unaligned_be64(lba, &buf[8]);
-	put_unaligned_be32(count, &buf[16]);
-
-	req = skreq->req;
-	blk_add_request_payload(req, page, 0, len);
-}
-
 static void skd_request_fn_not_online(struct request_queue *q);
 
 static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@ static void skd_request_fn(struct request_queue *q)
 	struct skd_request_context *skreq;
 	struct request *req = NULL;
 	struct skd_scsi_request *scsi_req;
-	struct page *page;
 	unsigned long io_flags;
 	int error;
 	u32 lba;
@@ -669,7 +640,6 @@ static void skd_request_fn(struct request_queue *q)
 		skreq->flush_cmd = 0;
 		skreq->n_sg = 0;
 		skreq->sg_byte_count = 0;
-		skreq->discard_page = 0;
 
 		/*
 		 * OK to now dequeue request from q.
@@ -735,18 +705,7 @@ static void skd_request_fn(struct request_queue *q)
 		else
 			skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
 
-		if (io_flags & REQ_DISCARD) {
-			page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
-			if (!page) {
-				pr_err("request_fn:Page allocation failed.\n");
-				skd_end_request(skdev, skreq, -ENOMEM);
-				break;
-			}
-			skreq->discard_page = 1;
-			req->completion_data = page;
-			skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
-
-		} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
 			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
 			SKD_ASSERT(skreq->flush_cmd == 1);
 
@@ -851,16 +810,6 @@ static void skd_request_fn(struct request_queue *q)
 static void skd_end_request(struct skd_device *skdev,
 			    struct skd_request_context *skreq, int error)
 {
-	struct request *req = skreq->req;
-	unsigned int io_flags = req->cmd_flags;
-
-	if ((io_flags & REQ_DISCARD) &&
-		(skreq->discard_page == 1)) {
-		pr_debug("%s:%s:%d, free the page!",
-			 skdev->name, __func__, __LINE__);
-		__free_page(req->completion_data);
-	}
-
 	if (unlikely(error)) {
 		struct request *req = skreq->req;
 		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4412,19 +4361,13 @@ static int skd_cons_disk(struct skd_device *skdev)
 	disk->queue = q;
 	q->queuedata = skdev;
 
-	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(q, true, true);
 	blk_queue_max_segments(q, skdev->sgs_per_request);
 	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
 
 	/* set sysfs ptimal_io_size to 8K */
 	blk_queue_io_opt(q, 8192);
 
-	/* DISCARD Flag initialization. */
-	q->limits.discard_granularity = 8192;
-	q->limits.discard_alignment = 0;
-	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-	q->limits.discard_zeroes_data = 1;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
6 changes: 1 addition & 5 deletions drivers/block/virtio_blk.c
@@ -493,11 +493,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
 	u8 writeback = virtblk_get_cache_mode(vdev);
 	struct virtio_blk *vblk = vdev->priv;
 
-	if (writeback)
-		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
-	else
-		blk_queue_flush(vblk->disk->queue, 0);
-
+	blk_queue_write_cache(vblk->disk->queue, writeback, false);
 	revalidate_disk(vblk->disk);
 }
 
2 changes: 1 addition & 1 deletion drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 		vbd->type |= VDISK_REMOVABLE;
 
 	q = bdev_get_queue(bdev);
-	if (q && q->flush_flags)
+	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		vbd->flush_support = true;
 
 	if (q && blk_queue_secdiscard(q))
(diff truncated: 24 of the 39 changed files not shown)
