Skip to content

Commit

Permalink
null_blk: register as a LightNVM device
Browse files Browse the repository at this point in the history
Add support for registering as a LightNVM device. This allows us to
evaluate the performance of the LightNVM subsystem.

In drivers/Makefile, LightNVM is moved above the block device drivers
to ensure that the LightNVM media managers are initialized before the
drivers under drivers/block are initialized.

Signed-off-by: Matias Bjørling <m@bjorling.me>
A follow-up fix by Jens Axboe removes an unneeded slab cache and the
memory leak that resulted from it.
Signed-off-by: Jens Axboe <axboe@fb.com>
  • Loading branch information
Matias Bjørling authored and Jens Axboe committed Nov 16, 2015
1 parent 4736346 commit b2b7e00
Show file tree
Hide file tree
Showing 3 changed files with 158 additions and 7 deletions.
3 changes: 3 additions & 0 deletions Documentation/block/null_blk.txt
Original file line number Diff line number Diff line change
Expand Up @@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
parameter.
1: The multi-queue block layer is instantiated with a hardware dispatch
queue for each CPU node in the system.

use_lightnvm=[0/1]: Default: 0
Register device with LightNVM. Requires blk-mq to be used.
2 changes: 1 addition & 1 deletion drivers/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -63,14 +63,14 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/
obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/

obj-$(CONFIG_PARPORT) += parport/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
Expand Down
160 changes: 154 additions & 6 deletions drivers/block/null_blk.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>

struct nullb_cmd {
struct list_head list;
Expand Down Expand Up @@ -39,6 +40,7 @@ struct nullb {

struct nullb_queue *queues;
unsigned int nr_queues;
char disk_name[DISK_NAME_LEN];
};

static LIST_HEAD(nullb_list);
Expand Down Expand Up @@ -119,6 +121,10 @@ static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool use_lightnvm;
module_param(use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
Expand Down Expand Up @@ -427,6 +433,8 @@ static void null_del_dev(struct nullb *nullb)
{
list_del_init(&nullb->list);

if (use_lightnvm)
nvm_unregister(nullb->disk->disk_name);
del_gendisk(nullb->disk);
blk_cleanup_queue(nullb->q);
if (queue_mode == NULL_Q_MQ)
Expand All @@ -436,6 +444,125 @@ static void null_del_dev(struct nullb *nullb)
kfree(nullb);
}

#ifdef CONFIG_NVM

/*
 * blk-mq completion callback for LightNVM requests submitted via
 * null_lnvm_submit_io(): hand the completion status back to the
 * LightNVM media manager that owns the nvm_rq, then drop our
 * reference on the request.
 */
static void null_lnvm_end_io(struct request *rq, int error)
{
	/* The nvm_rq was stashed in end_io_data at submission time */
	struct nvm_rq *rqd = rq->end_io_data;
	struct nvm_dev *dev = rqd->dev;

	/* Media-manager bookkeeping must run before the request is freed */
	dev->mt->end_io(rqd, error);

	blk_put_request(rq);
}

/*
 * Submit a LightNVM request to the null_blk queue.
 *
 * Wraps the bio carried by @rqd in a driver-private blk-mq request,
 * mirrors the bio's sector/length/segment geometry onto the request,
 * and fires it asynchronously; completion is routed through
 * null_lnvm_end_io().
 *
 * Returns 0 on success or a negative errno if request allocation
 * fails.
 */
static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd)
{
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0);
	if (IS_ERR(rq))
		/* Propagate the real error instead of assuming -ENOMEM */
		return PTR_ERR(rq);

	/* Driver-private command: null_blk completes it without data access */
	rq->cmd_type = REQ_TYPE_DRV_PRIV;

	/* Copy the bio's geometry onto the request by hand */
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	/* Stash the nvm_rq so the completion handler can find it */
	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}

/*
 * Report a synthetic LightNVM identity for the null device.
 *
 * The configured capacity (gb module parameter) is carved into a flat
 * single-channel / single-LUN / single-plane geometry of 256-page
 * blocks, with flash page and sector sizes equal to the configured
 * logical block size (bs).
 */
static int null_lnvm_id(struct request_queue *q, struct nvm_id *id)
{
	sector_t size = gb * 1024 * 1024 * 1024ULL;
	struct nvm_id_group *grp;

	id->ver_id = 0x1;
	id->vmnt = 0;
	id->cgrps = 1;		/* one configuration group, filled in below */
	id->cap = 0x3;
	id->dom = 0x1;
	id->ppat = NVM_ADDRMODE_LINEAR;

	do_div(size, bs); /* convert size to pages */
	grp = &id->groups[0];
	grp->mtype = 0;
	grp->fmtype = 1;
	grp->num_ch = 1;	/* flat geometry: 1 channel x 1 LUN x 1 plane */
	grp->num_lun = 1;
	grp->num_pln = 1;
	grp->num_blk = size / 256;	/* 256 pages per block */
	grp->num_pg = 256;
	grp->fpg_sz = bs;	/* flash page size == logical block size */
	grp->csecs = bs;
	grp->trdt = 25000;	/* NOTE(review): timing fields appear to be
				 * flash-like latencies — confirm units against
				 * the nvm_id_group definition in
				 * include/linux/lightnvm.h */
	grp->trdm = 25000;
	grp->tprt = 500000;
	grp->tprm = 500000;
	grp->tbet = 1500000;
	grp->tbem = 1500000;
	grp->mpos = 0x010101; /* single plane rwe */
	grp->cpar = hw_queue_depth;

	return 0;
}

/*
 * Create the simulated "DMA pool" for LightNVM: a page-backed mempool
 * of 64 order-0 pages.  No real DMA is involved for the null device.
 * Returns the pool, or NULL on allocation failure.
 */
static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name)
{
	mempool_t *pool = mempool_create_page_pool(64, 0);

	if (pool)
		return pool;

	pr_err("null_blk: Unable to create virtual memory pool\n");
	return NULL;
}

/* Tear down the page-backed mempool created by null_lnvm_create_dma_pool(). */
static void null_lnvm_destroy_dma_pool(void *pool)
{
	mempool_destroy(pool);
}

/*
 * "DMA" allocation for the null device: just take a page from the
 * mempool.  @dma_handler is never filled in, since no real DMA occurs.
 */
static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool,
				gfp_t mem_flags, dma_addr_t *dma_handler)
{
	return mempool_alloc(pool, mem_flags);
}

/* Return an entry obtained from null_lnvm_dev_dma_alloc() to the pool. */
static void null_lnvm_dev_dma_free(void *pool, void *entry,
							dma_addr_t dma_handler)
{
	mempool_free(entry, pool);
}

/*
 * LightNVM device operations backed by the null driver.  The "DMA
 * pool" callbacks are simulated with a page-backed mempool; no real
 * DMA handles are produced.
 */
static struct nvm_dev_ops null_lnvm_dev_ops = {
	.identity = null_lnvm_id,
	.submit_io = null_lnvm_submit_io,

	.create_dma_pool = null_lnvm_create_dma_pool,
	.destroy_dma_pool = null_lnvm_destroy_dma_pool,
	.dev_dma_alloc = null_lnvm_dev_dma_alloc,
	.dev_dma_free = null_lnvm_dev_dma_free,

	/* Simulate nvme protocol restriction */
	.max_phys_sect = 64,
};
#else
/* CONFIG_NVM disabled: empty stub so references still compile */
static struct nvm_dev_ops null_lnvm_dev_ops;
#endif /* CONFIG_NVM */

static int null_open(struct block_device *bdev, fmode_t mode)
{
return 0;
Expand Down Expand Up @@ -575,11 +702,6 @@ static int null_add_dev(void)
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
rv = -ENOMEM;
goto out_cleanup_blk_queue;
}

mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
Expand All @@ -589,6 +711,21 @@ static int null_add_dev(void)
blk_queue_logical_block_size(nullb->q, bs);
blk_queue_physical_block_size(nullb->q, bs);

sprintf(nullb->disk_name, "nullb%d", nullb->index);

if (use_lightnvm) {
rv = nvm_register(nullb->q, nullb->disk_name,
&null_lnvm_dev_ops);
if (rv)
goto out_cleanup_blk_queue;
goto done;
}

disk = nullb->disk = alloc_disk_node(1, home_node);
if (!disk) {
rv = -ENOMEM;
goto out_cleanup_lightnvm;
}
size = gb * 1024 * 1024 * 1024ULL;
set_capacity(disk, size >> 9);

Expand All @@ -598,10 +735,15 @@ static int null_add_dev(void)
disk->fops = &null_fops;
disk->private_data = nullb;
disk->queue = nullb->q;
sprintf(disk->disk_name, "nullb%d", nullb->index);
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

add_disk(disk);
done:
return 0;

out_cleanup_lightnvm:
if (use_lightnvm)
nvm_unregister(nullb->disk_name);
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
Expand All @@ -625,6 +767,12 @@ static int __init null_init(void)
bs = PAGE_SIZE;
}

if (use_lightnvm && queue_mode != NULL_Q_MQ) {
pr_warn("null_blk: LightNVM only supported for blk-mq\n");
pr_warn("null_blk: defaults queue mode to blk-mq\n");
queue_mode = NULL_Q_MQ;
}

if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
if (submit_queues < nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.",
Expand Down

0 comments on commit b2b7e00

Please sign in to comment.