target: Pass through I/O topology for block backstores
In addition to block size (already implemented), passing through the
alignment offset, logical-to-physical block exponent, I/O granularity,
and optimal I/O length will allow initiators to properly handle layout
on LUNs with 4K block sizes.

Tested with various weird values via the scsi_debug module.

One thing to look at with this patch is the new Block Limits values:
instead of a granularity of 1 and an optimal transfer length of 8192,
LIO will now return whatever the block device reports, which may affect
performance.

Signed-off-by: Andy Grover <agrover@redhat.com>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Andy Grover authored and Nicholas Bellinger committed Nov 12, 2013
1 parent f01b9f7 commit 7f7caf6
Showing 4 changed files with 68 additions and 3 deletions.
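
As a concrete illustration of what this gives initiators (the numbers are hypothetical): on a 512e backing device with 512-byte logical and 4096-byte physical sectors whose first aligned sector is LBA 7, READ CAPACITY (16) can now report a logical-blocks-per-physical-block exponent of 3 and a lowest aligned LBA of 7, and the Block Limits VPD page can carry the device's real I/O granularity and optimal transfer length instead of the emulated defaults, so the initiator can align partitions and size I/O to the underlying 4K boundaries. Short sketches of the arithmetic follow each hunk below.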
drivers/target/target_core_iblock.c (43 additions, 0 deletions)
@@ -710,6 +710,45 @@ static sector_t iblock_get_blocks(struct se_device *dev)
         return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
+static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+        int ret;
+
+        ret = bdev_alignment_offset(bd);
+        if (ret == -1)
+                return 0;
+
+        /* convert offset-bytes to offset-lbas */
+        return ret / bdev_logical_block_size(bd);
+}
+
+static unsigned int iblock_get_lbppbe(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+        int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);
+
+        return ilog2(logs_per_phys);
+}
+
+static unsigned int iblock_get_io_min(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+
+        return bdev_io_min(bd);
+}
+
+static unsigned int iblock_get_io_opt(struct se_device *dev)
+{
+        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+        struct block_device *bd = ib_dev->ibd_bd;
+
+        return bdev_io_opt(bd);
+}
+
 static struct sbc_ops iblock_sbc_ops = {
         .execute_rw = iblock_execute_rw,
         .execute_sync_cache = iblock_execute_sync_cache,
@@ -749,6 +788,10 @@ static struct se_subsystem_api iblock_template = {
         .show_configfs_dev_params = iblock_show_configfs_dev_params,
         .get_device_type = sbc_get_device_type,
         .get_blocks = iblock_get_blocks,
+        .get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
+        .get_lbppbe = iblock_get_lbppbe,
+        .get_io_min = iblock_get_io_min,
+        .get_io_opt = iblock_get_io_opt,
         .get_write_cache = iblock_get_write_cache,
 };
 
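A minimal userspace sketch of the conversions the new iblock helpers perform, with hard-coded stand-ins for what bdev_logical_block_size(), bdev_physical_block_size() and bdev_alignment_offset() would return for a hypothetical 512e disk misaligned by 3584 bytes:

    #include <stdio.h>

    /* Hypothetical queue limits standing in for the block layer's answers. */
    static const unsigned int logical_block_size = 512;
    static const unsigned int physical_block_size = 4096;
    static const unsigned int alignment_offset_bytes = 3584;

    /* Integer log2; matches the kernel's ilog2() for exact powers of two. */
    static unsigned int ilog2_uint(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            /* iblock_get_lbppbe(): logical blocks per physical block, as an exponent */
            unsigned int lbppbe = ilog2_uint(physical_block_size / logical_block_size);
            /* iblock_get_alignment_offset_lbas(): convert offset-bytes to offset-lbas */
            unsigned int lowest_aligned_lba = alignment_offset_bytes / logical_block_size;

            printf("lbppbe = %u (%u logical blocks per physical block)\n",
                   lbppbe, 1u << lbppbe);
            printf("lowest aligned LBA = %u\n", lowest_aligned_lba);
            return 0;
    }

Run as-is this prints lbppbe = 3 and lowest aligned LBA = 7, i.e. the same 4K topology the local block layer sees is what the target will now report.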
drivers/target/target_core_sbc.c (11 additions, 1 deletion)
@@ -105,12 +105,22 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
         buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff;
         buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff;
         buf[11] = dev->dev_attrib.block_size & 0xff;
+
+        if (dev->transport->get_lbppbe)
+                buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
+
+        if (dev->transport->get_alignment_offset_lbas) {
+                u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
+                buf[14] = (lalba >> 8) & 0x3f;
+                buf[15] = lalba & 0xff;
+        }
+
         /*
          * Set Thin Provisioning Enable bit following sbc3r22 in section
          * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
          */
         if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
-                buf[14] = 0x80;
+                buf[14] |= 0x80;
 
         rbuf = transport_kmap_data_sg(cmd);
         if (rbuf) {
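A rough sketch of how an initiator would decode the READ CAPACITY (16) bytes filled in above; the layout follows SBC-3 and the values (exponent 3, lowest aligned LBA 7, provisioning bit set) are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t buf[32] = { 0 };

            /* Encode bytes 13-15 the way the patched sbc_emulate_readcapacity_16()
             * does, for lbppbe = 3 and lowest aligned LBA = 7, then OR in the
             * thin-provisioning bit exactly as the code above does. */
            buf[13] = 3 & 0x0f;
            buf[14] = (7 >> 8) & 0x3f;
            buf[15] = 7 & 0xff;
            buf[14] |= 0x80;

            /* Decode as an initiator would. */
            unsigned int lbppbe = buf[13] & 0x0f;
            unsigned int lalba = ((buf[14] & 0x3f) << 8) | buf[15];
            unsigned int tpe = (buf[14] >> 7) & 0x1;

            printf("physical block = %u logical blocks, lowest aligned LBA = %u, TPE = %u\n",
                   1u << lbppbe, lalba, tpe);
            return 0;
    }

It also shows why the thin-provisioning bit is now OR'd in rather than assigned: a plain store to byte 14 would clobber the upper LALBA bits written just before it.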
drivers/target/target_core_spc.c (9 additions, 2 deletions)
@@ -452,6 +452,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         struct se_device *dev = cmd->se_dev;
         u32 max_sectors;
         int have_tp = 0;
+        int opt, min;
 
         /*
          * Following spc3r22 section 6.5.3 Block Limits VPD page, when
@@ -475,7 +476,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         /*
          * Set OPTIMAL TRANSFER LENGTH GRANULARITY
          */
-        put_unaligned_be16(1, &buf[6]);
+        if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
+                put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
+        else
+                put_unaligned_be16(1, &buf[6]);
 
         /*
          * Set MAXIMUM TRANSFER LENGTH
@@ -487,7 +491,10 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
         /*
          * Set OPTIMAL TRANSFER LENGTH
          */
-        put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
+        if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
+                put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
+        else
+                put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);
 
         /*
          * Exit now if we don't support TP.
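A small sketch of the Block Limits arithmetic added above, with hypothetical values standing in for dev->dev_attrib.block_size, bdev_io_min() and bdev_io_opt() (for example a RAID volume with a 4K chunk and a 64K stripe):

    #include <stdio.h>

    int main(void)
    {
            unsigned int block_size = 512;  /* dev->dev_attrib.block_size */
            unsigned int io_min = 4096;     /* stand-in for bdev_io_min(bd) */
            unsigned int io_opt = 65536;    /* stand-in for bdev_io_opt(bd) */

            /* spc_emulate_evpd_b0(): both fields are expressed in logical blocks */
            printf("OPTIMAL TRANSFER LENGTH GRANULARITY = %u\n", io_min / block_size);
            printf("OPTIMAL TRANSFER LENGTH = %u\n", io_opt / block_size);
            return 0;
    }

With these inputs the page reports a granularity of 8 and an optimal length of 128 blocks rather than the fixed 1 and dev_attrib.optimal_sectors; if the device reports zero for either limit, the code above falls back to the old defaults.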
include/target/target_core_backend.h (5 additions, 0 deletions)
@@ -34,6 +34,11 @@ struct se_subsystem_api {
         sense_reason_t (*parse_cdb)(struct se_cmd *cmd);
         u32 (*get_device_type)(struct se_device *);
         sector_t (*get_blocks)(struct se_device *);
+        sector_t (*get_alignment_offset_lbas)(struct se_device *);
+        /* lbppbe = logical blocks per physical block exponent. see SBC-3 */
+        unsigned int (*get_lbppbe)(struct se_device *);
+        unsigned int (*get_io_min)(struct se_device *);
+        unsigned int (*get_io_opt)(struct se_device *);
         unsigned char *(*get_sense_buffer)(struct se_cmd *);
         bool (*get_write_cache)(struct se_device *);
 };
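The new hooks are optional: the core NULL-checks each one before calling it, as the sbc and spc hunks above do. A toy model of that pattern, using illustrative names rather than the real target structures:

    #include <stdio.h>

    /* Toy model of optional backend callbacks: the caller checks for NULL
     * (and for a zero result) before trusting the hook, mirroring the
     * checks in sbc_emulate_readcapacity_16() and spc_emulate_evpd_b0(). */
    struct backend_ops {
            unsigned int (*get_io_min)(void);       /* optional */
    };

    static unsigned int block_get_io_min(void)
    {
            return 4096;    /* stand-in for bdev_io_min() */
    }

    static unsigned int granularity(const struct backend_ops *ops, unsigned int block_size)
    {
            unsigned int min;

            if (ops->get_io_min && (min = ops->get_io_min()))
                    return min / block_size;
            return 1;       /* old fixed default */
    }

    int main(void)
    {
            struct backend_ops block = { .get_io_min = block_get_io_min };
            struct backend_ops ramdisk = { 0 };     /* hook left unset */

            printf("block backend granularity: %u blocks\n", granularity(&block, 512));
            printf("ramdisk backend granularity: %u blocks\n", granularity(&ramdisk, 512));
            return 0;
    }

A backend that cannot report topology (for example a ramdisk) simply leaves the pointers unset and the emulated defaults remain in effect.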
