Skip to content

Commit

Permalink
block: Declare several function pointer arguments 'const'
Browse files Browse the repository at this point in the history
Make it clear to the compiler and also to humans that the functions
that query request queue properties do not modify any member of the
request_queue data structure.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Bart Van Assche authored and Jens Axboe committed Aug 5, 2019
1 parent a87ccce commit af2c68f
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 19 deletions.
7 changes: 4 additions & 3 deletions block/blk-merge.c
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
return sectors;
}

static unsigned get_max_segment_size(struct request_queue *q,
static unsigned get_max_segment_size(const struct request_queue *q,
unsigned offset)
{
unsigned long mask = queue_segment_boundary(q);
Expand All @@ -161,8 +161,9 @@ static unsigned get_max_segment_size(struct request_queue *q,
* Split the bvec @bv into segments, and update all kinds of
* variables.
*/
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
unsigned *nsegs, unsigned *sectors, unsigned max_segs)
static bool bvec_split_segs(const struct request_queue *q,
const struct bio_vec *bv, unsigned *nsegs,
unsigned *sectors, unsigned max_segs)
{
unsigned len = bv->bv_len;
unsigned total_len = 0;
Expand Down
32 changes: 16 additions & 16 deletions include/linux/blkdev.h
Original file line number Diff line number Diff line change
Expand Up @@ -1232,42 +1232,42 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(struct request_queue *q)
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
/* Read-only accessor for the queue's virtual boundary mask limit. */
static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	unsigned long virt_mask = q->limits.virt_boundary_mask;

	return virt_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
/* Read-only accessor for the queue's max_hw_sectors limit. */
static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	unsigned int hw_sectors = q->limits.max_hw_sectors;

	return hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
static inline unsigned short queue_max_segments(const struct request_queue *q)
{
return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(struct request_queue *q)
/* Read-only accessor for the queue's max_discard_segments limit. */
static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	unsigned short discard_segs = q->limits.max_discard_segments;

	return discard_segs;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
static inline unsigned short queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;

Expand All @@ -1282,7 +1282,7 @@ static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
/* Read-only accessor for the queue's physical block size limit. */
static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	unsigned int phys_bs = q->limits.physical_block_size;

	return phys_bs;
}
Expand All @@ -1292,7 +1292,7 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
static inline unsigned int queue_io_min(const struct request_queue *q)
{
return q->limits.io_min;
}
Expand All @@ -1302,7 +1302,7 @@ static inline int bdev_io_min(struct block_device *bdev)
return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
/* Read-only accessor for the queue's io_opt limit. */
static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	unsigned int opt_io = q->limits.io_opt;

	return opt_io;
}
Expand All @@ -1312,7 +1312,7 @@ static inline int bdev_io_opt(struct block_device *bdev)
return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
static inline int queue_alignment_offset(const struct request_queue *q)
{
if (q->limits.misaligned)
return -1;
Expand Down Expand Up @@ -1342,7 +1342,7 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
static inline int queue_discard_alignment(const struct request_queue *q)
{
if (q->limits.discard_misaligned)
return -1;
Expand Down Expand Up @@ -1432,7 +1432,7 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
/*
 * Read-only accessor for the queue's DMA alignment mask; falls back to
 * 511 when no queue is supplied.
 */
static inline int queue_dma_alignment(const struct request_queue *q)
{
	if (!q)
		return 511;

	return q->dma_alignment;
}
Expand Down Expand Up @@ -1543,7 +1543,7 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q,
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
queue_max_integrity_segments(const struct request_queue *q)
{
return q->limits.max_integrity_segments;
}
Expand Down Expand Up @@ -1626,7 +1626,7 @@ static inline void blk_queue_max_integrity_segments(struct request_queue *q,
unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
/*
 * Always reports zero integrity segments.
 * NOTE(review): this appears to be the stub used when block integrity
 * support is not configured — confirm against the surrounding #ifdef.
 */
static inline unsigned short queue_max_integrity_segments(const struct request_queue *q)
{
	return 0;
}
Expand Down

0 comments on commit af2c68f

Please sign in to comment.