Skip to content

Commit

Permalink
raid5-cache: statically allocate the recovery ra bio
Browse files Browse the repository at this point in the history
There is no need to preallocate the bio and reset it when used.  Just
allocate it on-stack and use a bvec placed next to the pages used for
it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Song Liu <song@kernel.org>
  • Loading branch information
Christoph Hellwig authored and Song Liu committed Mar 9, 2022
1 parent 0dd00cb commit 89f94b6
Showing 1 changed file with 13 additions and 15 deletions.
28 changes: 13 additions & 15 deletions drivers/md/raid5-cache.c
Original file line number Diff line number Diff line change
Expand Up @@ -1623,22 +1623,17 @@ struct r5l_recovery_ctx {
* just copy data from the pool.
*/
struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
struct bio_vec ra_bvec[R5L_RECOVERY_PAGE_POOL_SIZE];
sector_t pool_offset; /* offset of first page in the pool */
int total_pages; /* total allocated pages */
int valid_pages; /* pages with valid data */
struct bio *ra_bio; /* bio to do the read ahead */
};

static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx)
{
struct page *page;

ctx->ra_bio = bio_alloc_bioset(NULL, BIO_MAX_VECS, 0, GFP_KERNEL,
&log->bs);
if (!ctx->ra_bio)
return -ENOMEM;

ctx->valid_pages = 0;
ctx->total_pages = 0;
while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
Expand All @@ -1650,10 +1645,8 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
ctx->total_pages += 1;
}

if (ctx->total_pages == 0) {
bio_put(ctx->ra_bio);
if (ctx->total_pages == 0)
return -ENOMEM;
}

ctx->pool_offset = 0;
return 0;
Expand All @@ -1666,7 +1659,6 @@ static void r5l_recovery_free_ra_pool(struct r5l_log *log,

for (i = 0; i < ctx->total_pages; ++i)
put_page(ctx->ra_pool[i]);
bio_put(ctx->ra_bio);
}

/*
Expand All @@ -1679,15 +1671,19 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
struct r5l_recovery_ctx *ctx,
sector_t offset)
{
bio_reset(ctx->ra_bio, log->rdev->bdev, REQ_OP_READ);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
struct bio bio;
int ret;

bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
R5L_RECOVERY_PAGE_POOL_SIZE, REQ_OP_READ);
bio.bi_iter.bi_sector = log->rdev->data_offset + offset;

ctx->valid_pages = 0;
ctx->pool_offset = offset;

while (ctx->valid_pages < ctx->total_pages) {
bio_add_page(ctx->ra_bio,
ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
__bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
0);
ctx->valid_pages += 1;

offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
Expand All @@ -1696,7 +1692,9 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
break;
}

return submit_bio_wait(ctx->ra_bio);
ret = submit_bio_wait(&bio);
bio_uninit(&bio);
return ret;
}

/*
Expand Down

0 comments on commit 89f94b6

Please sign in to comment.