Skip to content

Commit

Permalink
lib/scatterlist: add check when merging zone device pages
Browse files Browse the repository at this point in the history
Consecutive zone device pages should not be merged into the same sgl
or bvec segment with other types of pages or if they belong to different
pgmaps. Otherwise getting the pgmap of a given segment is not possible
without scanning the entire segment. This helper returns true if either
both pages are not zone device pages or both pages are zone device
pages with the same pgmap.

Factor out the check for page mergeability into a pages_are_mergeable()
helper and add a check with zone_device_pages_have_same_pgmap().

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20221021174116.7200-6-logang@deltatee.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Logan Gunthorpe authored and Jens Axboe committed Nov 9, 2022
1 parent 49580e6 commit 1567b49
Showing 1 changed file with 15 additions and 10 deletions.
25 changes: 15 additions & 10 deletions lib/scatterlist.c
Original file line number Diff line number Diff line change
Expand Up @@ -410,6 +410,15 @@ static struct scatterlist *get_next_sg(struct sg_append_table *table,
return new_sg;
}

/*
 * Return true if page @a may be merged into the same sgl segment as
 * page @b: @a must be physically contiguous with (immediately follow)
 * @b, and the two pages must either both be normal pages or both be
 * zone device pages sharing the same pgmap.
 */
static bool pages_are_mergeable(struct page *a, struct page *b)
{
	return page_to_pfn(a) == page_to_pfn(b) + 1 &&
	       zone_device_pages_have_same_pgmap(a, b);
}

/**
* sg_alloc_append_table_from_pages - Allocate and initialize an append sg
* table from an array of pages
Expand Down Expand Up @@ -447,6 +456,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
unsigned int added_nents = 0;
struct scatterlist *s = sgt_append->prv;
struct page *last_pg;

/*
* The algorithm below requires max_segment to be aligned to PAGE_SIZE
Expand All @@ -460,21 +470,17 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
return -EOPNOTSUPP;

if (sgt_append->prv) {
unsigned long paddr =
(page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
sgt_append->prv->offset + sgt_append->prv->length) /
PAGE_SIZE;

if (WARN_ON(offset))
return -EINVAL;

/* Merge contiguous pages into the last SG */
prv_len = sgt_append->prv->length;
while (n_pages && page_to_pfn(pages[0]) == paddr) {
last_pg = sg_page(sgt_append->prv);
while (n_pages && pages_are_mergeable(last_pg, pages[0])) {
if (sgt_append->prv->length + PAGE_SIZE > max_segment)
break;
sgt_append->prv->length += PAGE_SIZE;
paddr++;
last_pg = pages[0];
pages++;
n_pages--;
}
Expand All @@ -488,7 +494,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
for (i = 1; i < n_pages; i++) {
seg_len += PAGE_SIZE;
if (seg_len >= max_segment ||
page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
!pages_are_mergeable(pages[i], pages[i - 1])) {
chunks++;
seg_len = 0;
}
Expand All @@ -504,8 +510,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
for (j = cur_page + 1; j < n_pages; j++) {
seg_len += PAGE_SIZE;
if (seg_len >= max_segment ||
page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
!pages_are_mergeable(pages[j], pages[j - 1]))
break;
}

Expand Down

0 comments on commit 1567b49

Please sign in to comment.