mm/hugetlb: remove unnecessary holding of hugetlb_lock
In isolate_or_dissolve_huge_folio(), the hugetlb_lock is acquired only to
obtain the correct hstate, which is then passed to
alloc_and_dissolve_hugetlb_folio().

alloc_and_dissolve_hugetlb_folio() itself also acquires the hugetlb_lock.
We can have alloc_and_dissolve_hugetlb_folio() look up the hstate by
itself, so that isolate_or_dissolve_huge_folio() no longer needs to
acquire the hugetlb_lock.  In addition, we keep the folio_test_hugetlb()
check within isolate_or_dissolve_huge_folio().  By doing so, we avoid
disrupting the normal path by needlessly holding the hugetlb_lock.
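
The locking idiom at work here is a cheap unlocked check on the common
path, with the slow path re-checking the state once the lock is held.
Below is a small, self-contained userspace sketch of that idiom (the
names are hypothetical and a pthread mutex stands in for hugetlb_lock;
it is an analogy, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for hugetlb_lock: protects the slow-path replacement. */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry {
	atomic_bool is_special;		/* stands in for folio_test_hugetlb() */
	atomic_int  refcount;		/* stands in for folio_ref_count() */
};

/* Slow path: caller holds table_lock and has re-validated the entry. */
static void replace_entry_locked(struct entry *e)
{
	atomic_store(&e->is_special, false);
}

static int maybe_replace(struct entry *e)
{
	/* Fast path: do not touch the lock if the entry is clearly not ours. */
	if (!atomic_load(&e->is_special))
		return 0;

	pthread_mutex_lock(&table_lock);
	/*
	 * The entry may have changed between the unlocked check and here,
	 * so re-check the state under the lock before acting on it.
	 */
	if (atomic_load(&e->is_special) && atomic_load(&e->refcount) == 0)
		replace_entry_locked(e);
	pthread_mutex_unlock(&table_lock);
	return 0;
}

int main(void)
{
	struct entry e = { .is_special = true, .refcount = 0 };

	maybe_replace(&e);
	printf("is_special after replace: %d\n", atomic_load(&e.is_special));
	return 0;
}

Built with a plain C compiler and -pthread, the sketch only shows that the
common case never takes table_lock, mirroring how the patched functions
never take hugetlb_lock for non-hugetlb folios.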

replace_free_hugepage_folios() has the same issue; address it in the same
way.

Addresses a possible performance problem introduced by the hotfix
113ed54 ("mm/hugetlb: fix kernel NULL pointer dereference when
replacing free hugetlb folios").

Link: https://lkml.kernel.org/r/1748317010-16272-1-git-send-email-yangge1116@126.com
Fixes: 113ed54 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios")
Signed-off-by: Ge Yang <yangge1116@126.com>
Suggested-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Ge Yang authored and Andrew Morton committed Jun 25, 2025
1 parent 3746351 commit 344ef45
54 changes: 17 additions & 37 deletions mm/hugetlb.c
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 /*
  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
  * the old one
- * @h: struct hstate old page belongs to
  * @old_folio: Old folio to dissolve
  * @list: List to isolate the page in case we need to
  * Returns 0 on success, otherwise negated error.
  */
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
-			struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+			struct list_head *list)
 {
-	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+	gfp_t gfp_mask;
+	struct hstate *h;
 	int nid = folio_nid(old_folio);
 	struct folio *new_folio = NULL;
 	int ret = 0;
 
 retry:
+	/*
+	 * The old_folio might have been dissolved from under our feet, so make sure
+	 * to carefully check the state under the lock.
+	 */
 	spin_lock_irq(&hugetlb_lock);
 	if (!folio_test_hugetlb(old_folio)) {
 		/*
@@ -2829,8 +2833,10 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 		cond_resched();
 		goto retry;
 	} else {
+		h = folio_hstate(old_folio);
 		if (!new_folio) {
 			spin_unlock_irq(&hugetlb_lock);
+			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 			new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
 							      NULL, NULL);
 			if (!new_folio)
@@ -2874,35 +2880,24 @@ static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
 
 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 {
-	struct hstate *h;
 	int ret = -EBUSY;
 
-	/*
-	 * The page might have been dissolved from under our feet, so make sure
-	 * to carefully check the state under the lock.
-	 * Return success when racing as if we dissolved the page ourselves.
-	 */
-	spin_lock_irq(&hugetlb_lock);
-	if (folio_test_hugetlb(folio)) {
-		h = folio_hstate(folio);
-	} else {
-		spin_unlock_irq(&hugetlb_lock);
+	/* Not to disrupt normal path by vainly holding hugetlb_lock */
+	if (!folio_test_hugetlb(folio))
 		return 0;
-	}
-	spin_unlock_irq(&hugetlb_lock);
 
 	/*
 	 * Fence off gigantic pages as there is a cyclic dependency between
 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
 	 * of bailing out right away without further retrying.
 	 */
-	if (hstate_is_gigantic(h))
+	if (folio_order(folio) > MAX_PAGE_ORDER)
 		return -ENOMEM;
 
 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
 		ret = 0;
 	else if (!folio_ref_count(folio))
-		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
 
 	return ret;
 }
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
  */
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct hstate *h;
 	struct folio *folio;
 	int ret = 0;
 
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 	while (start_pfn < end_pfn) {
 		folio = pfn_folio(start_pfn);
 
-		/*
-		 * The folio might have been dissolved from under our feet, so make sure
-		 * to carefully check the state under the lock.
-		 */
-		spin_lock_irq(&hugetlb_lock);
-		if (folio_test_hugetlb(folio)) {
-			h = folio_hstate(folio);
-		} else {
-			spin_unlock_irq(&hugetlb_lock);
-			start_pfn++;
-			continue;
-		}
-		spin_unlock_irq(&hugetlb_lock);
-
-		if (!folio_ref_count(folio)) {
-			ret = alloc_and_dissolve_hugetlb_folio(h, folio,
-							       &isolate_list);
+		/* Not to disrupt normal path by vainly holding hugetlb_lock */
+		if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+			ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
 			if (ret)
 				break;
 
