Skip to content

Commit

Permalink
mm: hugetlb_vmemmap: cleanup hugetlb_free_vmemmap_enabled*
Browse files Browse the repository at this point in the history
The word of "free" is not expressive enough to express the feature of
optimizing vmemmap pages associated with each HugeTLB, rename this keyword
to "optimize".  In this patch, clean up the static key and
hugetlb_free_vmemmap_enabled() to make code more expressive.

Link: https://lkml.kernel.org/r/20220404074652.68024-3-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
  • Loading branch information
Muchun Song authored and akpm committed Apr 29, 2022
1 parent 5981611 commit f10f144
Show file tree
Hide file tree
Showing 4 changed files with 13 additions and 13 deletions.
2 changes: 1 addition & 1 deletion arch/arm64/mm/flush.c
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ void flush_dcache_page(struct page *page)
* is reused (more details can refer to the comments above
* page_fixed_fake_head()).
*/
if (hugetlb_free_vmemmap_enabled() && PageHuge(page))
if (hugetlb_optimize_vmemmap_enabled() && PageHuge(page))
page = compound_head(page);

if (test_bit(PG_dcache_clean, &page->flags))
Expand Down
12 changes: 6 additions & 6 deletions include/linux/page-flags.h
Original file line number Diff line number Diff line change
Expand Up @@ -192,16 +192,16 @@ enum pageflags {

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
hugetlb_free_vmemmap_enabled_key);
hugetlb_optimize_vmemmap_key);

static __always_inline bool hugetlb_free_vmemmap_enabled(void)
static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
{
return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
&hugetlb_free_vmemmap_enabled_key);
&hugetlb_optimize_vmemmap_key);
}

/*
* If the feature of freeing some vmemmap pages associated with each HugeTLB
* If the feature of optimizing vmemmap pages associated with each HugeTLB
* page is enabled, the head vmemmap page frame is reused and all of the tail
* vmemmap addresses map to the head vmemmap page frame (furture details can
* refer to the figure at the head of the mm/hugetlb_vmemmap.c). In other
Expand All @@ -218,7 +218,7 @@ static __always_inline bool hugetlb_free_vmemmap_enabled(void)
*/
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
if (!hugetlb_free_vmemmap_enabled())
if (!hugetlb_optimize_vmemmap_enabled())
return page;

/*
Expand Down Expand Up @@ -247,7 +247,7 @@ static inline const struct page *page_fixed_fake_head(const struct page *page)
return page;
}

static inline bool hugetlb_free_vmemmap_enabled(void)
static inline bool hugetlb_optimize_vmemmap_enabled(void)
{
return false;
}
Expand Down
10 changes: 5 additions & 5 deletions mm/hugetlb_vmemmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -189,8 +189,8 @@
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)

DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
hugetlb_free_vmemmap_enabled_key);
EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static int __init hugetlb_vmemmap_early_param(char *buf)
{
Expand All @@ -204,9 +204,9 @@ static int __init hugetlb_vmemmap_early_param(char *buf)
return -EINVAL;

if (!strcmp(buf, "on"))
static_branch_enable(&hugetlb_free_vmemmap_enabled_key);
static_branch_enable(&hugetlb_optimize_vmemmap_key);
else if (!strcmp(buf, "off"))
static_branch_disable(&hugetlb_free_vmemmap_enabled_key);
static_branch_disable(&hugetlb_optimize_vmemmap_key);
else
return -EINVAL;

Expand Down Expand Up @@ -282,7 +282,7 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
BUILD_BUG_ON(__NR_USED_SUBPAGE >=
RESERVE_VMEMMAP_SIZE / sizeof(struct page));

if (!hugetlb_free_vmemmap_enabled())
if (!hugetlb_optimize_vmemmap_enabled())
return;

vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
Expand Down
2 changes: 1 addition & 1 deletion mm/memory_hotplug.c
Original file line number Diff line number Diff line change
Expand Up @@ -1289,7 +1289,7 @@ bool mhp_supports_memmap_on_memory(unsigned long size)
* populate a single PMD.
*/
return memmap_on_memory &&
!hugetlb_free_vmemmap_enabled() &&
!hugetlb_optimize_vmemmap_enabled() &&
IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
size == memory_block_size_bytes() &&
IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
Expand Down

0 comments on commit f10f144

Please sign in to comment.