hugetlb: arm64: add mte support
Enable MTE support for hugetlb.

The MTE page flags will be set on the folio only. When copying a
hugetlb folio (for example, on CoW), the tags for all subpages will be
copied when the first subpage is copied.

When freeing a hugetlb folio, the MTE flags will be cleared.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Yang Shi <yang@os.amperecomputing.com>
Link: https://lore.kernel.org/r/20241001225220.271178-1-yang@os.amperecomputing.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Yang Shi authored and Catalin Marinas committed Oct 16, 2024
1 parent 9852d85 commit 25c17c4
Showing 9 changed files with 159 additions and 8 deletions.
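
With this in place, userspace can request a tag-enabled hugetlb mapping directly. A minimal illustrative sketch (not part of the patch; assumes MTE-capable hardware, CONFIG_ARM64_MTE, and 2MB hugetlb pages reserved in the pool):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20   /* arm64-specific protection flag */
    #endif

    int main(void)
    {
            /* Anonymous 2MB hugetlb mapping, tag-enabled from the start */
            void *p = mmap(NULL, 2UL << 20,
                           PROT_READ | PROT_WRITE | PROT_MTE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            return 0;
    }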
8 changes: 8 additions & 0 deletions arch/arm64/include/asm/hugetlb.h
@@ -11,6 +11,7 @@
#define __ASM_HUGETLB_H

#include <asm/cacheflush.h>
#include <asm/mte.h>
#include <asm/page.h>

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -21,6 +22,13 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h);
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
clear_bit(PG_dcache_clean, &folio->flags);

#ifdef CONFIG_ARM64_MTE
if (system_supports_mte()) {
clear_bit(PG_mte_tagged, &folio->flags);
clear_bit(PG_mte_lock, &folio->flags);
}
#endif
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags

3 changes: 2 additions & 1 deletion arch/arm64/include/asm/mman.h
@@ -38,7 +38,8 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
* backed by tags-capable memory. The vm_flags may be overridden by a
* filesystem supporting MTE (RAM-based).
*/
if (system_supports_mte() && (flags & MAP_ANONYMOUS))
if (system_supports_mte() &&
(flags & (MAP_ANONYMOUS | MAP_HUGETLB)))
return VM_MTE_ALLOWED;

return 0;
67 changes: 67 additions & 0 deletions arch/arm64/include/asm/mte.h
@@ -41,6 +41,8 @@ void mte_free_tag_storage(char *storage);

static inline void set_page_mte_tagged(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));

/*
* Ensure that the tags written prior to this function are visible
* before the page flags update.
@@ -53,6 +55,8 @@ static inline bool page_mte_tagged(struct page *page)
{
bool ret = test_bit(PG_mte_tagged, &page->flags);

VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));

/*
* If the page is tagged, ensure ordering with a likely subsequent
* read of the tags.
@@ -76,6 +80,8 @@ static inline bool page_mte_tagged(struct page *page)
*/
static inline bool try_page_mte_tagging(struct page *page)
{
VM_WARN_ON_ONCE(folio_test_hugetlb(page_folio(page)));

if (!test_and_set_bit(PG_mte_lock, &page->flags))
return true;

@@ -157,6 +163,67 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,

#endif /* CONFIG_ARM64_MTE */

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_ARM64_MTE)
static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

/*
* Ensure that the tags written prior to this function are visible
* before the folio flags update.
*/
smp_wmb();
set_bit(PG_mte_tagged, &folio->flags);

}

static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
bool ret = test_bit(PG_mte_tagged, &folio->flags);

VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

/*
* If the folio is tagged, ensure ordering with a likely subsequent
* read of the tags.
*/
if (ret)
smp_rmb();
return ret;
}

static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
VM_WARN_ON_ONCE(!folio_test_hugetlb(folio));

if (!test_and_set_bit(PG_mte_lock, &folio->flags))
return true;

/*
* The tags are either being initialised or may have been initialised
* already. Check if the PG_mte_tagged flag has been set or wait
* otherwise.
*/
smp_cond_load_acquire(&folio->flags, VAL & (1UL << PG_mte_tagged));

return false;
}
#else
static inline void folio_set_hugetlb_mte_tagged(struct folio *folio)
{
}

static inline bool folio_test_hugetlb_mte_tagged(struct folio *folio)
{
return false;
}

static inline bool folio_try_hugetlb_mte_tagging(struct folio *folio)
{
return false;
}
#endif

static inline void mte_disable_tco_entry(struct task_struct *task)
{
if (!system_supports_mte())
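The three hugetlb helpers above follow the same two-bit protocol as the per-page versions: PG_mte_lock serialises the tag initialiser and PG_mte_tagged publishes the tags. Condensed into the intended caller pattern (a restatement of what mte_sync_tags() and sanitise_mte_tags() below do, not a new API):

    /*
     * The first caller to grab PG_mte_lock initialises tags for the
     * whole folio; concurrent callers wait inside
     * folio_try_hugetlb_mte_tagging() until PG_mte_tagged is observed.
     */
    if (folio_try_hugetlb_mte_tagging(folio)) {
            long i;

            for (i = 0; i < folio_nr_pages(folio); i++)
                    mte_clear_page_tags(page_address(folio_page(folio, i)));
            folio_set_hugetlb_mte_tagged(folio);
    }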
6 changes: 6 additions & 0 deletions arch/arm64/kernel/hibernate.c
@@ -266,9 +266,15 @@ static int swsusp_mte_save_tags(void)
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
struct page *page = pfn_to_online_page(pfn);
struct folio *folio;

if (!page)
continue;
folio = page_folio(page);

if (folio_test_hugetlb(folio) &&
!folio_test_hugetlb_mte_tagged(folio))
continue;

if (!page_mte_tagged(page))
continue;
27 changes: 25 additions & 2 deletions arch/arm64/kernel/mte.c
@@ -38,7 +38,24 @@ EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode);
void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
struct page *page = pte_page(pte);
unsigned int i;
struct folio *folio = page_folio(page);
unsigned long i;

if (folio_test_hugetlb(folio)) {
unsigned long nr = folio_nr_pages(folio);

/* Hugetlb MTE flags are set for head page only */
if (folio_try_hugetlb_mte_tagging(folio)) {
for (i = 0; i < nr; i++, page++)
mte_clear_page_tags(page_address(page));
folio_set_hugetlb_mte_tagged(folio);
}

/* ensure the tags are visible before the PTE is set */
smp_wmb();

return;
}

/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
@@ -410,6 +427,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
void *maddr;
struct page *page = get_user_page_vma_remote(mm, addr,
gup_flags, &vma);
struct folio *folio;

if (IS_ERR(page)) {
err = PTR_ERR(page);
@@ -428,7 +446,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
put_page(page);
break;
}
WARN_ON_ONCE(!page_mte_tagged(page));

folio = page_folio(page);
if (folio_test_hugetlb(folio))
WARN_ON_ONCE(!folio_test_hugetlb_mte_tagged(folio));
else
WARN_ON_ONCE(!page_mte_tagged(page));

/* limit access to the end of the page */
offset = offset_in_page(addr);
16 changes: 13 additions & 3 deletions arch/arm64/kvm/guest.c
@@ -1055,6 +1055,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
void *maddr;
unsigned long num_tags;
struct page *page;
struct folio *folio;

if (is_error_noslot_pfn(pfn)) {
ret = -EFAULT;
@@ -1068,10 +1069,13 @@
ret = -EFAULT;
goto out;
}
folio = page_folio(page);
maddr = page_address(page);

if (!write) {
if (page_mte_tagged(page))
if ((folio_test_hugetlb(folio) &&
folio_test_hugetlb_mte_tagged(folio)) ||
page_mte_tagged(page))
num_tags = mte_copy_tags_to_user(tags, maddr,
MTE_GRANULES_PER_PAGE);
else
@@ -1085,14 +1089,20 @@
* __set_ptes() in the VMM but still overriding the
* tags, hence ignoring the return value.
*/
try_page_mte_tagging(page);
if (folio_test_hugetlb(folio))
folio_try_hugetlb_mte_tagging(folio);
else
try_page_mte_tagging(page);
num_tags = mte_copy_tags_from_user(maddr, tags,
MTE_GRANULES_PER_PAGE);

/* uaccess failed, don't leave stale tags */
if (num_tags != MTE_GRANULES_PER_PAGE)
mte_clear_page_tags(maddr);
set_page_mte_tagged(page);
if (folio_test_hugetlb(folio))
folio_set_hugetlb_mte_tagged(folio);
else
set_page_mte_tagged(page);

kvm_release_pfn_dirty(pfn);
}
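For context, the hunk above extends the existing KVM_ARM_MTE_COPY_TAGS ioctl to hugetlb-backed guest memory. A VMM would drive it roughly as follows (illustrative sketch; error handling elided, 4K guest pages assumed, and the VM must have been created with the MTE capability enabled):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Copy one page worth of tags out of the guest, e.g. for migration.
     * 'tags' receives one tag byte per 16-byte MTE granule, i.e. 256
     * bytes for a 4K page.
     */
    static int save_page_tags(int vm_fd, __u64 guest_ipa, void *tags)
    {
            struct kvm_arm_copy_mte_tags copy = {
                    .guest_ipa = guest_ipa,
                    .length = 4096,
                    .addr = tags,
                    .flags = KVM_ARM_TAGS_FROM_GUEST,
            };

            return ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy);
    }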
11 changes: 11 additions & 0 deletions arch/arm64/kvm/mmu.c
@@ -1401,10 +1401,21 @@ static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
{
unsigned long i, nr_pages = size >> PAGE_SHIFT;
struct page *page = pfn_to_page(pfn);
struct folio *folio = page_folio(page);

if (!kvm_has_mte(kvm))
return;

if (folio_test_hugetlb(folio)) {
/* Hugetlb has MTE flags set on head page only */
if (folio_try_hugetlb_mte_tagging(folio)) {
for (i = 0; i < nr_pages; i++, page++)
mte_clear_page_tags(page_address(page));
folio_set_hugetlb_mte_tagged(folio);
}
return;
}

for (i = 0; i < nr_pages; i++, page++) {
if (try_page_mte_tagging(page)) {
mte_clear_page_tags(page_address(page));
27 changes: 26 additions & 1 deletion arch/arm64/mm/copypage.c
@@ -18,15 +18,40 @@ void copy_highpage(struct page *to, struct page *from)
{
void *kto = page_address(to);
void *kfrom = page_address(from);
struct folio *src = page_folio(from);
struct folio *dst = page_folio(to);
unsigned int i, nr_pages;

copy_page(kto, kfrom);

if (kasan_hw_tags_enabled())
page_kasan_tag_reset(to);

if (system_supports_mte() && page_mte_tagged(from)) {
if (!system_supports_mte())
return;

if (folio_test_hugetlb(src) &&
folio_test_hugetlb_mte_tagged(src)) {
if (!folio_try_hugetlb_mte_tagging(dst))
return;

/*
* Populate tags for all subpages.
*
* Don't assume the first page is head page since
* huge page copy may start from any subpage.
*/
nr_pages = folio_nr_pages(src);
for (i = 0; i < nr_pages; i++) {
kfrom = page_address(folio_page(src, i));
kto = page_address(folio_page(dst, i));
mte_copy_page_tags(kto, kfrom);
}
folio_set_hugetlb_mte_tagged(dst);
} else if (page_mte_tagged(from)) {
/* It's a new page, shouldn't have been tagged yet */
WARN_ON_ONCE(!try_page_mte_tagging(to));

mte_copy_page_tags(kto, kfrom);
set_page_mte_tagged(to);
}
2 changes: 1 addition & 1 deletion fs/hugetlbfs/inode.c
@@ -110,7 +110,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
* way when do_mmap unwinds (may be important on powerpc
* and ia64).
*/
vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND);
vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND | VM_MTE_ALLOWED);
vma->vm_ops = &hugetlb_vm_ops;

ret = seal_check_write(info->seals, vma);
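Because hugetlbfs VMAs now carry VM_MTE_ALLOWED, PROT_MTE can also be enabled after the fact on a file-backed hugetlb mapping. Illustrative sketch ('fd' is assumed to be a file on a hugetlbfs mount and 'len' a hugepage-aligned size; the mprotect() below would previously have been refused with -EINVAL):

    #define _GNU_SOURCE
    #include <sys/mman.h>

    #ifndef PROT_MTE
    #define PROT_MTE 0x20
    #endif

    static void *map_tagged_hugetlb_file(int fd, size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);

            if (p == MAP_FAILED)
                    return p;

            /* Allowed now that the VMA has VM_MTE_ALLOWED set */
            if (mprotect(p, len, PROT_READ | PROT_WRITE | PROT_MTE)) {
                    munmap(p, len);
                    return MAP_FAILED;
            }
            return p;
    }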
