mm/mprotect: use long for page accounting and retval
Switch to the "long" type for page accounting and the return value across
the whole procedure of change_protection().

The change halves the maximum representable page count compared to before
(to ULONG_MAX / 2), but it cannot overflow on any system because the
maximum number of pages change_protection() can touch is
ULONG_MAX / PAGE_SIZE.

Two reasons to switch from "unsigned long" to "long":

  1. It suits count_vm_numa_events() better, whose 2nd parameter takes a
     long type.

  2. It paves the way for returning negative (error) values in the future.

Currently the only caller that consumes this retval is change_prot_numa(),
where the unsigned long was converted to an int.  While at it, touch up the
NUMA code to also take a long, so it avoids any possible overflow during
the int-size conversion as well (a standalone sketch below illustrates the
pattern).

Link: https://lkml.kernel.org/r/20230104225207.1066932-3-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: James Houghton <jthoughton@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
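
To illustrate point 2 and the int-size conversion concern, here is a
minimal standalone userspace sketch (not kernel code; fake_change_protection
and FAKE_EINVAL are made-up names for illustration only) of the pattern a
signed long return type enables: the same value can carry either a page
count or a negative error, and the caller keeps it in a long instead of
narrowing it into an int.

/* Standalone userspace sketch, not part of this commit. */
#include <stdio.h>

#define FAKE_EINVAL 22	/* illustrative stand-in for -EINVAL */

/* Returns the number of pages covered, or a negative error code. */
static long fake_change_protection(unsigned long start, unsigned long end,
				   unsigned long page_size)
{
	if (start >= end)
		return -FAKE_EINVAL;	/* negative values now fit the return type */

	/*
	 * Worst case is (end - start) / page_size pages, i.e. at most
	 * ULONG_MAX / PAGE_SIZE, which fits comfortably in a signed long.
	 */
	return (long)((end - start) / page_size);
}

int main(void)
{
	/* change_prot_numa() used to keep this count in an "int"; now a long. */
	long nr_updated = fake_change_protection(0, 1UL << 30, 4096);

	if (nr_updated < 0)
		printf("error: %ld\n", nr_updated);
	else
		printf("updated %ld pages\n", nr_updated);	/* 262144 */
	return 0;
}

Since count_vm_numa_events() already takes a long count (per the commit
message above), a long-typed result also flows into it without a cast.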
Peter Xu authored and Andrew Morton committed Jan 19, 2023
1 parent 6b7cea9 commit a79390f
Showing 5 changed files with 19 additions and 19 deletions.
4 changes: 2 additions & 2 deletions include/linux/hugetlb.h
@@ -248,7 +248,7 @@ void hugetlb_vma_lock_release(struct kref *kref);
 
 int pmd_huge(pmd_t pmd);
 int pud_huge(pud_t pud);
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot,
 		unsigned long cp_flags);
 
@@ -437,7 +437,7 @@ static inline void move_hugetlb_state(struct folio *old_folio,
 {
 }
 
-static inline unsigned long hugetlb_change_protection(
+static inline long hugetlb_change_protection(
 			struct vm_area_struct *vma, unsigned long address,
 			unsigned long end, pgprot_t newprot,
 			unsigned long cp_flags)
2 changes: 1 addition & 1 deletion include/linux/mm.h
@@ -2132,7 +2132,7 @@ static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma
 }
 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t pte);
-extern unsigned long change_protection(struct mmu_gather *tlb,
+extern long change_protection(struct mmu_gather *tlb,
 			      struct vm_area_struct *vma, unsigned long start,
 			      unsigned long end, unsigned long cp_flags);
 extern int mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
4 changes: 2 additions & 2 deletions mm/hugetlb.c
@@ -6615,7 +6615,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return i ? i : err;
 }
 
-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end,
 		pgprot_t newprot, unsigned long cp_flags)
 {
@@ -6624,7 +6624,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	pte_t *ptep;
 	pte_t pte;
 	struct hstate *h = hstate_vma(vma);
-	unsigned long pages = 0, psize = huge_page_size(h);
+	long pages = 0, psize = huge_page_size(h);
 	bool shared_pmd = false;
 	struct mmu_notifier_range range;
 	unsigned long last_addr_mask;
2 changes: 1 addition & 1 deletion mm/mempolicy.c
@@ -631,7 +631,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long end)
 {
 	struct mmu_gather tlb;
-	int nr_updated;
+	long nr_updated;
 
 	tlb_gather_mmu(&tlb, vma->vm_mm);
 
26 changes: 13 additions & 13 deletions mm/mprotect.c
@@ -80,13 +80,13 @@ bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
 	return pte_dirty(pte);
 }
 
-static unsigned long change_pte_range(struct mmu_gather *tlb,
+static long change_pte_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
-	unsigned long pages = 0;
+	long pages = 0;
 	int target_node = NUMA_NO_NODE;
 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
@@ -353,21 +353,21 @@ uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags)
 	} \
 } while (0)
 
-static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
+static inline long change_pmd_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
 	pmd_t *pmd;
 	unsigned long next;
-	unsigned long pages = 0;
+	long pages = 0;
 	unsigned long nr_huge_updates = 0;
 	struct mmu_notifier_range range;
 
 	range.start = 0;
 
 	pmd = pmd_offset(pud, addr);
 	do {
-		unsigned long this_pages;
+		long this_pages;
 
 		next = pmd_addr_end(addr, end);
 
@@ -437,13 +437,13 @@ static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-static inline unsigned long change_pud_range(struct mmu_gather *tlb,
+static inline long change_pud_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
 	pud_t *pud;
 	unsigned long next;
-	unsigned long pages = 0;
+	long pages = 0;
 
 	pud = pud_offset(p4d, addr);
 	do {
@@ -458,13 +458,13 @@ static inline unsigned long change_pud_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
+static inline long change_p4d_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
 	p4d_t *p4d;
 	unsigned long next;
-	unsigned long pages = 0;
+	long pages = 0;
 
 	p4d = p4d_offset(pgd, addr);
 	do {
@@ -479,14 +479,14 @@ static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-static unsigned long change_protection_range(struct mmu_gather *tlb,
+static long change_protection_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long addr,
 		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long pages = 0;
+	long pages = 0;
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset(mm, addr);
@@ -505,12 +505,12 @@ static unsigned long change_protection_range(struct mmu_gather *tlb,
 	return pages;
 }
 
-unsigned long change_protection(struct mmu_gather *tlb,
+long change_protection(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, unsigned long cp_flags)
 {
 	pgprot_t newprot = vma->vm_page_prot;
-	unsigned long pages;
+	long pages;
 
 	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
 
