arm/mm: Convert to using lock_mm_and_find_vma()
arm has an additional check for address < FIRST_USER_ADDRESS before
expanding the stack.  Since FIRST_USER_ADDRESS is defined everywhere
(generally as 0), move that check to the generic expand_downwards().

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Ben Hutchings authored and Linus Torvalds committed Jun 24, 2023
1 parent 7267ef7 commit 8b35ca3
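
A side note on the cost of the generic check (an illustration, not part of the commit): the generic headers fall back to defining FIRST_USER_ADDRESS as 0, and for an unsigned address the comparison "address < 0" can never be true, so the new clause in expand_downwards() compiles away on most architectures. A minimal standalone sketch of that folding:

#include <stdio.h>

/* The generic headers provide this 0 fallback; arm can override it,
 * which is why arm carried its own check before this commit. */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS 0UL
#endif

int main(void)
{
	unsigned long address = 0x1000;

	/* For an unsigned value, "address < 0UL" can never be true, so
	 * the compiler deletes this branch wherever FIRST_USER_ADDRESS
	 * is 0: the common case the commit message mentions. */
	if (address < FIRST_USER_ADDRESS)
		puts("rejected: below the first user address");
	else
		puts("allowed: the check folds away");
	return 0;
}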
Showing 3 changed files with 16 additions and 50 deletions.
1 change: 1 addition & 0 deletions arch/arm/Kconfig
@@ -125,6 +125,7 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_REL
 	select NEED_DMA_MAP_STATE
 	select OF_EARLY_FLATTREE if OF
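Selecting LOCK_MM_AND_FIND_VMA opts arm into the generic lock_mm_and_find_vma() helper that this series introduced in mm/memory.c. Below is a condensed, self-contained model of that helper's contract, not the upstream code; the stub types and the mmap_read_lock_carefully() name are stand-ins for illustration:

#include <stdbool.h>
#include <stddef.h>

/* Toy stand-ins so this sketch is self-contained; not the kernel types. */
struct mm_struct { int users; };
struct pt_regs { int dummy; };
struct vm_area_struct { unsigned long vm_start, vm_end, vm_flags; };
#define VM_GROWSDOWN 0x0100UL

/* Trivial stubs standing in for the real locking/lookup primitives. */
static bool mmap_read_lock_carefully(struct mm_struct *mm, struct pt_regs *regs)
{ (void)mm; (void)regs; return true; }
static void mmap_read_unlock(struct mm_struct *mm) { (void)mm; }
static struct vm_area_struct stack_vma = { 0x7f0000, 0x800000, VM_GROWSDOWN };
static struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{ (void)mm; return addr < stack_vma.vm_end ? &stack_vma : NULL; }
static int expand_stack(struct vm_area_struct *vma, unsigned long addr)
{ vma->vm_start = addr; return 0; }

/*
 * Condensed model of lock_mm_and_find_vma()'s contract: on success,
 * return the VMA covering addr with the mmap read lock held; on any
 * failure, return NULL with the lock already dropped.  Taking the lock
 * "carefully" covers the kernel-mode deadlock case that arm's
 * trylock/exception-table dance handled by hand.
 */
static struct vm_area_struct *
lock_mm_and_find_vma(struct mm_struct *mm, unsigned long addr,
		     struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	if (!mmap_read_lock_carefully(mm, regs))
		return NULL;	/* kernel-mode fault with no fixup: bail */

	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		return vma;	/* addr falls inside an existing VMA */

	if (vma && (vma->vm_flags & VM_GROWSDOWN) && !expand_stack(vma, addr))
		return vma;	/* grew the stack down to cover addr */

	mmap_read_unlock(mm);
	return NULL;
}

int main(void)
{
	struct mm_struct mm = { 1 };
	struct pt_regs regs = { 0 };

	/* Expands the toy stack VMA down to cover 0x7e0000. */
	return lock_mm_and_find_vma(&mm, 0x7e0000, &regs) ? 0 : 1;
}

Note that the model has no FIRST_USER_ADDRESS test: the generic helper leaves that to the stack-expansion path, which is exactly why this commit moves arm's check into expand_downwards().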
63 changes: 14 additions & 49 deletions arch/arm/mm/fault.c
@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here.  However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first.  We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
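Pieced together from the hunks above, the VMA lookup in do_page_fault() reads as follows after the patch:

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = VM_FAULT_BADMAP;
		goto bad_area;
	}

	/*
	 * ok, we have a good vm_area for this memory access, check the
	 * permissions on the VMA allow for the fault which occurred.
	 */
	if (!(vma->vm_flags & vm_flags))
		fault = VM_FAULT_BADACCESS;
	else
		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);

Both failure cases now funnel through the new bad_area label, so the existing VM_FAULT_BADMAP/VM_FAULT_BADACCESS reporting below it stays on a single path.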
2 changes: 1 addition & 1 deletion mm/mmap.c
@@ -2036,7 +2036,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 	int error = 0;
 
 	address &= PAGE_MASK;
-	if (address < mmap_min_addr)
+	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
 		return -EPERM;
 
 	/* Enforce stack_guard_gap */
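
To see the behavioral effect of the widened gate, here is a standalone toy program modeling the check; the page size and the non-zero FIRST_USER_ADDRESS are assumptions for illustration, approximating an arm-like configuration:

#include <stdio.h>

#define PAGE_MASK		(~4095UL)	/* assumed 4 KiB pages */
#define FIRST_USER_ADDRESS	4096UL		/* assumed non-zero, arm-like */

static unsigned long mmap_min_addr;		/* the sysctl; 0 in this model */

/* Models the entry check of expand_downwards() after this commit. */
static int expand_downwards_gate(unsigned long address)
{
	address &= PAGE_MASK;
	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
		return -1;			/* stands in for -EPERM */
	return 0;
}

int main(void)
{
	/* Growing the stack into the first page is refused even though
	 * mmap_min_addr is 0 in this model. */
	printf("0x0fff -> %d\n", expand_downwards_gate(0x0fffUL));
	printf("0x2000 -> %d\n", expand_downwards_gate(0x2000UL));
	return 0;
}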
