arm64/mm: Convert to using lock_mm_and_find_vma()
This converts arm64 to use the new page fault helper.  It was very
straightforward, but still needed a fix for the "obvious" conversion I
initially did.  Thanks to Suren for the fix and testing.

Fixed-and-tested-by: Suren Baghdasaryan <surenb@google.com>
Unnecessary-code-removal-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Linus Torvalds committed Jun 24, 2023
1 parent eda0047 commit ae870a6
Showing 2 changed files with 9 additions and 39 deletions.
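
In essence, the conversion collapses the old open-coded sequence (mmap_read_trylock() with an exception-table check, then find_vma(), then hand-rolled VM_GROWSDOWN handling via expand_stack()) into a single lock_mm_and_find_vma() call. Below is a minimal sketch of the resulting flow, condensed from the fault.c hunks further down; the wrapper function name is invented for illustration and the retry/signal-handling paths are omitted. Note that in the real diff the VM_FAULT_BADMAP path jumps straight to the done: label without calling mmap_read_unlock(), i.e. the helper is expected to return NULL with the mmap lock already released.

/* Condensed sketch of the converted fault path; illustrative only, not the exact arm64 code. */
static vm_fault_t sketch_fault_path(struct mm_struct *mm, unsigned long addr,
				    unsigned int mm_flags, unsigned long vm_flags,
				    struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* Replaces the trylock/exception-table dance plus find_vma() and expand_stack() */
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma))
		return VM_FAULT_BADMAP;		/* mmap lock already dropped by the helper */

	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
	mmap_read_unlock(mm);
	return fault;
}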
1 change: 1 addition & 0 deletions arch/arm64/Kconfig
@@ -225,6 +225,7 @@ config ARM64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH
47 changes: 8 additions & 39 deletions arch/arm64/mm/fault.c
@@ -483,27 +483,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 #define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
 #define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+				  struct vm_area_struct *vma, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
 				  struct pt_regs *regs)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
-
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
 	/*
 	 * Ok, we have a good vm_area for this memory access, so we can handle
 	 * it.
-	 */
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
 	 * Check that the permissions on the VMA allow for the fault which
 	 * occurred.
 	 */
@@ -617,31 +604,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 lock_mmap:
 #endif /* CONFIG_PER_VMA_LOCK */
-	/*
-	 * As per x86, we may deadlock here. However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
-			goto no_context;
+
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above mmap_read_trylock() might have succeeded in which
-		 * case, we'll have missed the might_sleep() from down_read().
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-			mmap_read_unlock(mm);
-			goto no_context;
-		}
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto done;
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -660,9 +631,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 	mmap_read_unlock(mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
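For reference, the contract the converted code relies on, inferred from the hunks above: lock_mm_and_find_vma() takes the mmap read lock, looks up the VMA covering addr, attempts VM_GROWSDOWN stack expansion where the old arm64 code did so inline, and on failure returns NULL with the lock already released (hence the goto done past mmap_read_unlock() in the second hunk). The sketch below is an illustrative reimplementation of that contract, not the actual mm/memory.c helper, which is considerably more careful about faults taken from kernel mode:

/* Illustrative sketch of the helper's externally visible behaviour; not the real implementation. */
static struct vm_area_struct *sketch_lock_mm_and_find_vma(struct mm_struct *mm,
					unsigned long addr, struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	(void)regs;	/* the real helper uses regs for the exception-table check on kernel faults */

	mmap_read_lock(mm);

	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		return vma;				/* success: VMA covers addr, lock stays held */

	/* Try to grow a stack VMA down to addr, as the removed arm64 code did inline */
	if (vma && (vma->vm_flags & VM_GROWSDOWN) && !expand_stack(vma, addr))
		return vma;

	mmap_read_unlock(mm);				/* failure: caller must not unlock again */
	return NULL;
}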
