From c6d3cd32fd0064af7611d00877a67e6993bf220b Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Fri, 29 Oct 2021 17:22:45 +0100
Subject: [PATCH 1/3] arm64: ftrace: use HAVE_FUNCTION_GRAPH_RET_ADDR_PTR

When CONFIG_FUNCTION_GRAPH_TRACER is selected and the function graph
tracer is in use, unwind_frame() may erroneously associate a traced
function with an incorrect return address. This can happen when
starting an unwind from a pt_regs, or when unwinding across an
exception boundary.

This can be seen when recording with perf while the function graph
tracer is in use. For example:

| # echo function_graph > /sys/kernel/debug/tracing/current_tracer
| # perf record -g -e raw_syscalls:sys_enter:k /bin/true
| # perf report

... reports the callchain erroneously as:

| el0t_64_sync
| el0t_64_sync_handler
| el0_svc_common.constprop.0
| perf_callchain
| get_perf_callchain
| syscall_trace_enter
| syscall_trace_enter

... whereas when the function graph tracer is not in use, it reports:

| el0t_64_sync
| el0t_64_sync_handler
| el0_svc
| do_el0_svc
| el0_svc_common.constprop.0
| syscall_trace_enter
| syscall_trace_enter

The underlying problem is that ftrace_graph_get_ret_stack() takes an
index offset from the most recent entry added to the fgraph return
stack. We start an unwind at offset 0, and increment the offset each
time we encounter a rewritten return address (i.e. when we see
`return_to_handler`). This is broken in two cases:

1) Between creating a pt_regs and starting the unwind, function calls
   may place entries on the stack, leaving an arbitrary offset which we
   can only determine by performing a full unwind from the caller of
   the unwind code (and relying on none of the unwind code being
   instrumented).

   This can result in erroneous entries being reported in a backtrace
   recorded by perf or kfence when the function graph tracer is in use.
   Currently show_regs() is unaffected as dump_backtrace() performs an
   initial unwind.

2) When unwinding across an exception boundary (whether continuing an
   unwind or starting a new unwind from regs), we currently always skip
   the LR of the interrupted context. Where this was live and contained
   a rewritten address, we won't consume the corresponding fgraph ret
   stack entry, leaving subsequent entries off-by-one.

   This can result in erroneous entries being reported in a backtrace
   performed by any in-kernel unwinder when that backtrace crosses an
   exception boundary, with entries after the boundary being reported
   incorrectly. This includes perf, kfence, show_regs(), panic(), etc.

To fix this, we need to be able to uniquely identify each rewritten
return address such that we can map this back to the original return
address. We can use HAVE_FUNCTION_GRAPH_RET_ADDR_PTR to associate each
rewritten return address with a unique location on the stack. As the
return address is passed in the LR (and so is not guaranteed a unique
location in memory), we use the FP upon entry to the function (i.e. the
address of the caller's frame record) as the return address pointer.
Any nested call will have a different FP value as the caller must
create its own frame record and update FP to point to this.

Since ftrace_graph_ret_addr() requires the return address with the PAC
stripped, the stripping of the PAC is moved before the fixup of the
rewritten address. As we would unconditionally strip the PAC, moving
this earlier is not harmful, and we can avoid a redundant strip in the
return address fixup code.
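
To make the pairing concrete (an illustrative sketch assembled from the
hunks below, with unrelated arguments elided): the entry code records
the FP at entry as the return address pointer, and the unwinder later
passes the same FP back to the fgraph core to recover the original
return address:

| /* prepare_ftrace_return(): key the fgraph entry by the FP at entry */
| function_graph_enter(old, self_addr, frame_pointer,
|                      (void *)frame_pointer);
|
| /* unwind_frame(): frame->fp is the address of the caller's frame record */
| orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
|                                 (void *)frame->fp);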

I've tested this with the perf case above, the ftrace selftests, and
a number of ad-hoc unwinder tests. The tests all pass, and I have seen
no unexpected behaviour as a result of this change. I've tested with
pointer authentication under QEMU TCG where magic-sysrq+l correctly
recovers the original return addresses.

Note that this doesn't fix the issue of skipping a live LR at an
exception boundary, which is a more general problem and requires more
substantial rework. Were we to consume the LR in all cases this would
result in warnings where the interrupted context's LR contains
`return_to_handler`, but the FP has been altered, e.g.

| func:
|         <--- ftrace entry --->        // logs FP & LR, rewrites LR
|         STP     FP, LR, [SP, #-16]!
|         MOV     FP, SP
|         <--- INTERRUPT --->

... as ftrace_graph_ret_addr() will not find a matching entry,
triggering the WARN_ON_ONCE() in unwind_frame().

Link: https://lore.kernel.org/r/20211025164925.GB2001@C02TD0UTHF1T.local
Link: https://lore.kernel.org/r/20211027132529.30027-1-mark.rutland@arm.com
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Will Deacon <will@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20211029162245.39761-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/ftrace.h     | 11 +++++++++++
 arch/arm64/include/asm/stacktrace.h |  6 ------
 arch/arm64/kernel/ftrace.c          |  6 +++---
 arch/arm64/kernel/stacktrace.c      | 18 ++++++++----------
 4 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 347b0cc68f071..1494cfa8639be 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -12,6 +12,17 @@
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
+/*
+ * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
+ * "return address pointer" which can be used to uniquely identify a return
+ * address which has been overwritten.
+ *
+ * On arm64 we use the address of the caller's frame record, which remains the
+ * same for the lifetime of the instrumented function, unlike the return
+ * address in the LR.
+ */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #else
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index a4e046ef4568e..6564a01cc085a 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -47,9 +47,6 @@ struct stack_info {
  * @prev_type:  The type of stack this frame record was on, or a synthetic
  *              value of STACK_TYPE_UNKNOWN. This is used to detect a
  *              transition from one stack to another.
- *
- * @graph:      When FUNCTION_GRAPH_TRACER is selected, holds the index of a
- *              replacement lr value in the ftrace graph stack.
  */
 struct stackframe {
         unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
         DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
         unsigned long prev_fp;
         enum stack_type prev_type;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        int graph;
-#endif
 #ifdef CONFIG_KRETPROBES
         struct llist_node *kr_cur;
 #endif
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index fc62dfe73f933..4506c4a90ac10 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
  * on the way back to parent. For this purpose, this function is called
  * in _mcount() or ftrace_caller() to replace return address (*parent) on
  * the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
  */
 void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                            unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
          */
         old = *parent;
 
-        if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+        if (!function_graph_enter(old, self_addr, frame_pointer,
+            (void *)frame_pointer)) {
                 *parent = return_hooker;
+        }
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c30624fff6acd..94f83cd44e507 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
 {
         frame->fp = fp;
         frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        frame->graph = 0;
-#endif
 #ifdef CONFIG_KRETPROBES
         frame->kr_cur = NULL;
 #endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
         frame->prev_fp = fp;
         frame->prev_type = info.type;
 
+        frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         if (tsk->ret_stack &&
-            (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
-                struct ftrace_ret_stack *ret_stack;
+            (frame->pc == (unsigned long)return_to_handler)) {
+                unsigned long orig_pc;
                 /*
                  * This is a case where function graph tracer has
                  * modified a return address (LR) in a stack frame
                  * to hook a function return.
                  * So replace it to an original value.
                  */
-                ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
-                if (WARN_ON_ONCE(!ret_stack))
+                orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+                                                (void *)frame->fp);
+                if (WARN_ON_ONCE(frame->pc == orig_pc))
                         return -EINVAL;
-                frame->pc = ret_stack->ret;
+                frame->pc = orig_pc;
         }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
                 frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
 #endif
 
-        frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
         return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);

From d3eb70ead6474ec16f976fcacf10a7a890a95bd3 Mon Sep 17 00:00:00 2001
From: Pingfan Liu <kernelfans@gmail.com>
Date: Fri, 12 Nov 2021 13:22:14 +0800
Subject: [PATCH 2/3] arm64: mm: Fix VM_BUG_ON(mm != &init_mm) for trans_pgd

trans_pgd_create_copy() can hit "VM_BUG_ON(mm != &init_mm)" in the
function pmd_populate_kernel(). This is the combined consequence of
commit 5de59884ac0e ("arm64: trans_pgd: pass NULL instead of init_mm
to *_populate functions"), which replaced &init_mm with NULL, and
commit 59511cfd08f3 ("arm64: mm: use XN table mapping attributes for
user/kernel mappings"), which introduced the VM_BUG_ON.

Since the former change is reasonable, it is better to fix the latter.
From the perspective of trans_pgd, two groups of functions are affected
by the latter commit:

  pmd_populate_kernel()
    mm == NULL should be fixed, else it hits VM_BUG_ON()

  p?d_populate()
    mm == NULL means PXN; that is OK, since trans_pgd only copies a
    linear map, and no execution will happen on the map.
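
In other words, the relaxed assertion must keep rejecting user mm
pointers while tolerating the NULL that trans_pgd passes (a sketch of
the three cases):

| VM_BUG_ON(mm && mm != &init_mm);
| /* mm == NULL:     allowed; trans_pgd linear-map copy, PXN is irrelevant */
| /* mm == &init_mm: allowed; ordinary kernel page table population        */
| /* any other mm:   BUG; this helper must never see a user mm             */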

So it is good enough to just relax the VM_BUG_ON() to disregard
mm == NULL.

Fixes: 59511cfd08f3 ("arm64: mm: use XN table mapping attributes for user/kernel mappings")
Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: <stable@vger.kernel.org> # 5.13.x
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Matthias Brugger <mbrugger@suse.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Link: https://lore.kernel.org/r/20211112052214.9086-1-kernelfans@gmail.com
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/pgalloc.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 8433a2058eb15..237224484d0f6 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
 {
-        VM_BUG_ON(mm != &init_mm);
+        VM_BUG_ON(mm && mm != &init_mm);
         __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
 }

From 94902d849e85093aafcdbea2be8e2beff47233e6 Mon Sep 17 00:00:00 2001
From: Mark Rutland <mark.rutland@arm.com>
Date: Mon, 22 Nov 2021 12:58:20 +0000
Subject: [PATCH 3/3] arm64: uaccess: avoid blocking within critical sections

As Vincent reports in:

  https://lore.kernel.org/r/20211118163417.21617-1-vincent.whitchurch@axis.com

The put_user() in schedule_tail() can get stuck in a livelock, similar
to a problem recently fixed on riscv in commit:

  285a76bb2cf51b0c ("riscv: evaluate put_user() arg before enabling user access")

In __raw_put_user() we have a critical section between
uaccess_ttbr0_enable() and uaccess_ttbr0_disable() where we cannot
safely call into the scheduler without having taken an exception, as
schedule() and other scheduling functions will not save/restore the
TTBR0 state. If either of the `x` or `ptr` arguments to
__raw_put_user() contains a blocking call, we may call into the
scheduler within the critical section. This can result in two problems:

1) The access within the critical section will occur without the
   required TTBR0 tables installed. This will fault, and where the
   required tables permit access, the access will be retried without
   the required tables, resulting in a livelock.

2) When TTBR0 SW PAN is in use, check_and_switch_context() does not
   modify TTBR0, leaving a stale value installed. The mappings of the
   blocked task will erroneously be accessible to regular accesses in
   the context of the new task. Additionally, if the tables are
   subsequently freed, local TLB maintenance required to reuse the ASID
   may be lost, potentially resulting in TLB corruption (e.g. in the
   presence of CnP).

The same issue exists for __raw_get_user() in the critical section
between uaccess_ttbr0_enable() and uaccess_ttbr0_disable().

A similar issue exists for __get_kernel_nofault() and
__put_kernel_nofault() for the critical section between
__uaccess_enable_tco_async() and __uaccess_disable_tco_async(), as the
TCO state is not context-switched by direct calls into the scheduler.
Here the TCO state may be lost from the context of the current task,
resulting in unexpected asynchronous tag check faults. It may also be
leaked to another task, suppressing expected tag check faults.

To fix all of these cases, we must ensure that we do not directly call
into the scheduler in their respective critical sections. This patch
reworks __raw_put_user(), __raw_get_user(), __get_kernel_nofault(), and
__put_kernel_nofault(), ensuring that parameters are evaluated outside
of the critical sections.
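
For example, __raw_put_user() now snapshots both arguments into locals
before entering the critical section (a sketch of the hunk below, with
the backslash continuations elided):

| __typeof__(*(ptr)) __user *__rpu_ptr = (ptr);  /* may block: evaluated here */
| __typeof__(*(ptr)) __rpu_val = (x);            /* may block: evaluated here */
|
| uaccess_ttbr0_enable();
| __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);
| uaccess_ttbr0_disable();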

To make this requirement clear, comments are added describing the
problem, and blank lines are added to separate the critical sections
from the other portions of the macros.

For __raw_get_user() and __raw_put_user() the `err` parameter is
conditionally assigned to, and we must currently evaluate this in the
critical section. This behaviour is relied upon by the signal code,
which uses chains of __put_user_error() and __get_user_error(),
checking the return value at the end (a sketch of this pattern follows
the final hunk). In all cases, the `err` parameter is a plain int
rather than a more complex expression with a blocking call, so this is
safe.

In future we should try to clean up the `err` usage to remove the
potential for this to be a problem.

Aside from the changes to time of evaluation, there should be no
functional change as a result of this patch.

Reported-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Link: https://lore.kernel.org/r/20211118163417.21617-1-vincent.whitchurch@axis.com
Fixes: f253d827f33c ("arm64: uaccess: refactor __{get,put}_user")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20211122125820.55286-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/uaccess.h | 48 +++++++++++++++++++++++++++-----
 1 file changed, 41 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6e2e0b7031aba..3a5ff5e205863 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -281,12 +281,22 @@ do {                                                        \
         (x) = (__force __typeof__(*(ptr)))__gu_val;            \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_get_user(x, ptr, err)                            \
 do {                                                           \
+        __typeof__(*(ptr)) __user *__rgu_ptr = (ptr);          \
+        __typeof__(x) __rgu_val;                               \
         __chk_user_ptr(ptr);                                   \
+                                                               \
         uaccess_ttbr0_enable();                                \
-        __raw_get_mem("ldtr", x, ptr, err);                    \
+        __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);      \
         uaccess_ttbr0_disable();                               \
+                                                               \
+        (x) = __rgu_val;                                       \
 } while (0)
 
 #define __get_user_error(x, ptr, err)                          \
@@ -310,14 +320,22 @@ do {                                                       \
 
 #define get_user        __get_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __get_kernel_nofault(dst, src, type, err_label)        \
 do {                                                           \
+        __typeof__(dst) __gkn_dst = (dst);                     \
+        __typeof__(src) __gkn_src = (src);                     \
         int __gkn_err = 0;                                     \
                                                                \
         __uaccess_enable_tco_async();                          \
-        __raw_get_mem("ldr", *((type *)(dst)),                 \
-                      (__force type *)(src), __gkn_err);       \
+        __raw_get_mem("ldr", *((type *)(__gkn_dst)),           \
+                      (__force type *)(__gkn_src), __gkn_err); \
         __uaccess_disable_tco_async();                         \
+                                                               \
         if (unlikely(__gkn_err))                               \
                 goto err_label;                                \
 } while (0)
@@ -351,11 +369,19 @@ do {                                                       \
         }                                                      \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_put_user(x, ptr, err)                            \
 do {                                                           \
-        __chk_user_ptr(ptr);                                   \
+        __typeof__(*(ptr)) __user *__rpu_ptr = (ptr);          \
+        __typeof__(*(ptr)) __rpu_val = (x);                    \
+        __chk_user_ptr(__rpu_ptr);                             \
+                                                               \
         uaccess_ttbr0_enable();                                \
-        __raw_put_mem("sttr", x, ptr, err);                    \
+        __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);      \
         uaccess_ttbr0_disable();                               \
 } while (0)
 
@@ -380,14 +406,22 @@ do {                                                       \
 
 #define put_user        __put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __put_kernel_nofault(dst, src, type, err_label)        \
 do {                                                           \
+        __typeof__(dst) __pkn_dst = (dst);                     \
+        __typeof__(src) __pkn_src = (src);                     \
         int __pkn_err = 0;                                     \
                                                                \
         __uaccess_enable_tco_async();                          \
-        __raw_put_mem("str", *((type *)(src)),                 \
-                      (__force type *)(dst), __pkn_err);       \
+        __raw_put_mem("str", *((type *)(__pkn_src)),           \
+                      (__force type *)(__pkn_dst), __pkn_err); \
         __uaccess_disable_tco_async();                         \
+                                                               \
         if (unlikely(__pkn_err))                               \
                 goto err_label;                                \
 } while(0)
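
As a usage-level illustration of the `err` accumulation mentioned in the
commit message above (a hypothetical caller in the style of the arm64
signal code; the structure and field names are invented for the example):

| int err = 0;
|
| __put_user_error(regs->regs[0], &frame->mc_regs[0], err);    /* hypothetical */
| __put_user_error(regs->regs[1], &frame->mc_regs[1], err);    /* hypothetical */
|
| if (err)
|         return -EFAULT;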