From 55b87b74996383230586f4f9f801ae304c70e649 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Mon, 3 Jul 2023 14:04:16 +0100
Subject: [PATCH 1/4] arm64: Fix HFGxTR_EL2 field naming

The HFGxTR_EL2 fields do not always follow the naming described in the
spec, nor do they match the name of the register they trap in the rest
of the kernel.

It is a bit sad that they were written by hand despite the availability
of a machine-readable version...

Fixes: cc077e7facbe ("arm64/sysreg: Convert HFG[RW]TR_EL2 to automatic generation")
Signed-off-by: Marc Zyngier
Cc: Mark Brown
Cc: Will Deacon
Cc: Catalin Marinas
Cc: Mark Rutland
Reviewed-by: Mark Brown
Link: https://lore.kernel.org/r/20230703130416.1495307-1-maz@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/tools/sysreg | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 1ea4a3dc68f88..65866bf819c33 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2017,7 +2017,7 @@ Field 0 SM
 EndSysreg
 
 SysregFields HFGxTR_EL2
-Field 63 nAMIAIR2_EL1
+Field 63 nAMAIR2_EL1
 Field 62 nMAIR2_EL1
 Field 61 nS2POR_EL1
 Field 60 nPOR_EL1
@@ -2032,9 +2032,9 @@ Field 52 nGCS_EL0
 Res0 51
 Field 50 nACCDATA_EL1
 Field 49 ERXADDR_EL1
-Field 48 EXRPFGCDN_EL1
-Field 47 EXPFGCTL_EL1
-Field 46 EXPFGF_EL1
+Field 48 ERXPFGCDN_EL1
+Field 47 ERXPFGCTL_EL1
+Field 46 ERXPFGF_EL1
 Field 45 ERXMISCn_EL1
 Field 44 ERXSTATUS_EL1
 Field 43 ERXCTLR_EL1
@@ -2049,8 +2049,8 @@ Field 35 TPIDR_EL0
 Field 34 TPIDRRO_EL0
 Field 33 TPIDR_EL1
 Field 32 TCR_EL1
-Field 31 SCTXNUM_EL0
-Field 30 SCTXNUM_EL1
+Field 31 SCXTNUM_EL0
+Field 30 SCXTNUM_EL1
 Field 29 SCTLR_EL1
 Field 28 REVIDR_EL1
 Field 27 PAR_EL1

From a8bd38dbc57c2fe074df2c9e549b9c2ad3183c83 Mon Sep 17 00:00:00 2001
From: Nikhil V
Date: Thu, 13 Jul 2023 12:37:57 +0530
Subject: [PATCH 2/4] arm64: mm: Make hibernation aware of KFENCE

In the restore path, swsusp_arch_suspend_exit uses copy_page() to
overwrite memory. However, with features like KFENCE enabled, there
could be situations where KFENCE has marked some pages as not valid,
in which case accessing them is reported as an invalid access.

Consider a situation where page 'P' was part of the hibernation image.
When the resume kernel tries to restore the pages, the same page 'P' is
already in use by the resume kernel and is KFENCE protected, so its
mapping has been removed from the linear map. Since restoring pages
happens with the resume kernel's page tables, we end up accessing 'P'
during the copy, resulting in a kernel page fault.

Fix this issue by marking the PTE as valid for such KFENCE protected
pages.

Co-developed-by: Pavankumar Kondeti
Signed-off-by: Pavankumar Kondeti
Signed-off-by: Nikhil V
Link: https://lore.kernel.org/r/20230713070757.4093-1-quic_nprakash@quicinc.com
Signed-off-by: Will Deacon
---
 arch/arm64/mm/trans_pgd.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c
index 4ea2eefbc053f..e9ad391fc8ea3 100644
--- a/arch/arm64/mm/trans_pgd.c
+++ b/arch/arm64/mm/trans_pgd.c
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include <linux/kfence.h>
 
 static void *trans_alloc(struct trans_pgd_info *info)
 {
@@ -41,7 +42,8 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
                  * the temporary mappings we use during restore.
                  */
                 set_pte(dst_ptep, pte_mkwrite(pte));
-        } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+        } else if ((debug_pagealloc_enabled() ||
+                    is_kfence_address((void *)addr)) && !pte_none(pte)) {
                 /*
                  * debug_pagealloc will removed the PTE_VALID bit if
                  * the page isn't in use by the resume kernel. It may have

From 71e06e1acecbb50e712a3be9a7d56778509f3538 Mon Sep 17 00:00:00 2001
From: Zhen Lei
Date: Thu, 13 Jul 2023 19:58:31 +0800
Subject: [PATCH 3/4] arm64: vdso: Clear common make C=2 warnings

  make C=2 ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- xxx.o

When I use the command above to do a 'make C=2' check on any object
file, the following warnings are always output:

  CHECK   arch/arm64/kernel/vdso/vgettimeofday.c
arch/arm64/kernel/vdso/vgettimeofday.c:9:5: warning: symbol '__kernel_clock_gettime' was not declared. Should it be static?
arch/arm64/kernel/vdso/vgettimeofday.c:15:5: warning: symbol '__kernel_gettimeofday' was not declared. Should it be static?
arch/arm64/kernel/vdso/vgettimeofday.c:21:5: warning: symbol '__kernel_clock_getres' was not declared. Should it be static?

Therefore, add declarations for the three functions to eliminate these
common warnings and provide clean output.

Signed-off-by: Zhen Lei
Link: https://lore.kernel.org/r/20230713115831.777-1-thunder.leizhen@huawei.com
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/vdso/vgettimeofday.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/arm64/kernel/vdso/vgettimeofday.c b/arch/arm64/kernel/vdso/vgettimeofday.c
index 4236cf34d7d9c..9941c5b04f158 100644
--- a/arch/arm64/kernel/vdso/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso/vgettimeofday.c
@@ -6,6 +6,10 @@
  *
  */
 
+int __kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+
 int __kernel_clock_gettime(clockid_t clock,
                            struct __kernel_timespec *ts)
 {

From d4d5be94a87872421ea2569044092535aff0b886 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Thu, 20 Jul 2023 19:38:58 +0100
Subject: [PATCH 4/4] arm64/fpsimd: Ensure SME storage is allocated after SVE VL changes

When we reconfigure the SVE vector length we discard the backing
storage for the SVE vectors and then reallocate on next SVE use,
leaving the SME-specific state alone. This means that we do not enable
SME traps if they were already disabled. That means that userspace code
can enter streaming mode without trapping, putting the task in a state
where, if we try to save the state of the task, we will fault.

Since the ABI does not specify that changing the SVE vector length
disturbs SME state, and since SVE code may not be aware of SME code in
the process, we shouldn't simply discard any ZA state. Instead,
immediately reallocate the storage for SVE, and disable SME if we
change the SVE vector length while there is no SME state active.

Disabling SME traps on SVE vector length changes would make the overall
code more complex, since we would have a state where we have valid SME
state stored but might get an SME trap.

Fixes: 9e4ab6c89109 ("arm64/sme: Implement vector length configuration prctl()s")
Reported-by: David Spickett
Signed-off-by: Mark Brown
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20230720-arm64-fix-sve-sme-vl-change-v2-1-8eea06b82d57@kernel.org
Signed-off-by: Will Deacon
---
 arch/arm64/kernel/fpsimd.c | 33 +++++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 7a1aeb95d7c3d..89d54a5242d16 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -847,6 +847,8 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 int vec_set_vector_length(struct task_struct *task, enum vec_type type,
                           unsigned long vl, unsigned long flags)
 {
+        bool free_sme = false;
+
         if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
                                      PR_SVE_SET_VL_ONEXEC))
                 return -EINVAL;
@@ -897,21 +899,36 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type,
                 task->thread.fp_type = FP_STATE_FPSIMD;
         }
 
-        if (system_supports_sme() && type == ARM64_VEC_SME) {
-                task->thread.svcr &= ~(SVCR_SM_MASK |
-                                       SVCR_ZA_MASK);
-                clear_thread_flag(TIF_SME);
+        if (system_supports_sme()) {
+                if (type == ARM64_VEC_SME ||
+                    !(task->thread.svcr & (SVCR_SM_MASK | SVCR_ZA_MASK))) {
+                        /*
+                         * We are changing the SME VL or weren't using
+                         * SME anyway, discard the state and force a
+                         * reallocation.
+                         */
+                        task->thread.svcr &= ~(SVCR_SM_MASK |
+                                               SVCR_ZA_MASK);
+                        clear_thread_flag(TIF_SME);
+                        free_sme = true;
+                }
         }
 
         if (task == current)
                 put_cpu_fpsimd_context();
 
         /*
-         * Force reallocation of task SVE and SME state to the correct
-         * size on next use:
+         * Free the changed states if they are not in use, SME will be
+         * reallocated to the correct size on next use and we just
+         * allocate SVE now in case it is needed for use in streaming
+         * mode.
          */
-        sve_free(task);
-        if (system_supports_sme() && type == ARM64_VEC_SME)
+        if (system_supports_sve()) {
+                sve_free(task);
+                sve_alloc(task, true);
+        }
+
+        if (free_sme)
                 sme_free(task);
 
         task_set_vl(task, type, vl);