diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index f54ab2cb189b1..f2e6dd78d5e22 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -12,28 +12,54 @@
 #ifdef __ASSEMBLY__
 
-.macro kuap_user_restore gpr1
+.macro kuap_user_restore gpr1, gpr2
 #if defined(CONFIG_PPC_PKEY)
 
 	BEGIN_MMU_FTR_SECTION_NESTED(67)
+	b	100f  // skip_restore_amr
+	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
 	/*
 	 * AMR and IAMR are going to be different when
 	 * returning to userspace.
 	 */
 	ld	\gpr1, STACK_REGS_AMR(r1)
+
+	/*
+	 * If kuap feature is not enabled, do the mtspr
+	 * only if AMR value is different.
+	 */
+	BEGIN_MMU_FTR_SECTION_NESTED(68)
+	mfspr	\gpr2, SPRN_AMR
+	cmpd	\gpr1, \gpr2
+	beq	99f
+	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)
+
 	isync
 	mtspr	SPRN_AMR, \gpr1
+99:
 	/*
 	 * Restore IAMR only when returning to userspace
 	 */
 	ld	\gpr1, STACK_REGS_IAMR(r1)
+
+	/*
+	 * If kuep feature is not enabled, do the mtspr
+	 * only if IAMR value is different.
+	 */
+	BEGIN_MMU_FTR_SECTION_NESTED(69)
+	mfspr	\gpr2, SPRN_IAMR
+	cmpd	\gpr1, \gpr2
+	beq	100f
+	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)
+
+	isync
 	mtspr	SPRN_IAMR, \gpr1
 
+100: //skip_restore_amr
 	/* No isync required, see kuap_user_restore() */
-	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_PKEY, 67)
 #endif
 .endm
 
-.macro kuap_kernel_restore gpr1, gpr2
+.macro kuap_kernel_restore gpr1, gpr2
 #if defined(CONFIG_PPC_PKEY)
 
 	BEGIN_MMU_FTR_SECTION_NESTED(67)
@@ -199,18 +225,43 @@ static inline u64 current_thread_iamr(void)
 static inline void kuap_user_restore(struct pt_regs *regs)
 {
+	bool restore_amr = false, restore_iamr = false;
+	unsigned long amr, iamr;
+
 	if (!mmu_has_feature(MMU_FTR_PKEY))
 		return;
 
-	isync();
-	mtspr(SPRN_AMR, regs->amr);
-	mtspr(SPRN_IAMR, regs->iamr);
+	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+		amr = mfspr(SPRN_AMR);
+		if (amr != regs->amr)
+			restore_amr = true;
+	} else {
+		restore_amr = true;
+	}
+
+	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+		iamr = mfspr(SPRN_IAMR);
+		if (iamr != regs->iamr)
+			restore_iamr = true;
+	} else {
+		restore_iamr = true;
+	}
+
+
+	if (restore_amr || restore_iamr) {
+		isync();
+		if (restore_amr)
+			mtspr(SPRN_AMR, regs->amr);
+		if (restore_iamr)
+			mtspr(SPRN_IAMR, regs->iamr);
+	}
 
 	/*
 	 * No isync required here because we are about to rfi
 	 * back to previous context before any user accesses
 	 * would be made, which is a CSI.
 	 */
 }
+
 static inline void kuap_kernel_restore(struct pt_regs *regs,
 					   unsigned long amr)
 {
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index da23c397ceb22..c9d59450fba0e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -675,7 +675,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
 	bne-	.Lrestore_nvgprs
 
 .Lfast_user_interrupt_return_amr:
-	kuap_user_restore r3
+	kuap_user_restore r3, r4
 .Lfast_user_interrupt_return:
 	ld	r11,_NIP(r1)
 	ld	r12,_MSR(r1)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index 11f1c6360291f..7c85ed04a1641 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -38,6 +38,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 #ifdef CONFIG_PPC_PKEY
 	if (mmu_has_feature(MMU_FTR_PKEY)) {
 		unsigned long amr, iamr;
+		bool flush_needed = false;
 		/*
 		 * When entering from userspace we mostly have the AMR/IAMR
 		 * different from kernel default values. Hence don't compare.
@@ -46,11 +47,16 @@ notrace long system_call_exception(long r3, long r4, long r5,
 		iamr = mfspr(SPRN_IAMR);
 		regs->amr = amr;
 		regs->iamr = iamr;
-		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+		if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
 			mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
-		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
+			flush_needed = true;
+		}
+		if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
 			mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
-		isync();
+			flush_needed = true;
+		}
+		if (flush_needed)
+			isync();
 	} else
 #endif
 	kuap_check_amr();
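For reference, the exit-path logic introduced by the kup.h changes can be illustrated with a small standalone sketch. This is a model of the pattern only, not kernel code: cur_amr, cur_iamr, barrier_isync(), needs_restore() and restore_user_access_regs() are made-up names standing in for the AMR/IAMR SPRs, isync() and kuap_user_restore(). The point is that the context-synchronizing barrier and the register writes are skipped entirely when neither value actually needs to change.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model: plain variables stand in for the AMR/IAMR SPRs and a
 * counter stands in for the isync barrier, so the cost of the restore path
 * can be observed.  None of these names are kernel APIs. */
static unsigned long cur_amr, cur_iamr;
static unsigned int isync_count;

static void barrier_isync(void)
{
	isync_count++;
}

/* Restore is needed when the feature forces it (the register was dirtied on
 * kernel entry) or when the saved value differs from the current one. */
static bool needs_restore(unsigned long cur, unsigned long saved, bool feature_on)
{
	return feature_on || cur != saved;
}

static void restore_user_access_regs(unsigned long saved_amr, unsigned long saved_iamr,
				     bool kuap_on, bool kuep_on)
{
	bool do_amr = needs_restore(cur_amr, saved_amr, kuap_on);
	bool do_iamr = needs_restore(cur_iamr, saved_iamr, kuep_on);

	if (!do_amr && !do_iamr)
		return;			/* nothing changed: no barrier, no writes */

	barrier_isync();		/* one barrier covers both writes */
	if (do_amr)
		cur_amr = saved_amr;
	if (do_iamr)
		cur_iamr = saved_iamr;
}

int main(void)
{
	/* Neither feature enabled and the registers already hold the saved
	 * values: the whole restore path is skipped. */
	cur_amr = 0x1;
	cur_iamr = 0x2;
	restore_user_access_regs(0x1, 0x2, false, false);
	printf("isyncs after no-op restore: %u\n", isync_count);	/* 0 */

	/* KUAP enabled: AMR is always restored and one isync is paid. */
	restore_user_access_regs(0x1, 0x2, true, false);
	printf("isyncs after forced restore: %u\n", isync_count);	/* 1 */
	return 0;
}

The syscall_64.c hunks apply the same idea on the entry side: flush_needed records whether a blocking value was actually written to AMR or IAMR, and isync() is issued only in that case.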