Merge tag 'powerpc-cve-2020-4788' into fixes
From Daniel's cover letter:

IBM Power9 processors can speculatively operate on data in the L1 cache
before it has been completely validated, via a way-prediction mechanism. It
is not possible for an attacker to determine the contents of impermissible
memory using this method, since these systems implement a combination of
hardware and software security measures to prevent scenarios where
protected data could be leaked.

However, these measures don't address the scenario where an attacker induces
the operating system to speculatively execute instructions using data that
the attacker controls. This can be used for example to speculatively bypass
"kernel user access prevention" techniques, as discovered by Anthony
Steinhauser of Google's Safeside Project. This is not an attack by itself,
but there is a possibility it could be used in conjunction with
side-channels or other weaknesses in the privileged code to construct an
attack.

This issue can be mitigated by flushing the L1 cache between privilege
boundaries of concern.

This patch series flushes the L1 cache on kernel entry (patch 2) and after the
kernel performs any user accesses (patch 3). It also adds a self-test and
performs some related cleanups.
Michael Ellerman committed Nov 23, 2020
2 parents cd81acc + da631f7 commit 962f8e6
Showing 23 changed files with 693 additions and 147 deletions.
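
Before the per-file hunks, a rough sketch of the two privilege boundaries the series covers. This is an illustration only, not code from this commit: the kernel-entry side is really assembly driven by boot-patched nop slots (ENTRY_FLUSH_SLOT below), and the function here simply mirrors the prevent_user_access() change made in kup-radix.h.

/* Illustration only -- not taken from this commit. */

/* Boundary 1: kernel entry. Exception entry gains a patchable slot
 * (ENTRY_FLUSH_SLOT, three nops) that boot-time fixups overwrite with the
 * L1-D flush sequence when the entry flush is enabled.
 */

/* Boundary 2: closing a user-access window in the kernel. */
static inline void example_prevent_user_access(void)
{
	set_kuap(AMR_KUAP_BLOCKED);                     /* block user memory */
	if (static_branch_unlikely(&uaccess_flush_key)) /* runtime on/off    */
		do_uaccess_flush();                     /* flush the L1-D    */
}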
7 changes: 7 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -2858,6 +2858,8 @@
mds=off [X86]
tsx_async_abort=off [X86]
kvm.nx_huge_pages=off [X86]
no_entry_flush [PPC]
no_uaccess_flush [PPC]

Exceptions:
This does not have any effect on
@@ -3186,6 +3188,8 @@

noefi Disable EFI runtime services support.

no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel.

noexec [IA-64]

noexec [X86]
@@ -3235,6 +3239,9 @@
nospec_store_bypass_disable
[HW] Disable all mitigations for the Speculative Store Bypass vulnerability

no_uaccess_flush
[PPC] Don't flush the L1-D cache after accessing user data.

noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
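Usage note, not part of the patch: on a system where the flush overhead is unwanted and the residual risk is accepted, both new mitigations could be disabled by appending the documented parameters to the kernel command line, for example:

	no_entry_flush no_uaccess_flush

The first hunk above also lists both options among those implied by mitigations=off.
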
66 changes: 42 additions & 24 deletions arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -27,6 +27,7 @@
#endif
.endm

#ifdef CONFIG_PPC_KUAP
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
BEGIN_MMU_FTR_SECTION_NESTED(67)
@@ -38,6 +39,7 @@
END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
#endif

.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
@@ -61,6 +63,8 @@

#else /* !__ASSEMBLY__ */

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_KUAP

#include <asm/mmu.h>
@@ -103,8 +107,16 @@ static inline void kuap_check_amr(void)

static inline unsigned long get_kuap(void)
{
/*
* We return AMR_KUAP_BLOCKED when we don't support KUAP because
* prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
* cause restore_user_access to do a flush.
*
* This has no effect in terms of actually blocking things on hash,
* so it doesn't break anything.
*/
if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
return 0;
return AMR_KUAP_BLOCKED;

return mfspr(SPRN_AMR);
}
@@ -123,6 +135,29 @@ static inline void set_kuap(unsigned long value)
isync();
}

static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }

static inline unsigned long kuap_get_and_check_amr(void)
{
return 0UL;
}

static inline unsigned long get_kuap(void)
{
return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }
#endif /* !CONFIG_PPC_KUAP */

static __always_inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
@@ -142,44 +177,27 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = get_kuap();

set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
do_uaccess_flush();

return flags;
}

static inline void restore_user_access(unsigned long flags)
{
set_kuap(flags);
if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
do_uaccess_flush();
}

static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
{
}

static inline void kuap_check_amr(void)
{
}

static inline unsigned long kuap_get_and_check_amr(void)
{
return 0;
}
#endif /* CONFIG_PPC_KUAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
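
A short usage sketch of the helpers above (illustration, not taken from this diff; the surrounding function is hypothetical, while the helper names and the AMR_KUAP_BLOCKED sentinel come from the header):

/*
 * Sketch: code that must temporarily close a user-access window. Assumes the
 * header above is included.
 */
static void example_interrupted_uaccess(void)
{
	unsigned long flags;

	/* Close the window; flushes the L1-D if uaccess_flush_key is enabled. */
	flags = prevent_user_access_return();

	/* ... work that must not run with user access open ... */

	/* Restore the previous state. restore_user_access() flushes only when
	 * asked to restore AMR_KUAP_BLOCKED, which is also what
	 * prevent_user_access_return() reports when KUAP is not active -- see
	 * the comment added to get_kuap() above.
	 */
	restore_user_access(flags);
}

This is why the !CONFIG_PPC_KUAP stub of get_kuap() now returns AMR_KUAP_BLOCKED instead of 0: the flush must still happen even when KUAP itself is compiled out.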
12 changes: 11 additions & 1 deletion arch/powerpc/include/asm/exception-64s.h
@@ -57,11 +57,18 @@
nop; \
nop

#define ENTRY_FLUSH_SLOT \
ENTRY_FLUSH_FIXUP_SECTION; \
nop; \
nop; \
nop;

/*
* r10 must be free to use, r13 must be paca
*/
#define INTERRUPT_TO_KERNEL \
STF_ENTRY_BARRIER_SLOT
STF_ENTRY_BARRIER_SLOT; \
ENTRY_FLUSH_SLOT

/*
* Macros for annotating the expected destination of (h)rfid
@@ -137,6 +144,9 @@
RFSCV; \
b rfscv_flush_fallback

#else /* __ASSEMBLY__ */
/* Prototype for function defined in exceptions-64s.S */
void do_uaccess_flush(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_EXCEPTION_H */
19 changes: 19 additions & 0 deletions arch/powerpc/include/asm/feature-fixups.h
@@ -205,6 +205,22 @@ label##3: \
FTR_ENTRY_OFFSET 955b-956b; \
.popsection;

#define UACCESS_FLUSH_FIXUP_SECTION \
959: \
.pushsection __uaccess_flush_fixup,"a"; \
.align 2; \
960: \
FTR_ENTRY_OFFSET 959b-960b; \
.popsection;

#define ENTRY_FLUSH_FIXUP_SECTION \
957: \
.pushsection __entry_flush_fixup,"a"; \
.align 2; \
958: \
FTR_ENTRY_OFFSET 957b-958b; \
.popsection;

#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
@@ -237,8 +253,11 @@ label##3: \
#include <linux/types.h>

extern long stf_barrier_fallback;
extern long entry_flush_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
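The new __start/__stop symbol pairs bound tables of offsets emitted by the fixup-section macros above. A rough sketch of how a boot-time pass might walk one such table (simplified illustration; the real patching is done by do_uaccess_flush_fixups()/do_entry_flush_fixups(), declared in asm/setup.h further down and implemented in arch/powerpc/lib/feature-fixups.c):

/*
 * Simplified sketch, not the kernel's implementation. Each table entry holds
 * a relative offset back to the patch site recorded by
 * UACCESS_FLUSH_FIXUP_SECTION, so the site address is "entry address + value".
 */
static void example_walk_uaccess_flush_sites(enum l1d_flush_type types)
{
	long *start = &__start___uaccess_flush_fixup;
	long *end   = &__stop___uaccess_flush_fixup;
	long *curr;

	(void)types;	/* sketch: selection of the flush sequence is omitted */

	for (curr = start; curr < end; curr++) {
		unsigned int *site = (unsigned int *)((char *)curr + *curr);

		/* Overwrite the recorded nop slot at 'site' with either the
		 * flush sequence selected by 'types' or plain nops.
		 */
		(void)site;
	}
}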
26 changes: 20 additions & 6 deletions arch/powerpc/include/asm/kup.h
@@ -14,7 +14,7 @@
#define KUAP_CURRENT_WRITE 8
#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup-radix.h>
#endif
#ifdef CONFIG_PPC_8xx
@@ -35,6 +35,9 @@
.macro kuap_check current, gpr
.endm

.macro kuap_check_amr gpr1, gpr2
.endm

#endif

#else /* !__ASSEMBLY__ */
@@ -53,17 +56,28 @@ static inline void setup_kuep(bool disabled) { }
void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }

static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}

static inline void kuap_check_amr(void) { }

/*
* book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
* the L1D cache after user accesses. Only include the empty stubs for other
* platforms.
*/
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline void prevent_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_KUAP */

static inline void allow_read_from_user(const void __user *from, unsigned long size)
7 changes: 7 additions & 0 deletions arch/powerpc/include/asm/security_features.h
@@ -86,12 +86,19 @@ static inline bool security_ftr_enabled(u64 feature)
// Software required to flush link stack on context switch
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull

// The L1-D cache should be flushed when entering the kernel
#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull

// The L1-D cache should be flushed after user accesses from the kernel
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull

// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
SEC_FTR_L1D_FLUSH_PR | \
SEC_FTR_BNDS_CHK_SPEC_BAR | \
SEC_FTR_L1D_FLUSH_ENTRY | \
SEC_FTR_L1D_FLUSH_UACCESS | \
SEC_FTR_FAVOUR_SECURITY)

#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
4 changes: 4 additions & 0 deletions arch/powerpc/include/asm/setup.h
@@ -52,12 +52,16 @@ enum l1d_flush_type {
};

void setup_rfi_flush(enum l1d_flush_type, bool enable);
void setup_entry_flush(bool enable);
void setup_uaccess_flush(bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void setup_barrier_nospec(void);
#else
static inline void setup_barrier_nospec(void) { };
#endif
void do_uaccess_flush_fixups(enum l1d_flush_type types);
void do_entry_flush_fixups(enum l1d_flush_type types);
void do_barrier_nospec_fixups(bool enable);
extern bool barrier_nospec_enabled;

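With the feature bits added to security_features.h and the hooks above, platform setup code can decide at boot whether each flush is enabled. A condensed sketch of that gating (illustration based only on the declarations above; firmware probing and the pseries/powernv specifics are omitted):

/* Sketch: gate the new flushes on the advertised security features. */
static void example_setup_l1d_flushes(void)
{
	bool enable;

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
	setup_entry_flush(enable);

	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
	setup_uaccess_flush(enable);
}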