Merge branch 'kvm-arm64/vector-rework' into kvmarm-master/next
Signed-off-by: Marc Zyngier <maz@kernel.org>
Marc Zyngier committed Nov 27, 2020
2 parents 6e5d8c7 + 4f6a36f commit dc2286f
Showing 13 changed files with 215 additions and 260 deletions.
Documentation/arm64/memory.rst (2 changes: 1 addition & 1 deletion)
@@ -100,7 +100,7 @@ hypervisor maps kernel pages in EL2 at a fixed (and potentially
random) offset from the linear mapping. See the kern_hyp_va macro and
kvm_update_va_mask function for more details. MMIO devices such as
GICv2 gets mapped next to the HYP idmap page, as do vectors when
ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
ARM64_SPECTRE_V3A is enabled for particular CPUs.

When using KVM with the Virtualization Host Extensions, no additional
mappings are created, since the host kernel runs directly in EL2.
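
The offset-based remapping described in the documentation text above (the kern_hyp_va macro and kvm_update_va_mask) is generated at boot by patching instructions, so it can only be approximated here. The following standalone C sketch models the idea under invented assumptions: the mask width, hypervisor base and input address are made-up example values, and the real kernel computes (and may randomise) them itself.

/*
 * Standalone model (not kernel code) of the kern_hyp_va idea described
 * above: take the offset of a kernel address within the linear mapping
 * and relocate it to a per-boot (potentially random) hypervisor base.
 * All constants below are invented for illustration only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static const uint64_t va_mask  = (1ULL << 47) - 1;        /* example: keep the low 47 bits */
static const uint64_t hyp_base = 0x0001800000000000ULL;   /* example: randomised HYP base  */

static uint64_t model_kern_hyp_va(uint64_t kern_va)
{
	/* Offset within the linear mapping ... */
	uint64_t offset = kern_va & va_mask;

	/* ... relocated to the hypervisor's own base. */
	return hyp_base + offset;
}

int main(void)
{
	uint64_t kva = 0xffff800010a00000ULL;              /* made-up kernel VA */

	printf("kernel VA 0x%016" PRIx64 " -> hyp VA 0x%016" PRIx64 "\n",
	       kva, model_kern_hyp_va(kva));
	return 0;
}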
arch/arm64/include/asm/cpucaps.h (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@
#define ARM64_HAS_VIRT_HOST_EXTN 11
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HARDEN_EL2_VECTORS 14
#define ARM64_SPECTRE_V3A 14
#define ARM64_HAS_CNP 15
#define ARM64_HAS_NO_FPSIMD 16
#define ARM64_WORKAROUND_REPEAT_TLBI 17
arch/arm64/include/asm/kvm_asm.h (5 changes: 0 additions & 5 deletions)
@@ -34,8 +34,6 @@
*/
#define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE)

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#define KVM_HOST_SMCCC_ID(id) \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
ARM_SMCCC_SMC_64, \
@@ -175,7 +173,6 @@ extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

extern atomic_t arm64_el2_vector_last_slot;
DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

@@ -196,8 +193,6 @@ extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
* Obtain the PC-relative address of a kernel symbol
* s: symbol
arch/arm64/include/asm/kvm_mmu.h (46 changes: 0 additions & 46 deletions)
@@ -248,52 +248,6 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
return ret;
}

/*
* EL2 vectors can be mapped and rerouted in a number of ways,
* depending on the kernel configuration and CPU present:
*
* - If the CPU is affected by Spectre-v2, the hardening sequence is
* placed in one of the vector slots, which is executed before jumping
* to the real vectors.
*
* - If the CPU also has the ARM64_HARDEN_EL2_VECTORS cap, the slot
* containing the hardening sequence is mapped next to the idmap page,
* and executed before jumping to the real vectors.
*
* - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
* empty slot is selected, mapped next to the idmap page, and
* executed before jumping to the real vectors.
*
* Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
* VHE, as we don't have hypervisor-specific mappings. If the system
* is VHE and yet selects this capability, it will be ignored.
*/
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
int slot = -1;

if (cpus_have_const_cap(ARM64_SPECTRE_V2) && data->fn) {
vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}

if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
vect = __kvm_bp_vect_base;
if (slot == -1)
slot = __kvm_harden_el2_vector_slot;
}

if (slot != -1)
vect += slot * SZ_2K;

return vect;
}

#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)

static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
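
The comment block and kvm_get_hyp_vector() removed above both rest on one piece of arithmetic, visible in the vect += slot * SZ_2K line: each vector variant occupies its own 2KiB slot, so selecting a variant is just an offset from the mapping base. A minimal standalone sketch of that calculation, using an invented base address:

/*
 * Minimal standalone sketch of the slot arithmetic used by the removed
 * kvm_get_hyp_vector(): a full AArch64 vector table is 2KiB, so the
 * address programmed into VBAR_EL2 is base + slot * 2K. The base address
 * below is an invented value for illustration only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_2K			0x800
#define BP_HARDEN_EL2_SLOTS	4

int main(void)
{
	uint64_t vect_base = 0xffffff8000002000ULL;	/* hypothetical mapping base */
	int slot;

	for (slot = 0; slot < BP_HARDEN_EL2_SLOTS; slot++)
		printf("slot %d -> vectors at 0x%016" PRIx64 "\n",
		       slot, vect_base + (uint64_t)slot * SZ_2K);
	return 0;
}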
arch/arm64/include/asm/mmu.h (29 changes: 0 additions & 29 deletions)
@@ -12,9 +12,6 @@
#define USER_ASID_FLAG (UL(1) << USER_ASID_BIT)
#define TTBR_ASID_MASK (UL(0xffff) << 48)

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)

#ifndef __ASSEMBLY__

#include <linux/refcount.h>
@@ -41,32 +38,6 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
int hyp_vectors_slot;
bp_hardening_cb_t fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
return this_cpu_ptr(&bp_hardening_data);
}

static inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;

if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
return;

d = arm64_get_bp_hardening_data();
if (d->fn)
d->fn();
}

extern void arm64_memblock_init(void);
extern void paging_init(void);
extern void bootmem_init(void);
arch/arm64/include/asm/spectre.h (63 changes: 63 additions & 0 deletions)
@@ -9,7 +9,15 @@
#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)

#ifndef __ASSEMBLY__

#include <linux/percpu.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>

/* Watch out, ordering is important here. */
enum mitigation_state {
@@ -20,13 +28,68 @@

struct task_struct;

/*
* Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
* we rely on having the direct vectors first.
*/
enum arm64_hyp_spectre_vector {
/*
* Take exceptions directly to __kvm_hyp_vector. This must be
* 0 so that it used by default when mitigations are not needed.
*/
HYP_VECTOR_DIRECT,

/*
* Bounce via a slot in the hypervisor text mapping of
* __bp_harden_hyp_vecs, which contains an SMC call.
*/
HYP_VECTOR_SPECTRE_DIRECT,

/*
* Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
* next to the idmap page.
*/
HYP_VECTOR_INDIRECT,

/*
* Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
* next to the idmap page, which contains an SMC call.
*/
HYP_VECTOR_SPECTRE_INDIRECT,
};

typedef void (*bp_hardening_cb_t)(void);

struct bp_hardening_data {
enum arm64_hyp_spectre_vector slot;
bp_hardening_cb_t fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;

if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
return;

d = this_cpu_ptr(&bp_hardening_data);
if (d->fn)
d->fn();
}

enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

#endif /* __ASSEMBLY__ */
#endif /* __ASM_SPECTRE_H */
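
The ordering constraint called out in the comment on enum arm64_hyp_spectre_vector above is what lets the two mitigations compose by simple addition: Spectre-v2 handling picks one of the two direct slots, and the Spectre-v3a code in proton-pack.c below then adds HYP_VECTOR_INDIRECT to land on the matching indirect slot. A small standalone check of that property, mirroring the enum values added in this file:

/*
 * Standalone check of the slot-composition property implied by the enum
 * ordering above: adding HYP_VECTOR_INDIRECT to a direct slot yields the
 * matching indirect slot.
 */
#include <assert.h>
#include <stdio.h>

enum arm64_hyp_spectre_vector {
	HYP_VECTOR_DIRECT,		/* 0 */
	HYP_VECTOR_SPECTRE_DIRECT,	/* 1 */
	HYP_VECTOR_INDIRECT,		/* 2 */
	HYP_VECTOR_SPECTRE_INDIRECT,	/* 3 */
};

int main(void)
{
	/* No Spectre-v2 callback installed: v3a alone selects the plain indirect slot. */
	assert(HYP_VECTOR_DIRECT + HYP_VECTOR_INDIRECT == HYP_VECTOR_INDIRECT);

	/* Spectre-v2 already selected the SMC slot: v3a moves it to the indirect SMC slot. */
	assert(HYP_VECTOR_SPECTRE_DIRECT + HYP_VECTOR_INDIRECT ==
	       HYP_VECTOR_SPECTRE_INDIRECT);

	puts("slot composition holds");
	return 0;
}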
arch/arm64/kernel/cpu_errata.c (19 changes: 6 additions & 13 deletions)
@@ -196,16 +196,6 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_RANDOMIZE_BASE

static const struct midr_range ca57_a72[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
@@ -461,9 +451,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#ifdef CONFIG_RANDOMIZE_BASE
{
.desc = "EL2 vector hardening",
.capability = ARM64_HARDEN_EL2_VECTORS,
ERRATA_MIDR_RANGE_LIST(ca57_a72),
/* Must come after the Spectre-v2 entry */
.desc = "Spectre-v3a",
.capability = ARM64_SPECTRE_V3A,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_spectre_v3a,
.cpu_enable = spectre_v3a_enable_mitigation,
},
#endif
{
arch/arm64/kernel/proton-pack.c (84 changes: 33 additions & 51 deletions)
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
* Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
* detailed at:
*
* https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
@@ -26,6 +26,7 @@

#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/virt.h>

/*
* We try to ensure that the mitigation state can never change as the result of
@@ -170,72 +171,26 @@ bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
return true;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

enum mitigation_state arm64_get_spectre_v2_state(void)
{
return spectre_v2_state;
}

#ifdef CONFIG_KVM
#include <asm/cacheflush.h>
#include <asm/kvm_asm.h>

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
int i;

for (i = 0; i < SZ_2K; i += 0x80)
memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
static DEFINE_RAW_SPINLOCK(bp_lock);
int cpu, slot = -1;
const char *hyp_vecs_start = __smccc_workaround_1_smc;
const char *hyp_vecs_end = __smccc_workaround_1_smc +
__SMCCC_WORKAROUND_1_SMC_SZ;
__this_cpu_write(bp_hardening_data.fn, fn);

/*
* Vinz Clortho takes the hyp_vecs start/end "keys" at
* the door when we're a guest. Skip the hyp-vectors work.
*/
if (!is_hyp_mode_available()) {
__this_cpu_write(bp_hardening_data.fn, fn);
if (!is_hyp_mode_available())
return;
}

raw_spin_lock(&bp_lock);
for_each_possible_cpu(cpu) {
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
break;
}
}

if (slot == -1) {
slot = atomic_inc_return(&arm64_el2_vector_last_slot);
BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}

__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
__this_cpu_write(bp_hardening_data.fn, fn);
__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}
#endif /* CONFIG_KVM */

static void call_smc_arch_workaround_1(void)
{
@@ -316,6 +271,33 @@ void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
update_mitigation_state(&spectre_v2_state, state);
}

/*
* Spectre-v3a.
*
* Phew, there's not an awful lot to do here! We just instruct EL2 to use
* an indirect trampoline for the hyp vectors so that guests can't read
* VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
*/
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
static const struct midr_range spectre_v3a_unsafe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
{},
};

WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
data->slot += HYP_VECTOR_INDIRECT;
}
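
has_spectre_v3a() above matches MIDR_EL1 against the unsafe list with the kernel's is_midr_in_range_list() helper. As a rough standalone illustration of what such a match inspects, the sketch below decodes the implementer and part-number fields of a MIDR value and compares them against the Cortex-A57/A72 part numbers; the sample MIDR value is invented, and real code reads it from the CPU.

/*
 * Rough standalone illustration of the MIDR matching performed by
 * has_spectre_v3a(): decode the implementer (bits [31:24]) and part
 * number (bits [15:4]) of MIDR_EL1 and compare them against the affected
 * cores. The example MIDR is made up for demonstration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPLEMENTOR(midr)		(((midr) >> 24) & 0xff)
#define MIDR_PARTNUM(midr)		(((midr) >> 4) & 0xfff)

#define ARM_CPU_IMP_ARM			0x41
#define ARM_CPU_PART_CORTEX_A57		0xd07
#define ARM_CPU_PART_CORTEX_A72		0xd08

static bool midr_is_spectre_v3a_unsafe(uint32_t midr)
{
	uint32_t part = MIDR_PARTNUM(midr);

	return MIDR_IMPLEMENTOR(midr) == ARM_CPU_IMP_ARM &&
	       (part == ARM_CPU_PART_CORTEX_A57 || part == ARM_CPU_PART_CORTEX_A72);
}

int main(void)
{
	uint32_t midr = 0x410fd083;	/* example: ARM implementer, Cortex-A72 part, r0p3 */

	printf("midr 0x%08x unsafe: %s\n", midr,
	       midr_is_spectre_v3a_unsafe(midr) ? "yes" : "no");
	return 0;
}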

/*
* Spectre v4.
*
