arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs
We have recently introduced new macros, SYM_CODE_START() and SYM_CODE_END(),
for annotating assembly symbols that are not C functions, in an effort to
clarify and simplify our annotations of assembly files.

Using these for __bp_harden_hyp_vecs is more involved than for most symbols,
as the symbol is annotated quite unusually: rather than defining just the
symbol itself, we define _start and _end symbols and use them to compute the
length. This does not play at all nicely with the new-style macros. Since the
size of the vectors is a known constant that will not vary, the simplest thing
to do is to drop the separate _start and _end symbols and use a #define for
the size instead.
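
As a sanity check on that constant (an editorial sketch, not part of the patch,
using the values the patch adds to asm/mmu.h below): there are
BP_HARDEN_EL2_SLOTS slots of SZ_2K bytes each, so the whole block is 8 KiB;
each 2 KiB slot is one full EL2 vector table, which is why the block is placed
with .align 11 (2^11 = 2048) in hyp-entry.S.

/*
 * Editorial sketch, not part of the patch: the arithmetic behind the
 * new constant, using the values added to asm/mmu.h below.
 */
#define SZ_2K			0x00000800	/* as in include/linux/sizes.h */
#define BP_HARDEN_EL2_SLOTS	4
#define __BP_HARDEN_HYP_VECS_SZ	(BP_HARDEN_EL2_SLOTS * SZ_2K)

/* 4 slots of 2 KiB each: the vectors span exactly 8 KiB. */
_Static_assert(__BP_HARDEN_HYP_VECS_SZ == 4 * 2048, "hyp vectors are 8 KiB");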

Signed-off-by: Mark Brown <broonie@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Mark Brown authored and Catalin Marinas committed Mar 9, 2020
1 parent 617a2f3 commit 6e52aab
Showing 4 changed files with 12 additions and 9 deletions.
9 changes: 4 additions & 5 deletions arch/arm64/include/asm/kvm_mmu.h
@@ -480,7 +480,7 @@ static inline void *kvm_get_hyp_vector(void)
 	int slot = -1;
 
 	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 		slot = data->hyp_vectors_slot;
 	}

@@ -509,14 +509,13 @@ static inline int kvm_map_vectors(void)
 	 * HBP + HEL2 -> use hardened vertors and use exec mapping
 	 */
 	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
 	}
 
 	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
-		unsigned long size = (__bp_harden_hyp_vecs_end -
-				      __bp_harden_hyp_vecs_start);
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
 
 		/*
 		 * Always allocate a spare vector slot, as we don't
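Consumers such as kvm_map_vectors() above no longer need a separate _end
marker: the end of the vectors follows from the single symbol plus the
constant. A minimal sketch of that pattern (the helper and its name are
hypothetical, not something this patch adds):

/* Hypothetical helper illustrating the new pattern; not in the patch. */
extern char __bp_harden_hyp_vecs[];	/* declared in asm/mmu.h below */

static inline char *bp_harden_hyp_vecs_end(void)
{
	return __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ;
}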
4 changes: 3 additions & 1 deletion arch/arm64/include/asm/mmu.h
@@ -13,6 +13,7 @@
 #define TTBR_ASID_MASK	(UL(0xffff) << 48)
 
 #define BP_HARDEN_EL2_SLOTS 4
+#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)
 
 #ifndef __ASSEMBLY__

@@ -45,7 +46,8 @@ struct bp_hardening_data {
 
 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||	\
      defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+
+extern char __bp_harden_hyp_vecs[];
 extern atomic_t arm64_el2_vector_last_slot;
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */

2 changes: 1 addition & 1 deletion arch/arm64/kernel/cpu_errata.c
@@ -119,7 +119,7 @@ extern char __smccc_workaround_1_smc_end[];
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
 {
-	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
 	int i;
 
 	for (i = 0; i < SZ_2K; i += 0x80)
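For context on the slot arithmetic in __copy_hyp_vect_bpi(): each 2 KiB slot
holds a complete arm64 exception vector table of 16 entries, 0x80 (128) bytes
apiece, which is why the copy loop steps by 0x80. A small sketch of that
layout, assuming the constants shown earlier (editorial, not part of the patch):

/* Editorial sketch of the per-slot layout; not part of the patch. */
#define VECTOR_ENTRY_SZ		0x80				/* one exception vector entry */
#define ENTRIES_PER_SLOT	(SZ_2K / VECTOR_ENTRY_SZ)	/* 16 entries per 2 KiB slot */

/* Byte offset of a slot (0..BP_HARDEN_EL2_SLOTS-1) inside __bp_harden_hyp_vecs[]. */
static inline unsigned long bp_harden_slot_offset(int slot)
{
	return (unsigned long)slot * SZ_2K;
}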
6 changes: 4 additions & 2 deletions arch/arm64/kvm/hyp/hyp-entry.S
@@ -312,11 +312,13 @@ alternative_cb_end
 .endm
 
 	.align	11
-ENTRY(__bp_harden_hyp_vecs_start)
+SYM_CODE_START(__bp_harden_hyp_vecs)
 	.rept BP_HARDEN_EL2_SLOTS
 	generate_vectors
 	.endr
-ENTRY(__bp_harden_hyp_vecs_end)
+1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
+	.org 1b
+SYM_CODE_END(__bp_harden_hyp_vecs)
 
 	.popsection

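A note on the 1:/.org pair that replaces the old _end marker: the pair only
assembles when the location counter already sits exactly at
__bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ, because GNU as refuses to move
the location counter backwards, so any size mismatch in the generated vectors
fails the build. In C terms it is roughly a build-time size assertion, along
these lines (a rough analogue, not code from the patch):

/*
 * Rough C analogue, editorial only: fail the build if an object's real
 * size disagrees with its budgeted size, as the .org pair does for the
 * emitted vectors.
 */
#include <linux/build_bug.h>

struct hyp_vec_slot {
	char entry[16][0x80];	/* 16 vector entries of 128 bytes each */
};

static inline void bp_harden_hyp_vecs_size_check(void)
{
	BUILD_BUG_ON(BP_HARDEN_EL2_SLOTS * sizeof(struct hyp_vec_slot) !=
		     __BP_HARDEN_HYP_VECS_SZ);
}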
