From e891432cf7171ab2054222e2ce4c94f8080b92e7 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Sat, 21 Dec 2024 10:06:17 +0000
Subject: [PATCH 1/6] KVM: arm64: nv: Advertise the lack of AArch32 EL0
 support

Although we never supported 32bit anywhere in NV, we failed to
advertise this for EL0, probably owing to the relative lack of
hardware supporting both NV2 and 32bit EL0.

Add some sanitising to ID_AA64PFR0_EL1.EL0, and reaffirm that "in
64bit-only we trust".

Reported-by: Oliver Upton
Acked-by: Oliver Upton
Signed-off-by: Marc Zyngier
---
 arch/arm64/kvm/nested.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 9b36218b48def..9e74f7b38e056 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -830,8 +830,10 @@ static void limit_nv_id_regs(struct kvm *kvm)
 			NV_FTR(PFR0, RAS)	|
 			NV_FTR(PFR0, EL3)	|
 			NV_FTR(PFR0, EL2)	|
-			NV_FTR(PFR0, EL1));
-		/* 64bit EL1/EL2/EL3 only */
+			NV_FTR(PFR0, EL1)	|
+			NV_FTR(PFR0, EL0));
+		/* 64bit only at any EL */
+		val |= FIELD_PREP(NV_FTR(PFR0, EL0), 0b0001);
 		val |= FIELD_PREP(NV_FTR(PFR0, EL1), 0b0001);
 		val |= FIELD_PREP(NV_FTR(PFR0, EL2), 0b0001);
 		val |= FIELD_PREP(NV_FTR(PFR0, EL3), 0b0001);

From e8440c1e2d23a9ca5e0af1a18be637cbd5a5d44f Mon Sep 17 00:00:00 2001
From: Mostafa Saleh
Date: Fri, 25 Oct 2024 09:32:59 +0000
Subject: [PATCH 2/6] Documentation: Update the behaviour of "kvm-arm.mode"

Commit 5053c3f0519c ("KVM: arm64: Use hVHE in pKVM by default on CPUs
with VHE support") modified the behaviour of "kvm-arm.mode=protected"
without updating the kernel parameters doc. Update it to match the
current implementation.

Also, update the required architecture version for nested
virtualization, as suggested by Marc.

Cc: Will Deacon
Cc: Marc Zyngier
Signed-off-by: Mostafa Saleh
Link: https://lore.kernel.org/r/20241025093259.2216093-1-smostafa@google.com
Signed-off-by: Marc Zyngier
---
 Documentation/admin-guide/kernel-parameters.txt | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3872bc6ec49d6..7c870883af8e3 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2748,17 +2748,21 @@
 			nvhe: Standard nVHE-based mode, without support for
 			      protected guests.
 
-			protected: nVHE-based mode with support for guests whose
-				   state is kept private from the host.
+			protected: Mode with support for guests whose state is
+				   kept private from the host, using VHE or
+				   nVHE depending on HW support.
 
 			nested: VHE-based mode with support for nested
-				virtualization. Requires at least ARMv8.3
-				hardware.
+				virtualization. Requires at least ARMv8.4
+				hardware (with FEAT_NV2).
 
 			Defaults to VHE/nVHE based on hardware support. Setting
 			mode to "protected" will disable kexec and hibernation
-			for the host. "nested" is experimental and should be
-			used with extreme caution.
+			for the host. To force nVHE on VHE hardware, add
+			"arm64_sw.hvhe=0 id_aa64mmfr1.vh=0" to the
+			command-line.
+			"nested" is experimental and should be used with
+			extreme caution.
 
 	kvm-arm.vgic_v3_group0_trap=
 			[KVM,ARM,EARLY] Trap guest accesses to GICv3 group-0
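Note on the idiom in patch 1: limit_nv_id_regs() first clears each feature
field, then re-inserts an explicit value with FIELD_PREP(). A minimal
user-space sketch of that clear-then-set pattern; GENMASK64()/FIELD_PREP64()
are local stand-ins for the kernel's <linux/bits.h>/<linux/bitfield.h>
helpers, and the field layout follows the architectural ID_AA64PFR0_EL1.EL0
definition (bits [3:0], 0b0001 = AArch64-only):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define GENMASK64(h, l)		(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))
#define FIELD_PREP64(mask, v)	(((uint64_t)(v) << __builtin_ctzll(mask)) & (mask))

/* ID_AA64PFR0_EL1.EL0 lives in bits [3:0]; 0b0001 means AArch64-only. */
#define PFR0_EL0	GENMASK64(3, 0)

int main(void)
{
	uint64_t val = 0x2222;	/* pretend EL0 advertised AArch32+AArch64 */

	val &= ~PFR0_EL0;			 /* clear the field... */
	val |= FIELD_PREP64(PFR0_EL0, 0b0001);	 /* ...then force 64bit-only */
	printf("sanitised: %#llx\n", (unsigned long long)val);
	return 0;
}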
From 68344037b764401f751c66661c53334ea1e15324 Mon Sep 17 00:00:00 2001
From: Vincent Donnefort
Date: Tue, 7 Jan 2025 11:28:21 +0000
Subject: [PATCH 3/6] KVM: arm64: Fix nVHE stacktrace VA bits mask

The hypervisor VA space size depends on both the ID map's size
(IDMAP_VA_BITS) and the kernel stage-1 size (VA_BITS). However, the
hypervisor stacktrace decoding relies solely on VA_BITS. This is
especially an issue when VA_BITS < IDMAP_VA_BITS (i.e. when VA_BITS is
39-bit): the hypervisor may use addresses wider than the stacktrace
mask.

Align this mask with hyp_va_bits.

Signed-off-by: Vincent Donnefort
Link: https://lore.kernel.org/r/20250107112821.416591-1-vdonnefort@google.com
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/kvm_mmu.h | 2 ++
 arch/arm64/kvm/mmu.c             | 3 +++
 arch/arm64/kvm/stacktrace.c      | 3 ++-
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 66d93e320ec8e..2ddd98fd2df45 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -139,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+extern u32 __hyp_va_bits;
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c9d46ad57e52d..d36be6d2ac91b 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -29,6 +29,8 @@ static unsigned long __ro_after_init hyp_idmap_start;
 static unsigned long __ro_after_init hyp_idmap_end;
 static phys_addr_t __ro_after_init hyp_idmap_vector;
 
+u32 __ro_after_init __hyp_va_bits;
+
 static unsigned long __ro_after_init io_map_base;
 
 static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end,
@@ -2056,6 +2058,7 @@ int __init kvm_mmu_init(u32 *hyp_va_bits)
 		goto out_destroy_pgtable;
 
 	io_map_base = hyp_idmap_start;
+	__hyp_va_bits = *hyp_va_bits;
 	return 0;
 
 out_destroy_pgtable:
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
index 3ace5b75813bd..fdedd8a3ed6f9 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -19,6 +19,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 
+#include <asm/kvm_mmu.h>
 #include <asm/stacktrace/nvhe.h>
 
 static struct stack_info stackinfo_get_overflow(void)
@@ -145,7 +146,7 @@ static void unwind(struct unwind_state *state,
  */
 static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
 {
-	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+	unsigned long va_mask = GENMASK_ULL(__hyp_va_bits - 1, 0);
 	unsigned long hyp_offset = (unsigned long)arg;
 
 	/* Mask tags and convert to kern addr */
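The failure mode fixed by patch 3 is easy to demonstrate outside the kernel:
a mask built from too few VA bits silently truncates the high bits of a hyp
address before unwinding. A minimal sketch, with GENMASK_ULL() re-defined
locally to mirror the kernel macro and a made-up 48-bit address:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)	(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	/* Hypothetical hyp VA that needs 48 bits to represent. */
	uint64_t where = 0x0000a531c1840123ULL;

	uint64_t mask39 = GENMASK_ULL(39 - 1, 0);	/* old: VA_BITS */
	uint64_t mask48 = GENMASK_ULL(48 - 1, 0);	/* new: hyp_va_bits */

	/* The 39-bit mask silently drops bits [47:39] of the address. */
	printf("39-bit mask: %#llx\n", (unsigned long long)(where & mask39));
	printf("48-bit mask: %#llx\n", (unsigned long long)(where & mask48));
	return 0;
}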
From 38f9e4b905a00047a96fbdc6cefe9ceb4dae34c3 Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Mon, 11 Nov 2024 16:32:50 -0800
Subject: [PATCH 4/6] arm64: kvm: Introduce nvhe stack size constants

Refactor the nVHE stack code to use the NVHE_STACK_SIZE/SHIFT constants
instead of using PAGE_SIZE/SHIFT directly. This makes the code a bit
easier to read, without introducing any functional changes.

Cc: Marc Zyngier
Cc: Mark Brown
Cc: Mark Rutland
Cc: Will Deacon
Signed-off-by: Kalesh Singh
Link: https://lore.kernel.org/r/20241112003336.1375584-1-kaleshsingh@google.com
Signed-off-by: Marc Zyngier
---
 arch/arm64/include/asm/memory.h          |  5 ++++-
 arch/arm64/include/asm/stacktrace/nvhe.h |  2 +-
 arch/arm64/kvm/arm.c                     | 18 +++++++++---------
 arch/arm64/kvm/hyp/nvhe/host.S           |  4 ++--
 arch/arm64/kvm/hyp/nvhe/mm.c             | 12 ++++++------
 arch/arm64/kvm/hyp/nvhe/stacktrace.c     |  4 ++--
 arch/arm64/kvm/mmu.c                     | 12 ++++++------
 arch/arm64/kvm/stacktrace.c              |  6 +++---
 8 files changed, 33 insertions(+), 30 deletions(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 8b9f33cf561bb..717829df294ea 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -145,13 +145,16 @@
 
 #define OVERFLOW_STACK_SIZE	SZ_4K
 
+#define NVHE_STACK_SHIFT	PAGE_SHIFT
+#define NVHE_STACK_SIZE		(UL(1) << NVHE_STACK_SHIFT)
+
 /*
  * With the minimum frame size of [x29, x30], exactly half the combined
  * sizes of the hyp and overflow stacks is the maximum size needed to
  * save the unwinded stacktrace; plus an additional entry to delimit the
  * end.
  */
-#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + PAGE_SIZE) / 2 + sizeof(long))
+#define NVHE_STACKTRACE_SIZE	((OVERFLOW_STACK_SIZE + NVHE_STACK_SIZE) / 2 + sizeof(long))
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index 44759281d0d43..171f9edef49fc 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -47,7 +47,7 @@ static inline void kvm_nvhe_unwind_init(struct unwind_state *state,
 
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
-DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 
 void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a102c3aebdbc4..9ac45e23770f9 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -61,7 +61,7 @@ static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTR
 
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
-DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
 
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -2339,7 +2339,7 @@ static void __init teardown_hyp_mode(void)
 
 	free_hyp_pgds();
 	for_each_possible_cpu(cpu) {
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+		free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT);
 		free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
 
 		if (free_sve) {
@@ -2527,15 +2527,15 @@ static int __init init_hyp_mode(void)
 	 * Allocate stack pages for Hypervisor-mode
 	 */
	for_each_possible_cpu(cpu) {
-		unsigned long stack_page;
+		unsigned long stack_base;
 
-		stack_page = __get_free_page(GFP_KERNEL);
-		if (!stack_page) {
+		stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT);
+		if (!stack_base) {
 			err = -ENOMEM;
 			goto out_err;
 		}
 
-		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+		per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base;
 	}
 
 	/*
@@ -2604,9 +2604,9 @@ static int __init init_hyp_mode(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
-		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+		char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu);
 
-		err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
+		err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va);
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
 			goto out_err;
@@ -2618,7 +2618,7 @@ static int __init init_hyp_mode(void)
 		 * __hyp_pa() won't do the right thing there, since the stack
 		 * has been mapped in the flexible private VA space.
 		 */
-		params->stack_pa = __pa(stack_page);
+		params->stack_pa = __pa(stack_base);
 	}
 
 	for_each_possible_cpu(cpu) {
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 3d610fc51f4d3..58f0cb2298cc2 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -188,12 +188,12 @@ SYM_FUNC_END(__host_hvc)
 
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.
-	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
+	 * nVHE hypervisor stacks are aligned so that the NVHE_STACK_SHIFT bit
 	 * of SP should always be 1.
 	 */
 	add	sp, sp, x0			// sp' = sp + x0
 	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
-	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
+	tbz	x0, #NVHE_STACK_SHIFT, .L__hyp_sp_overflow\@
 	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
 	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
 
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 8850b591d7751..f41c7440b34b4 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -360,10 +360,10 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 	prev_base = __io_map_base;
 	/*
-	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
 	 * an alignment of our allocation on the order of the size.
 	 */
-	size = PAGE_SIZE * 2;
+	size = NVHE_STACK_SIZE * 2;
 	addr = ALIGN(__io_map_base, size);
 
 	ret = __pkvm_alloc_private_va_range(addr, size);
@@ -373,12 +373,12 @@ int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
 		 * at the higher address and leave the lower guard page
 		 * unbacked.
 		 *
-		 * Any valid stack address now has the PAGE_SHIFT bit as 1
+		 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 		 * and addresses corresponding to the guard page have the
-		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+		 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 		 */
-		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
-					  PAGE_SIZE, phys, PAGE_HYP);
+		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + NVHE_STACK_SIZE,
+					  NVHE_STACK_SIZE, phys, PAGE_HYP);
 		if (ret)
 			__io_map_base = prev_base;
 	}
 
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
index ed6b58b19cfa5..5b6eeab1a7743 100644
--- a/arch/arm64/kvm/hyp/nvhe/stacktrace.c
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -28,7 +28,7 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 
-	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+	stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - NVHE_STACK_SIZE);
 	stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
 	stacktrace_info->fp = fp;
 	stacktrace_info->pc = pc;
@@ -54,7 +54,7 @@ static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 	unsigned long high = params->stack_hyp_va;
-	unsigned long low = high - PAGE_SIZE;
+	unsigned long low = high - NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d36be6d2ac91b..8850741243bb5 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -706,10 +706,10 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 	mutex_lock(&kvm_hyp_pgd_mutex);
 
 	/*
-	 * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack verification using the NVHE_STACK_SHIFT bit implies
 	 * an alignment of our allocation on the order of the size.
 	 */
-	size = PAGE_SIZE * 2;
+	size = NVHE_STACK_SIZE * 2;
 	base = ALIGN_DOWN(io_map_base - size, size);
 
 	ret = __hyp_alloc_private_va_range(base);
@@ -726,12 +726,12 @@ int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
 	 * at the higher address and leave the lower guard page
 	 * unbacked.
 	 *
-	 * Any valid stack address now has the PAGE_SHIFT bit as 1
+	 * Any valid stack address now has the NVHE_STACK_SHIFT bit as 1
 	 * and addresses corresponding to the guard page have the
-	 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+	 * NVHE_STACK_SHIFT bit as 0 - this is used for overflow detection.
 	 */
-	ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
-				    PAGE_HYP);
+	ret = __create_hyp_mappings(base + NVHE_STACK_SIZE, NVHE_STACK_SIZE,
+				    phys_addr, PAGE_HYP);
 	if (ret)
 		kvm_err("Cannot map hyp stack\n");
 
diff --git a/arch/arm64/kvm/stacktrace.c b/arch/arm64/kvm/stacktrace.c
index fdedd8a3ed6f9..af5eec6811279 100644
--- a/arch/arm64/kvm/stacktrace.c
+++ b/arch/arm64/kvm/stacktrace.c
@@ -51,7 +51,7 @@ static struct stack_info stackinfo_get_hyp(void)
 	struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->stack_base;
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
@@ -61,8 +61,8 @@ static struct stack_info stackinfo_get_hyp(void)
 
 static struct stack_info stackinfo_get_hyp_kern_va(void)
 {
-	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
-	unsigned long high = low + PAGE_SIZE;
+	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
+	unsigned long high = low + NVHE_STACK_SIZE;
 
 	return (struct stack_info) {
 		.low = low,
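The invariant that patch 4's constants protect is the one described in the
comments above: each stack is carved out of a 2 * NVHE_STACK_SIZE region
aligned to its own size, with only the top half backed, so a single bit test
on SP distinguishes a valid stack address from the guard page (this is what
the tbz in host.S relies on). A minimal user-space sketch of the bit trick;
the base address and the 4K stack size are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define STACK_SHIFT	12			/* assume 4K stacks */
#define STACK_SIZE	(1UL << STACK_SHIFT)

/* Valid SP values have bit STACK_SHIFT set; guard-page values don't. */
static int sp_overflowed(uintptr_t sp)
{
	return !(sp & STACK_SIZE);
}

int main(void)
{
	/* A 2*STACK_SIZE region aligned to its size; top half is the stack. */
	uintptr_t base  = 0x40000000;		/* multiple of 2*STACK_SIZE */
	uintptr_t guard = base;			/* [base, base+4K) unbacked */
	uintptr_t stack = base + STACK_SIZE;	/* [base+4K, base+8K) mapped */

	printf("%d\n", sp_overflowed(stack + 0x800));	/* 0: valid SP */
	printf("%d\n", sp_overflowed(guard + 0xff8));	/* 1: ran into guard */
	return 0;
}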
From dea8838128c580cc6b563c901576b580f93a703e Mon Sep 17 00:00:00 2001
From: Thorsten Blum
Date: Fri, 10 Jan 2025 23:53:09 +0100
Subject: [PATCH 5/6] KVM: arm64: vgic: Use str_enabled_disabled() in
 vgic_v3_probe()

Remove hard-coded strings by using the str_enabled_disabled() helper
function.

Suggested-by: Christophe JAILLET
Signed-off-by: Thorsten Blum
Reviewed-by: Oliver Upton
Link: https://lore.kernel.org/r/20250110225310.369980-2-thorsten.blum@linux.dev
Signed-off-by: Marc Zyngier
---
 arch/arm64/kvm/vgic/vgic-v3.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index f267bc2486a18..528ee93fefd44 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -6,6 +6,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/string_choices.h>
 #include <linux/kstrtox.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
@@ -663,9 +664,9 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 	if (info->has_v4) {
 		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
 		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
-		kvm_info("GICv4%s support %sabled\n",
+		kvm_info("GICv4%s support %s\n",
 			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
-			 gicv4_enable ? "en" : "dis");
+			 str_enabled_disabled(gicv4_enable));
 	}
 
 	kvm_vgic_global_state.vcpu_base = 0;
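For reference, str_enabled_disabled() comes from <linux/string_choices.h>
and simply maps a boolean to a full word, which keeps the log message
grep-able: searching the tree for "disabled" now matches, unlike the old
"%sabled" format string. A user-space sketch of the same shape:

#include <stdbool.h>
#include <stdio.h>

/* Same shape as the kernel helper in <linux/string_choices.h>. */
static inline const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	bool gicv4_enable = true;

	/* Full words keep the message grep-able, unlike "%sabled". */
	printf("GICv4 support %s\n", str_enabled_disabled(gicv4_enable));
	return 0;
}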
From a7f1fa5564be565bd4bc18875bb46ffd0c01d292 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 9 Jan 2025 16:38:36 -0600
Subject: [PATCH 6/6] KVM: arm64: Explicitly handle BRBE traps as UNDEFINED

The Branch Record Buffer Extension (BRBE) adds a number of system
registers and instructions which we don't currently intend to expose to
guests. Our existing logic handles this safely, but it could be
improved with some explicit handling of BRBE.

KVM currently hides BRBE from guests: the cpufeature code's
ftr_id_aa64dfr0[] table doesn't have an entry for the BRBE field, and
so this will be zero in the sanitised value of ID_AA64DFR0 exposed to
guests via read_sanitised_id_aa64dfr0_el1().

KVM currently traps BRBE usage from guests: the default configuration
of the fine-grained trap controls HDFGRTR_EL2.{nBRBDATA,nBRBCTL,nBRBIDR}
and HFGITR_EL2.{nBRBINJ,nBRBIALL} causes these to be trapped to EL2.

Well-behaved guests shouldn't try to use the registers or instructions,
but badly-behaved guests could use these, resulting in unnecessary
warnings from KVM before it injects an UNDEF, e.g.

| kvm [197]: Unsupported guest access at: 401c98
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 0), Op2( 0), func_read },
| kvm [197]: Unsupported guest access at: 401d04
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 0), Op2( 1), func_read },
| kvm [197]: Unsupported guest access at: 401d70
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 2), Op2( 0), func_read },
| kvm [197]: Unsupported guest access at: 401ddc
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 1), Op2( 0), func_read },
| kvm [197]: Unsupported guest access at: 401e48
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 1), Op2( 1), func_read },
| kvm [197]: Unsupported guest access at: 401eb4
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 1), Op2( 2), func_read },
| kvm [197]: Unsupported guest access at: 401f20
|  { Op0( 2), Op1( 1), CRn( 9), CRm( 0), Op2( 2), func_read },
| kvm [197]: Unsupported guest access at: 401f8c
|  { Op0( 1), Op1( 1), CRn( 7), CRm( 2), Op2( 4), func_write },
| kvm [197]: Unsupported guest access at: 401ff8
|  { Op0( 1), Op1( 1), CRn( 7), CRm( 2), Op2( 5), func_write },

As with other features that we know how to handle, these warnings
aren't particularly interesting, and we can simply treat these accesses
as UNDEFINED without any warning.

Add the necessary fine-grained undef configuration to make this happen,
as suggested by Marc Zyngier:

  https://lore.kernel.org/linux-arm-kernel/86r0czk6wd.wl-maz@kernel.org/

At the same time, update read_sanitised_id_aa64dfr0_el1() to hide BRBE
from guests, as we do for SPE. This will prevent accidentally exposing
BRBE to guests if/when ftr_id_aa64dfr0[] gains a BRBE entry.

Cc: Anshuman Khandual
Signed-off-by: Mark Rutland
Signed-off-by: Rob Herring (Arm)
Link: https://lore.kernel.org/r/20250109223836.419240-1-robh@kernel.org
Signed-off-by: Marc Zyngier
---
 arch/arm64/kvm/sys_regs.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index e2a5c2918d9e5..bc5a10b590ad3 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1806,6 +1806,9 @@ static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
 	/* Hide SPE from guests */
 	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
 
+	/* Hide BRBE from guests */
+	val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
+
 	return val;
 }
 
@@ -4973,6 +4976,14 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
 		kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
 						  HAFGRTR_EL2_RES1);
 
+	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP)) {
+		kvm->arch.fgu[HDFGRTR_GROUP] |= (HDFGRTR_EL2_nBRBDATA |
+						 HDFGRTR_EL2_nBRBCTL |
+						 HDFGRTR_EL2_nBRBIDR);
+		kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_nBRBINJ |
+						 HFGITR_EL2_nBRBIALL);
+	}
+
 	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
 out:
 	mutex_unlock(&kvm->arch.config_lock);
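A closing note on the mechanism: the 'n'-prefixed fine-grained trap bits are
negative controls (leaving them at 0 traps the access to EL2), and KVM's
per-VM fgu bitmap marks which of those trapped accesses should be turned
into a clean UNDEF for the guest. A rough user-space sketch of that decision;
the trap-bit positions below are made up for illustration, while the BRBE
field location follows the architectural ID_AA64DFR0_EL1 layout (bits
[55:52]):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* BRBE lives in ID_AA64DFR0_EL1 bits [55:52]; non-zero means implemented. */
#define ID_AA64DFR0_BRBE_SHIFT	52
#define ID_AA64DFR0_BRBE_MASK	(0xfULL << ID_AA64DFR0_BRBE_SHIFT)

/* Made-up bit positions standing in for the generated sysreg constants. */
#define HDFGRTR_nBRBDATA	(1ULL << 0)
#define HDFGRTR_nBRBCTL		(1ULL << 1)
#define HDFGRTR_nBRBIDR		(1ULL << 2)

static bool has_brbe(uint64_t dfr0)
{
	return (dfr0 & ID_AA64DFR0_BRBE_MASK) != 0;
}

int main(void)
{
	uint64_t dfr0 = 0;	/* guest's sanitised ID_AA64DFR0_EL1 */
	uint64_t fgu = 0;	/* trapped accesses to treat as UNDEFINED */

	/* Feature absent for the guest => mark its registers as undef, so
	 * a trapped access raises a clean UNDEF instead of a noisy warning. */
	if (!has_brbe(dfr0))
		fgu |= HDFGRTR_nBRBDATA | HDFGRTR_nBRBCTL | HDFGRTR_nBRBIDR;

	printf("fgu = %#llx\n", (unsigned long long)fgu);
	return 0;
}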