KVM: arm64: Introduce __pkvm_vcpu_{load,put}()
Rather than looking up the hyp vCPU on every run hypercall at EL2,
introduce a per-CPU 'loaded_hyp_vcpu' tracking variable which is updated
by a pair of load/put hypercalls called directly from
kvm_arch_vcpu_{load,put}() when pKVM is enabled.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20241218194059.3670226-10-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
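
Before the diff, a sketch may help. Below is a minimal, self-contained C model of the two invariants the new per-CPU tracking enforces: a CPU must put its loaded vCPU before loading another, and a vCPU can be loaded on at most one CPU at a time. This is toy user-space code with invented names (vcpu_load(), vcpu_put()), not kernel code.

/*
 * Toy user-space model of the 'loaded_hyp_vcpu' tracking introduced below.
 * Not kernel code: vcpu_load()/vcpu_put() are hypothetical helpers.
 */
#include <stdio.h>

#define NR_CPUS 4

struct hyp_vcpu {
	int id;
	struct hyp_vcpu **loaded_slot;	/* backpointer; NULL if not loaded */
};

/* One "currently loaded" slot per CPU, mirroring DEFINE_PER_CPU. */
static struct hyp_vcpu *loaded_hyp_vcpu[NR_CPUS];

static struct hyp_vcpu *vcpu_load(int cpu, struct hyp_vcpu *v)
{
	if (loaded_hyp_vcpu[cpu])	/* must put the old vCPU first */
		return NULL;
	if (v->loaded_slot)		/* already loaded on another CPU */
		return NULL;
	v->loaded_slot = &loaded_hyp_vcpu[cpu];
	loaded_hyp_vcpu[cpu] = v;
	return v;
}

static void vcpu_put(int cpu)
{
	struct hyp_vcpu *v = loaded_hyp_vcpu[cpu];

	if (v) {
		v->loaded_slot = NULL;
		loaded_hyp_vcpu[cpu] = NULL;
	}
}

int main(void)
{
	struct hyp_vcpu v = { .id = 0 };

	printf("cpu0 load: %s\n", vcpu_load(0, &v) ? "ok" : "rejected");
	printf("cpu1 load: %s\n", vcpu_load(1, &v) ? "ok" : "rejected");
	vcpu_put(0);
	printf("cpu1 load: %s\n", vcpu_load(1, &v) ? "ok" : "rejected");
	return 0;
}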
Marc Zyngier committed Dec 20, 2024
1 parent 99996d5 commit f7d03fc
Showing 6 changed files with 93 additions and 12 deletions.
arch/arm64/include/asm/kvm_asm.h (2 additions, 0 deletions)

@@ -79,6 +79,8 @@ enum __kvm_host_smccc_func {
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
 	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
 	__KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
+	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
 };
 
 #define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
arch/arm64/kvm/arm.c (14 additions, 0 deletions)

@@ -619,12 +619,26 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	kvm_arch_vcpu_load_debug_state_flags(vcpu);
 
+	if (is_protected_kvm_enabled()) {
+		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
+				  vcpu->kvm->arch.pkvm.handle,
+				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
+		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
+			     &vcpu->arch.vgic_cpu.vgic_v3);
+	}
+
 	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
 		vcpu_set_on_unsupported_cpu(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	if (is_protected_kvm_enabled()) {
+		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
+			     &vcpu->arch.vgic_cpu.vgic_v3);
+		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
+	}
+
 	kvm_arch_vcpu_put_debug_state_flags(vcpu);
 	kvm_arch_vcpu_put_fp(vcpu);
 	if (has_vhe())
arch/arm64/kvm/hyp/include/nvhe/pkvm.h (7 additions, 0 deletions)

@@ -20,6 +20,12 @@ struct pkvm_hyp_vcpu {
 
 	/* Backpointer to the host's (untrusted) vCPU instance. */
 	struct kvm_vcpu *host_vcpu;
+
+	/*
+	 * If this hyp vCPU is loaded, then this is a backpointer to the
+	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
+	 */
+	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
 };
 
 /*
@@ -69,6 +75,7 @@ int __pkvm_teardown_vm(pkvm_handle_t handle);
 struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 					 unsigned int vcpu_idx);
 void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);
 
 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
 void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);
arch/arm64/kvm/hyp/nvhe/hyp-main.c (37 additions, 10 deletions)

@@ -141,16 +141,46 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
 }
 
+static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+	DECLARE_REG(unsigned int, vcpu_idx, host_ctxt, 2);
+	DECLARE_REG(u64, hcr_el2, host_ctxt, 3);
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+
+	if (!is_protected_kvm_enabled())
+		return;
+
+	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
+	if (!hyp_vcpu)
+		return;
+
+	if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
+		/* Propagate WFx trapping flags */
+		hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI);
+		hyp_vcpu->vcpu.arch.hcr_el2 |= hcr_el2 & (HCR_TWE | HCR_TWI);
+	}
+}
+
+static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
+{
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+
+	if (!is_protected_kvm_enabled())
+		return;
+
+	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+	if (hyp_vcpu)
+		pkvm_put_hyp_vcpu(hyp_vcpu);
+}
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
 	int ret;
 
-	host_vcpu = kern_hyp_va(host_vcpu);
-
 	if (unlikely(is_protected_kvm_enabled())) {
-		struct pkvm_hyp_vcpu *hyp_vcpu;
-		struct kvm *host_kvm;
+		struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
 
 		/*
 		 * KVM (and pKVM) doesn't support SME guests for now, and
@@ -163,9 +193,6 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 			goto out;
 		}
 
-		host_kvm = kern_hyp_va(host_vcpu->kvm);
-		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
-					      host_vcpu->vcpu_idx);
 		if (!hyp_vcpu) {
 			ret = -EINVAL;
 			goto out;
@@ -176,12 +203,10 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
 
 		sync_hyp_vcpu(hyp_vcpu);
-		pkvm_put_hyp_vcpu(hyp_vcpu);
 	} else {
 		/* The host is fully trusted, run its vCPU directly. */
-		ret = __kvm_vcpu_run(host_vcpu);
+		ret = __kvm_vcpu_run(kern_hyp_va(host_vcpu));
 	}
-
 out:
 	cpu_reg(host_ctxt, 1) = ret;
 }
@@ -409,6 +434,8 @@ static const hcall_t host_hcall[] = {
 	HANDLE_FUNC(__pkvm_init_vm),
 	HANDLE_FUNC(__pkvm_init_vcpu),
 	HANDLE_FUNC(__pkvm_teardown_vm),
+	HANDLE_FUNC(__pkvm_vcpu_load),
+	HANDLE_FUNC(__pkvm_vcpu_put),
 };
 
 static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
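
An aside on the hcall plumbing above, for context: handle___pkvm_vcpu_load() pulls its arguments straight out of the saved host register file, so argument N of the hypercall arrives in general-purpose register N of host_ctxt. The DECLARE_REG/cpu_reg helpers are defined elsewhere in the nVHE code (trap_handler.h, if memory serves) and expand roughly to:

/* Paraphrased for context; not part of this commit. */
#define cpu_reg(ctxt, r)	(ctxt)->regs.regs[r]
#define DECLARE_REG(type, name, ctxt, reg)	\
				type name = (type)cpu_reg(ctxt, (reg))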
arch/arm64/kvm/hyp/nvhe/pkvm.c (29 additions, 0 deletions)

@@ -23,6 +23,12 @@ unsigned int kvm_arm_vmid_bits;
 
 unsigned int kvm_host_sve_max_vl;
 
+/*
+ * The currently loaded hyp vCPU for each physical CPU. Used only when
+ * protected KVM is enabled, but for both protected and non-protected VMs.
+ */
+static DEFINE_PER_CPU(struct pkvm_hyp_vcpu *, loaded_hyp_vcpu);
+
 /*
  * Set trap register values based on features in ID_AA64PFR0.
  */
@@ -306,15 +312,30 @@ struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
 	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
 	struct pkvm_hyp_vm *hyp_vm;
 
+	/* Cannot load a new vcpu without putting the old one first. */
+	if (__this_cpu_read(loaded_hyp_vcpu))
+		return NULL;
+
 	hyp_spin_lock(&vm_table_lock);
 	hyp_vm = get_vm_by_handle(handle);
 	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
 		goto unlock;
 
 	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+
+	/* Ensure vcpu isn't loaded on more than one cpu simultaneously. */
+	if (unlikely(hyp_vcpu->loaded_hyp_vcpu)) {
+		hyp_vcpu = NULL;
+		goto unlock;
+	}
+
+	hyp_vcpu->loaded_hyp_vcpu = this_cpu_ptr(&loaded_hyp_vcpu);
 	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
 unlock:
 	hyp_spin_unlock(&vm_table_lock);
+
+	if (hyp_vcpu)
+		__this_cpu_write(loaded_hyp_vcpu, hyp_vcpu);
 	return hyp_vcpu;
 }
 
@@ -323,10 +344,18 @@ void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
 
 	hyp_spin_lock(&vm_table_lock);
+	hyp_vcpu->loaded_hyp_vcpu = NULL;
+	__this_cpu_write(loaded_hyp_vcpu, NULL);
 	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
 	hyp_spin_unlock(&vm_table_lock);
 }
 
+struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void)
+{
+	return __this_cpu_read(loaded_hyp_vcpu);
+
+}
+
 struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle)
 {
 	struct pkvm_hyp_vm *hyp_vm;
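
One behavioural consequence of the pkvm.c changes worth spelling out: the VM page reference taken in pkvm_load_hyp_vcpu() is now held across every run between a load and the matching put, rather than being taken and dropped on each run hypercall. A toy, runnable C model of that pairing (invented names, not kernel code):

/* Toy model, not kernel code: load()/run()/put() are invented names. */
#include <assert.h>
#include <stdio.h>

static int vm_refcount;	/* stands in for the hyp_page refcount on the VM */

static void load(void) { vm_refcount++; }	/* pkvm_load_hyp_vcpu() */
static void put(void)  { vm_refcount--; }	/* pkvm_put_hyp_vcpu()  */

static void run(void)
{
	/* The VM must stay pinned for every run between load and put. */
	assert(vm_refcount > 0);
}

int main(void)
{
	load();			/* __pkvm_vcpu_load */
	for (int i = 0; i < 3; i++)
		run();		/* many run hypercalls, one VM reference */
	put();			/* __pkvm_vcpu_put */
	printf("vm_refcount = %d\n", vm_refcount);
	return 0;
}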
arch/arm64/kvm/vgic/vgic-v3.c (4 additions, 2 deletions)

@@ -734,7 +734,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
+	if (likely(!is_protected_kvm_enabled()))
+		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);
 
 	if (has_vhe())
 		__vgic_v3_activate_traps(cpu_if);
@@ -746,7 +747,8 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 
-	kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
+	if (likely(!is_protected_kvm_enabled()))
+		kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);
 	WARN_ON(vgic_v4_put(vcpu));
 
 	if (has_vhe())
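
A note on the guards above (my reading of the patch, not text from it): with pKVM enabled, the VMCR/APR save and restore are now driven from kvm_arch_vcpu_{load,put}() in the arm.c hunk, next to the __pkvm_vcpu_{load,put} hypercalls, so vgic_v3_load() and vgic_v3_put() must not repeat the work. In the common non-protected case, the likely() hint keeps the fast path unchanged.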
