KVM: arm64: Use the pKVM hyp vCPU structure in handle___kvm_vcpu_run()
As a stepping stone towards deprivileging the host's access to the
guest's vCPU structures, introduce some naive flush/sync routines to
copy most of the host vCPU into the hyp vCPU on vCPU run and back
again on return to EL1.

This allows us to run using the pKVM hyp structures when KVM is
initialised in protected mode.

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Co-developed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-27-will@kernel.org
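
In essence, the new protected-mode run path boils down to the following (a condensed view of the hyp-main.c diff below; vCPU lookup and error handling omitted):

        /* Condensed sketch of the flow added to handle___kvm_vcpu_run(). */
        flush_hyp_vcpu(hyp_vcpu);               /* copy host vCPU state into the hyp copy */
        ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);  /* run the guest on the hyp-owned structure */
        sync_hyp_vcpu(hyp_vcpu);                /* copy the results back to the host vCPU */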
Will Deacon authored and Marc Zyngier committed Nov 11, 2022
1 parent 169cd0f commit be66e67
Showing 3 changed files with 109 additions and 2 deletions.
4 changes: 4 additions & 0 deletions arch/arm64/kvm/hyp/include/nvhe/pkvm.h
@@ -61,4 +61,8 @@ int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
 			     unsigned long vcpu_hva);
 int __pkvm_teardown_vm(pkvm_handle_t handle);
 
+struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
+					 unsigned int vcpu_idx);
+void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
+
 #endif /* __ARM64_KVM_NVHE_PKVM_H__ */
79 changes: 77 additions & 2 deletions arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -22,11 +22,86 @@ DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
 
+static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+	hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
+
+	hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
+	hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+
+	hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
+
+	hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
+	hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
+	hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
+
+	hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
+	hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
+
+	hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
+	hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
+
+	hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
+
+	hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
+}
+
+static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+	struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
+	struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
+	unsigned int i;
+
+	host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
+
+	host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
+	host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
+
+	host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
+
+	host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
+	host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
+
+	host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
+		host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
+}
+
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 {
-	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+	DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
+	int ret;
+
+	host_vcpu = kern_hyp_va(host_vcpu);
+
+	if (unlikely(is_protected_kvm_enabled())) {
+		struct pkvm_hyp_vcpu *hyp_vcpu;
+		struct kvm *host_kvm;
+
+		host_kvm = kern_hyp_va(host_vcpu->kvm);
+		hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
+					      host_vcpu->vcpu_idx);
+		if (!hyp_vcpu) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		flush_hyp_vcpu(hyp_vcpu);
+
+		ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
+
+		sync_hyp_vcpu(hyp_vcpu);
+		pkvm_put_hyp_vcpu(hyp_vcpu);
+	} else {
+		/* The host is fully trusted, run its vCPU directly. */
+		ret = __kvm_vcpu_run(host_vcpu);
+	}
 
-	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+out:
+	cpu_reg(host_ctxt, 1) = ret;
 }
 
 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
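
For context, the EL1 side is unchanged by this patch: the host still issues the __kvm_vcpu_run hypercall with its own vCPU pointer in argument register 1, which is why the handler reads it with DECLARE_REG(..., host_ctxt, 1) and passes the result back through cpu_reg(host_ctxt, 1). A rough sketch of the existing caller (abridged, not part of this diff):

        /* arch/arm64/kvm/arm.c (existing code, abridged): the host's vCPU
         * pointer travels to EL2 in argument register 1, and the handler's
         * return value comes back the same way. */
        ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);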
28 changes: 28 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -241,6 +241,33 @@ static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
 	return vm_table[idx];
 }
 
+struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
+					 unsigned int vcpu_idx)
+{
+	struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
+	struct pkvm_hyp_vm *hyp_vm;
+
+	hyp_spin_lock(&vm_table_lock);
+	hyp_vm = get_vm_by_handle(handle);
+	if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
+		goto unlock;
+
+	hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+	hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
+unlock:
+	hyp_spin_unlock(&vm_table_lock);
+	return hyp_vcpu;
+}
+
+void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+
+	hyp_spin_lock(&vm_table_lock);
+	hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
+	hyp_spin_unlock(&vm_table_lock);
+}
+
 static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
 {
 	if (host_vcpu)
@@ -286,6 +313,7 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
 	hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
 
 	hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
+	hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
 done:
 	if (ret)
 		unpin_host_vcpu(host_vcpu);
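
The load/put pair is the intended bracket for any hyp-side access to a guest vCPU: pkvm_load_hyp_vcpu() takes a reference on the page backing the owning VM under vm_table_lock, so the VM cannot be torn down while one of its vCPUs is in use. A hypothetical caller (handle and idx are illustrative, not from this patch):

        /* Hypothetical usage sketch: every successful load must be paired
         * with a put, which drops the VM page reference taken on load. */
        struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_load_hyp_vcpu(handle, idx);
        if (hyp_vcpu) {
                /* ... operate on hyp_vcpu->vcpu at EL2 ... */
                pkvm_put_hyp_vcpu(hyp_vcpu);
        }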
