KVM: arm64: Introduce __pkvm_host_share_guest()
In preparation for handling guest stage-2 mappings at EL2, introduce a
new pKVM hypercall allowing the host to share pages with non-protected
guests.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20241218194059.3670226-11-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
Quentin Perret authored and Marc Zyngier committed Dec 20, 2024
1 parent f7d03fc commit d0bd3e6
Showing 7 changed files with 123 additions and 1 deletion.
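
For context, a minimal sketch of how the host side might invoke the new
hypercall; the wrapper name below is hypothetical and the actual caller is
only introduced by later patches in the series, but kvm_call_hyp_nvhe() and
the hypercall name are real:

	/* Hypothetical host-side wrapper; the real caller lands in later patches. */
	static int host_share_page_with_guest(u64 pfn, u64 gfn,
					      enum kvm_pgtable_prot prot)
	{
		/* Traps to EL2 and dispatches to handle___pkvm_host_share_guest(). */
		return kvm_call_hyp_nvhe(__pkvm_host_share_guest, pfn, gfn, prot);
	}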
1 change: 1 addition & 0 deletions arch/arm64/include/asm/kvm_asm.h
@@ -65,6 +65,7 @@ enum __kvm_host_smccc_func {
 	/* Hypercalls available after pKVM finalisation */
 	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
 	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
+	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
 	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
 	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
 	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
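
The position of the new entry matters: host hypercall IDs are derived from
the enum value. For reference, the ID construction in the same header (quoted
from kvm_asm.h, not part of this patch) looks like:

	#define KVM_HOST_SMCCC_ID(id)						\
		ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
				   ARM_SMCCC_SMC_64,				\
				   ARM_SMCCC_OWNER_VENDOR_HYP,			\
				   (id))

	#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)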
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/kvm_host.h
@@ -771,6 +771,9 @@ struct kvm_vcpu_arch {
 	/* Cache some mmu pages needed inside spinlock regions */
 	struct kvm_mmu_memory_cache mmu_page_cache;
 
+	/* Pages to top-up the pKVM/EL2 guest pool */
+	struct kvm_hyp_memcache pkvm_memcache;
+
 	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
 	u64 vsesr_el2;
 
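
The EL2 handler drains this per-vCPU memcache for guest stage-2 page-table
pages, so the host must keep it topped up before sharing. A minimal sketch
using the existing topup_hyp_memcache() helper, assuming its two-argument
form; the call site and the page count are illustrative, not from this patch:

	/* Hypothetical top-up before issuing __pkvm_host_share_guest(). */
	static int topup_before_share(struct kvm_vcpu *vcpu)
	{
		/* Placeholder: enough pages for a worst-case 4-level stage-2 walk. */
		unsigned long min_pages = 4;

		return topup_hyp_memcache(&vcpu->arch.pkvm_memcache, min_pages);
	}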
2 changes: 2 additions & 0 deletions arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -39,6 +39,8 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
 int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
 int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
 int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+			    enum kvm_pgtable_prot prot);
 
 bool addr_is_memory(phys_addr_t phys);
 int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
4 changes: 3 additions & 1 deletion arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -46,6 +46,8 @@ struct hyp_page {
 
 	/* Host (non-meta) state. Guarded by the host stage-2 lock. */
 	enum pkvm_page_state host_state : 8;
+
+	u32 host_share_guest_count;
 };
 
 extern u64 __hyp_vmemmap;
@@ -68,7 +70,7 @@ static inline phys_addr_t hyp_virt_to_phys(void *addr)
 
 static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
 {
-	BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u32));
+	BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
 	return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
 }
 
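
The new u32 counter grows struct hyp_page from 4 to 8 bytes, which is what
the updated BUILD_BUG_ON() asserts; the hyp vmemmap therefore now costs 8
bytes per page of memory. A sketch of the packing, assuming the pre-existing
fields (u16 refcount, u8 order) are unchanged by this patch:

	struct hyp_page {
		u16 refcount;				/* 16 bits */
		u8 order;				/*  8 bits */
		enum pkvm_page_state host_state : 8;	/*  8 bits */
		u32 host_share_guest_count;		/* 32 bits -> 8 bytes total */
	};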
34 changes: 34 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -211,6 +211,39 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
 	cpu_reg(host_ctxt, 1) = ret;
 }
 
+static int pkvm_refill_memcache(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+	struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+	return refill_memcache(&hyp_vcpu->vcpu.arch.pkvm_memcache,
+			       host_vcpu->arch.pkvm_memcache.nr_pages,
+			       &host_vcpu->arch.pkvm_memcache);
+}
+
+static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)
+{
+	DECLARE_REG(u64, pfn, host_ctxt, 1);
+	DECLARE_REG(u64, gfn, host_ctxt, 2);
+	DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+	struct pkvm_hyp_vcpu *hyp_vcpu;
+	int ret = -EINVAL;
+
+	if (!is_protected_kvm_enabled())
+		goto out;
+
+	hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
+	if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
+		goto out;
+
+	ret = pkvm_refill_memcache(hyp_vcpu);
+	if (ret)
+		goto out;
+
+	ret = __pkvm_host_share_guest(pfn, gfn, hyp_vcpu, prot);
+out:
+	cpu_reg(host_ctxt, 1) = ret;
+}
+
 static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
 {
 	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
@@ -420,6 +453,7 @@ static const hcall_t host_hcall[] = {
 
 	HANDLE_FUNC(__pkvm_host_share_hyp),
 	HANDLE_FUNC(__pkvm_host_unshare_hyp),
+	HANDLE_FUNC(__pkvm_host_share_guest),
 	HANDLE_FUNC(__kvm_adjust_pc),
 	HANDLE_FUNC(__kvm_vcpu_run),
 	HANDLE_FUNC(__kvm_flush_vm_context),
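
The handler reads its arguments from, and writes its result to, the saved
host context. A sketch of what kvm_call_hyp_nvhe() does under the hood for
this call; illustrative only, the real plumbing lives in kvm_asm.h:

	/* Arguments travel in x1..x3; the return code comes back in x1 (res.a1). */
	struct arm_smccc_res res;

	arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__pkvm_host_share_guest),
			  pfn, gfn, prot, &res);
	/* res.a1 holds the value the handler stored via cpu_reg(host_ctxt, 1). */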
72 changes: 72 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -867,6 +867,27 @@ static int hyp_complete_donation(u64 addr,
 	return pkvm_create_mappings_locked(start, end, prot);
 }
 
+static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
+{
+	if (!kvm_pte_valid(pte))
+		return PKVM_NOPAGE;
+
+	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
+}
+
+static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
+					  u64 size, enum pkvm_page_state state)
+{
+	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+	struct check_walk_data d = {
+		.desired	= state,
+		.get_page_state	= guest_get_page_state,
+	};
+
+	hyp_assert_lock_held(&vm->lock);
+	return check_page_state_range(&vm->pgt, addr, size, &d);
+}
+
 static int check_share(struct pkvm_mem_share *share)
 {
 	const struct pkvm_mem_transition *tx = &share->tx;
@@ -1349,3 +1370,54 @@ int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
 
 	return ret;
 }
+
+int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
+			    enum kvm_pgtable_prot prot)
+{
+	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
+	u64 phys = hyp_pfn_to_phys(pfn);
+	u64 ipa = hyp_pfn_to_phys(gfn);
+	struct hyp_page *page;
+	int ret;
+
+	if (prot & ~KVM_PGTABLE_PROT_RWX)
+		return -EINVAL;
+
+	ret = check_range_allowed_memory(phys, phys + PAGE_SIZE);
+	if (ret)
+		return ret;
+
+	host_lock_component();
+	guest_lock_component(vm);
+
+	ret = __guest_check_page_state_range(vcpu, ipa, PAGE_SIZE, PKVM_NOPAGE);
+	if (ret)
+		goto unlock;
+
+	page = hyp_phys_to_page(phys);
+	switch (page->host_state) {
+	case PKVM_PAGE_OWNED:
+		WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_OWNED));
+		break;
+	case PKVM_PAGE_SHARED_OWNED:
+		if (page->host_share_guest_count)
+			break;
+		/* Only host to np-guest multi-sharing is tolerated */
+		WARN_ON(1);
+		fallthrough;
+	default:
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
+				       pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
+				       &vcpu->vcpu.arch.pkvm_memcache, 0));
+	page->host_share_guest_count++;
+
+unlock:
+	guest_unlock_component(vm);
+	host_unlock_component();
+
+	return ret;
+}
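
For reference, a sketch of the host-side state transitions this function
implements, derived from the switch statement above rather than quoted from
the patch:

	/*
	 * PKVM_PAGE_OWNED         --share-->  PKVM_PAGE_SHARED_OWNED  (count = 1)
	 * PKVM_PAGE_SHARED_OWNED  --share-->  PKVM_PAGE_SHARED_OWNED  (count++)
	 * anything else           --share-->  -EPERM
	 *
	 * The guest side of the mapping is annotated PKVM_PAGE_SHARED_BORROWED,
	 * and host_share_guest_count records how many guest mappings of the
	 * page exist, so only host -> np-guest multi-sharing is permitted.
	 */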
8 changes: 8 additions & 0 deletions arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -795,6 +795,14 @@ int __pkvm_teardown_vm(pkvm_handle_t handle)
 	/* Push the metadata pages to the teardown memcache */
 	for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
 		struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
+		struct kvm_hyp_memcache *vcpu_mc = &hyp_vcpu->vcpu.arch.pkvm_memcache;
+
+		while (vcpu_mc->nr_pages) {
+			void *addr = pop_hyp_memcache(vcpu_mc, hyp_phys_to_virt);
+
+			push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+			unmap_donated_memory_noclear(addr, PAGE_SIZE);
+		}
 
 		teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
 	}
