Skip to content

Commit

Permalink
KVM: arm64: Drop pkvm_mem_transition for host/hyp donations
Browse files Browse the repository at this point in the history
Simplify the __pkvm_host_donate_hyp() and __pkvm_hyp_donate_host() paths
by not using the pkvm_mem_transition machinery. As the last users of
this, also remove all the now unused code.

No functional changes intended.

Signed-off-by: Quentin Perret <qperret@google.com>
Link: https://lore.kernel.org/r/20250110121936.1559655-4-qperret@google.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
  • Loading branch information
Quentin Perret authored and Marc Zyngier committed Jan 12, 2025
1 parent 7cbf7c3 commit 6f91d31
Showing 1 changed file with 32 additions and 253 deletions.
285 changes: 32 additions & 253 deletions arch/arm64/kvm/hyp/nvhe/mem_protect.c
Original file line number Diff line number Diff line change
Expand Up @@ -583,39 +583,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
BUG_ON(ret && ret != -EAGAIN);
}

/*
 * Describes one memory ownership/permission transition between two pKVM
 * components: an initiator gives pages and a completer receives them.
 */
struct pkvm_mem_transition {
	/* Number of contiguous pages covered by this transition. */
	u64 nr_pages;

	struct {
		enum pkvm_component_id id;
		/* Address in the initiator's address space */
		u64 addr;

		/* Per-initiator view of the same range (selected by 'id'). */
		union {
			struct {
				/* Address in the completer's address space */
				u64 completer_addr;
			} host;
			struct {
				u64 completer_addr;
			} hyp;
		};
	} initiator;

	struct {
		enum pkvm_component_id id;
	} completer;
};

/* A share: the transition plus the protections granted to the completer. */
struct pkvm_mem_share {
	const struct pkvm_mem_transition tx;
	const enum kvm_pgtable_prot completer_prot;
};

/* A donation: ownership moves wholesale, so only the transition is needed. */
struct pkvm_mem_donation {
	const struct pkvm_mem_transition tx;
};

struct check_walk_data {
enum pkvm_page_state desired;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
Expand Down Expand Up @@ -675,56 +642,6 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return 0;
}

/*
 * Initiator (host) side of a donation request: publish the address the
 * completer will use, then verify the host exclusively owns the range.
 */
static int host_request_owned_transition(u64 *completer_addr,
					 const struct pkvm_mem_transition *tx)
{
	u64 nr_bytes = PAGE_SIZE * tx->nr_pages;

	*completer_addr = tx->initiator.host.completer_addr;
	return __host_check_page_state_range(tx->initiator.addr, nr_bytes,
					     PKVM_PAGE_OWNED);
}

/*
 * Initiator (host) side of a donation commit: hand the range over to the
 * completer by rewriting ownership in the host stage-2.
 */
static int host_initiate_donation(u64 *completer_addr,
				  const struct pkvm_mem_transition *tx)
{
	u64 nr_bytes = PAGE_SIZE * tx->nr_pages;
	u8 new_owner = tx->completer.id;

	*completer_addr = tx->initiator.host.completer_addr;
	return host_stage2_set_owner_locked(tx->initiator.addr, nr_bytes,
					    new_owner);
}

static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
tx->initiator.id != PKVM_ID_HYP);
}

/*
 * Completer (host) side acknowledgement: check the whole range is in the
 * expected state, unless the check can safely be skipped.
 */
static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
				 enum pkvm_page_state state)
{
	if (__host_ack_skip_pgtable_check(tx))
		return 0;

	return __host_check_page_state_range(addr, tx->nr_pages * PAGE_SIZE,
					     state);
}

/*
 * Completer (host) ack for a donation: the host must not currently have
 * the pages mapped (PKVM_NOPAGE).
 */
static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
	return __host_ack_transition(addr, tx, PKVM_NOPAGE);
}

/*
 * Completer (host) side of a donation commit: take ownership of the range
 * in the host stage-2.
 */
static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
	u64 nr_bytes = PAGE_SIZE * tx->nr_pages;

	return host_stage2_set_owner_locked(addr, nr_bytes, tx->completer.id);
}

static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
{
if (!kvm_pte_valid(pte))
Expand All @@ -745,52 +662,6 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}

/*
 * Initiator (hyp) side of a donation request: publish the completer's
 * address and verify the hypervisor exclusively owns the range.
 */
static int hyp_request_donation(u64 *completer_addr,
				const struct pkvm_mem_transition *tx)
{
	u64 nr_bytes = PAGE_SIZE * tx->nr_pages;

	*completer_addr = tx->initiator.hyp.completer_addr;
	return __hyp_check_page_state_range(tx->initiator.addr, nr_bytes,
					    PKVM_PAGE_OWNED);
}

/*
 * Initiator (hyp) side of a donation commit: tear down the hyp mapping of
 * the range. A partial unmap is reported as -EFAULT.
 */
static int hyp_initiate_donation(u64 *completer_addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 nr_bytes = PAGE_SIZE * tx->nr_pages;
	int unmapped;

	*completer_addr = tx->initiator.hyp.completer_addr;
	unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr,
					 nr_bytes);
	if (unmapped != nr_bytes)
		return -EFAULT;

	return 0;
}

static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
tx->initiator.id != PKVM_ID_HOST);
}

/*
 * Completer (hyp) ack for a donation: the hypervisor must not currently
 * have the pages mapped (PKVM_NOPAGE), unless the check can be skipped.
 */
static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
{
	if (__hyp_ack_skip_pgtable_check(tx))
		return 0;

	return __hyp_check_page_state_range(addr, tx->nr_pages * PAGE_SIZE,
					    PKVM_NOPAGE);
}

/*
 * Completer (hyp) side of a donation commit: map the range into the hyp
 * page-table as owned, with default hyp protections.
 */
static int hyp_complete_donation(u64 addr,
				 const struct pkvm_mem_transition *tx)
{
	u64 nr_bytes = tx->nr_pages * PAGE_SIZE;
	void *va = (void *)addr;

	return pkvm_create_mappings_locked(va, va + nr_bytes,
					   pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED));
}

static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
{
if (!kvm_pte_valid(pte))
Expand All @@ -812,94 +683,6 @@ static int __guest_check_page_state_range(struct pkvm_hyp_vcpu *vcpu, u64 addr,
return check_page_state_range(&vm->pgt, addr, size, &d);
}

/*
 * Validate a donation without committing anything: the initiator must own
 * the range, and the completer must be able to accept it. Returns 0 when
 * both sides agree, a negative error code otherwise.
 */
static int check_donation(struct pkvm_mem_donation *donation)
{
	const struct pkvm_mem_transition *tx = &donation->tx;
	u64 completer_addr;
	int ret;

	/* Ask the initiator to prove it owns the range. */
	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_request_owned_transition(&completer_addr, tx);
		break;
	case PKVM_ID_HYP:
		ret = hyp_request_donation(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;

	/* Then have the completer confirm it can receive the range. */
	switch (tx->completer.id) {
	case PKVM_ID_HOST:
		return host_ack_donation(completer_addr, tx);
	case PKVM_ID_HYP:
		return hyp_ack_donation(completer_addr, tx);
	default:
		return -EINVAL;
	}
}

/*
 * Commit a previously validated donation: the initiator relinquishes the
 * range, then the completer installs it on its side.
 */
static int __do_donate(struct pkvm_mem_donation *donation)
{
	const struct pkvm_mem_transition *tx = &donation->tx;
	u64 completer_addr;
	int ret;

	/* Initiator drops the range first... */
	switch (tx->initiator.id) {
	case PKVM_ID_HOST:
		ret = host_initiate_donation(&completer_addr, tx);
		break;
	case PKVM_ID_HYP:
		ret = hyp_initiate_donation(&completer_addr, tx);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;

	/* ...then the completer takes ownership. */
	switch (tx->completer.id) {
	case PKVM_ID_HOST:
		return host_complete_donation(completer_addr, tx);
	case PKVM_ID_HYP:
		return hyp_complete_donation(completer_addr, tx);
	default:
		return -EINVAL;
	}
}

/*
 * do_donate():
 *
 * Transfer ownership of a page range from one component to another; the
 * donor loses all access as a consequence.
 *
 * Initiator: OWNED => NOPAGE
 * Completer: NOPAGE => OWNED
 *
 * check_donation() may fail cleanly; once it has succeeded, a failure in
 * __do_donate() is unexpected and warns.
 */
static int do_donate(struct pkvm_mem_donation *donation)
{
	int ret = check_donation(donation);

	if (ret)
		return ret;

	return WARN_ON(__do_donate(donation));
}

int __pkvm_host_share_hyp(u64 pfn)
{
u64 phys = hyp_pfn_to_phys(pfn);
Expand Down Expand Up @@ -964,30 +747,29 @@ int __pkvm_host_unshare_hyp(u64 pfn)

int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
u64 phys = hyp_pfn_to_phys(pfn);
u64 size = PAGE_SIZE * nr_pages;
void *virt = __hyp_va(phys);
enum kvm_pgtable_prot prot;
int ret;
u64 host_addr = hyp_pfn_to_phys(pfn);
u64 hyp_addr = (u64)__hyp_va(host_addr);
struct pkvm_mem_donation donation = {
.tx = {
.nr_pages = nr_pages,
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
.host = {
.completer_addr = hyp_addr,
},
},
.completer = {
.id = PKVM_ID_HYP,
},
},
};

host_lock_component();
hyp_lock_component();

ret = do_donate(&donation);
ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
ret = __hyp_check_page_state_range((u64)virt, size, PKVM_NOPAGE);
if (ret)
goto unlock;
}

prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
WARN_ON(pkvm_create_mappings_locked(virt, virt + size, prot));
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));

unlock:
hyp_unlock_component();
host_unlock_component();

Expand All @@ -996,30 +778,27 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)

int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
u64 phys = hyp_pfn_to_phys(pfn);
u64 size = PAGE_SIZE * nr_pages;
u64 virt = (u64)__hyp_va(phys);
int ret;
u64 host_addr = hyp_pfn_to_phys(pfn);
u64 hyp_addr = (u64)__hyp_va(host_addr);
struct pkvm_mem_donation donation = {
.tx = {
.nr_pages = nr_pages,
.initiator = {
.id = PKVM_ID_HYP,
.addr = hyp_addr,
.hyp = {
.completer_addr = host_addr,
},
},
.completer = {
.id = PKVM_ID_HOST,
},
},
};

host_lock_component();
hyp_lock_component();

ret = do_donate(&donation);
ret = __hyp_check_page_state_range(virt, size, PKVM_PAGE_OWNED);
if (ret)
goto unlock;
if (IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
if (ret)
goto unlock;
}

WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));

unlock:
hyp_unlock_component();
host_unlock_component();

Expand Down

0 comments on commit 6f91d31

Please sign in to comment.