KVM: arm64: Make block->table PTE changes parallel-aware
In order to service stage-2 faults in parallel, stage-2 table walkers
must take exclusive ownership of the PTE being worked on. An additional
requirement of the architecture is that software must perform a
'break-before-make' operation when changing the block size used for
mapping memory.

Roll these two concepts together into helpers for performing a
'break-before-make' sequence. Use a special PTE value to indicate a PTE
has been locked by a software walker. Additionally, use an atomic
compare-exchange to 'break' the PTE when the stage-2 page tables are
possibly shared with another software walker. Elide the DSB + TLBI if
the evicted PTE was invalid (and thus not subject to break-before-make).

All of the atomics do nothing for now, as the stage-2 walker isn't fully
ready to perform parallel walks.
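To illustrate the sequence the new helpers implement, here is a minimal, self-contained C sketch of the 'break-before-make' protocol using C11 atomics in place of the kernel's primitives. The pte_t type, the bit layout, and tlbi_by_ipa() below are illustrative stand-ins, not the kvm_pgtable definitions from the patch:

/*
 * Minimal model of the 'break-before-make' protocol; illustrative only.
 * pte_t, the bit layout, and tlbi_by_ipa() are stand-ins for the real
 * kvm_pgtable definitions.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pte_t;

#define PTE_VALID	(1ULL << 0)
#define PTE_LOCKED	(1ULL << 10)	/* invalid PTE repurposed as a lock marker */

static void tlbi_by_ipa(uint64_t ipa)
{
	/* stand-in for the DSB + TLB invalidation of the old mapping */
}

/*
 * 'Break': take exclusive ownership of the PTE with a compare-exchange.
 * A failed cmpxchg means another walker owns the PTE and the fault must
 * be retried. The TLB invalidation is elided for an invalid old PTE,
 * which cannot be cached in the TLB and so is not subject to
 * break-before-make.
 */
static bool try_break_pte(_Atomic pte_t *ptep, pte_t old, uint64_t ipa)
{
	if (old & PTE_LOCKED)
		return false;	/* another walker already broke this PTE */

	if (!atomic_compare_exchange_strong(ptep, &old, PTE_LOCKED))
		return false;

	if (old & PTE_VALID)
		tlbi_by_ipa(ipa);

	return true;
}

/* 'Make': publish the new PTE; the release store orders prior table setup. */
static void make_pte(_Atomic pte_t *ptep, pte_t new_pte)
{
	atomic_store_explicit(ptep, new_pte, memory_order_release);
}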

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221107215855.1895367-1-oliver.upton@linux.dev
Oliver Upton authored and Marc Zyngier committed Nov 10, 2022
1 parent 331aa3a commit 0ab12f3
Showing 1 changed file with 75 additions and 5 deletions.
arch/arm64/kvm/hyp/pgtable.c
@@ -49,6 +49,12 @@
 #define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
 #define KVM_MAX_OWNER_ID		1
 
+/*
+ * Used to indicate a pte for which a 'break-before-make' sequence is in
+ * progress.
+ */
+#define KVM_INVALID_PTE_LOCKED		BIT(10)
+
 struct kvm_pgtable_walk_data {
 	struct kvm_pgtable_walker	*walker;
 
@@ -674,6 +680,11 @@ static bool stage2_pte_is_counted(kvm_pte_t pte)
 	return !!pte;
 }
 
+static bool stage2_pte_is_locked(kvm_pte_t pte)
+{
+	return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
+}
+
 static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
 {
 	if (!kvm_pgtable_walk_shared(ctx)) {
@@ -684,6 +695,64 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
 	return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
 }
 
+/**
+ * stage2_try_break_pte() - Invalidates a pte according to the
+ *			    'break-before-make' requirements of the
+ *			    architecture.
+ *
+ * @ctx: context of the visited pte.
+ * @mmu: stage-2 mmu
+ *
+ * Returns: true if the pte was successfully broken.
+ *
+ * If the removed pte was valid, performs the necessary serialization and TLB
+ * invalidation for the old value. For counted ptes, drops the reference count
+ * on the containing table page.
+ */
+static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
+				 struct kvm_s2_mmu *mmu)
+{
+	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+
+	if (stage2_pte_is_locked(ctx->old)) {
+		/*
+		 * Should never occur if this walker has exclusive access to the
+		 * page tables.
+		 */
+		WARN_ON(!kvm_pgtable_walk_shared(ctx));
+		return false;
+	}
+
+	if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
+		return false;
+
+	/*
+	 * Perform the appropriate TLB invalidation based on the evicted pte
+	 * value (if any).
+	 */
+	if (kvm_pte_table(ctx->old, ctx->level))
+		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+	else if (kvm_pte_valid(ctx->old))
+		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+
+	if (stage2_pte_is_counted(ctx->old))
+		mm_ops->put_page(ctx->ptep);
+
+	return true;
+}
+
+static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
+{
+	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
+
+	WARN_ON(!stage2_pte_is_locked(*ctx->ptep));
+
+	if (stage2_pte_is_counted(new))
+		mm_ops->get_page(ctx->ptep);
+
+	smp_store_release(ctx->ptep, new);
+}
+
 static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
 			   struct kvm_pgtable_mm_ops *mm_ops)
 {
@@ -812,17 +881,18 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 	if (!childp)
 		return -ENOMEM;
 
+	if (!stage2_try_break_pte(ctx, data->mmu)) {
+		mm_ops->put_page(childp);
+		return -EAGAIN;
+	}
+
 	/*
 	 * If we've run into an existing block mapping then replace it with
 	 * a table. Accesses beyond 'end' that fall within the new table
 	 * will be mapped lazily.
	 */
-	if (stage2_pte_is_counted(ctx->old))
-		stage2_put_pte(ctx, data->mmu, mm_ops);
-
 	new = kvm_init_table_pte(childp, mm_ops);
-	mm_ops->get_page(ctx->ptep);
-	smp_store_release(ctx->ptep, new);
+	stage2_make_pte(ctx, new);
 
 	return 0;
 }
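When a shared walker loses the race in stage2_map_walk_leaf() above, it drops the freshly allocated table page and returns -EAGAIN so the walk can simply be attempted again. A hypothetical caller-side sketch of that retry is below; the names and types are illustrative, not the kernel's fault-handling code (in KVM proper the -EAGAIN roughly translates into the vCPU re-entering the guest and re-faulting if needed):

#include <errno.h>
#include <stdint.h>

struct stage2_mmu;					/* illustrative opaque handle */
int map_at(struct stage2_mmu *mmu, uint64_t ipa);	/* stand-in for the map walker */

/* Hypothetical retry on a lost race: -EAGAIN just means "walk again". */
static int handle_stage2_fault(struct stage2_mmu *mmu, uint64_t ipa)
{
	int ret;

	do {
		ret = map_at(mmu, ipa);
	} while (ret == -EAGAIN);

	return ret;
}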