KVM: RISC-V: Use common KVM implementation of MMU memory caches
Use common KVM's implementation of the MMU memory caches, which for all
intents and purposes is semantically identical to RISC-V's version, the
only difference being that the common implementation will fall back to an
atomic allocation if there's a KVM bug that triggers a cache underflow.

RISC-V appears to have based its MMU code on arm64 before the conversion
to the common caches in commit c1a33ae ("KVM: arm64: Use common KVM
implementation of MMU memory caches"), despite having also copy-pasted
the definition of KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE in kvm_types.h.

Opportunistically drop the superfluous wrapper
kvm_riscv_stage2_flush_cache(), whose name is very, very confusing as
"cache flush" in the context of MMU code almost always refers to flushing
hardware caches, not freeing unused software objects.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Sean Christopherson authored and Anup Patel committed Jan 6, 2022
1 parent 5e4e84f commit cc4f602
Showing 4 changed files with 18 additions and 65 deletions.
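
The caller-side pattern the diff converts to is the standard common-KVM one: top up the per-vCPU cache in sleepable context, then consume objects while holding mmu_lock. A minimal sketch follows; the three cache helpers are the real common-KVM API, while the wrapper function and its use of stage2_pgd_levels are illustrative only:

	static int example_stage2_topup_and_alloc(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
		void *new_table;
		int ret;

		/* Fill the cache with GFP_KERNEL allocations, before taking mmu_lock. */
		ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
		if (ret)
			return ret;

		spin_lock(&vcpu->kvm->mmu_lock);
		/* Consume pre-allocated pages; sleeping is not allowed here. */
		new_table = kvm_mmu_memory_cache_alloc(pcache);
		spin_unlock(&vcpu->kvm->mmu_lock);

		return new_table ? 0 : -ENOMEM;
	}

Anything left in the cache is returned with kvm_mmu_free_memory_cache(), as the vcpu.c hunk below does at vCPU destruction.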
10 changes: 1 addition & 9 deletions arch/riscv/include/asm/kvm_host.h
@@ -77,13 +77,6 @@ struct kvm_sbi_context {
 	int return_handled;
 };
 
-#define KVM_MMU_PAGE_CACHE_NR_OBJS	32
-
-struct kvm_mmu_page_cache {
-	int nobjs;
-	void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
-};
-
 struct kvm_cpu_trap {
 	unsigned long sepc;
 	unsigned long scause;
@@ -193,7 +186,7 @@ struct kvm_vcpu_arch {
 	struct kvm_sbi_context sbi_context;
 
 	/* Cache pages needed to program page tables with spinlock held */
-	struct kvm_mmu_page_cache mmu_page_cache;
+	struct kvm_mmu_memory_cache mmu_page_cache;
 
 	/* VCPU power-off state */
 	bool power_off;
@@ -220,7 +213,6 @@ void __kvm_riscv_hfence_gvma_all(void);
 int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
 			 gpa_t gpa, unsigned long hva, bool is_write);
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
 void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
2 changes: 1 addition & 1 deletion arch/riscv/include/asm/kvm_types.h
@@ -2,6 +2,6 @@
 #ifndef _ASM_RISCV_KVM_TYPES_H
 #define _ASM_RISCV_KVM_TYPES_H
 
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	40
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE	32
 
 #endif /* _ASM_RISCV_KVM_TYPES_H */
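
For context, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE sizes the fixed object array of the generic cache in include/linux/kvm_types.h, which at the time of this commit looked roughly like this (comments added here for explanation):

	struct kvm_mmu_memory_cache {
		int nobjs;			/* objects currently cached */
		gfp_t gfp_zero;			/* 0 or __GFP_ZERO, ORed into allocations */
		struct kmem_cache *kmem_cache;	/* NULL means "allocate whole pages" */
		void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
	};

Dropping the value from arm64's 40 to 32 restores the capacity RISC-V actually used before (the old KVM_MMU_PAGE_CACHE_NR_OBJS).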
66 changes: 13 additions & 53 deletions arch/riscv/kvm/mmu.c
@@ -83,43 +83,6 @@ static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
 	return 0;
 }
 
-static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache,
-			      int min, int max)
-{
-	void *page;
-
-	BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS);
-	if (pcache->nobjs >= min)
-		return 0;
-	while (pcache->nobjs < max) {
-		page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-		if (!page)
-			return -ENOMEM;
-		pcache->objects[pcache->nobjs++] = page;
-	}
-
-	return 0;
-}
-
-static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache)
-{
-	while (pcache && pcache->nobjs)
-		free_page((unsigned long)pcache->objects[--pcache->nobjs]);
-}
-
-static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache)
-{
-	void *p;
-
-	if (!pcache)
-		return NULL;
-
-	BUG_ON(!pcache->nobjs);
-	p = pcache->objects[--pcache->nobjs];
-
-	return p;
-}
-
 static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
 				  pte_t **ptepp, u32 *ptep_level)
 {
@@ -171,7 +134,7 @@ static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
 }
 
 static int stage2_set_pte(struct kvm *kvm, u32 level,
-			  struct kvm_mmu_page_cache *pcache,
+			  struct kvm_mmu_memory_cache *pcache,
 			  gpa_t addr, const pte_t *new_pte)
 {
 	u32 current_level = stage2_pgd_levels - 1;
@@ -186,7 +149,9 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
 		return -EEXIST;
 
 	if (!pte_val(*ptep)) {
-		next_ptep = stage2_cache_alloc(pcache);
+		if (!pcache)
+			return -ENOMEM;
+		next_ptep = kvm_mmu_memory_cache_alloc(pcache);
 		if (!next_ptep)
 			return -ENOMEM;
 		*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
@@ -209,7 +174,7 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
 }
 
 static int stage2_map_page(struct kvm *kvm,
-			   struct kvm_mmu_page_cache *pcache,
+			   struct kvm_mmu_memory_cache *pcache,
 			   gpa_t gpa, phys_addr_t hpa,
 			   unsigned long page_size,
 			   bool page_rdonly, bool page_exec)
@@ -384,7 +349,10 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
-	struct kvm_mmu_page_cache pcache = { 0, };
+	struct kvm_mmu_memory_cache pcache;
+
+	memset(&pcache, 0, sizeof(pcache));
+	pcache.gfp_zero = __GFP_ZERO;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
@@ -395,9 +363,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 		if (!writable)
 			pte = pte_wrprotect(pte);
 
-		ret = stage2_cache_topup(&pcache,
-					 stage2_pgd_levels,
-					 KVM_MMU_PAGE_CACHE_NR_OBJS);
+		ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
 		if (ret)
 			goto out;
 
@@ -411,7 +377,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
 	}
 
 out:
-	stage2_cache_flush(&pcache);
+	kvm_mmu_free_memory_cache(&pcache);
 	return ret;
 }
 
@@ -649,7 +615,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	struct vm_area_struct *vma;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
+	struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
 	bool logging = (memslot->dirty_bitmap &&
 			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
 	unsigned long vma_pagesize, mmu_seq;
@@ -684,8 +650,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	}
 
 	/* We need minimum second+third level pages */
-	ret = stage2_cache_topup(pcache, stage2_pgd_levels,
-				 KVM_MMU_PAGE_CACHE_NR_OBJS);
+	ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
 	if (ret) {
 		kvm_err("Failed to topup stage2 cache\n");
 		return ret;
@@ -734,11 +699,6 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	return ret;
 }
 
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
-{
-	stage2_cache_flush(&vcpu->arch.mmu_page_cache);
-}
-
 int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
 {
 	struct page *pgd_page;
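
The atomic-allocation fallback mentioned in the commit message lives in the common code (virt/kvm/kvm_main.c); at the time it read approximately as follows. Note how it replaces the old stage2_cache_alloc()'s hard BUG_ON(!pcache->nobjs) with a WARN plus a recovery attempt:

	void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
	{
		void *p;

		/* An empty cache here means a KVM bug; warn and try to recover. */
		if (WARN_ON(!mc->nobjs))
			p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
		else
			p = mc->objects[--mc->nobjs];
		BUG_ON(!p);
		return p;
	}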
5 changes: 3 additions & 2 deletions arch/riscv/kvm/vcpu.c
@@ -77,6 +77,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	/* Mark this VCPU never ran */
 	vcpu->arch.ran_atleast_once = false;
+	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
 	/* Setup ISA features available to VCPU */
 	vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
@@ -107,8 +108,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	/* Cleanup VCPU timer */
 	kvm_riscv_vcpu_timer_deinit(vcpu);
 
-	/* Flush the pages pre-allocated for Stage2 page table mappings */
-	kvm_riscv_stage2_flush_cache(vcpu);
+	/* Free unused pages pre-allocated for Stage2 page table mappings */
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
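
Setting gfp_zero = __GFP_ZERO in the hunks above is what preserves the old zeroing behavior of __get_free_page(GFP_KERNEL | __GFP_ZERO): the common code ORs the field into every cache allocation. Roughly, per the internal helper in virt/kvm/kvm_main.c of that era (details may differ across kernel versions):

	static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
						       gfp_t gfp_flags)
	{
		/* Honor the per-cache zeroing request. */
		gfp_flags |= mc->gfp_zero;

		if (mc->kmem_cache)
			return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
		else
			return (void *)__get_free_page(gfp_flags);
	}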
