KVM: PPC: Book3S HV: Gather HPT related variables into sub-structure
Currently, the powerpc kvm_arch structure contains a number of variables
tracking the state of the guest's hashed page table (HPT) in KVM HV.  This
patch gathers them all together into a single kvm_hpt_info substructure.
This makes life more convenient for the upcoming HPT resizing
implementation.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
David Gibson authored and Paul Mackerras committed Jan 31, 2017
1 parent db9a290 commit 3f9d4f5
Showing 4 changed files with 92 additions and 84 deletions.
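In short, every old kvm->arch.hpt_* field moves under a single kvm->arch.hpt of type struct kvm_hpt_info. A minimal before/after sketch of the resulting access pattern, distilled from the page-fault hunk below (index is a guest HPTE index):

/* Before: HPT state scattered across individual kvm_arch fields */
hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
rev = &kvm->arch.revmap[index];

/* After: the same state gathered under kvm->arch.hpt */
hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
rev = &kvm->arch.hpt.rev[index];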
20 changes: 14 additions & 6 deletions arch/powerpc/include/asm/kvm_host.h
@@ -241,12 +241,24 @@ struct kvm_arch_memory_slot {
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};

+struct kvm_hpt_info {
+/* Host virtual (linear mapping) address of guest HPT */
+unsigned long virt;
+/* Array of reverse mapping entries for each guest HPTE */
+struct revmap_entry *rev;
+unsigned long npte;
+unsigned long mask;
+/* Guest HPT size is 2**(order) bytes */
+u32 order;
+/* 1 if HPT allocated with CMA, 0 otherwise */
+int cma;
+};
+
struct kvm_arch {
unsigned int lpid;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned int tlb_sets;
-unsigned long hpt_virt;
-struct revmap_entry *revmap;
+struct kvm_hpt_info hpt;
atomic64_t mmio_update;
unsigned int host_lpid;
unsigned long host_lpcr;
@@ -256,15 +268,11 @@ struct kvm_arch {
unsigned long lpcr;
unsigned long vrma_slb_v;
int hpte_setup_done;
-u32 hpt_order;
atomic_t vcpus_running;
u32 online_vcores;
-unsigned long hpt_npte;
-unsigned long hpt_mask;
atomic_t hpte_mod_interest;
cpumask_t need_tlb_flush;
cpumask_t cpu_in_guest;
-int hpt_cma_alloc;
u8 radix;
pgd_t *pgtable;
u64 process_table;
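Note that npte and mask in the new structure stay pure functions of order: the HPT is 2**order bytes, an HPTE is 16 (2**4) bytes, and an HPTEG groups 8 HPTEs into 128 (2**7) bytes. A short sketch of the sizing, as established in kvmppc_alloc_hpt() below:

kvm->arch.hpt.npte = 1ul << (order - 4); /* total number of HPTEs */
kvm->arch.hpt.mask = (1ul << (order - 7)) - 1; /* hash mask: HPTEG count - 1 */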
92 changes: 46 additions & 46 deletions arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -61,12 +61,12 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
order = PPC_MIN_HPT_ORDER;
}

-kvm->arch.hpt_cma_alloc = 0;
+kvm->arch.hpt.cma = 0;
page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
if (page) {
hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
memset((void *)hpt, 0, (1ul << order));
-kvm->arch.hpt_cma_alloc = 1;
+kvm->arch.hpt.cma = 1;
}

/* Lastly try successively smaller sizes from the page allocator */
@@ -81,22 +81,22 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
if (!hpt)
return -ENOMEM;

-kvm->arch.hpt_virt = hpt;
-kvm->arch.hpt_order = order;
+kvm->arch.hpt.virt = hpt;
+kvm->arch.hpt.order = order;
/* HPTEs are 2**4 bytes long */
-kvm->arch.hpt_npte = 1ul << (order - 4);
+kvm->arch.hpt.npte = 1ul << (order - 4);
/* 128 (2**7) bytes in each HPTEG */
-kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
+kvm->arch.hpt.mask = (1ul << (order - 7)) - 1;

atomic64_set(&kvm->arch.mmio_update, 0);

/* Allocate reverse map array */
-rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
+rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt.npte);
if (!rev) {
pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
goto out_freehpt;
}
-kvm->arch.revmap = rev;
+kvm->arch.hpt.rev = rev;
kvm->arch.sdr1 = __pa(hpt) | (order - 18);

pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
@@ -107,7 +107,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
return 0;

out_freehpt:
-if (kvm->arch.hpt_cma_alloc)
+if (kvm->arch.hpt.cma)
kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
else
free_pages(hpt, order - PAGE_SHIFT);
@@ -132,10 +132,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
goto out;
}
}
-if (kvm->arch.hpt_virt) {
-order = kvm->arch.hpt_order;
+if (kvm->arch.hpt.virt) {
+order = kvm->arch.hpt.order;
/* Set the entire HPT to 0, i.e. invalid HPTEs */
-memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+memset((void *)kvm->arch.hpt.virt, 0, 1ul << order);
/*
* Reset all the reverse-mapping chains for all memslots
*/
@@ -155,13 +155,13 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)

void kvmppc_free_hpt(struct kvm *kvm)
{
-vfree(kvm->arch.revmap);
-if (kvm->arch.hpt_cma_alloc)
-kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt_virt),
-1 << (kvm->arch.hpt_order - PAGE_SHIFT));
-else if (kvm->arch.hpt_virt)
-free_pages(kvm->arch.hpt_virt,
-kvm->arch.hpt_order - PAGE_SHIFT);
+vfree(kvm->arch.hpt.rev);
+if (kvm->arch.hpt.cma)
+kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt.virt),
+1 << (kvm->arch.hpt.order - PAGE_SHIFT));
+else if (kvm->arch.hpt.virt)
+free_pages(kvm->arch.hpt.virt,
+kvm->arch.hpt.order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
@@ -196,8 +196,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
if (npages > 1ul << (40 - porder))
npages = 1ul << (40 - porder);
/* Can't use more than 1 HPTE per HPTEG */
-if (npages > kvm->arch.hpt_mask + 1)
-npages = kvm->arch.hpt_mask + 1;
+if (npages > kvm->arch.hpt.mask + 1)
+npages = kvm->arch.hpt.mask + 1;

hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -207,7 +207,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
for (i = 0; i < npages; ++i) {
addr = i << porder;
/* can't use hpt_hash since va > 64 bits */
-hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
+hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt.mask;
/*
* We assume that the hash table is empty and no
* vcpus are using it at this stage. Since we create
@@ -340,11 +340,11 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
preempt_enable();
return -ENOENT;
}
-hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
if (cpu_has_feature(CPU_FTR_ARCH_300))
v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1]));
-gr = kvm->arch.revmap[index].guest_rpte;
+gr = kvm->arch.hpt.rev[index].guest_rpte;

unlock_hpte(hptep, orig_v);
preempt_enable();
@@ -485,8 +485,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
index = vcpu->arch.pgfault_index;
-hptep = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
-rev = &kvm->arch.revmap[index];
+hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
+rev = &kvm->arch.hpt.rev[index];
preempt_disable();
while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
cpu_relax();
@@ -748,7 +748,7 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
-struct revmap_entry *rev = kvm->arch.revmap;
+struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long h, i, j;
__be64 *hptep;
unsigned long ptel, psize, rcbits;
@@ -768,7 +768,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
* rmap chain lock.
*/
i = *rmapp & KVMPPC_RMAP_INDEX;
-hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
/* unlock rmap before spinning on the HPTE lock */
unlock_rmap(rmapp);
@@ -860,7 +860,7 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
-struct revmap_entry *rev = kvm->arch.revmap;
+struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
__be64 *hptep;
int ret = 0;
@@ -880,7 +880,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,

i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
-hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
j = rev[i].forw;

/* If this HPTE isn't referenced, ignore it */
@@ -923,7 +923,7 @@ int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
-struct revmap_entry *rev = kvm->arch.revmap;
+struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
unsigned long *hp;
int ret = 1;
@@ -940,7 +940,7 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (*rmapp & KVMPPC_RMAP_PRESENT) {
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
-hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
+hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4));
j = rev[i].forw;
if (be64_to_cpu(hp[1]) & HPTE_R_R)
goto out;
@@ -980,7 +980,7 @@ static int vcpus_running(struct kvm *kvm)
*/
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
-struct revmap_entry *rev = kvm->arch.revmap;
+struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
unsigned long n;
unsigned long v, r;
@@ -1005,7 +1005,7 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
i = head = *rmapp & KVMPPC_RMAP_INDEX;
do {
unsigned long hptep1;
-hptep = (__be64 *) (kvm->arch.hpt_virt + (i << 4));
+hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4));
j = rev[i].forw;

/*
@@ -1311,8 +1311,8 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
flags = ctx->flags;

i = ctx->index;
-hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-revp = kvm->arch.revmap + i;
+hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+revp = kvm->arch.hpt.rev + i;
lbuf = (unsigned long __user *)buf;

nb = 0;
@@ -1327,7 +1327,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,

/* Skip uninteresting entries, i.e. clean on not-first pass */
if (!first_pass) {
-while (i < kvm->arch.hpt_npte &&
+while (i < kvm->arch.hpt.npte &&
!hpte_dirty(revp, hptp)) {
++i;
hptp += 2;
@@ -1337,7 +1337,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
hdr.index = i;

/* Grab a series of valid entries */
-while (i < kvm->arch.hpt_npte &&
+while (i < kvm->arch.hpt.npte &&
hdr.n_valid < 0xffff &&
nb + HPTE_SIZE < count &&
record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
@@ -1353,7 +1353,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
++revp;
}
/* Now skip invalid entries while we can */
-while (i < kvm->arch.hpt_npte &&
+while (i < kvm->arch.hpt.npte &&
hdr.n_invalid < 0xffff &&
record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
/* found an invalid entry */
@@ -1374,7 +1374,7 @@ static ssize_t kvm_htab_read(struct file *file, char __user *buf,
}

/* Check if we've wrapped around the hash table */
-if (i >= kvm->arch.hpt_npte) {
+if (i >= kvm->arch.hpt.npte) {
i = 0;
ctx->first_pass = 0;
break;
@@ -1433,11 +1433,11 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,

err = -EINVAL;
i = hdr.index;
-if (i >= kvm->arch.hpt_npte ||
-i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
+if (i >= kvm->arch.hpt.npte ||
+i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt.npte)
break;

-hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
+hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
lbuf = (unsigned long __user *)buf;
for (j = 0; j < hdr.n_valid; ++j) {
__be64 hpte_v;
@@ -1624,8 +1624,8 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,

kvm = p->kvm;
i = p->hpt_index;
-hptp = (__be64 *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
-for (; len != 0 && i < kvm->arch.hpt_npte; ++i, hptp += 2) {
+hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
+for (; len != 0 && i < kvm->arch.hpt.npte; ++i, hptp += 2) {
if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)))
continue;

@@ -1635,7 +1635,7 @@ static ssize_t debugfs_htab_read(struct file *file, char __user *buf,
cpu_relax();
v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK;
hr = be64_to_cpu(hptp[1]);
-gr = kvm->arch.revmap[i].guest_rpte;
+gr = kvm->arch.hpt.rev[i].guest_rpte;
unlock_hpte(hptp, v);
preempt_enable();

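Many sites above open-code the same address arithmetic: with 16-byte HPTEs, entry i starts at hpt.virt + (i << 4), equivalently i * HPTE_SIZE. A hypothetical helper, not part of this patch, that names the idiom:

/* Hypothetical: map a guest HPTE index to its address in the HPT. */
static inline __be64 *hpte_ptr(struct kvm_hpt_info *hpt, unsigned long i)
{
	return (__be64 *)(hpt->virt + (i << 4));
}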
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv.c
@@ -3197,7 +3197,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
goto out; /* another vcpu beat us to it */

/* Allocate hashed page table (if not done already) and reset it */
-if (!kvm->arch.hpt_virt) {
+if (!kvm->arch.hpt.virt) {
err = kvmppc_alloc_hpt(kvm, NULL);
if (err) {
pr_err("KVM: Couldn't alloc HPT\n");
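One payoff of the consolidation: "is the HPT allocated yet?" is a single field test. A condensed sketch of the lazy-allocation flow in kvmppc_hv_setup_htab_rma(), per the hunk above (the NULL order argument appears to let kvmppc_alloc_hpt() fall back to a default order, as in its first hunk):

if (!kvm->arch.hpt.virt) {	/* first vcpu run: no HPT yet */
	err = kvmppc_alloc_hpt(kvm, NULL);
	if (err)
		pr_err("KVM: Couldn't alloc HPT\n");
}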
[Diff for the fourth changed file, arch/powerpc/kvm/book3s_hv_rm_mmu.c, did not load]
