---
yaml
---
r: 297093
b: refs/heads/master
c: 8936dda
h: refs/heads/master
i:
  297091: 98f93a5
v: v3
Paul Mackerras authored and Avi Kivity committed Mar 5, 2012
1 parent 3f71be4 commit ad6eb0e
Showing 5 changed files with 104 additions and 47 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4e72dbe13528394a413889d73e5025dbdf6cab70
+refs/heads/master: 8936dda4c2ed070ecebd786baf35b08584accf4a
8 changes: 8 additions & 0 deletions trunk/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -35,6 +35,14 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+/* For now use fixed-size 16MB page table */
+#define HPT_ORDER	24
+#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
+#define HPT_NPTE	(HPT_NPTEG << 3)		/* 8 PTEs per PTEG */
+#define HPT_HASH_MASK	(HPT_NPTEG - 1)
+#endif
+
 static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 					     unsigned long pte_index)
 {
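These constants pin the guest hashed page table at 16MB. A standalone sketch of the arithmetic they imply, illustrative only and not part of the patch:

/* Illustrative sketch: sizes implied by a fixed 16MB HPT. */
#include <stdio.h>

int main(void)
{
	unsigned long order = 24;                  /* HPT_ORDER: 1 << 24 bytes = 16MB */
	unsigned long npteg = 1ul << (order - 7);  /* HPT_NPTEG: 131072 PTEGs, 128B each */
	unsigned long npte  = npteg << 3;          /* HPT_NPTE: 1048576 HPTEs, 8 per PTEG */

	printf("%lu bytes, %lu PTEGs, %lu HPTEs, hash mask 0x%lx\n",
	       1ul << order, npteg, npte, npteg - 1);
	return 0;
}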
10 changes: 10 additions & 0 deletions trunk/arch/powerpc/include/asm/kvm_host.h
@@ -166,9 +166,19 @@ struct kvmppc_rma_info {
 	atomic_t use_count;
 };
 
+/*
+ * The reverse mapping array has one entry for each HPTE,
+ * which stores the guest's view of the second word of the HPTE
+ * (including the guest physical address of the mapping).
+ */
+struct revmap_entry {
+	unsigned long guest_rpte;
+};
+
 struct kvm_arch {
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	unsigned long hpt_virt;
+	struct revmap_entry *revmap;
 	unsigned long ram_npages;
 	unsigned long ram_psize;
 	unsigned long ram_porder;
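The reverse map parallels the HPT entry-for-entry, so one index addresses both an HPTE and its guest view. A minimal sketch of that invariant, using hypothetical helpers that are not from the patch:

/* Hypothetical helpers, for illustration: HPTE i occupies 16 bytes at
 * hpt_virt + (i << 4); revmap[i] is the guest view of its second dword. */
static unsigned long *hpte_of(struct kvm_arch *a, unsigned long i)
{
	return (unsigned long *)(a->hpt_virt + (i << 4));
}

static unsigned long guest_rpte_of(struct kvm_arch *a, unsigned long i)
{
	return a->revmap[i].guest_rpte;
}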
44 changes: 32 additions & 12 deletions trunk/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -23,6 +23,7 @@
 #include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
 
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
@@ -33,11 +34,6 @@
 #include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER	24
-#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
-#define HPT_HASH_MASK	(HPT_NPTEG - 1)
-
 /* Pages in the VRMA are 16MB pages */
 #define VRMA_PAGE_ORDER	24
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
@@ -51,7 +47,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
 {
 	unsigned long hpt;
 	unsigned long lpid;
+	struct revmap_entry *rev;
 
+	/* Allocate guest's hashed page table */
 	hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
 			       HPT_ORDER - PAGE_SHIFT);
 	if (!hpt) {
@@ -60,12 +58,20 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
 	}
 	kvm->arch.hpt_virt = hpt;
 
+	/* Allocate reverse map array */
+	rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
+	if (!rev) {
+		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
+		goto out_freehpt;
+	}
+	kvm->arch.revmap = rev;
+
+	/* Allocate the guest's logical partition ID */
 	do {
 		lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
 		if (lpid >= NR_LPIDS) {
 			pr_err("kvm_alloc_hpt: No LPIDs free\n");
-			free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
-			return -ENOMEM;
+			goto out_freeboth;
 		}
 	} while (test_and_set_bit(lpid, lpid_inuse));
 
@@ -74,11 +80,18 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
 
 	pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
 	return 0;
+
+ out_freeboth:
+	vfree(rev);
+ out_freehpt:
+	free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+	return -ENOMEM;
 }
 
 void kvmppc_free_hpt(struct kvm *kvm)
 {
 	clear_bit(kvm->arch.lpid, lpid_inuse);
+	vfree(kvm->arch.revmap);
 	free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
 }
 
@@ -89,14 +102,16 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
 	unsigned long pfn;
 	unsigned long *hpte;
 	unsigned long hash;
+	unsigned long porder = kvm->arch.ram_porder;
+	struct revmap_entry *rev;
 	struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
 
 	if (!pginfo)
 		return;
 
 	/* VRMA can't be > 1TB */
-	if (npages > 1ul << (40 - kvm->arch.ram_porder))
-		npages = 1ul << (40 - kvm->arch.ram_porder);
+	if (npages > 1ul << (40 - porder))
+		npages = 1ul << (40 - porder);
 	/* Can't use more than 1 HPTE per HPTEG */
 	if (npages > HPT_NPTEG)
 		npages = HPT_NPTEG;
@@ -113,15 +128,20 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
 		 * at most one HPTE per HPTEG, we just assume entry 7
 		 * is available and use it.
 		 */
-		hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
-		hpte += 7 * 2;
+		hash = (hash << 3) + 7;
+		hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 4));
 		/* HPTE low word - RPN, protection, etc. */
 		hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
 			HPTE_R_M | PP_RWXX;
-		wmb();
+		smp_wmb();
 		hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
 			(i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
 			HPTE_V_LARGE | HPTE_V_VALID;
+
+		/* Reverse map info */
+		rev = &kvm->arch.revmap[hash];
+		rev->guest_rpte = (i << porder) | HPTE_R_R | HPTE_R_C |
+			HPTE_R_M | PP_RWXX;
 	}
 }
 
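The reworked indexing in kvmppc_map_vrma picks the same entry 7 as before, but now yields a global HPTE index that doubles as the revmap subscript. Checking the arithmetic (illustrative only):

/* Illustrative: for PTEG number h, entry 7. */
unsigned long h = 5;                        /* any PTEG number */
unsigned long old_off = (h << 7) + 7 * 16;  /* old: 128B PTEG base, then entry 7 */
unsigned long idx     = (h << 3) + 7;       /* new: global HPTE index */
unsigned long new_off = idx << 4;           /* 16B per HPTE */
/* old_off == new_off == h * 128 + 112, and idx indexes revmap[] directly */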
87 changes: 53 additions & 34 deletions trunk/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -20,10 +20,19 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER	24
-#define HPT_NPTEG	(1ul << (HPT_ORDER - 7))	/* 128B per pteg */
-#define HPT_HASH_MASK	(HPT_NPTEG - 1)
+/* Translate address of a vmalloc'd thing to a linear map address */
+static void *real_vmalloc_addr(void *x)
+{
+	unsigned long addr = (unsigned long) x;
+	pte_t *p;
+
+	p = find_linux_pte(swapper_pg_dir, addr);
+	if (!p || !pte_present(*p))
+		return NULL;
+	/* assume we don't have huge pages in vmalloc space... */
+	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
+	return __va(addr);
+}
 
 #define HPTE_V_HVLOCK	0x40UL
 
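real_vmalloc_addr exists because these handlers run in real mode (the "rm" in the file name), where the vmalloc mapping of the revmap array cannot be dereferenced; it walks the kernel page table to the backing page and returns the linear-map alias. The usage pattern, as added later in this patch, repeated here for clarity:

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev)
		rev->guest_rpte = g_ptel;	/* safe through the linear map */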
@@ -52,6 +61,8 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long i, lpn, pa;
 	unsigned long *hpte;
+	struct revmap_entry *rev;
+	unsigned long g_ptel = ptel;
 
 	/* only handle 4k, 64k and 16M pages for now */
 	porder = 12;
@@ -82,7 +93,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 	pteh &= ~0x60UL;
 	ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
 	ptel |= pa;
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	if (likely((flags & H_EXACT) == 0)) {
 		pte_index &= ~7UL;
@@ -95,18 +106,22 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
 				break;
 			hpte += 2;
 		}
+		pte_index += i;
 	} else {
-		i = 0;
 		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 		if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
 			return H_PTEG_FULL;
 	}
 
+	/* Save away the guest's idea of the second HPTE dword */
+	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	if (rev)
+		rev->guest_rpte = g_ptel;
 	hpte[1] = ptel;
 	eieio();
 	hpte[0] = pteh;
 	asm volatile("ptesync" : : : "memory");
 	atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt);
-	vcpu->arch.gpr[4] = pte_index + i;
+	vcpu->arch.gpr[4] = pte_index;
 	return H_SUCCESS;
 }
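Note the reordering above: pte_index is advanced to the chosen slot before the reverse-map lookup, so the same index records the guest view and is returned in gpr[4]. Schematically (illustrative only):

	pte_index &= ~7UL;	/* H_EXACT clear: index of entry 0 in the PTEG */
	/* ... scan finds free entry i in 0..7 ... */
	pte_index += i;		/* global index of the entry actually used */
	/* revmap[pte_index] and the value returned to the guest now agree */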

@@ -138,7 +153,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
 	unsigned long *hpte;
 	unsigned long v, r, rb;
 
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -193,7 +208,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 		if (req == 3)
 			break;
 		if (req != 1 || flags == 3 ||
-		    pte_index >= (HPT_NPTEG << 3)) {
+		    pte_index >= HPT_NPTE) {
 			/* parameter error */
 			args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
 			ret = H_PARAMETER;
@@ -256,9 +271,10 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hpte;
-	unsigned long v, r, rb;
+	struct revmap_entry *rev;
+	unsigned long v, r, rb, mask, bits;
 
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 	while (!lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -271,11 +287,21 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	if (atomic_read(&kvm->online_vcpus) == 1)
 		flags |= H_LOCAL;
 	v = hpte[0];
-	r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
-			HPTE_R_KEY_HI | HPTE_R_KEY_LO);
-	r |= (flags << 55) & HPTE_R_PP0;
-	r |= (flags << 48) & HPTE_R_KEY_HI;
-	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+	bits = (flags << 55) & HPTE_R_PP0;
+	bits |= (flags << 48) & HPTE_R_KEY_HI;
+	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+
+	/* Update guest view of 2nd HPTE dword */
+	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
+		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
+	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+	if (rev) {
+		r = (rev->guest_rpte & ~mask) | bits;
+		rev->guest_rpte = r;
+	}
+	r = (hpte[1] & ~mask) | bits;
+
+	/* Update HPTE */
 	rb = compute_tlbie_rb(v, r, pte_index);
 	hpte[0] = v & ~HPTE_V_VALID;
 	if (!(flags & H_LOCAL)) {
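Splitting the update into mask and bits makes the edit uniform: mask covers every field H_PROTECT may change, bits holds the replacement values decoded from flags, and the same (old & ~mask) | bits expression is applied to both the guest view and the hardware HPTE. A worked instance of the idiom with toy values (illustrative only):

	unsigned long old_r = 0xffff;
	unsigned long mask  = 0x00f0;	/* fields allowed to change */
	unsigned long bits  = 0x0050;	/* their new values */
	unsigned long new_r = (old_r & ~mask) | bits;	/* 0xff5f: other bits kept */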
@@ -298,38 +324,31 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 	return H_SUCCESS;
 }
 
-static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
-{
-	long int i;
-	unsigned long offset, rpn;
-
-	offset = realaddr & (kvm->arch.ram_psize - 1);
-	rpn = (realaddr - offset) >> PAGE_SHIFT;
-	for (i = 0; i < kvm->arch.ram_npages; ++i)
-		if (rpn == kvm->arch.ram_pginfo[i].pfn)
-			return (i << PAGE_SHIFT) + offset;
-	return HPTE_R_RPN;	/* all 1s in the RPN field */
-}
-
 long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 		   unsigned long pte_index)
 {
 	struct kvm *kvm = vcpu->kvm;
 	unsigned long *hpte, r;
 	int i, n = 1;
+	struct revmap_entry *rev = NULL;
 
-	if (pte_index >= (HPT_NPTEG << 3))
+	if (pte_index >= HPT_NPTE)
 		return H_PARAMETER;
 	if (flags & H_READ_4) {
 		pte_index &= ~3;
 		n = 4;
 	}
+	if (flags & H_R_XLATE)
+		rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
 	for (i = 0; i < n; ++i, ++pte_index) {
 		hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
 		r = hpte[1];
-		if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID))
-			r = reverse_xlate(kvm, r & HPTE_R_RPN) |
-				(r & ~HPTE_R_RPN);
+		if (hpte[0] & HPTE_V_VALID) {
+			if (rev)
+				r = rev[i].guest_rpte;
+			else
+				r = hpte[1] | HPTE_R_RPN;
+		}
 		vcpu->arch.gpr[4 + i * 2] = hpte[0];
 		vcpu->arch.gpr[5 + i * 2] = r;
 	}
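With the reverse map available, H_READ with H_R_XLATE no longer scans ram_pginfo to reverse-translate the real page number; it returns the stored guest view directly, and falls back to an all-ones RPN (the old "untranslatable" marker) when no entry is cached. The effect, roughly (illustrative only):

	/* before: O(ram_npages) search per HPTE via reverse_xlate() */
	/* after:  O(1) lookup of the recorded guest view            */
	if (hpte[0] & HPTE_V_VALID)
		r = rev ? rev[i].guest_rpte : (hpte[1] | HPTE_R_RPN);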
