
Commit b45f1ca

yaml
---
r: 215688
b: refs/heads/master
c: e850894
h: refs/heads/master
v: v3

Alexander Graf authored and Avi Kivity committed Oct 24, 2010
1 parent 873d3d8 commit b45f1ca
Showing 7 changed files with 82 additions and 13 deletions.
[refs] (2 changes: 1 addition & 1 deletion)

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: beb03f14da9ceff76ff08cbb8af064b52dc21f7e
+refs/heads/master: e8508940a88691ad3d1c46608cd968eb4be9cbc5
trunk/arch/powerpc/include/asm/kvm_book3s.h (1 change: 1 addition & 0 deletions)

@@ -130,6 +130,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
trunk/arch/powerpc/kvm/book3s.c (35 changes: 32 additions & 3 deletions)

@@ -419,6 +419,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
 	}
 }
 
+pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	/* Magic page override */
+	if (unlikely(mp_pa) &&
+	    unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
+		     ((mp_pa & PAGE_MASK) & KVM_PAM))) {
+		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+		pfn_t pfn;
+
+		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
+		get_page(pfn_to_page(pfn));
+		return pfn;
+	}
+
+	return gfn_to_pfn(vcpu->kvm, gfn);
+}
+
 /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
  * emulate 32 bytes dcbz length.
@@ -554,6 +573,13 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 
 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
+	ulong mp_pa = vcpu->arch.magic_page_pa;
+
+	if (unlikely(mp_pa) &&
+	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
+		return 1;
+	}
+
 	return kvm_is_visible_gfn(vcpu->kvm, gfn);
 }
 
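Both hunks above key off the same test: a gfn refers to the magic page exactly when its byte address, gfn << PAGE_SHIFT, matches the page-aligned magic_page_pa once both sides are masked with KVM_PAM. When the test hits, kvmppc_gfn_to_pfn() hands out the pfn of the host page backing vcpu->arch.shared, taking an extra reference via get_page(), instead of doing a memslot lookup, and kvmppc_visible_gfn() reports the frame as visible. Below is a minimal userspace sketch of the two checks, with assumed values for PAGE_SHIFT (12) and KVM_PAM (treated here as a plain physical-address mask); it is illustrative, not the kernel code.

/* Illustrative only: replays the magic-page gfn checks with assumed
 * constants (PAGE_SHIFT == 12, placeholder KVM_PAM), not kernel code. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define KVM_PAM    0x0fffffffffffffffULL	/* assumed mask value */

/* mirrors the comparison in kvmppc_gfn_to_pfn() */
static int gfn_is_magic_page(uint64_t gfn, uint64_t mp_pa)
{
	return mp_pa &&
	       ((gfn << PAGE_SHIFT) & KVM_PAM) == ((mp_pa & PAGE_MASK) & KVM_PAM);
}

/* mirrors the extra test added to kvmppc_visible_gfn() */
static int gfn_is_visible_magic(uint64_t gfn, uint64_t mp_pa)
{
	return mp_pa && ((mp_pa & KVM_PAM) >> PAGE_SHIFT) == gfn;
}

int main(void)
{
	uint64_t mp_pa = 0x3fff000ULL;		/* made-up magic page address */
	uint64_t gfn = mp_pa >> PAGE_SHIFT;

	printf("pfn override taken: %d\n", gfn_is_magic_page(gfn, mp_pa));
	printf("gfn visible:        %d\n", gfn_is_visible_magic(gfn, mp_pa));
	printf("other gfn visible:  %d\n", gfn_is_visible_magic(gfn + 1, mp_pa));
	return 0;
}
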
@@ -1257,6 +1283,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	struct kvm_vcpu *vcpu;
 	int err = -ENOMEM;
+	unsigned long p;
 
 	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
@@ -1274,8 +1301,10 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_shadow_vcpu;
 
-	vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (!vcpu->arch.shared)
+	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+	/* the real shared page fills the last 4k of our page */
+	vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
+	if (!p)
 		goto uninit_vcpu;
 
 	vcpu->arch.host_retip = kvm_return_point;
@@ -1322,7 +1351,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
-	free_page((unsigned long)vcpu->arch.shared);
+	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
 	kfree(vcpu_book3s->shadow_vcpu);
 	vfree(vcpu_book3s);
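The allocation in kvmppc_core_vcpu_create() now grabs a whole host page but points vcpu->arch.shared at its last 4 KiB, so the magic page keeps a fixed 4 KiB footprint even on hosts with larger pages, and kvmppc_core_vcpu_free() masks the pointer back down with PAGE_MASK before handing it to free_page(). A small userspace sketch of that layout, assuming 64 KiB host pages and substituting posix_memalign()/free() for __get_free_page()/free_page():

/* Illustrative sketch of the "shared page in the last 4k" placement;
 * assumes 64 KiB host pages and substitutes posix_memalign()/free()
 * for the kernel page allocator. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 16
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	void *page;

	/* one naturally aligned host page, like __get_free_page() returns */
	if (posix_memalign(&page, PAGE_SIZE, PAGE_SIZE))
		return 1;

	unsigned long p = (unsigned long)page;
	/* the real shared page fills the last 4k of our page */
	void *shared = (void *)(p + PAGE_SIZE - 4096);

	printf("host page at   %#lx\n", p);
	printf("shared page at %#lx (last 4 KiB)\n", (unsigned long)shared);

	/* mirrors free_page((unsigned long)vcpu->arch.shared & PAGE_MASK) */
	free((void *)((unsigned long)shared & PAGE_MASK));
	return 0;
}

Masking with PAGE_MASK on the free path recovers the original allocation address, which is also what kvmppc_gfn_to_pfn() above does when it converts vcpu->arch.shared back into a host page frame.
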
trunk/arch/powerpc/kvm/book3s_32_mmu.c (16 changes: 16 additions & 0 deletions)

@@ -281,8 +281,24 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 				      struct kvmppc_pte *pte, bool data)
 {
 	int r;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	pte->eaddr = eaddr;
+
+	/* Magic page override */
+	if (unlikely(mp_ea) &&
+	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
+		pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
+		pte->raddr &= KVM_PAM;
+		pte->may_execute = true;
+		pte->may_read = true;
+		pte->may_write = true;
+
+		return 0;
+	}
+
 	r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data);
 	if (r < 0)
 		r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true);
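The override added to the 32-bit xlate path (and, further down, its Book3S-64 counterpart) only fires when the faulting effective address lies in the same 4 KiB page as magic_page_ea and the guest is not in problem state; user-mode accesses still go through the normal BAT and page-table walk. A small sketch of that gate, treating MSR_PR as bit 0x4000 (an assumption of this sketch, not something stated in the patch):

/* Illustrative gate for the magic-page override in the xlate paths;
 * MSR_PR's bit value (0x4000) and the addresses are assumptions. */
#include <stdio.h>
#include <stdint.h>

#define MSR_PR 0x4000ULL	/* assumed "problem state" (user mode) bit */

static int magic_page_hit(uint64_t eaddr, uint64_t mp_ea, uint64_t msr)
{
	return mp_ea &&
	       (eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL) &&
	       !(msr & MSR_PR);
}

int main(void)
{
	uint64_t mp_ea = 0xfffff000ULL;	/* made-up magic page EA */

	printf("kernel access inside page: %d\n",
	       magic_page_hit(mp_ea + 0x123, mp_ea, 0));
	printf("user access inside page:   %d\n",
	       magic_page_hit(mp_ea + 0x123, mp_ea, MSR_PR));
	printf("kernel access elsewhere:   %d\n",
	       magic_page_hit(0x10000000ULL, mp_ea, 0));
	return 0;
}

When the gate hits, the pte is filled in by hand: raddr points into magic_page_pa (clamped by KVM_PAM) and read, write and execute are all allowed, so no guest hash-table lookup is needed for the shared page.
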
trunk/arch/powerpc/kvm/book3s_32_mmu_host.c (2 changes: 1 addition & 1 deletion)

@@ -147,7 +147,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct hpte_cache *pte;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (kvm_is_error_hva(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 		       orig_pte->eaddr);
trunk/arch/powerpc/kvm/book3s_64_mmu.c (30 changes: 29 additions & 1 deletion)

@@ -163,6 +163,22 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	bool found = false;
 	bool perm_err = false;
 	int second = 0;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
+
+	/* Magic page override */
+	if (unlikely(mp_ea) &&
+	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		gpte->eaddr = eaddr;
+		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
+		gpte->raddr &= KVM_PAM;
+		gpte->may_execute = true;
+		gpte->may_read = true;
+		gpte->may_write = true;
+
+		return 0;
+	}
 
 	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr);
 	if (!slbe)
@@ -445,6 +461,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 	ulong ea = esid << SID_SHIFT;
 	struct kvmppc_slb *slb;
 	u64 gvsid = esid;
+	ulong mp_ea = vcpu->arch.magic_page_ea;
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
@@ -464,7 +481,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		break;
 	case MSR_DR|MSR_IR:
 		if (!slb)
-			return -ENOENT;
+			goto no_slb;
 
 		*vsid = gvsid;
 		break;
@@ -477,6 +494,17 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		*vsid |= VSID_PR;
 
 	return 0;
+
+no_slb:
+	/* Catch magic page case */
+	if (unlikely(mp_ea) &&
+	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
+	    !(vcpu->arch.shared->msr & MSR_PR)) {
+		*vsid = VSID_REAL | esid;
+		return 0;
+	}
+
+	return -EINVAL;
 }
 
 static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
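The new no_slb label covers the case where the magic page is touched through a segment that has no SLB entry yet: instead of the old blanket return -ENOENT, the ESID is compared against the segment of magic_page_ea and, for a supervisor-mode hit, a 1:1 real-mode VSID (VSID_REAL | esid) is handed back; everything else now fails with -EINVAL. A sketch of the ESID comparison, assuming 256 MiB segments (SID_SHIFT == 28) and a placeholder VSID_REAL value:

/* Illustrative sketch of the no_slb fallback; SID_SHIFT (28) and the
 * VSID_REAL value used here are assumptions, not the kernel's numbers. */
#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT 28				/* assumed 256 MiB segments */
#define VSID_REAL 0x4000000000000000ULL		/* placeholder flag value */

static int magic_segment_vsid(uint64_t esid, uint64_t mp_ea, uint64_t *vsid)
{
	if (mp_ea && esid == (mp_ea >> SID_SHIFT)) {
		*vsid = VSID_REAL | esid;	/* 1:1 "real mode" mapping */
		return 0;
	}
	return -1;				/* stands in for -EINVAL */
}

int main(void)
{
	uint64_t mp_ea = 0xfffff000ULL;		/* made-up magic page EA */
	uint64_t vsid;

	if (!magic_segment_vsid(mp_ea >> SID_SHIFT, mp_ea, &vsid))
		printf("vsid for magic segment: %#llx\n",
		       (unsigned long long)vsid);
	return 0;
}
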
trunk/arch/powerpc/kvm/book3s_64_mmu_host.c (9 changes: 2 additions & 7 deletions)

@@ -101,18 +101,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	struct kvmppc_sid_map *map;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
 	if (kvm_is_error_hva(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
 		return -EINVAL;
 	}
 	hpaddr <<= PAGE_SHIFT;
-#if PAGE_SHIFT == 12
-#elif PAGE_SHIFT == 16
-	hpaddr |= orig_pte->raddr & 0xf000;
-#else
-#error Unknown page size
-#endif
+	hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
 
 	/* and write the mapping ea -> hpa into the pt */
 	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
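The single expression that replaces the #if ladder, orig_pte->raddr & (~0xfffULL & ~PAGE_MASK), keeps exactly the guest-physical bits between the 4 KiB guest page size and the host PAGE_SIZE: on a 4 KiB host it evaluates to zero, and on a 64 KiB host it reduces to the old 0xf000 special case, so the position of a 4 KiB guest page inside a larger host page is preserved without per-page-size #ifdefs. A sketch that evaluates the mask for assumed 4 KiB and 64 KiB host page sizes:

/* Illustrative evaluation of the sub-page offset mask used above for
 * two assumed host page sizes (PAGE_SHIFT 12 and 16). */
#include <stdio.h>
#include <stdint.h>

static uint64_t subpage_mask(unsigned int page_shift)
{
	uint64_t page_mask = ~((1ULL << page_shift) - 1);	/* host PAGE_MASK */

	return ~0xfffULL & ~page_mask;
}

int main(void)
{
	/* 4 KiB host pages: nothing below PAGE_SIZE to add back */
	printf("PAGE_SHIFT 12: mask %#llx\n",
	       (unsigned long long)subpage_mask(12));
	/* 64 KiB host pages: bits 12..15, matching the old 0xf000 case */
	printf("PAGE_SHIFT 16: mask %#llx\n",
	       (unsigned long long)subpage_mask(16));
	return 0;
}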