Commit 65b7f56
---
r: 215677
b: refs/heads/master
c: 666e725
h: refs/heads/master
i:
  215675: a3bd622
v: v3
---
Alexander Graf authored and Avi Kivity committed Oct 24, 2010
1 parent ee63698 commit 65b7f56
Showing 19 changed files with 94 additions and 85 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 96bc451a153297bf1f99ef2d633d512ea349ae7a
+refs/heads/master: 666e7252a15b7fc4a116e65deaf6da5e4ce660e3
1 change: 0 additions & 1 deletion trunk/arch/powerpc/include/asm/kvm_host.h
@@ -211,7 +211,6 @@ struct kvm_vcpu_arch {
         u32 cr;
 #endif
 
-        ulong msr;
 #ifdef CONFIG_PPC_BOOK3S
         ulong shadow_msr;
         ulong hflags;
1 change: 1 addition & 0 deletions trunk/arch/powerpc/include/asm/kvm_para.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 
 struct kvm_vcpu_arch_shared {
+        __u64 msr;
 };
 
 #ifdef __KERNEL__
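The msr field added above lives in the page shared between host and guest. As a minimal sketch of why that placement matters: once the hypervisor maps this page into the guest, the guest can read its MSR with a plain load instead of a privileged instruction or a trap. The mapping mechanism and the `shared` pointer below are illustrative assumptions, not part of this commit:

    /* Hypothetical guest-side accessor -- illustrative only.
     * Assumes the hypervisor already mapped the shared page and the
     * guest saved a pointer to it during early boot (not shown). */
    #include <linux/types.h>

    static struct kvm_vcpu_arch_shared *shared;

    static inline __u64 kvm_para_read_msr(void)
    {
            /* A plain load from guest memory: no trap into the host. */
            return shared->msr;
    }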
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kernel/asm-offsets.c
@@ -394,13 +394,13 @@ int main(void)
         DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
         DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
         DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
-        DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
         DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
         DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
         DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
         DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
         DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
         DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
+        DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 
         /* book3s */
 #ifdef CONFIG_PPC_BOOK3S
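Note the change of base struct: VCPU_MSR was an offset into struct kvm_vcpu, while the new VCPU_SHARED_MSR is an offset into struct kvm_vcpu_arch_shared. Low-level code therefore needs two loads (the shared-page pointer via VCPU_SHARED, then the field via VCPU_SHARED_MSR) instead of one. A C analogue of that access pattern, with simplified stand-in types (not the real kernel definitions):

    /* Simplified stand-ins for the kernel structures -- illustrative. */
    struct kvm_vcpu_arch_shared { unsigned long long msr; };
    struct kvm_vcpu_arch { struct kvm_vcpu_arch_shared *shared; };
    struct kvm_vcpu { struct kvm_vcpu_arch arch; };

    static unsigned long long read_guest_msr(struct kvm_vcpu *vcpu)
    {
            /* First load: the shared-page pointer (VCPU_SHARED). */
            struct kvm_vcpu_arch_shared *s = vcpu->arch.shared;
            /* Second load: the field inside that page (VCPU_SHARED_MSR). */
            return s->msr;
    }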
8 changes: 4 additions & 4 deletions trunk/arch/powerpc/kvm/44x_tlb.c
@@ -221,14 +221,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
 
 int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-        unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 
         return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
 
 int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 {
-        unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 
         return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
 }
@@ -354,7 +354,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 
         stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
         stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
-                                                   vcpu->arch.msr & MSR_PR);
+                                                   vcpu->arch.shared->msr & MSR_PR);
         stlbe.tid = !(asid & 0xff);
 
         /* Keep track of the reference so we can properly release it later. */
@@ -423,7 +423,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
 
         /* Does it match current guest AS? */
         /* XXX what about IS != DS? */
-        if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+        if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
                 return 0;
 
         gpa = get_tlb_raddr(tlbe);
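On 440, MSR[IS] and MSR[DS] select the instruction and data address spaces, and the double negation above collapses the mask test to the 0-or-1 AS value the TLB lookup expects. A tiny standalone illustration of the idiom (the bit position is made up, not the real MSR layout):

    #include <stdio.h>

    #define MSR_IS (1UL << 5)   /* bit position illustrative only */

    int main(void)
    {
            unsigned long msr = MSR_IS;
            unsigned int as = !!(msr & MSR_IS);  /* 0 or 1, never the raw mask */
            printf("as = %u\n", as);             /* prints "as = 1" */
            return 0;
    }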
65 changes: 35 additions & 30 deletions trunk/arch/powerpc/kvm/book3s.c
@@ -115,31 +115,31 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
-        vcpu->arch.shadow_msr = vcpu->arch.msr;
+        ulong smsr = vcpu->arch.shared->msr;
+
         /* Guest MSR values */
-        vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
-                                 MSR_BE | MSR_DE;
+        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
         /* Process MSR values */
-        vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
-                                 MSR_EE;
+        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
         /* External providers the guest reserved */
-        vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
         /* 64-bit Process MSR values */
 #ifdef CONFIG_PPC_BOOK3S_64
-        vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+        smsr |= MSR_ISF | MSR_HV;
 #endif
+        vcpu->arch.shadow_msr = smsr;
 }
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
-        ulong old_msr = vcpu->arch.msr;
+        ulong old_msr = vcpu->arch.shared->msr;
 
 #ifdef EXIT_DEBUG
         printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
 
         msr &= to_book3s(vcpu)->msr_mask;
-        vcpu->arch.msr = msr;
+        vcpu->arch.shared->msr = msr;
         kvmppc_recalc_shadow_msr(vcpu);
 
         if (msr & (MSR_WE|MSR_POW)) {
@@ -149,21 +149,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
                 }
         }
 
-        if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                 kvmppc_mmu_flush_segments(vcpu);
                 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
         }
 
         /* Preload FPU if it's enabled */
-        if (vcpu->arch.msr & MSR_FP)
+        if (vcpu->arch.shared->msr & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 }
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
         vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-        vcpu->arch.srr1 = vcpu->arch.msr | flags;
+        vcpu->arch.srr1 = vcpu->arch.shared->msr | flags;
         kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
         vcpu->arch.mmu.reset_msr(vcpu);
 }
@@ -254,11 +254,11 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 
         switch (priority) {
         case BOOK3S_IRQPRIO_DECREMENTER:
-                deliver = vcpu->arch.msr & MSR_EE;
+                deliver = vcpu->arch.shared->msr & MSR_EE;
                 vec = BOOK3S_INTERRUPT_DECREMENTER;
                 break;
         case BOOK3S_IRQPRIO_EXTERNAL:
-                deliver = vcpu->arch.msr & MSR_EE;
+                deliver = vcpu->arch.shared->msr & MSR_EE;
                 vec = BOOK3S_INTERRUPT_EXTERNAL;
                 break;
         case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -437,7 +437,7 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
                         struct kvmppc_pte *pte)
 {
-        int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
+        int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
         int r;
 
         if (relocated) {
@@ -545,8 +545,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         int page_found = 0;
         struct kvmppc_pte pte;
         bool is_mmio = false;
-        bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
-        bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
+        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
         u64 vsid;
 
         relocated = data ? dr : ir;
@@ -563,15 +563,15 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 pte.vpage = eaddr >> 12;
         }
 
-        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         case 0:
                 pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                 break;
         case MSR_DR:
         case MSR_IR:
                 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
 
-                if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
+                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                         pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                 else
                         pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -596,14 +596,16 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 /* Page not found in guest PTE entries */
                 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
                 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
-                vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+                vcpu->arch.shared->msr |=
+                        (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EPERM) {
                 /* Storage protection */
                 vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
                 to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
                 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-                vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
+                vcpu->arch.shared->msr |=
+                        (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
                 kvmppc_book3s_queue_irqprio(vcpu, vec);
         } else if (page_found == -EINVAL) {
                 /* Page not found in guest SLB */
@@ -695,9 +697,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
 
         ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
         if (ret == -ENOENT) {
-                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
-                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
-                vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
+                ulong msr = vcpu->arch.shared->msr;
+
+                msr = kvmppc_set_field(msr, 33, 33, 1);
+                msr = kvmppc_set_field(msr, 34, 36, 0);
+                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                 return EMULATE_AGAIN;
         }
@@ -736,7 +740,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
         if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                 return RESUME_GUEST;
 
-        if (!(vcpu->arch.msr & msr)) {
+        if (!(vcpu->arch.shared->msr & msr)) {
                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                 return RESUME_GUEST;
         }
@@ -804,7 +808,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
         if ((exit_nr != 0x900) && (exit_nr != 0x500))
                 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
                         exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
-                        vcpu->arch.msr);
+                        vcpu->arch.shared->msr);
 #endif
         kvm_resched(vcpu);
         switch (exit_nr) {
@@ -836,7 +840,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                         r = RESUME_GUEST;
                 } else {
-                        vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
+                        vcpu->arch.shared->msr |=
+                                to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                         kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                         r = RESUME_GUEST;
@@ -904,7 +909,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 program_interrupt:
                 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;
 
-                if (vcpu->arch.msr & MSR_PR) {
+                if (vcpu->arch.shared->msr & MSR_PR) {
 #ifdef EXIT_DEBUG
                         printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
 #endif
@@ -1052,7 +1057,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
         regs->ctr = kvmppc_get_ctr(vcpu);
         regs->lr = kvmppc_get_lr(vcpu);
         regs->xer = kvmppc_get_xer(vcpu);
-        regs->msr = vcpu->arch.msr;
+        regs->msr = vcpu->arch.shared->msr;
         regs->srr0 = vcpu->arch.srr0;
         regs->srr1 = vcpu->arch.srr1;
         regs->pid = vcpu->arch.pid;
@@ -1353,7 +1358,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         local_irq_enable();
 
         /* Preload FPU if it's enabled */
-        if (vcpu->arch.msr & MSR_FP)
+        if (vcpu->arch.shared->msr & MSR_FP)
                 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
 
         ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
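The kvmppc_recalc_shadow_msr() hunk above is the heart of the Book3S change: bits the guest may control are masked in from its (now shared) MSR, while bits the host requires are forced on before the result is written to the shadow MSR that is actually loaded on hardware. A standalone sketch of the same mask-then-force pattern, with made-up bit values rather than the real PowerPC MSR layout:

    #include <stdio.h>

    /* Illustrative bit assignments -- not the real MSR layout. */
    #define MSR_FE0 0x001UL   /* guest-controllable */
    #define MSR_SE  0x002UL   /* guest-controllable */
    #define MSR_EE  0x010UL   /* forced on for the host */
    #define MSR_PR  0x020UL   /* forced on for the host */

    static unsigned long recalc_shadow_msr(unsigned long guest_msr)
    {
            unsigned long smsr = guest_msr;

            smsr &= MSR_FE0 | MSR_SE;   /* keep only guest-owned bits */
            smsr |= MSR_EE | MSR_PR;    /* force host-mandated bits */
            return smsr;
    }

    int main(void)
    {
            printf("shadow msr = 0x%lx\n", recalc_shadow_msr(0xfffUL));
            return 0;
    }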
12 changes: 6 additions & 6 deletions trunk/arch/powerpc/kvm/book3s_32_mmu.c
@@ -133,7 +133,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
                 else
                         bat = &vcpu_book3s->ibat[i];
 
-                if (vcpu->arch.msr & MSR_PR) {
+                if (vcpu->arch.shared->msr & MSR_PR) {
                         if (!bat->vp)
                                 continue;
                 } else {
@@ -214,8 +214,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
                         pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
                         pp = pteg[i+1] & 3;
 
-                        if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) ||
-                            (sre->Ks && !(vcpu->arch.msr & MSR_PR)))
+                        if ((sre->Kp && (vcpu->arch.shared->msr & MSR_PR)) ||
+                            (sre->Ks && !(vcpu->arch.shared->msr & MSR_PR)))
                                 pp |= 4;
 
                         pte->may_write = false;
@@ -334,7 +334,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
         struct kvmppc_sr *sr;
         u64 gvsid = esid;
 
-        if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                 sr = find_sr(to_book3s(vcpu), ea);
                 if (sr->valid)
                         gvsid = sr->vsid;
@@ -343,7 +343,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
         /* In case we only have one of MSR_IR or MSR_DR set, let's put
            that in the real-mode context (and hope RM doesn't access
            high memory) */
-        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         case 0:
                 *vsid = VSID_REAL | esid;
                 break;
@@ -363,7 +363,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                 BUG();
         }
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 *vsid |= VSID_PR;
 
         return 0;
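In the PTE-translation hunk above, the segment register's key bits decide which key applies: Kp in user mode (MSR[PR] set), Ks in supervisor mode. A set key bumps the PP value by 4, selecting the key-restricted half of the permission table. A compact sketch of that selection, with the segment register reduced to its two key bits (stand-in types, not the kernel's):

    #include <stdbool.h>

    /* Reduced stand-in for the segment register -- illustrative. */
    struct sre { bool Ks, Kp; };

    /* Effective PP index: low two bits come from the PTE; bit 2 is
     * set when the key for the current privilege mode is set. */
    static unsigned int effective_pp(unsigned int pp, const struct sre *sre,
                                     bool user_mode /* MSR_PR set */)
    {
            if ((sre->Kp && user_mode) || (sre->Ks && !user_mode))
                    pp |= 4;
            return pp;
    }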
4 changes: 2 additions & 2 deletions trunk/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -86,7 +86,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
         struct kvmppc_sid_map *map;
         u16 sid_map_mask;
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 gvsid |= VSID_PR;
 
         sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -253,7 +253,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
         u16 sid_map_mask;
         static int backwards_map = 0;
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 gvsid |= VSID_PR;
 
         /* We might get collisions that trap in preceding order, so let's
12 changes: 6 additions & 6 deletions trunk/arch/powerpc/kvm/book3s_64_mmu.c
@@ -180,9 +180,9 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                 goto no_page_found;
         }
 
-        if ((vcpu->arch.msr & MSR_PR) && slbe->Kp)
+        if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
                 key = 4;
-        else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks)
+        else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
                 key = 4;
 
         for (i=0; i<16; i+=2) {
@@ -381,7 +381,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
         for (i = 1; i < vcpu_book3s->slb_nr; i++)
                 vcpu_book3s->slb[i].valid = false;
 
-        if (vcpu->arch.msr & MSR_IR) {
+        if (vcpu->arch.shared->msr & MSR_IR) {
                 kvmppc_mmu_flush_segments(vcpu);
                 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
         }
@@ -446,13 +446,13 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
         struct kvmppc_slb *slb;
         u64 gvsid = esid;
 
-        if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
                 slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea);
                 if (slb)
                         gvsid = slb->vsid;
         }
 
-        switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
         case 0:
                 *vsid = VSID_REAL | esid;
                 break;
@@ -473,7 +473,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                 break;
         }
 
-        if (vcpu->arch.msr & MSR_PR)
+        if (vcpu->arch.shared->msr & MSR_PR)
                 *vsid |= VSID_PR;
 
         return 0;
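The esid_to_vsid switch above (mirrored in the 32-bit MMU) picks which VSID space a segment lookup uses: the guest-visible VSID when both translations are on, or one of the VSID_REAL* spaces when MSR[IR]/MSR[DR] are partly off. A condensed sketch of the four-way selection, with the MSR_* and VSID_* constants as placeholder values rather than the kernel's:

    /* Placeholder flag and VSID-space values -- illustrative only. */
    #define MSR_IR        0x20UL
    #define MSR_DR        0x10UL
    #define VSID_REAL     0x1000UL
    #define VSID_REAL_IR  0x2000UL
    #define VSID_REAL_DR  0x3000UL

    static unsigned long vsid_space(unsigned long msr)
    {
            switch (msr & (MSR_DR | MSR_IR)) {
            case 0:       return VSID_REAL;     /* both off: real mode */
            case MSR_IR:  return VSID_REAL_IR;  /* only instruction relocation on */
            case MSR_DR:  return VSID_REAL_DR;  /* only data relocation on */
            default:      return 0;             /* both on: use the guest SLB/SR VSID */
            }
    }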