Commit 340369b
---
yaml
---
r: 271508
b: refs/heads/master
c: 0214394
h: refs/heads/master
v: v3
Paul Mackerras authored and Avi Kivity committed Sep 25, 2011
1 parent 34dd1bd commit 340369b
Showing 12 changed files with 121 additions and 213 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 177339d7f7c99a25ecfdb6baeea6a2508fb2349f
+refs/heads/master: 02143947603fe90237a0423d34dd8943de229f78
4 changes: 1 addition & 3 deletions trunk/arch/powerpc/include/asm/kvm_book3s.h
@@ -141,9 +141,7 @@ extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);

-extern void kvmppc_handler_lowmem_trampoline(void);
-extern void kvmppc_handler_trampoline_enter(void);
-extern void kvmppc_rmcall(ulong srr0, ulong srr1);
+extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern void kvmppc_load_up_fpu(void);
extern void kvmppc_load_up_altivec(void);
1 change: 1 addition & 0 deletions trunk/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -75,6 +75,7 @@ struct kvmppc_host_state {
ulong scratch0;
ulong scratch1;
u8 in_guest;
+u8 restore_hid5;

#ifdef CONFIG_KVM_BOOK3S_64_HV
struct kvm_vcpu *kvm_vcpu;
8 changes: 0 additions & 8 deletions trunk/arch/powerpc/include/asm/kvm_host.h
@@ -258,14 +258,6 @@ struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
-ulong host_msr;
-ulong host_r2;
-void *host_retip;
-ulong trampoline_lowmem;
-ulong trampoline_enter;
-ulong highmem_handler;
-ulong rmcall;
-ulong host_paca_phys;
struct kvmppc_slb slb[64];
int slb_max; /* 1 + index of last valid entry in slb[] */
int slb_nr; /* total number of entries in SLB */
7 changes: 1 addition & 6 deletions trunk/arch/powerpc/kernel/asm-offsets.c
@@ -449,19 +449,13 @@ int main(void)
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
-DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
-DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
-DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
-DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
-DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
-DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
@@ -537,6 +531,7 @@ int main(void)
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
+HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);

#ifdef CONFIG_KVM_BOOK3S_64_HV
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
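The HSTATE_FIELD addition mirrors the struct change in kvm_book3s_asm.h: assembly cannot evaluate offsetof(), so asm-offsets.c turns each offset into a build-time constant. A minimal C sketch of that mechanism, assuming the DEFINE() macro as found in include/linux/kbuild.h and an illustrative stand-in struct (not the real kvmppc_host_state):

#include <stddef.h>

/* DEFINE() as in include/linux/kbuild.h: emits a "->SYM value" marker
 * into the compiler's assembly output, which kbuild greps into
 * "#define SYM value" lines in the generated asm-offsets.h. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* Illustrative stand-in for struct kvmppc_host_state. */
struct host_state_sketch {
	unsigned long scratch0;
	unsigned long scratch1;
	unsigned char in_guest;
	unsigned char restore_hid5;	/* the field this commit adds */
};

int main(void)
{
	DEFINE(HSTATE_RESTORE_HID5,
	       offsetof(struct host_state_sketch, restore_hid5));
	return 0;
}

Assembly can then address the field relative to r13, as in the stb r3, HSTATE_RESTORE_HID5(r13) added to kvm_start_lightweight below.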
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kvm/book3s_32_sr.S
@@ -31,7 +31,7 @@
* R1 = host R1
* R2 = host R2
* R3 = shadow vcpu
- * all other volatile GPRS = free
+ * all other volatile GPRS = free except R4, R6
* SVCPU[CR] = guest CR
* SVCPU[XER] = guest XER
* SVCPU[CTR] = guest CTR
2 changes: 1 addition & 1 deletion trunk/arch/powerpc/kvm/book3s_64_slb.S
@@ -53,7 +53,7 @@ slb_exit_skip_ ## num:
* R1 = host R1
* R2 = host R2
* R3 = shadow vcpu
- * all other volatile GPRS = free
+ * all other volatile GPRS = free except R4, R6
* SVCPU[CR] = guest CR
* SVCPU[XER] = guest XER
* SVCPU[CTR] = guest CTR
4 changes: 1 addition & 3 deletions trunk/arch/powerpc/kvm/book3s_exports.c
@@ -23,9 +23,7 @@
#ifdef CONFIG_KVM_BOOK3S_64_HV
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
#else
-EXPORT_SYMBOL_GPL(kvmppc_handler_trampoline_enter);
-EXPORT_SYMBOL_GPL(kvmppc_handler_lowmem_trampoline);
-EXPORT_SYMBOL_GPL(kvmppc_rmcall);
+EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
129 changes: 5 additions & 124 deletions trunk/arch/powerpc/kvm/book3s_interrupts.S
@@ -29,27 +29,11 @@
#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)

-#define GET_SHADOW_VCPU_R13
-
-#define DISABLE_INTERRUPTS \
-mfmsr r0; \
-rldicl r0,r0,48,1; \
-rotldi r0,r0,16; \
-mtmsrd r0,1; \
-
#elif defined(CONFIG_PPC_BOOK3S_32)

#define ULONG_SIZE 4
#define FUNC(name) name

-#define GET_SHADOW_VCPU_R13 \
-lwz r13, (THREAD + THREAD_KVM_SVCPU)(r2)
-
-#define DISABLE_INTERRUPTS \
-mfmsr r0; \
-rlwinm r0,r0,0,17,15; \
-mtmsr r0; \
-
#endif /* CONFIG_PPC_BOOK3S_XX */


@@ -108,44 +92,17 @@ kvm_start_entry:

kvm_start_lightweight:

-GET_SHADOW_VCPU_R13
-PPC_LL r3, VCPU_HIGHMEM_HANDLER(r4)
-PPC_STL r3, HSTATE_VMHANDLER(r13)
-
-PPC_LL r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
-
-DISABLE_INTERRUPTS
-
#ifdef CONFIG_PPC_BOOK3S_64
-/* Some guests may need to have dcbz set to 32 byte length.
- *
- * Usually we ensure that by patching the guest's instructions
- * to trap on dcbz and emulate it in the hypervisor.
- *
- * If we can, we should tell the CPU to use 32 byte dcbz though,
- * because that's a lot faster.
- */
-
PPC_LL r3, VCPU_HFLAGS(r4)
-rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */
-beq no_dcbz32_on
-
-mfspr r3,SPRN_HID5
-ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */
-mtspr SPRN_HID5,r3
-
-no_dcbz32_on:
-
+rldicl r3, r3, 0, 63 /* r3 &= 1 */
+stb r3, HSTATE_RESTORE_HID5(r13)
#endif /* CONFIG_PPC_BOOK3S_64 */

-PPC_LL r6, VCPU_RMCALL(r4)
-mtctr r6
-
-PPC_LL r3, VCPU_TRAMPOLINE_ENTER(r4)
-LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */

/* Jump to segment patching handler and into our guest */
-bctr
+bl FUNC(kvmppc_entry_trampoline)
+nop

/*
* This is the handler in module memory. It gets jumped at from the
@@ -170,21 +127,6 @@ kvmppc_handler_highmem:
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)

-#ifdef CONFIG_PPC_BOOK3S_64
-
-PPC_LL r5, VCPU_HFLAGS(r7)
-rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
-beq no_dcbz32_off
-
-li r4, 0
-mfspr r5,SPRN_HID5
-rldimi r5,r4,6,56
-mtspr SPRN_HID5,r5
-
-no_dcbz32_off:
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
PPC_STL r14, VCPU_GPR(r14)(r7)
PPC_STL r15, VCPU_GPR(r15)(r7)
PPC_STL r16, VCPU_GPR(r16)(r7)
@@ -204,67 +146,6 @@ no_dcbz32_off:
PPC_STL r30, VCPU_GPR(r30)(r7)
PPC_STL r31, VCPU_GPR(r31)(r7)

-/* Restore host msr -> SRR1 */
-PPC_LL r6, VCPU_HOST_MSR(r7)
-
-/*
- * For some interrupts, we need to call the real Linux
- * handler, so it can do work for us. This has to happen
- * as if the interrupt arrived from the kernel though,
- * so let's fake it here where most state is restored.
- *
- * Call Linux for hardware interrupts/decrementer
- * r3 = address of interrupt handler (exit reason)
- */
-
-cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
-beq call_linux_handler
-cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
-beq call_linux_handler
-cmpwi r12, BOOK3S_INTERRUPT_PERFMON
-beq call_linux_handler
-
-/* Back to EE=1 */
-mtmsr r6
-sync
-b kvm_return_point
-
-call_linux_handler:
-
-/*
- * If we land here we need to jump back to the handler we
- * came from.
- *
- * We have a page that we can access from real mode, so let's
- * jump back to that and use it as a trampoline to get back into the
- * interrupt handler!
- *
- * R3 still contains the exit code,
- * R5 VCPU_HOST_RETIP and
- * R6 VCPU_HOST_MSR
- */
-
-/* Restore host IP -> SRR0 */
-PPC_LL r5, VCPU_HOST_RETIP(r7)
-
-/* XXX Better move to a safe function?
- * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
-
-mtlr r12
-
-PPC_LL r4, VCPU_TRAMPOLINE_LOWMEM(r7)
-mtsrr0 r4
-LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-mtsrr1 r3
-
-RFI
-
-.global kvm_return_point
-kvm_return_point:
-
-/* Jump back to lightweight entry if we're supposed to */
-/* go back into the guest */
-
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
mr r5, r12

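The dcbz32 logic removed above is relocated, not dropped: the lightweight entry now only records the decision (the added stb r3, HSTATE_RESTORE_HID5(r13)), and the HID5 writes themselves move into the real-mode switching code (apparently book3s_segment.S, the one file whose diff did not render on this page). A rough C rendering of the entry/exit behaviour as reorganized by this commit, with an illustrative variable standing in for the HID5 SPR:

#include <stdint.h>

#define HID5_DCBZ32	0x80UL	/* "XXX HID5_dcbz32 = 0x80" in the asm */
#define HFLAG_DCBZ32_ON	0x1UL	/* low bit of vcpu->arch.hflags */

static uint64_t hid5_sketch;	/* stand-in for the real HID5 SPR */

/* Guest entry: enable 32-byte dcbz if this guest was set up for it,
 * and remember the decision for the exit path instead of re-testing
 * the hflags there. */
static void dcbz32_entry(uint64_t hflags, uint8_t *restore_hid5)
{
	if (hflags & HFLAG_DCBZ32_ON)
		hid5_sketch |= HID5_DCBZ32;	/* the mfspr/ori/mtspr sequence */
	*restore_hid5 = hflags & HFLAG_DCBZ32_ON;
}

/* Guest exit: put HID5 back the way the host expects it. */
static void dcbz32_exit(uint8_t restore_hid5)
{
	if (restore_hid5)
		hid5_sketch &= ~HID5_DCBZ32;	/* the rldimi in the removed code */
}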
12 changes: 0 additions & 12 deletions trunk/arch/powerpc/kvm/book3s_pr.c
@@ -875,8 +875,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (!p)
goto uninit_vcpu;

-vcpu->arch.host_retip = kvm_return_point;
-vcpu->arch.host_msr = mfmsr();
#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */
vcpu->arch.pvr = 0x3C0301;
@@ -887,16 +885,6 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
vcpu->arch.slb_nr = 64;

-/* remember where some real-mode handlers are */
-vcpu->arch.trampoline_lowmem = __pa(kvmppc_handler_lowmem_trampoline);
-vcpu->arch.trampoline_enter = __pa(kvmppc_handler_trampoline_enter);
-vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
-#ifdef CONFIG_PPC_BOOK3S_64
-vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
-#else
-vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
-#endif
-
vcpu->arch.shadow_msr = MSR_USER64;

err = kvmppc_mmu_init(vcpu);
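One subtlety in the removed lines: on 64-bit the old code read *(ulong *)kvmppc_rmcall rather than casting the function pointer, because under the ppc64 ELFv1 ABI a function symbol refers to a function descriptor whose first word is the real code address. A self-contained sketch of that distinction (struct and function names here are illustrative):

/* ppc64 ELFv1 function descriptor: function symbols point at one of
 * these, not at the function's first instruction. */
struct func_desc_sketch {
	unsigned long entry;	/* address of the actual code */
	unsigned long toc;	/* TOC (r2) value the function expects */
	unsigned long env;	/* environment pointer, unused by C */
};

/* What the removed #ifdef CONFIG_PPC_BOOK3S_64 boiled down to: */
static unsigned long code_address(void *func, int is_elfv1)
{
	if (is_elfv1)			/* *(ulong *)kvmppc_rmcall */
		return ((struct func_desc_sketch *)func)->entry;
	return (unsigned long)func;	/* (ulong)kvmppc_rmcall */
}

With the rmcall indirection gone, none of these cached real-mode addresses are needed; the entry path now reaches real mode through a direct bl FUNC(kvmppc_entry_trampoline).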
51 changes: 17 additions & 34 deletions trunk/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,9 +36,8 @@

#if defined(CONFIG_PPC_BOOK3S_64)

-#define LOAD_SHADOW_VCPU(reg) GET_PACA(reg)
-#define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR)
#define FUNC(name) GLUE(.,name)
+#define MTMSR_EERI(reg) mtmsrd (reg),1

.globl kvmppc_skip_interrupt
kvmppc_skip_interrupt:
@@ -68,8 +67,8 @@ kvmppc_skip_Hinterrupt:

#elif defined(CONFIG_PPC_BOOK3S_32)

-#define MSR_NOIRQ MSR_KERNEL
#define FUNC(name) name
+#define MTMSR_EERI(reg) mtmsr (reg)

.macro INTERRUPT_TRAMPOLINE intno

@@ -170,40 +169,24 @@ kvmppc_handler_skip_ins:
#endif

/*
- * This trampoline brings us back to a real mode handler
- *
- * Input Registers:
- *
- * R5 = SRR0
- * R6 = SRR1
- * LR = real-mode IP
+ * Call kvmppc_handler_trampoline_enter in real mode
*
+ * On entry, r4 contains the guest shadow MSR
*/
-.global kvmppc_handler_lowmem_trampoline
-kvmppc_handler_lowmem_trampoline:
-
-mtsrr0 r5
+_GLOBAL(kvmppc_entry_trampoline)
+mfmsr r5
+LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
+toreal(r7)
+
+li r9, MSR_RI
+ori r9, r9, MSR_EE
+andc r9, r5, r9 /* Clear EE and RI in MSR value */
+li r6, MSR_IR | MSR_DR
+ori r6, r6, MSR_EE
+andc r6, r5, r6 /* Clear EE, DR and IR in MSR value */
+MTMSR_EERI(r9) /* Clear EE and RI in MSR */
+mtsrr0 r7 /* before we set srr0/1 */
mtsrr1 r6
-blr
-kvmppc_handler_lowmem_trampoline_end:

-/*
- * Call a function in real mode
- *
- * Input Registers:
- *
- * R3 = function
- * R4 = MSR
- * R5 = scratch register
- *
- */
-_GLOBAL(kvmppc_rmcall)
-LOAD_REG_IMMEDIATE(r5, MSR_NOIRQ)
-mtmsr r5 /* Disable relocation and interrupts, so mtsrr
- doesn't get interrupted */
-sync
-mtsrr0 r3
-mtsrr1 r4
RFI

#if defined(CONFIG_PPC_BOOK3S_32)
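The new kvmppc_entry_trampoline derives two MSR images from the running MSR: r9 with EE and RI cleared, applied immediately via MTMSR_EERI so the subsequent srr0/srr1 writes cannot be lost to an interrupt, and r6 with EE, IR and DR cleared, loaded into SRR1 so the RFI enters kvmppc_handler_trampoline_enter with translation off. A sketch of that bit arithmetic using the architected MSR bit values (the starting MSR below is illustrative):

#include <stdio.h>
#include <stdint.h>

#define MSR_EE	0x8000UL	/* external interrupt enable */
#define MSR_RI	0x0002UL	/* recoverable interrupt */
#define MSR_IR	0x0020UL	/* instruction relocation */
#define MSR_DR	0x0010UL	/* data relocation */

int main(void)
{
	uint64_t msr = 0x8000000000009032UL;	/* illustrative 64-bit kernel MSR */

	uint64_t r9 = msr & ~(MSR_EE | MSR_RI);		 /* li/ori/andc -> r9 */
	uint64_t r6 = msr & ~(MSR_EE | MSR_IR | MSR_DR); /* li/ori/andc -> r6 */

	printf("mtmsr image (EE/RI clear):   %#llx\n", (unsigned long long)r9);
	printf("SRR1 image (EE/IR/DR clear): %#llx\n", (unsigned long long)r6);
	return 0;
}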
112 changes: 92 additions & 20 deletions trunk/arch/powerpc/kvm/book3s_segment.S (diff not loaded in this capture)
