KVM: PPC: Call SLB patching code in interrupt safe manner
Currently we're racy when doing the transition from IR=1 to IR=0, from
the module memory entry code to the real mode SLB switching code.

To work around that I took a look at the RTAS entry code which is faced
with a similar problem and did the same thing:

  A small helper in linear mapped memory that does mtmsr with IR=0 and
  then RFIs into the actual handler.

Thanks to that trick we can safely take page faults in the entry code
and only need to be really careful about what we do from the SLB
switching part onwards.
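
In sketch form (this is not the patch text itself; the real helper,
kvmppc_rmcall, is added in book3s_64_rmhandlers.S below), the trick
looks like this:

	mtmsr	r4	/* MSR with IR=0/DR=0: from here on, nothing
			   can take an exception and clobber SRR0/SRR1 */
	mtsrr0	r3	/* where the RFI should land */
	mtsrr1	r4	/* MSR the target code should run with */
	RFI		/* enter the handler with translation off */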

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Alexander Graf authored and Marcelo Tosatti committed Mar 1, 2010
1 parent bc90923 commit 021ec9c
Showing 9 changed files with 34 additions and 21 deletions.
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/kvm_book3s.h
@@ -121,6 +121,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
 
 extern u32 kvmppc_trampoline_lowmem;
 extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
1 change: 0 additions & 1 deletion arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -69,7 +69,6 @@ struct kvmppc_book3s_shadow_vcpu {
 	ulong scratch0;
 	ulong scratch1;
 	ulong vmhandler;
-	ulong rmhandler;
 };
 
 #endif /*__ASSEMBLY__ */
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/kvm_host.h
@@ -167,6 +167,7 @@ struct kvm_vcpu_arch {
 	ulong trampoline_lowmem;
 	ulong trampoline_enter;
 	ulong highmem_handler;
+	ulong rmcall;
 	ulong host_paca_phys;
 	struct kvmppc_mmu mmu;
 #endif
3 changes: 1 addition & 2 deletions arch/powerpc/kernel/asm-offsets.c
@@ -214,8 +214,6 @@ int main(void)
 	DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
 	DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
 					    shadow_vcpu.vmhandler));
-	DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
-					    shadow_vcpu.rmhandler));
 	DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
 					   shadow_vcpu.scratch0));
 	DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
@@ -438,6 +436,7 @@ int main(void)
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
 #else
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
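
For context: asm-offsets.c exists only to turn C struct layouts into
assembler-visible constants; each DEFINE() ends up as a #define in the
generated asm-offsets.h, roughly like this (the numeric value here is
purely illustrative, not the real offset):

	#define VCPU_RMCALL	2392	/* offsetof(struct kvm_vcpu, arch.rmcall) */

With the PACA copy gone, the assembly below reaches the helper through
the new per-vcpu field instead.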
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s.c
@@ -919,6 +919,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
 	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
 	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
 
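
The *(ulong*)kvmppc_rmcall dereference deserves a note: on 64-bit
PowerPC with the ELFv1 ABI, a function symbol points at a function
descriptor whose first doubleword is the actual entry address. A C
sketch of what the cast does (the struct name is hypothetical, for
illustration only):

	/* Shape of a ppc64 ELFv1 function descriptor (illustrative). */
	struct func_desc {
		unsigned long entry;	/* address of the first instruction */
		unsigned long toc;	/* TOC base for the callee */
		unsigned long env;	/* environment pointer, unused by C */
	};

	/* Equivalent to the assignment above: keep the raw entry address
	 * so the asm code can branch to it via mtctr/bctr. */
	vcpu->arch.rmcall = ((struct func_desc *)kvmppc_rmcall)->entry;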
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s_64_exports.c
@@ -22,3 +22,4 @@
 
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
 EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
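
The export is what makes the assignment in book3s.c work from module
context: kvmppc_rmcall itself is linked into the kernel image (see the
rmhandlers.S hunk below), and EXPORT_SYMBOL_GPL lets the modular
book3s code resolve the symbol at load time.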
25 changes: 7 additions & 18 deletions arch/powerpc/kvm/book3s_64_interrupts.S
@@ -95,17 +95,14 @@ kvm_start_entry:
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
 	std	r3, PACA_KVM_VMHANDLER(r13)
 
-	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	std	r3, PACA_KVM_RMHANDLER(r13)
-
 kvm_start_lightweight:
 
 	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
 	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
 	/* Load some guest state in the respective registers */
-	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
-	mtctr	r3			/* CTR = r3 */
+	ld	r5, VCPU_CTR(r4)	/* r5 = vcpu->arch.ctr */
+					/* will be swapped in by rmcall */
 
 	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
 	mtlr	r3			/* LR = r3 */
@@ -131,22 +128,14 @@ kvm_start_lightweight:
 
 no_dcbz32_on:
 
-	/* This sets the Magic value for the trampoline */
-
-	/* XXX this needs to move into a safe function, so we can
-	   be sure we don't get any interrupts */
-
-	li	r11, 1
-	stb	r11, PACA_KVM_IN_GUEST(r13)
-
-	ld	r3, PACA_KVM_RMHANDLER(r13)
-	mtsrr0	r3
+	ld	r6, VCPU_RMCALL(r4)
+	mtctr	r6
 
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
+	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
+	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
 
 	/* Jump to SLB patching handler and into our guest */
-	RFI
+	bctr
 
 /*
  * This is the handler in module memory. It gets jumped at from the
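
The register shuffling here is the subtle part: the guest's LR has
already been loaded with mtlr further up, so CTR is the only branch
register left for reaching the helper. The guest's CTR value therefore
travels in r5 and is only installed inside kvmppc_rmcall, once CTR is
free again. Condensed, with the two halves side by side (no new code,
just the hunks above and below rearranged):

	/* caller (module memory, IR=1) */
	ld	r5, VCPU_CTR(r4)		/* guest CTR parked in r5 */
	ld	r6, VCPU_RMCALL(r4)		/* entry address of the helper */
	mtctr	r6
	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)	/* where the RFI should land */
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	bctr					/* into kvmppc_rmcall */

	/* callee (linear mapped kernel text) */
	mtmsr	r4		/* IR/DR off: SRR0/SRR1 safe from now on */
	mtctr	r5		/* CTR free again: restore guest CTR */
	mtsrr0	r3
	mtsrr1	r4
	RFI			/* SLB patching code, then the guest */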
18 changes: 18 additions & 0 deletions arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -140,6 +140,24 @@ kvmppc_handler_lowmem_trampoline:
 	blr
 kvmppc_handler_lowmem_trampoline_end:
 
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+	mtmsr	r4		/* Disable relocation, so mtsrr
+				   doesn't get interrupted */
+	mtctr	r5
+	mtsrr0	r3
+	mtsrr1	r4
+	RFI
+
 .global kvmppc_trampoline_lowmem
 kvmppc_trampoline_lowmem:
 	.long kvmppc_handler_lowmem_trampoline - _stext
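
Note where this helper lives: book3s_64_rmhandlers.S is linked into
the kernel image proper, whose text is covered by the linear mapping
and therefore still reachable the moment IR goes to 0. Module text
sits in vmalloc space and is not, which is why the "small helper in
linear mapped memory" from the commit message cannot simply be part of
the KVM module.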
4 changes: 4 additions & 0 deletions arch/powerpc/kvm/book3s_64_slb.S
@@ -63,6 +63,10 @@ kvmppc_handler_trampoline_enter:
 	mtsrr0	r9
 	mtsrr1	r10
 
+	/* Activate guest mode, so faults get handled by KVM */
+	li	r11, KVM_GUEST_MODE_GUEST
+	stb	r11, PACA_KVM_IN_GUEST(r13)
+
 	/* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3
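
Moving the KVM_GUEST_MODE_GUEST store down here, where translation is
already off, is the other half of the fix: a page fault taken earlier,
while still running the module entry code, is now handled as an
ordinary host fault instead of being routed into KVM. A hypothetical
sketch of the consumer side (the host exception path; this is not part
of the diff, and the label is made up for illustration):

	lbz	r11, PACA_KVM_IN_GUEST(r13)
	cmpwi	r11, 0			/* 0 means: not in a guest */
	beq	host_handles_the_fault
	/* otherwise divert into the KVM exit handler */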
