Commit

---
r: 185977
b: refs/heads/master
c: 97c4cfb
h: refs/heads/master
i:
  185975: e5a5e88
v: v3
Alexander Graf authored and Marcelo Tosatti committed Mar 1, 2010
1 parent b919322 commit 6f356ee
Showing 3 changed files with 58 additions and 54 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: b480f780f071a068810ccd0e49c1daa210bfbeab
refs/heads/master: 97c4cfbe890a4ad82dde8660008d42b7b05dc488
4 changes: 1 addition & 3 deletions trunk/arch/powerpc/kvm/book3s.c
@@ -539,8 +539,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
if ( r == RESUME_GUEST_NV )
r = RESUME_GUEST;
}

return r;
@@ -645,7 +643,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
er = kvmppc_emulate_instruction(run, vcpu);
switch (er) {
case EMULATE_DONE:
r = RESUME_GUEST;
r = RESUME_GUEST_NV;
break;
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
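A note on the book3s.c hunks above: the EMULATE_DONE case in kvmppc_handle_exit now yields RESUME_GUEST_NV, and kvmppc_handle_pagefault no longer folds RESUME_GUEST_NV back into RESUME_GUEST, so the "reload non-volatile registers" request survives all the way to the exit loop. The standalone sketch below illustrates the RESUME_* flag convention this relies on; the flag values mirror the usual arch/powerpc KVM definitions, but treat the exact numbers and the helper function as illustrative assumptions rather than code from this commit.

#include <stdio.h>

/* Sketch of the RESUME_* convention; values assumed, following the usual
 * arch/powerpc KVM header definitions. */
#define RESUME_FLAG_NV   (1 << 0)  /* non-volatile GPRs must be reloaded */
#define RESUME_FLAG_HOST (1 << 1)  /* exit all the way to the host */

#define RESUME_GUEST     0
#define RESUME_GUEST_NV  RESUME_FLAG_NV
#define RESUME_HOST      RESUME_FLAG_HOST
#define RESUME_HOST_NV   (RESUME_FLAG_HOST | RESUME_FLAG_NV)

/* Illustrative helper: emulation may have updated guest GPRs in the vcpu
 * struct, including non-volatile ones, so the lightweight re-entry path
 * (which skips reloading r14-r31) must not be taken afterwards. */
static int after_emulation(void)
{
    return RESUME_GUEST_NV;   /* what EMULATE_DONE now maps to */
}

int main(void)
{
    int r = after_emulation();

    printf("re-enter guest: %s, reload non-volatile GPRs: %s\n",
           (r & RESUME_FLAG_HOST) ? "no" : "yes",
           (r & RESUME_FLAG_NV) ? "yes" : "no");
    return 0;
}

With the NV bit preserved, the exit path can distinguish "just re-enter the guest" from "re-enter, but reload r14-r31 from the vcpu first".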
106 changes: 56 additions & 50 deletions trunk/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -40,6 +40,26 @@
mtmsrd r0,1
.endm

#define VCPU_LOAD_NVGPRS(vcpu) \
ld r14, VCPU_GPR(r14)(vcpu); \
ld r15, VCPU_GPR(r15)(vcpu); \
ld r16, VCPU_GPR(r16)(vcpu); \
ld r17, VCPU_GPR(r17)(vcpu); \
ld r18, VCPU_GPR(r18)(vcpu); \
ld r19, VCPU_GPR(r19)(vcpu); \
ld r20, VCPU_GPR(r20)(vcpu); \
ld r21, VCPU_GPR(r21)(vcpu); \
ld r22, VCPU_GPR(r22)(vcpu); \
ld r23, VCPU_GPR(r23)(vcpu); \
ld r24, VCPU_GPR(r24)(vcpu); \
ld r25, VCPU_GPR(r25)(vcpu); \
ld r26, VCPU_GPR(r26)(vcpu); \
ld r27, VCPU_GPR(r27)(vcpu); \
ld r28, VCPU_GPR(r28)(vcpu); \
ld r29, VCPU_GPR(r29)(vcpu); \
ld r30, VCPU_GPR(r30)(vcpu); \
ld r31, VCPU_GPR(r31)(vcpu); \

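A note on VCPU_LOAD_NVGPRS above: each "ld rN, VCPU_GPR(rN)(vcpu)" is a plain base-plus-offset load, where VCPU_GPR(rN) expands to the byte offset of that register's save slot inside the vcpu structure (in the kernel those offsets come from the asm-offsets machinery, not from hand-written constants). Only r14-r31 appear because those are the non-volatile, callee-saved GPRs of the 64-bit PowerPC ELF ABI; the volatile registers do not survive the call into C code anyway. The standalone sketch below mimics the offset calculation against a mock vcpu layout; the struct and macro names are made up for illustration.

#include <stddef.h>
#include <stdio.h>

/* Mock layout standing in for struct kvm_vcpu; illustration only. */
struct mock_vcpu_arch { unsigned long gpr[32]; };
struct mock_vcpu      { struct mock_vcpu_arch arch; };

#define MOCK_VCPU_GPRS   offsetof(struct mock_vcpu, arch.gpr)
#define MOCK_VCPU_GPR(n) (MOCK_VCPU_GPRS + (n) * sizeof(unsigned long))

int main(void)
{
    /* VCPU_LOAD_NVGPRS walks exactly this r14..r31 range: each ld is a
     * base+offset load of one saved guest register from the vcpu. */
    for (int n = 14; n <= 31; n++)
        printf("gpr[%d] lives at vcpu offset %zu\n",
               n, MOCK_VCPU_GPR(n));
    return 0;
}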
/*****************************************************************************
* *
* Guest entry / exit code that is in kernel module memory (highmem) *
@@ -67,12 +87,16 @@ kvm_start_entry:
SAVE_NVGPRS(r1)

/* Save LR */
mflr r14
std r14, _LINK(r1)
std r0, _LINK(r1)

/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)

/* XXX optimize non-volatile loading away */
kvm_start_lightweight:

ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */

DISABLE_INTERRUPTS

/* Save R1/R2 in the PACA */
@@ -81,29 +105,6 @@ kvm_start_lightweight:
ld r3, VCPU_HIGHMEM_HANDLER(r4)
std r3, PACASAVEDMSR(r13)

/* Load non-volatile guest state from the vcpu */
ld r14, VCPU_GPR(r14)(r4)
ld r15, VCPU_GPR(r15)(r4)
ld r16, VCPU_GPR(r16)(r4)
ld r17, VCPU_GPR(r17)(r4)
ld r18, VCPU_GPR(r18)(r4)
ld r19, VCPU_GPR(r19)(r4)
ld r20, VCPU_GPR(r20)(r4)
ld r21, VCPU_GPR(r21)(r4)
ld r22, VCPU_GPR(r22)(r4)
ld r23, VCPU_GPR(r23)(r4)
ld r24, VCPU_GPR(r24)(r4)
ld r25, VCPU_GPR(r25)(r4)
ld r26, VCPU_GPR(r26)(r4)
ld r27, VCPU_GPR(r27)(r4)
ld r28, VCPU_GPR(r28)(r4)
ld r29, VCPU_GPR(r29)(r4)
ld r30, VCPU_GPR(r30)(r4)
ld r31, VCPU_GPR(r31)(r4)

ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */

ld r3, VCPU_TRAMPOLINE_ENTER(r4)
mtsrr0 r3

@@ -247,7 +248,6 @@ kvmppc_handler_highmem:

no_dcbz32_off:

/* XXX maybe skip on lightweight? */
std r14, VCPU_GPR(r14)(r12)
std r15, VCPU_GPR(r15)(r12)
std r16, VCPU_GPR(r16)(r12)
@@ -267,9 +267,6 @@ no_dcbz32_off:
std r30, VCPU_GPR(r30)(r12)
std r31, VCPU_GPR(r31)(r12)

/* Restore non-volatile host registers (r14 - r31) */
REST_NVGPRS(r1)

/* Save guest PC (R10) */
std r10, VCPU_PC(r12)

@@ -351,42 +348,51 @@ kvm_return_point:

/* Jump back to lightweight entry if we're supposed to */
/* go back into the guest */

/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
mr r5, r3

/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
bl KVMPPC_HANDLE_EXIT

#if 0 /* XXX get lightweight exits back */
/* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
bne kvm_exit_heavyweight
beq kvm_loop_lightweight

/* put VCPU and KVM_RUN back into place and roll again! */
REST_2GPRS(3, r1)
b kvm_start_lightweight
cmpwi r3, RESUME_GUEST_NV
beq kvm_loop_heavyweight

kvm_exit_heavyweight:
/* Restore non-volatile host registers */
ld r14, _LINK(r1)
mtlr r14
REST_NVGPRS(r1)
kvm_exit_loop:

addi r1, r1, SWITCH_FRAME_SIZE
#else
ld r4, _LINK(r1)
mtlr r4

cmpwi r3, RESUME_GUEST
bne kvm_exit_heavyweight
/* Restore non-volatile host registers (r14 - r31) */
REST_NVGPRS(r1)

addi r1, r1, SWITCH_FRAME_SIZE
blr

kvm_loop_heavyweight:

ld r4, _LINK(r1)
std r4, (16 + SWITCH_FRAME_SIZE)(r1)

/* Load vcpu and cpu_run */
REST_2GPRS(3, r1)

addi r1, r1, SWITCH_FRAME_SIZE
/* Load non-volatile guest state from the vcpu */
VCPU_LOAD_NVGPRS(r4)

b kvm_start_entry
/* Jump back into the beginning of this function */
b kvm_start_lightweight

kvm_exit_heavyweight:
kvm_loop_lightweight:

addi r1, r1, SWITCH_FRAME_SIZE
#endif
/* We'll need the vcpu pointer */
REST_GPR(4, r1)

/* Jump back into the beginning of this function */
b kvm_start_lightweight

blr
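
A note on the reworked exit path above: kvm_return_point now branches on the value kvmppc_handle_exit leaves in r3. RESUME_GUEST takes kvm_loop_lightweight, which re-enters the guest without touching r14-r31; RESUME_GUEST_NV takes kvm_loop_heavyweight, which reloads the guest's non-volatile GPRs through VCPU_LOAD_NVGPRS before re-entering; anything else falls through to kvm_exit_loop, which restores the host's non-volatile registers and returns. The C sketch below paraphrases that dispatch; the flag values are assumed as in the earlier sketch, and the function is a stand-in for the assembly labels, not a kernel API.

#include <stdio.h>

/* Flag values assumed, as in the earlier RESUME_* sketch. */
#define RESUME_FLAG_NV   (1 << 0)
#define RESUME_FLAG_HOST (1 << 1)
#define RESUME_GUEST     0
#define RESUME_GUEST_NV  RESUME_FLAG_NV
#define RESUME_HOST      RESUME_FLAG_HOST

/* C-level paraphrase of the kvm_return_point dispatch; the comments name
 * the assembly labels each branch corresponds to. */
static void dispatch(int r)
{
    if (r == RESUME_GUEST) {
        /* kvm_loop_lightweight: re-enter, non-volatile GPRs left alone */
        printf("%d -> lightweight re-entry\n", r);
    } else if (r == RESUME_GUEST_NV) {
        /* kvm_loop_heavyweight: reload r14-r31 from the vcpu first */
        printf("%d -> heavyweight re-entry (reload NV GPRs)\n", r);
    } else {
        /* kvm_exit_loop: restore host NV GPRs, then return to the caller */
        printf("%d -> exit to host\n", r);
    }
}

int main(void)
{
    dispatch(RESUME_GUEST);
    dispatch(RESUME_GUEST_NV);
    dispatch(RESUME_HOST);
    return 0;
}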
