Commit c8dcb58

---
r: 343578
b: refs/heads/master
c: b4072df
h: refs/heads/master
v: v3
Paul Mackerras authored and Alexander Graf committed Dec 6, 2012
1 parent 6b3a6a1 commit c8dcb58
Showing 6 changed files with 214 additions and 29 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 1b400ba0cd24a5994d792c7cfa0ee24cac266d3c
refs/heads/master: b4072df4076c4f33ac9f518052c318c979bca533
10 changes: 10 additions & 0 deletions trunk/arch/powerpc/include/asm/mmu-hash64.h
@@ -121,6 +121,16 @@ extern char initial_stab[];
#define PP_RXRX 3 /* Supervisor read, User read */
#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT 12

#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */

#ifndef __ASSEMBLY__

struct hash_pte {
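
The new TLBIEL_* constants describe how the RB operand of the tlbiel instruction is put together on architecture 2.06: an invalidation-selector field plus a set number. A minimal sketch, using only the constants above, of how a whole-TLB flush for the current LPID is composed; sweep_tlb_sets() is an illustrative name, and the real user is the flush_tlb_power7() routine added in book3s_hv_ras.c below:

/* Illustration only: sweep every TLB set, invalidating all entries
 * belonging to the current LPID. Mirrors flush_tlb_power7() below. */
static void sweep_tlb_sets(void)
{
    unsigned long set, rb;

    for (set = 0; set < POWER7_TLB_SETS; ++set) {
        rb = TLBIEL_INVAL_SET_LPID |
             ((set << TLBIEL_INVAL_SET_SHIFT) & TLBIEL_INVAL_SET_MASK);
        asm volatile("tlbiel %0" : : "r" (rb));
    }
}
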
1 change: 1 addition & 0 deletions trunk/arch/powerpc/kvm/Makefile
@@ -73,6 +73,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_64_vio_hv.o \
book3s_hv_ras.o \
book3s_hv_builtin.o

kvm-book3s_64-module-objs := \
11 changes: 11 additions & 0 deletions trunk/arch/powerpc/kvm/book3s_hv.c
@@ -545,6 +545,17 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_PERFMON:
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK:
/*
* Deliver a machine check interrupt to the guest.
* We have to do this, even if the host has handled the
* machine check, because machine checks use SRR0/1 and
* the interrupt might have trashed guest state in them.
*/
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_MACHINE_CHECK);
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_PROGRAM:
{
ulong flags;
144 changes: 144 additions & 0 deletions trunk/arch/powerpc/kvm/book3s_hv_ras.c
@@ -0,0 +1,144 @@
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>

/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR (1ul << (63-42))
#define SRR1_MC_IFETCH_SH (63-45)
#define SRR1_MC_IFETCH_MASK 0x7
#define SRR1_MC_IFETCH_SLBPAR 2 /* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI 3 /* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI 4 /* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI 5 /* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 */
#define DSISR_MC_DERAT_MULTI 0x800 /* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI 0x400 /* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY 0x100 /* SLB parity error */
#define DSISR_MC_SLB_MULTI 0x080 /* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI 0x040 /* SLB parity + multi-hit */

/* POWER7 SLB flush and reload */
static void reload_slb(struct kvm_vcpu *vcpu)
{
struct slb_shadow *slb;
unsigned long i, n;

/* First clear out SLB */
asm volatile("slbmte %0,%0; slbia" : : "r" (0));

/* Do they have an SLB shadow buffer registered? */
slb = vcpu->arch.slb_shadow.pinned_addr;
if (!slb)
return;

/* Sanity check */
n = min_t(u32, slb->persistent, SLB_MIN_SIZE);
if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
return;

/* Load up the SLB from that */
for (i = 0; i < n; ++i) {
unsigned long rb = slb->save_area[i].esid;
unsigned long rs = slb->save_area[i].vsid;

rb = (rb & ~0xFFFul) | i; /* insert entry number */
asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
}
}

/* POWER7 TLB flush */
static void flush_tlb_power7(struct kvm_vcpu *vcpu)
{
unsigned long i, rb;

rb = TLBIEL_INVAL_SET_LPID;
for (i = 0; i < POWER7_TLB_SETS; ++i) {
asm volatile("tlbiel %0" : : "r" (rb));
rb += 1 << TLBIEL_INVAL_SET_SHIFT;
}
}

/*
* On POWER7, see if we can handle a machine check that occurred inside
* the guest in real mode, without switching to the host partition.
*
* Returns: 0 => exit guest, 1 => deliver machine check to guest
*/
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
struct opal_machine_check_event *opal_evt;
long handled = 1;

if (srr1 & SRR1_MC_LDSTERR) {
/* error on load/store */
unsigned long dsisr = vcpu->arch.shregs.dsisr;

if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
/* flush and reload SLB; flushes D-ERAT too */
reload_slb(vcpu);
dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
}
if (dsisr & DSISR_MC_TLB_MULTI) {
flush_tlb_power7(vcpu);
dsisr &= ~DSISR_MC_TLB_MULTI;
}
/* Any other errors we don't understand? */
if (dsisr & 0xffffffffUL)
handled = 0;
}

switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
case 0:
break;
case SRR1_MC_IFETCH_SLBPAR:
case SRR1_MC_IFETCH_SLBMULTI:
case SRR1_MC_IFETCH_SLBPARMULTI:
reload_slb(vcpu);
break;
case SRR1_MC_IFETCH_TLBMULTI:
flush_tlb_power7(vcpu);
break;
default:
handled = 0;
}

/*
* See if OPAL has already handled the condition.
* We assume that if the condition is recovered then OPAL
* will have generated an error log event that we will pick
* up and log later.
*/
opal_evt = local_paca->opal_mc_evt;
if (opal_evt->version == OpalMCE_V1 &&
(opal_evt->severity == OpalMCE_SEV_NO_ERROR ||
opal_evt->disposition == OpalMCE_DISPOSITION_RECOVERED))
handled = 1;

if (handled)
opal_evt->in_use = 0;

return handled;
}

long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
if (cpu_has_feature(CPU_FTR_ARCH_206))
return kvmppc_realmode_mc_power7(vcpu);

return 0;
}
75 changes: 47 additions & 28 deletions trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -27,6 +27,7 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

/*****************************************************************************
* *
@@ -678,8 +679,7 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

nohpte_cont:
hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save DEC */
mfspr r5,SPRN_DEC
mftb r6
@@ -700,6 +700,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)

/* See if it is a machine check */
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq machine_check_realmode
mc_cont:

/* Save guest CTRL register, set runlatch to 1 */
6: mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
@@ -1112,38 +1117,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
* For external and machine check interrupts, we need
* to call the Linux handler to process the interrupt.
* We do that by jumping to the interrupt vector address
* which we have in r12. The [h]rfid at the end of the
* We do that by jumping to absolute address 0x500 for
* external interrupts, or the machine_check_fwnmi label
* for machine checks (since firmware might have patched
* the vector area at 0x200). The [h]rfid at the end of the
* handler will return to the book3s_hv_interrupts.S code.
* For other interrupts we do the rfid to get back
* to the book3s_interrupts.S code here.
* to the book3s_hv_interrupts.S code here.
*/
ld r8, HSTATE_VMHANDLER(r13)
ld r7, HSTATE_HOST_MSR(r13)

cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
beq 11f
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

/* RFI into the highmem handler, or branch to interrupt handler */
12: mfmsr r6
mtctr r12
mfmsr r6
li r0, MSR_RI
andc r6, r6, r0
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
beqctr
beqa 0x500 /* external interrupt (PPC970) */
beq cr1, 13f /* machine check */
RFI

11:
BEGIN_FTR_SECTION
b 12b
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_HSRR0, r8
/* On POWER7, we have external interrupts set to use HSRR0/1 */
11: mtspr SPRN_HSRR0, r8
mtspr SPRN_HSRR1, r7
ba 0x500

13: b machine_check_fwnmi

/*
* Check whether an HDSI is an HPTE not found fault or something else.
* If it is an HPTE not found fault that is due to the guest accessing
@@ -1176,7 +1184,7 @@ kvmppc_hdsi:
cmpdi r3, 0 /* retry the instruction */
beq 6f
cmpdi r3, -1 /* handle in kernel mode */
beq nohpte_cont
beq guest_exit_cont
cmpdi r3, -2 /* MMIO emulation; need instr word */
beq 2f

@@ -1190,6 +1198,7 @@ kvmppc_hdsi:
li r10, BOOK3S_INTERRUPT_DATA_STORAGE
li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
rotldi r11, r11, 63
fast_interrupt_c_return:
6: ld r7, VCPU_CTR(r9)
lwz r8, VCPU_XER(r9)
mtctr r7
@@ -1222,7 +1231,7 @@ kvmppc_hdsi:
/* Unset guest mode. */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
b nohpte_cont
b guest_exit_cont

/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
@@ -1248,9 +1257,9 @@ kvmppc_hisi:
ld r11, VCPU_MSR(r9)
li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
cmpdi r3, 0 /* retry the instruction */
beq 6f
beq fast_interrupt_c_return
cmpdi r3, -1 /* handle in kernel mode */
beq nohpte_cont
beq guest_exit_cont

/* Synthesize an ISI for the guest */
mr r11, r3
@@ -1259,12 +1268,7 @@ kvmppc_hisi:
li r10, BOOK3S_INTERRUPT_INST_STORAGE
li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
rotldi r11, r11, 63
6: ld r7, VCPU_CTR(r9)
lwz r8, VCPU_XER(r9)
mtctr r7
mtxer r8
mr r4, r9
b fast_guest_return
b fast_interrupt_c_return

3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
ld r5, KVM_VRMA_SLB_V(r6)
@@ -1280,14 +1284,14 @@
hcall_try_real_mode:
ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
bne hcall_real_cont
bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge hcall_real_cont
bge guest_exit_cont
LOAD_REG_ADDR(r4, hcall_real_table)
lwzx r3,r3,r4
cmpwi r3,0
beq hcall_real_cont
beq guest_exit_cont
add r3,r3,r4
mtctr r3
mr r3,r9 /* get vcpu pointer */
@@ -1308,7 +1312,7 @@ hcall_real_fallback:
li r12,BOOK3S_INTERRUPT_SYSCALL
ld r9, HSTATE_KVM_VCPU(r13)

b hcall_real_cont
b guest_exit_cont

.globl hcall_real_table
hcall_real_table:
@@ -1567,6 +1571,21 @@ kvm_cede_exit:
li r3,H_TOO_HARD
blr

/* Try to handle a machine check in real mode */
machine_check_realmode:
mr r3, r9 /* get vcpu pointer */
bl .kvmppc_realmode_machine_check
nop
cmpdi r3, 0 /* continue exiting from guest? */
ld r9, HSTATE_KVM_VCPU(r13)
li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq mc_cont
/* If not, deliver a machine check. SRR0/1 are already set */
li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
rotldi r11, r11, 63
b fast_interrupt_c_return

secondary_too_late:
ld r5,HSTATE_KVM_VCORE(r13)
HMT_LOW
