diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 959f566a455ca..14ee0dece8538 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -758,7 +758,7 @@ struct kvm_vcpu_arch {
 	u8 prodded;
 	u8 doorbell_request;
 	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
-	u32 last_inst;
+	unsigned long last_inst;
 
 	struct rcuwait wait;
 	struct rcuwait *waitp;
@@ -818,7 +818,7 @@ struct kvm_vcpu_arch {
 	u64 busy_stolen;
 	u64 busy_preempt;
 
-	u32 emul_inst;
+	u64 emul_inst;
 
 	u32 online;
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 6bef23d6d0e35..bc57d058ad5ba 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -28,6 +28,7 @@
 #include <asm/xive.h>
 #include <asm/cpu_has_feature.h>
 #endif
+#include <asm/inst.h>
 
 /*
  * KVMPPC_INST_SW_BREAKPOINT is debug Instruction
@@ -84,7 +85,8 @@ extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
 				int is_default_endian);
 
 extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-				 enum instruction_fetch_type type, u32 *inst);
+				 enum instruction_fetch_type type,
+				 unsigned long *inst);
 
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
 		     bool data);
@@ -126,25 +128,34 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+
+extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
+					    ulong srr1_flags);
 extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
-extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
+				      ulong srr1_flags);
+extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
+					ulong srr1_flags);
+extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
+					  ulong srr1_flags);
+extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
+					  ulong srr1_flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
 				       struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
+extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
+					ulong dear_flags,
 					ulong esr_flags);
 extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
-					   ulong dear_flags,
-					   ulong esr_flags);
+					   ulong srr1_flags,
+					   ulong dar,
+					   ulong dsisr);
 extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
-					   ulong esr_flags);
+					   ulong srr1_flags);
+
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
@@ -315,7 +326,7 @@ extern struct kvmppc_ops *kvmppc_hv_ops;
 extern struct kvmppc_ops *kvmppc_pr_ops;
 
 static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
-			enum instruction_fetch_type type, u32 *inst)
+			enum instruction_fetch_type type, ppc_inst_t *inst)
 {
 	int ret = EMULATE_DONE;
 	u32 fetched_inst;
@@ -326,15 +337,30 @@ static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
 	ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);
 
 	/* Write fetch_failed unswapped if the fetch failed */
-	if (ret == EMULATE_DONE)
-		fetched_inst = kvmppc_need_byteswap(vcpu) ?
-			swab32(vcpu->arch.last_inst) :
-			vcpu->arch.last_inst;
-	else
-		fetched_inst = vcpu->arch.last_inst;
+	if (ret != EMULATE_DONE) {
+		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
+		return ret;
+	}
+
+#ifdef CONFIG_PPC64
+	/* Is this a prefixed instruction? */
+	if ((vcpu->arch.last_inst >> 32) != 0) {
+		u32 prefix = vcpu->arch.last_inst >> 32;
+		u32 suffix = vcpu->arch.last_inst;
+
+		if (kvmppc_need_byteswap(vcpu)) {
+			prefix = swab32(prefix);
+			suffix = swab32(suffix);
+		}
+		*inst = ppc_inst_prefix(prefix, suffix);
+		return EMULATE_DONE;
+	}
+#endif
 
-	*inst = fetched_inst;
-	return ret;
+	fetched_inst = kvmppc_need_byteswap(vcpu) ?
+		swab32(vcpu->arch.last_inst) :
+		vcpu->arch.last_inst;
+	*inst = ppc_inst(fetched_inst);
+	return EMULATE_DONE;
 }
 
 static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
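The convention behind the widened last_inst above is: prefix in the high 32 bits, suffix in the low 32 bits, with each word byte-swapped independently when guest and host endianness differ. A minimal userspace sketch of that unpacking (hypothetical code, not part of the patch; swap32() stands in for the kernel's swab32()):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's swab32(). */
static uint32_t swap32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
	       ((x & 0x00ff0000u) >> 8) | ((x & 0xff000000u) >> 24);
}

/*
 * Split a last_inst-style value the way kvmppc_get_last_inst() does:
 * prefix from the high word, suffix from the low word, each half
 * swapped on its own when a byteswap is needed.
 */
static void split_last_inst(uint64_t last_inst, int need_byteswap,
			    uint32_t *prefix, uint32_t *suffix)
{
	*prefix = (uint32_t)(last_inst >> 32);
	*suffix = (uint32_t)last_inst;
	if (need_byteswap) {
		*prefix = swap32(*prefix);
		*suffix = swap32(*suffix);
	}
}

int main(void)
{
	uint32_t p, s;

	/* Arbitrary image with a nonzero high word, i.e. a prefixed instruction. */
	split_last_inst(0x0600000138a05678ull, 0, &p, &s);
	printf("prefix %08x suffix %08x\n", p, s);
	return 0;
}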
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 8fda87af2fa5e..0774394218929 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -417,6 +417,7 @@
 #define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define   FSCR_INTR_CAUSE	(ASM_CONST(0xFF) << 56)	/* interrupt cause */
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
+#define   HFSCR_PREFIX	__MASK(FSCR_PREFIX_LG)
 #define   HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
 #define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 57f4e7896d671..686d8d9eda3e5 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -188,10 +188,10 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 }
 EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
 
-void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
 
@@ -201,29 +201,29 @@ void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL(kvmppc_core_queue_syscall);
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
 }
 
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
 }
-void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
 	/* might as well deliver this straight away */
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
 }
 
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
@@ -278,18 +278,18 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
 	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
-				    ulong flags)
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
+				    ulong dar, ulong dsisr)
 {
 	kvmppc_set_dar(vcpu, dar);
-	kvmppc_set_dsisr(vcpu, flags);
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
+	kvmppc_set_dsisr(vcpu, dsisr);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);
 
-void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
+void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
-	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
+	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
 }
 EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);
 
@@ -481,20 +481,42 @@ int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
 	return r;
 }
 
+/*
+ * Returns prefixed instructions with the prefix in the high 32 bits
+ * of *inst and suffix in the low 32 bits.  This is the same convention
+ * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
+ * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
+ * half of the value needs byte-swapping if the guest endianness is
+ * different from the host endianness.
+ */
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *inst)
+		enum instruction_fetch_type type, unsigned long *inst)
 {
 	ulong pc = kvmppc_get_pc(vcpu);
 	int r;
+	u32 iw;
 
 	if (type == INST_SC)
 		pc -= 4;
-	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
-	if (r == EMULATE_DONE)
-		return r;
-	else
+	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
+	if (r != EMULATE_DONE)
 		return EMULATE_AGAIN;
+
+	/*
+	 * If [H]SRR1 indicates that the instruction that caused the
+	 * current interrupt is a prefixed instruction, get the suffix.
+	 */
+	if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
+		u32 suffix;
+
+		pc += 4;
+		r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
+		if (r != EMULATE_DONE)
+			return EMULATE_AGAIN;
+		*inst = ((u64)iw << 32) | suffix;
+	} else {
+		*inst = iw;
+	}
+	return r;
 }
 EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
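Note that kvmppc_load_last_inst() above keys the suffix fetch off SRR1_PREFIXED instead of re-decoding the word it just read. For comparison, Power ISA 3.1 marks a prefix with primary opcode 1, so a purely image-based test is possible; a sketch (hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdbool.h>

/*
 * Power ISA 3.1 reserves primary opcode 1 (the top six bits of the
 * first word) for the prefix of a 64-bit prefixed instruction.
 */
static bool word_is_prefix(uint32_t word)
{
	return (word >> 26) == 1;
}

Using the [H]SRR1 bit avoids a second decode and matches what the hardware reported for the faulting instruction.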
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7006bcbc2e375..af1f060533f2a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -415,20 +415,25 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
  * embodied here.)  If the instruction isn't a load or store, then
  * this doesn't return anything useful.
  */
-static int instruction_is_store(unsigned int instr)
+static int instruction_is_store(ppc_inst_t instr)
 {
 	unsigned int mask;
+	unsigned int suffix;
 
 	mask = 0x10000000;
-	if ((instr & 0xfc000000) == 0x7c000000)
+	suffix = ppc_inst_val(instr);
+	if (ppc_inst_prefixed(instr))
+		suffix = ppc_inst_suffix(instr);
+	else if ((suffix & 0xfc000000) == 0x7c000000)
 		mask = 0x100;		/* major opcode 31 */
-	return (instr & mask) != 0;
+	return (suffix & mask) != 0;
 }
 
 int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 			   unsigned long gpa, gva_t ea, int is_store)
 {
-	u32 last_inst;
+	ppc_inst_t last_inst;
+	bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 
 	/*
 	 * Fast path - check if the guest physical address corresponds to a
@@ -443,7 +448,7 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 				     NULL);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		if (!ret) {
-			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4));
 			return RESUME_GUEST;
 		}
 	}
@@ -458,7 +463,16 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
 	 * read from memory is the same that caused the fault in the first
-	 * place.  If the instruction we read is neither an load or a store,
+	 * place.
+	 *
+	 * If the fault is prefixed but the instruction is not, or vice
+	 * versa, try again so that we don't advance the pc the wrong amount.
+	 */
+	if (ppc_inst_prefixed(last_inst) != is_prefixed)
+		return RESUME_GUEST;
+
+	/*
+	 * If the instruction we read is neither a load nor a store,
 	 * then it can't access memory, so we don't need to worry about
 	 * enforcing access permissions.  So, assuming it is a load or
 	 * store, we just check that its direction (load or store) is
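The reworked instruction_is_store() keeps the old opcode masks but applies them to the suffix word when the instruction is prefixed. A standalone restatement of the direction test (hypothetical helper name, plain C):

#include <stdint.h>
#include <stdbool.h>

/*
 * For most load/store major opcodes, bit 0x10000000 of the word
 * distinguishes stores from loads; for major opcode 31 the
 * distinguishing bit is 0x100 instead.
 */
static bool word_is_store(uint32_t suffix)
{
	uint32_t mask = 0x10000000;

	if ((suffix & 0xfc000000) == 0x7c000000)
		mask = 0x100;	/* major opcode 31 */
	return (suffix & mask) != 0;
}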
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 9d3743ca16d53..461307b89c3a0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -954,7 +954,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (dsisr & DSISR_BADACCESS) {
 		/* Reflect to the guest as DSI */
 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}
 
@@ -979,7 +981,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 			 * Bad address in guest page table tree, or other
 			 * unusual error - reflect it to the guest as DSI.
 			 */
-			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
 		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -988,8 +992,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
-						       DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6ba68dd6190bd..c973bf556fb3f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -474,7 +474,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 	for (r = 0; r < vcpu->arch.slb_max; ++r)
 		pr_err("  ESID = %.16llx VSID = %.16llx\n",
 		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
-	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
+	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n",
 	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
 	       vcpu->arch.last_inst);
 }
@@ -1412,7 +1412,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
 
 static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 {
-	u32 last_inst;
+	ppc_inst_t last_inst;
 
 	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
 	    EMULATE_DONE) {
@@ -1423,12 +1423,13 @@ static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
 		return RESUME_GUEST;
 	}
 
-	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
+	if (ppc_inst_val(last_inst) == KVMPPC_INST_SW_BREAKPOINT) {
 		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
 		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
 		return RESUME_HOST;
 	} else {
-		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+		kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 		return RESUME_GUEST;
 	}
 }
@@ -1476,9 +1477,11 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
 	unsigned long arg;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tvcpu;
+	ppc_inst_t pinst;
 
-	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
+	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst) != EMULATE_DONE)
 		return RESUME_GUEST;
+	inst = ppc_inst_val(pinst);
 	if (get_op(inst) != 31)
 		return EMULATE_FAIL;
 	rb = get_rb(inst);
@@ -1630,7 +1633,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * so that it knows that the machine check occurred.
 		 */
 		if (!vcpu->kvm->arch.fwnmi_enabled) {
-			ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
+			ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 			kvmppc_core_queue_machine_check(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
@@ -1659,7 +1663,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		 * as a result of a hypervisor emulation interrupt
 		 * (e40) getting turned into a 700 by BML RTAS.
 		 */
-		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
+		flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+			(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
 		break;
@@ -1740,6 +1745,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
 			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			r = RESUME_GUEST;
 			break;
@@ -1758,6 +1764,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = RESUME_PAGE_FAULT;
 		} else {
 			kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
 				vcpu->arch.fault_dar, err);
 			r = RESUME_GUEST;
 		}
@@ -1785,7 +1792,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
 			kvmppc_core_queue_inst_storage(vcpu,
-				vcpu->arch.fault_dsisr);
+				vcpu->arch.fault_dsisr |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 			break;
 		}
@@ -1802,7 +1810,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		} else if (err == -1) {
 			r = RESUME_PAGE_FAULT;
 		} else {
-			kvmppc_core_queue_inst_storage(vcpu, err);
+			kvmppc_core_queue_inst_storage(vcpu,
+				err | (kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1823,7 +1832,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
 			r = kvmppc_emulate_debug_inst(vcpu);
 		} else {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1864,7 +1874,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 			r = kvmppc_tm_unavailable(vcpu);
 		}
 		if (r == EMULATE_FAIL) {
-			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+			kvmppc_core_queue_program(vcpu, SRR1_PROGILL |
+				(kvmppc_get_msr(vcpu) & SRR1_PREFIXED));
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1994,14 +2005,15 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 	 */
 	if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
	    (vcpu->arch.nested_hfscr & (1UL << cause))) {
+		ppc_inst_t pinst;
 		vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
 
 		/*
 		 * If the fetch failed, return to guest and
 		 * try executing it again.
 		 */
-		r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
-					 &vcpu->arch.emul_inst);
+		r = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+		vcpu->arch.emul_inst = ppc_inst_val(pinst);
 		if (r != EMULATE_DONE)
 			r = RESUME_GUEST;
 		else
@@ -2918,13 +2930,18 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 
 	/*
 	 * Set the default HFSCR for the guest from the host value.
-	 * This value is only used on POWER9.
-	 * On POWER9, we want to virtualize the doorbell facility, so we
+	 * This value is only used on POWER9 and later.
+	 * On POWER9 and later, we want to virtualize the doorbell facility,
+	 * so we don't set the HFSCR_MSGP bit, and that causes those
+	 * instructions to trap and then we emulate them.
 	 */
 	vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
 		HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+
+	/* On POWER10 and later, allow prefixed instructions */
+	if (cpu_has_feature(CPU_FTR_ARCH_31))
+		vcpu->arch.hfscr |= HFSCR_PREFIX;
+
 	if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
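Every injection site changed in book3s_hv.c above ORs the same bit into the flags it queues, so the guest's SRR1 reports whether the faulting instruction was prefixed. The pattern as a self-contained sketch (the SRR1_PREFIXED value is copied from arch/powerpc/include/asm/reg.h; the helper name is invented for illustration):

#include <stdint.h>

#define SRR1_PREFIXED	0x20000000	/* exception caused by a prefixed instruction */

/*
 * Carry the "faulting instruction was prefixed" bit from the MSR/SRR1
 * image at the time of the fault into the flags queued for the guest.
 */
static uint64_t add_prefixed_flag(uint64_t msr, uint64_t srr1_flags)
{
	return srr1_flags | (msr & SRR1_PREFIXED);
}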
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 5a64a1341e6f1..377d0b4a05eeb 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -1560,7 +1560,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
 		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
 			/* unusual error -> reflect to the guest as a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
 
@@ -1570,8 +1572,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* Give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea,
-					DSISR_ISSTORE | DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
 		kvm_ro = true;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index acf80915f406e..800892dab48e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -502,8 +502,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  *                                                                            *
  *****************************************************************************/
 
-.global kvmppc_hv_entry
-kvmppc_hv_entry:
+SYM_CODE_START_LOCAL(kvmppc_hv_entry)
 
 	/* Required state:
 	 *
@@ -940,6 +939,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	ld	r4, VCPU_GPR(R4)(r4)
 	HRFI_TO_GUEST
 	b	.
+SYM_CODE_END(kvmppc_hv_entry)
 
 secondary_too_late:
 	li	r12, 0
@@ -1071,11 +1071,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
-	stw	r3,VCPU_LAST_INST(r9)
+	std	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR
-11:	stw	r3,VCPU_HEIR(r9)
+11:	std	r3,VCPU_HEIR(r9)
 
 	/* these are volatile across C function calls */
 	mfctr	r3
@@ -1676,7 +1676,7 @@ fast_interrupt_c_return:
 	mtmsrd	r3
 
 	/* Store the result */
-	stw	r8, VCPU_LAST_INST(r9)
+	std	r8, VCPU_LAST_INST(r9)
 
 	/* Unset guest mode. */
 	li	r0, KVM_GUEST_MODE_HOST_HV
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a11436720a8ce..bc39c76c9d9fe 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -621,6 +621,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
 int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
 {
 	u32 inst;
+	ppc_inst_t pinst;
 	enum emulation_result emulated = EMULATE_DONE;
 	int ax_rd, ax_ra, ax_rb, ax_rc;
 	short full_d;
@@ -632,7 +633,8 @@ int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
 	int i;
 #endif
 
-	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+	inst = ppc_inst_val(pinst);
 	if (emulated != EMULATE_DONE)
 		return emulated;
 
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 9fc4dd8f66ebc..da0e888e25211 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -759,7 +759,7 @@ static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
 		flags = DSISR_NOHPTE;
 		if (data) {
 			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
-			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
+			kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags);
 		} else {
 			kvmppc_core_queue_inst_storage(vcpu, flags);
 		}
@@ -1044,6 +1044,8 @@ void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 {
 	if (fscr & FSCR_SCV)
 		fscr &= ~FSCR_SCV; /* SCV must not be enabled */
+	/* Prohibit prefixed instructions for now */
+	fscr &= ~FSCR_PREFIX;
 	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
 		/* TAR got dropped, drop it in shadow too */
 		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
@@ -1079,7 +1081,7 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 {
 	enum emulation_result er;
 	ulong flags;
-	u32 last_inst;
+	ppc_inst_t last_inst;
 	int emul, r;
 
 	/*
@@ -1100,9 +1102,9 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
 #ifdef EXIT_DEBUG
 		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
-			kvmppc_get_pc(vcpu), last_inst);
+			kvmppc_get_pc(vcpu), ppc_inst_val(last_inst));
 #endif
-		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
+		if ((ppc_inst_val(last_inst) & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
 			kvmppc_core_queue_program(vcpu, flags);
 			return RESUME_GUEST;
 		}
@@ -1119,7 +1121,7 @@ static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 		break;
 	case EMULATE_FAIL:
 		pr_crit("%s: emulation at %lx failed (%08x)\n",
-			__func__, kvmppc_get_pc(vcpu), last_inst);
+			__func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst));
 		kvmppc_core_queue_program(vcpu, flags);
 		r = RESUME_GUEST;
 		break;
@@ -1236,7 +1238,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 			r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
 			srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		} else {
-			kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
+			kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr);
 			r = RESUME_GUEST;
 		}
 		break;
@@ -1281,7 +1283,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 		break;
 	case BOOK3S_INTERRUPT_SYSCALL:
 	{
-		u32 last_sc;
+		ppc_inst_t last_sc;
 		int emul;
 
 		/* Get last sc for papr */
@@ -1296,7 +1298,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 		}
 
 		if (vcpu->arch.papr_enabled &&
-		    (last_sc == 0x44000022) &&
+		    (ppc_inst_val(last_sc) == 0x44000022) &&
 		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
 			/* SC 1 papr hypercalls */
 			ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -1348,7 +1350,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	{
 		int ext_msr = 0;
 		int emul;
-		u32 last_inst;
+		ppc_inst_t last_inst;
 
 		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
 			/* Do paired single instruction emulation */
@@ -1382,15 +1384,15 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	}
 	case BOOK3S_INTERRUPT_ALIGNMENT:
 	{
-		u32 last_inst;
+		ppc_inst_t last_inst;
 		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 
 		if (emul == EMULATE_DONE) {
 			u32 dsisr;
 			u64 dar;
 
-			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
-			dar = kvmppc_alignment_dar(vcpu, last_inst);
+			dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst));
+			dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst));
 
 			kvmppc_set_dsisr(vcpu, dsisr);
 			kvmppc_set_dar(vcpu, dar);
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 03886ca244983..0a557ffca9fec 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -123,6 +123,7 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC
 kvmppc_handler_skip_ins:
 
 	/* Patch the IP to the next instruction */
+	/* Note that prefixed instructions are disabled in PR KVM for now */
 	mfsrr0	r12
 	addi	r12, r12, 4
 	mtsrr0	r12
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 01adffb246678..6a5be025a8afb 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -283,9 +283,10 @@ void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
 }
 
-void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
+void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
 				    ulong dear_flags, ulong esr_flags)
 {
+	WARN_ON_ONCE(srr1_flags);
 	vcpu->arch.queued_dear = dear_flags;
 	vcpu->arch.queued_esr = esr_flags;
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
@@ -316,14 +317,16 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
 }
 
-void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+	WARN_ON_ONCE(srr1_flags);
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
 #ifdef CONFIG_ALTIVEC
-void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
 {
+	WARN_ON_ONCE(srr1_flags);
 	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
 }
 #endif
@@ -623,7 +626,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
 	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
 }
 
-void kvmppc_watchdog_func(struct timer_list *t)
+static void kvmppc_watchdog_func(struct timer_list *t)
 {
 	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
 	u32 tsr, new_tsr;
@@ -841,7 +844,7 @@ static int emulation_exit(struct kvm_vcpu *vcpu)
 		return RESUME_GUEST;
 
 	case EMULATE_FAIL:
-		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+		printk(KERN_CRIT "%s: emulation at %lx failed (%08lx)\n",
 		       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
 		/* For debugging, encode the failing instruction and
 		 * report it to userspace. */
@@ -1000,7 +1003,7 @@ static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
 	}
 }
 
-/**
+/*
  * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
@@ -1012,6 +1015,7 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	int s;
 	int idx;
 	u32 last_inst = KVM_INST_FETCH_FAILED;
+	ppc_inst_t pinst;
 	enum emulation_result emulated = EMULATE_DONE;
 
 	/* Fix irq state (pairs with kvmppc_fix_ee_before_entry()) */
@@ -1031,12 +1035,15 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 	case BOOKE_INTERRUPT_DATA_STORAGE:
 	case BOOKE_INTERRUPT_DTLB_MISS:
 	case BOOKE_INTERRUPT_HV_PRIV:
-		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+		last_inst = ppc_inst_val(pinst);
 		break;
 	case BOOKE_INTERRUPT_PROGRAM:
 		/* SW breakpoints arrive as illegal instructions on HV */
-		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+			last_inst = ppc_inst_val(pinst);
+		}
 		break;
 	default:
 		break;
@@ -1225,7 +1232,7 @@ int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 #endif
 
 	case BOOKE_INTERRUPT_DATA_STORAGE:
-		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
+		kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear,
 					       vcpu->arch.fault_esr);
 		kvmppc_account_exit(vcpu, DSI_EXITS);
 		r = RESUME_GUEST;
@@ -1946,7 +1953,8 @@ static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
 	dbg_reg->dbcr0 |= DBCR0_IDM;
 	return 0;
 }
-void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
+
+static void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap,
+				  bool set)
 {
 	/* XXX: Add similar MSR protection for BookE-PR */
 #ifdef CONFIG_KVM_BOOKE_HV
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index be9da96d9f060..9c5b8e76014fc 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -109,4 +109,7 @@ static inline void kvmppc_clear_dbsr(void)
 {
 	mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
 }
+
+int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr);
+
 #endif /* __KVM_BOOKE_H__ */
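BookE cannot generate prefixed instructions, so the srr1_flags argument added above must always be zero on those paths; the WARN_ON_ONCE() calls make that contract visible. The shape of the guard as a standalone sketch, with assert() standing in for WARN_ON_ONCE() (which warns and continues rather than aborting):

#include <assert.h>
#include <stdint.h>

/*
 * BookE analogue of the guards above: callers must not pass any SRR1
 * flag bits, because BookE hardware never sets them.
 */
static void booke_queue_irqprio(uint64_t srr1_flags, unsigned int prio)
{
	assert(srr1_flags == 0);
	/* ... set bit 'prio' in the vcpu's pending-interrupt bitmap ... */
	(void)prio;
}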
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index b5fe6fb53c662..8b4a402217ba4 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -139,7 +139,7 @@ END_BTB_FLUSH_SECTION
 	 * kvmppc_get_last_inst().
 	 */
 	li	r9, KVM_INST_FETCH_FAILED
-	stw	r9, VCPU_LAST_INST(r4)
+	PPC_STL	r9, VCPU_LAST_INST(r4)
 	.endif
 
 	.if	\flags & NEED_ESR
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 05668e9641406..ccb8f16ffe412 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -623,7 +623,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 
 #ifdef CONFIG_KVM_BOOKE_HV
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *instr)
+		enum instruction_fetch_type type, unsigned long *instr)
 {
 	gva_t geaddr;
 	hpa_t addr;
@@ -713,7 +713,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
 }
 #else
 int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
-		enum instruction_fetch_type type, u32 *instr)
+		enum instruction_fetch_type type, unsigned long *instr)
 {
 	return EMULATE_AGAIN;
 }
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index a309138927ff0..d58df71ace584 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -168,7 +168,7 @@ static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
 	kvmppc_booke_vcpu_put(vcpu);
 }
 
-int kvmppc_e500mc_check_processor_compat(void)
+static int kvmppc_e500mc_check_processor_compat(void)
 {
 	int r;
 
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index ee1147c98cd8f..355d5206e8aa4 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -194,6 +194,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
 {
 	u32 inst;
+	ppc_inst_t pinst;
 	int rs, rt, sprn;
 	enum emulation_result emulated;
 	int advance = 1;
@@ -201,7 +202,8 @@ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
 	/* this default type might be overwritten by subcategories */
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
-	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
+	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
+	inst = ppc_inst_val(pinst);
 	if (emulated != EMULATE_DONE)
 		return emulated;
 
@@ -299,6 +301,10 @@ int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
 	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
 
 	/* Advance past emulated instruction. */
+	/*
+	 * If this ever handles prefixed instructions, the 4
+	 * will need to become ppc_inst_len(pinst) instead.
+	 */
 	if (advance)
 		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
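kvmppc_emulate_instruction() above still advances the PC by a constant 4, as its new comment notes, while kvmppc_emulate_loadstore() below moves to ppc_inst_len(). A simplified model of that length computation (the ppc_inst_t layout is assumed from the 64-bit asm/inst.h and may differ in detail):

#include <stdint.h>
#include <stdbool.h>

/* Simplified model of the 64-bit ppc_inst_t. */
typedef struct {
	uint32_t val;		/* the word, or the prefix of a prefixed inst */
	uint32_t suffix;	/* the suffix of a prefixed inst */
} inst_t;

/* Prefixed instructions carry primary opcode 1 in the first word. */
static bool inst_prefixed(inst_t x)
{
	return (x.val >> 26) == 1;
}

/* The PC must advance 8 bytes past a prefixed instruction, else 4. */
static unsigned int inst_len(inst_t x)
{
	return inst_prefixed(x) ? 8 : 4;
}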
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index cfc9114b87d0d..059c08ae0340c 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -28,7 +28,7 @@
 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-		kvmppc_core_queue_fpunavail(vcpu);
+		kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-		kvmppc_core_queue_vsx_unavail(vcpu);
+		kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-		kvmppc_core_queue_vec_unavail(vcpu);
+		kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 		return true;
 	}
 
@@ -71,7 +71,7 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
  */
 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 {
-	u32 inst;
+	ppc_inst_t inst;
 	enum emulation_result emulated = EMULATE_FAIL;
 	struct instruction_op op;
 
@@ -93,7 +93,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 	emulated = EMULATE_FAIL;
 	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
 
-	if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
+	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
 		int type = op.type & INSTR_TYPE_MASK;
 		int size = GETSIZE(op.type);
 
@@ -356,11 +356,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+	trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
 
 	/* Advance past emulated instruction. */
 	if (emulated != EMULATE_FAIL)
-		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));
 
 	return emulated;
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4c5405fc55387..339267c336365 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -304,11 +304,11 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 		break;
 	case EMULATE_FAIL:
 	{
-		u32 last_inst;
+		ppc_inst_t last_inst;
 
 		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
 		kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
-				      last_inst);
+				      ppc_inst_val(last_inst));
 
 		/*
 		 * Injecting a Data Storage here is a bit more
@@ -321,7 +321,9 @@ int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
 			if (vcpu->mmio_is_write)
 				dsisr |= DSISR_ISSTORE;
 
-			kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					vcpu->arch.vaddr_accessed, dsisr);
 		} else {
 			/*
 			 * BookE does not send a SIGBUS on a bad