Merge tag 'powerpc-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "An assortment of fixes that were either missed by me, or didn't arrive
  quite in time for the first v5.4 pull.

   - Most notable is a fix for an issue with tlbie (broadcast TLB
     invalidation) on Power9, when using the Radix MMU. The tlbie can
     race with an mtpid (move to PID register, essentially MMU context
     switch) on another thread of the core, which can cause stores to
     continue to go to a page after it's unmapped.

   - A fix in our KVM code to add a missing barrier, the lack of which
     has been observed to cause missed IPIs and subsequently stuck CPUs
     in the host.

   - A change to the way we initialise PCR (Processor Compatibility
     Register) to make it forward compatible with future CPUs.

   - On some older PowerVM systems our H_BLOCK_REMOVE support could
     oops; fix it to detect such systems and fall back to the old
     invalidation method.

   - A fix for an oops seen on some machines when using KASAN on 32-bit.

   - A handful of other minor fixes, and two new selftests.

  Thanks to: Alistair Popple, Aneesh Kumar K.V, Christophe Leroy,
  Gustavo Romero, Joel Stanley, Jordan Niethe, Laurent Dufour, Michael
  Roth, Oliver O'Halloran"

* tag 'powerpc-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/eeh: Fix eeh_debugfs_break_device() with SRIOV devices
  powerpc/nvdimm: use H_SCM_QUERY hcall on H_OVERLAP error
  powerpc/nvdimm: Use HCALL error as the return value
  selftests/powerpc: Add test case for tlbie vs mtpidr ordering issue
  powerpc/mm: Fixup tlbie vs mtpidr/mtlpidr ordering issue on POWER9
  powerpc/book3s64/radix: Rename CPU_FTR_P9_TLBIE_BUG feature flag
  powerpc/book3s64/mm: Don't do tlbie fixup for some hardware revisions
  powerpc/pseries: Call H_BLOCK_REMOVE when supported
  powerpc/pseries: Read TLB Block Invalidate Characteristics
  KVM: PPC: Book3S HV: use smp_mb() when setting/clearing host_ipi flag
  powerpc/mm: Fix an Oops in kasan_mmu_init()
  powerpc/mm: Add a helper to select PAGE_KERNEL_RO or PAGE_READONLY
  powerpc/64s: Set reserved PCR bits
  powerpc: Fix definition of PCR bits to work with old binutils
  powerpc/book3s64/radix: Remove WARN_ON in destroy_context()
  powerpc/tm: Add tm-poison test
Linus Torvalds committed Sep 28, 2019
2 parents f19e00e + 253c892 commit a295320
Showing 28 changed files with 1,476 additions and 93 deletions.
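
Before the per-file changes, a minimal userspace model of the host_ipi ordering rules may help make the barrier fix concrete. It is only a sketch built on C11 atomics (the two-variable model and helper names are illustrative assumptions, not kernel code), but the fences sit where the new smp_mb() calls sit in the kvm_ppc.h hunk below.

/*
 * Illustrative userspace model (not kernel code) of the ordering the
 * host_ipi fix enforces.  "message" stands for the muxed IPI message word,
 * "host_ipi" for paca->kvm_hstate.host_ipi; the seq_cst fences correspond
 * to the smp_mb() calls added in kvmppc_set_host_ipi()/kvmppc_clear_host_ipi().
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int message;   /* models message[CALL_FUNCTION] etc. */
static atomic_int host_ipi;  /* models paca->kvm_hstate.host_ipi   */

static void set_host_ipi(void)              /* sender side */
{
	/* order stores of IPI messages vs. setting of host_ipi flag */
	atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
	atomic_store_explicit(&host_ipi, 1, memory_order_relaxed);
}

static void clear_host_ipi(void)            /* receiver side */
{
	atomic_store_explicit(&host_ipi, 0, memory_order_relaxed);
	/* order clearing of host_ipi flag vs. processing of IPI messages */
	atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
}

static void send_ipi(void)
{
	atomic_store_explicit(&message, 1, memory_order_relaxed);
	set_host_ipi();
	/* in the kernel, ppc_msgsnd_sync()/ppc_msgsnd() would follow here */
}

static void receive_ipi(void)
{
	clear_host_ipi();
	if (atomic_load_explicit(&message, memory_order_relaxed))
		printf("IPI message observed\n");
}

int main(void)
{
	send_ipi();
	receive_ipi();
	return 0;
}

Without the fence in set_host_ipi(), the store to message can be deferred past the store to host_ipi; the receiver may then clear host_ipi, find no message, and return to the guest while the sender keeps waiting, which is the hang traced step by step in the kvm_ppc.h comment.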
5 changes: 3 additions & 2 deletions arch/powerpc/include/asm/cputable.h
@@ -209,8 +209,9 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x0000080000000000)
#define CPU_FTR_P9_TM_HV_ASSIST LONG_ASM_CONST(0x0000100000000000)
#define CPU_FTR_P9_TM_XER_SO_BUG LONG_ASM_CONST(0x0000200000000000)
#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_P9_TLBIE_STQ_BUG LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000)

#ifndef __ASSEMBLY__

@@ -457,7 +458,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
CPU_FTR_P9_TLBIE_BUG | CPU_FTR_P9_TIDR)
CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1)
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
100 changes: 97 additions & 3 deletions arch/powerpc/include/asm/kvm_ppc.h
@@ -452,9 +452,100 @@ static inline u32 kvmppc_get_xics_latch(void)
return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
/*
* To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
* a CPU thread that's running/napping inside of a guest is by default regarded
* as a request to wake the CPU (if needed) and continue execution within the
* guest, potentially to process new state like externally-generated
* interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
*
* To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
* prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
* target CPU's PACA. To avoid unnecessary exits to the host, this flag should
* be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
* the receiving side prior to processing the IPI work.
*
* NOTE:
*
* We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
* This is to guard against sequences such as the following:
*
* CPU
* X: smp_muxed_ipi_set_message():
* X: smp_mb()
* X: message[RESCHEDULE] = 1
* X: doorbell_global_ipi(42):
* X: kvmppc_set_host_ipi(42)
* X: ppc_msgsnd_sync()/smp_mb()
* X: ppc_msgsnd() -> 42
* 42: doorbell_exception(): // from CPU X
* 42: ppc_msgsync()
* 105: smp_muxed_ipi_set_message():
105: smp_mb()
* // STORE DEFERRED DUE TO RE-ORDERING
* --105: message[CALL_FUNCTION] = 1
* | 105: doorbell_global_ipi(42):
* | 105: kvmppc_set_host_ipi(42)
* | 42: kvmppc_clear_host_ipi(42)
* | 42: smp_ipi_demux_relaxed()
* | 42: // returns to executing guest
* | // RE-ORDERED STORE COMPLETES
* ->105: message[CALL_FUNCTION] = 1
* 105: ppc_msgsnd_sync()/smp_mb()
* 105: ppc_msgsnd() -> 42
* 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
* 105: // hangs waiting on 42 to process messages/call_single_queue
*
* We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
* to guard against sequences such as the following (as well as to create
* a read-side pairing with the barrier in kvmppc_set_host_ipi()):
*
* CPU
* X: smp_muxed_ipi_set_message():
* X: smp_mb()
* X: message[RESCHEDULE] = 1
* X: doorbell_global_ipi(42):
* X: kvmppc_set_host_ipi(42)
* X: ppc_msgsnd_sync()/smp_mb()
* X: ppc_msgsnd() -> 42
* 42: doorbell_exception(): // from CPU X
* 42: ppc_msgsync()
* // STORE DEFERRED DUE TO RE-ORDERING
* -- 42: kvmppc_clear_host_ipi(42)
* | 42: smp_ipi_demux_relaxed()
* | 105: smp_muxed_ipi_set_message():
| 105: smp_mb()
* | 105: message[CALL_FUNCTION] = 1
* | 105: doorbell_global_ipi(42):
* | 105: kvmppc_set_host_ipi(42)
* | // RE-ORDERED STORE COMPLETES
* -> 42: kvmppc_clear_host_ipi(42)
* 42: // returns to executing guest
* 105: ppc_msgsnd_sync()/smp_mb()
* 105: ppc_msgsnd() -> 42
* 42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
* 105: // hangs waiting on 42 to process messages/call_single_queue
*/
static inline void kvmppc_set_host_ipi(int cpu)
{
paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
/*
* order stores of IPI messages vs. setting of host_ipi flag
*
* pairs with the barrier in kvmppc_clear_host_ipi()
*/
smp_mb();
paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
/*
* order clearing of host_ipi flag vs. processing of IPI messages
*
* pairs with the barrier in kvmppc_set_host_ipi()
*/
smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -486,7 +577,10 @@ static inline u32 kvmppc_get_xics_latch(void)
return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
9 changes: 6 additions & 3 deletions arch/powerpc/include/asm/reg.h
@@ -475,9 +475,10 @@
#define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */
#define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */
#define SPRN_PCR 0x152 /* Processor compatibility register */
#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */
#define PCR_VEC_DIS (__MASK(63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (__MASK(63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (__MASK(63-2)) /* Trans. memory disable (POWER8) */
#define PCR_HIGH_BITS (PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
/*
* These bits are used in the function kvmppc_set_arch_compat() to specify and
* determine both the compatibility level which we want to emulate and the
@@ -486,6 +487,8 @@
#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)
#define PCR_MASK ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
#define SPRN_TLBVPNR 0x155 /* P7 TLB control register */
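
PCR_MASK above is simply the complement of every PCR bit the kernel knows about, so writing it to the SPR leaves all known bits clear and all reserved bits set. A small standalone check makes the constants concrete; the local __MASK() stand-in is assumed to match the kernel macro of the same name.

/*
 * Standalone sketch: recompute the PCR constants from the hunk above.
 * __MASK() here is a local stand-in assumed to match the kernel macro.
 */
#include <stdio.h>

#define __MASK(x)	(1UL << (x))

#define PCR_VEC_DIS	(__MASK(63 - 0))
#define PCR_VSX_DIS	(__MASK(63 - 1))
#define PCR_TM_DIS	(__MASK(63 - 2))
#define PCR_HIGH_BITS	(PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)

#define PCR_ARCH_207	0x8UL
#define PCR_ARCH_206	0x4UL
#define PCR_ARCH_205	0x2UL
#define PCR_LOW_BITS	(PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)

#define PCR_MASK	~(PCR_HIGH_BITS | PCR_LOW_BITS)	/* PCR Reserved Bits */

int main(void)
{
	/*
	 * Writing PCR_MASK to the PCR keeps every reserved bit set, so
	 * compatibility bits defined by future CPUs stay disabled instead
	 * of being silently enabled by writing them as zero.
	 */
	printf("PCR_HIGH_BITS = 0x%016lx\n", PCR_HIGH_BITS);
	printf("PCR_LOW_BITS  = 0x%016lx\n", PCR_LOW_BITS);
	printf("PCR_MASK      = 0x%016lx\n", (unsigned long)(PCR_MASK));
	return 0;
}

On a 64-bit build this prints PCR_MASK = 0x1ffffffffffffff1, which is the value the cpu_setup_power.S and dt_cpu_ftrs.c hunks below now load into SPRN_PCR instead of zero.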
6 changes: 6 additions & 0 deletions arch/powerpc/kernel/cpu_setup_power.S
@@ -23,6 +23,7 @@ _GLOBAL(__setup_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
@@ -37,6 +38,7 @@ _GLOBAL(__restore_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
@@ -54,6 +56,7 @@ _GLOBAL(__setup_cpu_power8)
beqlr
li r0,0
mtspr SPRN_LPID,r0
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
@@ -76,6 +79,7 @@ _GLOBAL(__restore_cpu_power8)
beqlr
li r0,0
mtspr SPRN_LPID,r0
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
@@ -98,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mtspr SPRN_PID,r0
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
@@ -123,6 +128,7 @@ _GLOBAL(__restore_cpu_power9)
mtspr SPRN_PSSCR,r0
mtspr SPRN_LPID,r0
mtspr SPRN_PID,r0
LOAD_REG_IMMEDIATE(r0, PCR_MASK)
mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
6 changes: 3 additions & 3 deletions arch/powerpc/kernel/dbell.c
@@ -33,7 +33,7 @@ void doorbell_global_ipi(int cpu)
{
u32 tag = get_hard_smp_processor_id(cpu);

kvmppc_set_host_ipi(cpu, 1);
kvmppc_set_host_ipi(cpu);
/* Order previous accesses vs. msgsnd, which is treated as a store */
ppc_msgsnd_sync();
ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -48,7 +48,7 @@ void doorbell_core_ipi(int cpu)
{
u32 tag = cpu_thread_in_core(cpu);

kvmppc_set_host_ipi(cpu, 1);
kvmppc_set_host_ipi(cpu);
/* Order previous accesses vs. msgsnd, which is treated as a store */
ppc_msgsnd_sync();
ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
@@ -84,7 +84,7 @@ void doorbell_exception(struct pt_regs *regs)

may_hard_irq_enable();

kvmppc_set_host_ipi(smp_processor_id(), 0);
kvmppc_clear_host_ipi(smp_processor_id());
__this_cpu_inc(irq_stat.doorbell_irqs);

smp_ipi_demux_relaxed(); /* already performed the barrier */
35 changes: 32 additions & 3 deletions arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -101,7 +101,7 @@ static void __restore_cpu_cpufeatures(void)
if (hv_mode) {
mtspr(SPRN_LPID, 0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
mtspr(SPRN_PCR, 0);
mtspr(SPRN_PCR, PCR_MASK);
}
mtspr(SPRN_FSCR, system_registers.fscr);

@@ -144,6 +144,7 @@ static void __init cpufeatures_setup_cpu(void)
mtspr(SPRN_HFSCR, 0);
}
mtspr(SPRN_FSCR, 0);
mtspr(SPRN_PCR, PCR_MASK);

/*
* LPCR does not get cleared, to match behaviour with secondaries
@@ -691,9 +692,37 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
return true;
}

/*
* Handle POWER9 broadcast tlbie invalidation issue using
* cpu feature flag.
*/
static __init void update_tlbie_feature_flag(unsigned long pvr)
{
if (PVR_VER(pvr) == PVR_POWER9) {
/*
* Set the tlbie feature flag for anything below
* Nimbus DD 2.3 and Cumulus DD 1.3
*/
if ((pvr & 0xe000) == 0) {
/* Nimbus */
if ((pvr & 0xfff) < 0x203)
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
} else if ((pvr & 0xc000) == 0) {
/* Cumulus */
if ((pvr & 0xfff) < 0x103)
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
} else {
WARN_ONCE(1, "Unknown PVR");
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_STQ_BUG;
}

cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_ERAT_BUG;
}
}

static __init void cpufeatures_cpu_quirks(void)
{
int version = mfspr(SPRN_PVR);
unsigned long version = mfspr(SPRN_PVR);

/*
* Not all quirks can be derived from the cpufeatures device tree.
@@ -712,10 +741,10 @@ static __init void cpufeatures_cpu_quirks(void)

if ((version & 0xffff0000) == 0x004e0000) {
cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
}

update_tlbie_feature_flag(version);
/*
* PKEY was not in the initial base or feature node
* specification, but it should become optional in the next
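
As a side note on update_tlbie_feature_flag() above, the PVR masks select the POWER9 part (Nimbus vs. Cumulus) and the DD level. The small self-contained sketch below applies the same masks and thresholds to a few sample values; PVR_VER()/PVR_POWER9 are written out to match their kernel definitions, and the sample PVRs are hypothetical, chosen only to exercise each branch.

/*
 * Userspace sketch of the PVR classification done by
 * update_tlbie_feature_flag() above.  PVR_VER()/PVR_POWER9 mirror the
 * kernel definitions; the sample PVR values below are illustrative only.
 */
#include <stdio.h>

#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)
#define PVR_POWER9	0x004E

static int needs_tlbie_stq_fixup(unsigned long pvr)
{
	if (PVR_VER(pvr) != PVR_POWER9)
		return 0;
	if ((pvr & 0xe000) == 0)		/* Nimbus */
		return (pvr & 0xfff) < 0x203;	/* below DD 2.3 */
	if ((pvr & 0xc000) == 0)		/* Cumulus */
		return (pvr & 0xfff) < 0x103;	/* below DD 1.3 */
	return 1;				/* unknown part: assume affected */
}

int main(void)
{
	/* hypothetical PVRs: Nimbus DD2.2, Nimbus DD2.3, Cumulus DD1.2 */
	unsigned long samples[] = { 0x004e0202, 0x004e0203, 0x004e2102 };

	for (int i = 0; i < 3; i++)
		printf("pvr=0x%08lx -> tlbie_stq_fixup=%d\n", samples[i],
		       needs_tlbie_stq_fixup(samples[i]));
	return 0;
}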
4 changes: 2 additions & 2 deletions arch/powerpc/kernel/eeh.c
@@ -1960,7 +1960,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
pci_err(pdev, "Going to break: %pR\n", bar);

if (pdev->is_virtfn) {
#ifndef CONFIG_IOV
#ifndef CONFIG_PCI_IOV
return -ENXIO;
#else
/*
@@ -1980,7 +1980,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
pos += PCI_SRIOV_CTRL;
bit = PCI_SRIOV_CTRL_MSE;
#endif /* !CONFIG_IOV */
#endif /* !CONFIG_PCI_IOV */
} else {
bit = PCI_COMMAND_MEMORY;
pos = PCI_COMMAND;
11 changes: 7 additions & 4 deletions arch/powerpc/kvm/book3s_hv.c
@@ -401,8 +401,11 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)

spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
/* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
vc->pcr = host_pcr_bit - guest_pcr_bit;
/*
* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
* Also set all reserved PCR bits
*/
vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
spin_unlock(&vc->lock);

return 0;
@@ -3410,7 +3413,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
}

if (vc->pcr)
mtspr(SPRN_PCR, vc->pcr);
mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
mtspr(SPRN_DPDES, vc->dpdes);
mtspr(SPRN_VTB, vc->vtb);

@@ -3490,7 +3493,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
vc->vtb = mfspr(SPRN_VTB);
mtspr(SPRN_DPDES, 0);
if (vc->pcr)
mtspr(SPRN_PCR, 0);
mtspr(SPRN_PCR, PCR_MASK);

if (vc->tb_offset_applied) {
u64 new_tb = mftb() - vc->tb_offset_applied;
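
The expression (host_pcr_bit - guest_pcr_bit) | PCR_MASK above works because the PCR architecture-level bits are consecutive powers of two, so the subtraction yields exactly the bits for which guest_pcr_bit <= bit < host_pcr_bit. A quick standalone check follows; PCR_ARCH_300 = 0x10 is an assumption here (intended to match reg.h), and the remaining constants come from the reg.h hunk earlier in this diff.

/*
 * Standalone check of the compat-bit arithmetic used in
 * kvmppc_set_arch_compat() above.  PCR_ARCH_300 is assumed; the other
 * constants are taken from the reg.h hunk earlier in this diff.
 */
#include <stdio.h>

#define PCR_ARCH_205	0x2UL
#define PCR_ARCH_206	0x4UL
#define PCR_ARCH_207	0x8UL
#define PCR_ARCH_300	0x10UL			/* assumed, see lead-in */
#define PCR_LOW_BITS	(PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)
#define PCR_HIGH_BITS	0xe000000000000000UL	/* VEC/VSX/TM disable */
#define PCR_MASK	~(PCR_HIGH_BITS | PCR_LOW_BITS)

int main(void)
{
	/* e.g. POWER9 host (ISA 3.00) running a guest in ISA 2.06 compat */
	unsigned long host_pcr_bit  = PCR_ARCH_300;
	unsigned long guest_pcr_bit = PCR_ARCH_206;

	/* sets every architecture bit with guest_pcr_bit <= bit < host_pcr_bit */
	unsigned long pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;

	printf("compat bits = 0x%lx\n", host_pcr_bit - guest_pcr_bit); /* 0xc */
	printf("PCR value   = 0x%016lx\n", pcr);
	return 0;
}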
6 changes: 3 additions & 3 deletions arch/powerpc/kvm/book3s_hv_nested.c
@@ -29,7 +29,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;

hr->pcr = vc->pcr;
hr->pcr = vc->pcr | PCR_MASK;
hr->dpdes = vc->dpdes;
hr->hfscr = vcpu->arch.hfscr;
hr->tb_offset = vc->tb_offset;
@@ -65,7 +65,7 @@ static void byteswap_hv_regs(struct hv_guest_state *hr)
hr->lpid = swab32(hr->lpid);
hr->vcpu_token = swab32(hr->vcpu_token);
hr->lpcr = swab64(hr->lpcr);
hr->pcr = swab64(hr->pcr);
hr->pcr = swab64(hr->pcr) | PCR_MASK;
hr->amor = swab64(hr->amor);
hr->dpdes = swab64(hr->dpdes);
hr->hfscr = swab64(hr->hfscr);
@@ -148,7 +148,7 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;

vc->pcr = hr->pcr;
vc->pcr = hr->pcr | PCR_MASK;
vc->dpdes = hr->dpdes;
vcpu->arch.hfscr = hr->hfscr;
vcpu->arch.dawr = hr->dawr0;