Merge tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "A bit of a big batch, partly because I didn't send any last week, and
  also just because the BPF fixes happened to land this week.

  Summary:

   - Fix a regression hit by the IPR SCSI driver, introduced by the
     recent addition of MSI domains on pseries.

   - A big series including 8 BPF fixes, some with potential security
     impact and the rest fixing various code generation issues (the
     BPF_SUB corner case is sketched below, after the patch list).

   - Fix our program check assembler entry path, which was accidentally
     jumping into a gas macro and generating strange stack frames, which
     could confuse find_bug().

   - A couple of fixes, and related changes, to fix corner cases in our
     machine check handling.

   - Fix our DMA IOMMU ops, which were not always returning the optimal
     DMA mask, leading to at least one device falling back to 32-bit DMA
     when it shouldn't.

   - A fix for KUAP handling on 32-bit Book3S.

   - Fix crashes seen when kdumping on some pseries systems.

  Thanks to Naveen N. Rao, Nicholas Piggin, Alexey Kardashevskiy, Cédric
  Le Goater, Christophe Leroy, Mahesh Salgaonkar, Abdul Haleem,
  Christoph Hellwig, Johan Almbladh, Stan Johnson"

* tag 'powerpc-5.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  pseries/eeh: Fix the kdump kernel crash during eeh_pseries_init
  powerpc/32s: Fix kuap_kernel_restore()
  powerpc/pseries/msi: Add an empty irq_write_msi_msg() handler
  powerpc/64s: Fix unrecoverable MCE calling async handler from NMI
  powerpc/64/interrupt: Reconcile soft-mask state in NMI and fix false BUG
  powerpc/64: warn if local irqs are enabled in NMI or hardirq context
  powerpc/traps: do not enable irqs in _exception
  powerpc/64s: fix program check interrupt emergency stack path
  powerpc/bpf ppc32: Fix BPF_SUB when imm == 0x80000000
  powerpc/bpf ppc32: Do not emit zero extend instruction for 64-bit BPF_END
  powerpc/bpf ppc32: Fix JMP32_JSET_K
  powerpc/bpf ppc32: Fix ALU32 BPF_ARSH operation
  powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC
  powerpc/security: Add a helper to query stf_barrier type
  powerpc/bpf: Fix BPF_SUB when imm == 0x80000000
  powerpc/bpf: Fix BPF_MOD when imm == 1
  powerpc/bpf: Validate branch ranges
  powerpc/lib: Add helper to check if offset is within conditional branch range
  powerpc/iommu: Report the correct most efficient DMA mask for PCI devices
Linus Torvalds committed Oct 10, 2021
2 parents 75cd9b0 + eb8257a commit efb52a7
Showing 17 changed files with 234 additions and 75 deletions.
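One recurring theme in the BPF fixes above deserves a concrete illustration: both BPF_SUB patches handle imm == 0x80000000, where rewriting "dst - imm" as "dst + (-imm)" goes wrong because negating INT32_MIN in 32 bits wraps back to INT32_MIN. A minimal userspace sketch of the arithmetic (illustrative C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t imm = INT32_MIN;        /* 0x80000000 */
            int64_t dst = 0;

            /* What the old JIT effectively computed: dst + (-imm).
             * 32-bit negation of INT32_MIN wraps back to INT32_MIN. */
            int64_t wrong = dst + (int64_t)(int32_t)(0u - (uint32_t)imm);

            /* What BPF semantics require: dst - imm, sign-extended. */
            int64_t right = dst - (int64_t)imm;

            /* Prints -2147483648 vs 2147483648 -- off by 2^32. */
            printf("%lld vs %lld\n", (long long)wrong, (long long)right);
            return 0;
    }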
8 changes: 8 additions & 0 deletions arch/powerpc/include/asm/book3s/32/kup.h
@@ -136,6 +136,14 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 	if (kuap_is_disabled())
 		return;
 
+	if (unlikely(kuap != KUAP_NONE)) {
+		current->thread.kuap = KUAP_NONE;
+		kuap_lock(kuap, false);
+	}
+
+	if (likely(regs->kuap == KUAP_NONE))
+		return;
+
 	current->thread.kuap = regs->kuap;
 
 	kuap_unlock(regs->kuap, false);
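A quick walk-through of the restore logic above, with KUAP_NONE read as "no user-access window open" (a hedged reading of this hunk, not text from the commit):

    thread.kuap (kuap)   regs->kuap    action on interrupt return
    ------------------   -----------   -------------------------------------------
    KUAP_NONE            KUAP_NONE     nothing to do (fast path)
    addr                 KUAP_NONE     re-lock the window opened in the interrupt
    KUAP_NONE            addr          re-open the interrupted context's window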
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/code-patching.h
@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE	0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const u32 *addr,
 		  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
18 changes: 10 additions & 8 deletions arch/powerpc/include/asm/interrupt.h
@@ -265,13 +265,16 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
 	local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
 	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
-	if (is_implicit_soft_masked(regs)) {
-		// Adjust regs->softe soft implicit soft-mask, so
-		// arch_irq_disabled_regs(regs) behaves as expected.
+	if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+		/*
+		 * Adjust regs->softe to be soft-masked if it had not been
+		 * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+		 * not yet set disabled), or if it was in an implicit soft
+		 * masked state. This makes arch_irq_disabled_regs(regs)
+		 * behave as expected.
+		 */
 		regs->softe = IRQS_ALL_DISABLED;
 	}
-	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
-		BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
 
 	/* Don't do any per-CPU operations until interrupt state is fixed */
 
@@ -525,10 +528,9 @@ static __always_inline long ____##func(struct pt_regs *regs)
 /* kernel/traps.c */
 DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
 #ifdef CONFIG_PPC_BOOK3S_64
-DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
-#else
-DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
 #endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
 DECLARE_INTERRUPT_HANDLER(SMIException);
 DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
 DECLARE_INTERRUPT_HANDLER(unknown_exception);
5 changes: 5 additions & 0 deletions arch/powerpc/include/asm/security_features.h
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
 	return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
 
9 changes: 9 additions & 0 deletions arch/powerpc/kernel/dma-iommu.c
@@ -184,6 +184,15 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
 
+	if (dev_is_pci(dev)) {
+		u64 bypass_mask = dma_direct_get_required_mask(dev);
+
+		if (dma_iommu_dma_supported(dev, bypass_mask)) {
+			dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
+			return bypass_mask;
+		}
+	}
+
 	if (!tbl)
 		return 0;
 
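The practical effect of the hunk above, seen from a driver's probe path: dma_get_required_mask() can now report the 64-bit direct-mapping (bypass) mask for PCI devices, so mask negotiation no longer drops to 32 bits unnecessarily. A hedged sketch of a hypothetical consumer (example_probe and its flow are illustrative, not from this commit):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int example_probe(struct pci_dev *pdev)
    {
            /* With this fix, the reported mask can be the wide bypass
             * mask rather than the narrower IOMMU-window mask. */
            u64 mask = dma_get_required_mask(&pdev->dev);

            if (dma_set_mask_and_coherent(&pdev->dev, mask))
                    /* Last resort: 32-bit DMA. */
                    return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
            return 0;
    }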
25 changes: 16 additions & 9 deletions arch/powerpc/kernel/exceptions-64s.S
@@ -1243,7 +1243,7 @@ EXC_COMMON_BEGIN(machine_check_common)
 	li	r10,MSR_RI
 	mtmsrd	r10,1
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	machine_check_exception
+	bl	machine_check_exception_async
 	b	interrupt_return_srr
 
 
@@ -1303,7 +1303,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	subi	r12,r12,1
 	sth	r12,PACA_IN_MCE(r13)
 
-	/* Invoke machine_check_exception to print MCE event and panic. */
+	/*
+	 * Invoke machine_check_exception to print MCE event and panic.
+	 * This is the NMI version of the handler because we are called from
+	 * the early handler which is a true NMI.
+	 */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	machine_check_exception
 
@@ -1665,27 +1669,30 @@ EXC_COMMON_BEGIN(program_check_common)
 	 */
 
 	andi.	r10,r12,MSR_PR
-	bne	2f			/* If userspace, go normal path */
+	bne	.Lnormal_stack		/* If userspace, go normal path */
 
 	andis.	r10,r12,(SRR1_PROGTM)@h
-	bne	1f			/* If TM, emergency	*/
+	bne	.Lemergency_stack	/* If TM, emergency	*/
 
 	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
-	blt	2f			/* normal path if not	*/
+	blt	.Lnormal_stack		/* normal path if not	*/
 
 	/* Use the emergency stack */
-1:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
+.Lemergency_stack:
+	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
 					/* 3 in EXCEPTION_PROLOG_COMMON	*/
 	mr	r10,r1			/* Save r1			*/
 	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
 	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
 	__ISTACK(program_check)=0
 	__GEN_COMMON_BODY program_check
-	b 3f
-2:
+	b .Ldo_program_check
+
+.Lnormal_stack:
 	__ISTACK(program_check)=1
 	__GEN_COMMON_BODY program_check
-3:
+
+.Ldo_program_check:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	program_check_exception
 	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
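Why the numeric labels (1:, 2:, 3:) had to become named local labels: gas numeric labels are file-scoped, and __GEN_COMMON_BODY is a gas macro that can define its own numeric labels, so a branch like "b 3f" at the call site could resolve to a label inside the expanded macro — the "accidentally jumping into a gas macro" from the pull message. A minimal illustration of the pitfall (assumed example, not from this commit):

    .macro M
    2:	nop			/* numeric label private to the macro... */
    .endm

    	b	2f		/* ...yet "2f" from here finds it first */
    	M
    2:	nop			/* intended target */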
6 changes: 6 additions & 0 deletions arch/powerpc/kernel/irq.c
@@ -229,6 +229,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(in_nmi() || in_hardirq());
+
 	/*
 	 * After the stb, interrupts are unmasked and there are no interrupts
 	 * pending replay. The restart sequence makes this atomic with
@@ -321,6 +324,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
 	if (mask)
 		return;
 
+	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+		WARN_ON_ONCE(in_nmi() || in_hardirq());
+
 	/*
 	 * From this point onward, we can take interrupts, preempt,
 	 * etc... unless we got hard-disabled. We check if an event
5 changes: 5 additions & 0 deletions arch/powerpc/kernel/security.c
@@ -263,6 +263,11 @@ static int __init handle_no_stf_barrier(char *p)
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+	return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
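The new helper exposes the active mitigation to other subsystems; in this series the BPF JIT uses it to pick an instruction sequence for BPF_NOSPEC. A hedged sketch of that consumer (the emit_*() helpers are placeholders, not real kernel functions; the enum values are from security_features.h):

    switch (stf_barrier_type_get()) {
    case STF_BARRIER_EIEIO:
            emit_eieio();                   /* lightweight ordering barrier */
            break;
    case STF_BARRIER_SYNC_ORI:
            emit_sync_ori_sequence();       /* sync; load; ori-based sequence */
            break;
    case STF_BARRIER_FALLBACK:
            emit_fallback_flush_call();     /* branch to the fallback flush */
            break;
    case STF_BARRIER_NONE:
    default:
            break;                          /* nothing to emit */
    }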
43 changes: 27 additions & 16 deletions arch/powerpc/kernel/traps.c
@@ -340,10 +340,16 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
 		return false;
 	}
 
-	show_signal_msg(signr, regs, code, addr);
+	/*
+	 * Must not enable interrupts even for user-mode exception, because
+	 * this can be called from machine check, which may be a NMI or IRQ
+	 * which don't like interrupts being enabled. Could check for
+	 * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+	 * reason why _exception() should enable irqs for an exception handler,
+	 * the handlers themselves do that directly.
+	 */
 
-	if (arch_irqs_disabled())
-		interrupt_cond_local_irq_enable(regs);
+	show_signal_msg(signr, regs, code, addr);
 
 	current->thread.trap_nr = code;
 
@@ -790,24 +796,22 @@ void die_mce(const char *str, struct pt_regs *regs, long err)
 	 * do_exit() checks for in_interrupt() and panics in that case, so
 	 * exit the irq/nmi before calling die.
 	 */
-	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
-		irq_exit();
-	else
+	if (in_nmi())
 		nmi_exit();
+	else
+		irq_exit();
 	die(str, regs, err);
 }
 
 /*
- * BOOK3S_64 does not call this handler as a non-maskable interrupt
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
  * (it uses its own early real-mode handler to handle the MCE proper
  * and then raises irq_work to call this handler when interrupts are
- * enabled).
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
  */
-#ifdef CONFIG_PPC_BOOK3S_64
-DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception)
-#else
-DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
-#endif
+static void __machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
 
@@ -841,12 +845,19 @@ DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
 	/* Must die if the interrupt is not recoverable */
 	if (regs_is_unrecoverable(regs))
 		die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	return;
-#else
-	return 0;
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+	__machine_check_exception(regs);
+}
 #endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+	__machine_check_exception(regs);
+
+	return 0;
 }
 
 DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
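The resulting machine-check flow on BOOK3S_64, as reorganized above and in the exceptions-64s.S hunks (a hedged summary, not commit text):

    machine check (real-mode early handler, a true NMI)
      |- recoverable:   via irq_work, later -> machine_check_exception_async()  [ASYNC wrapper]
      '- unrecoverable: bl machine_check_exception                              [NMI wrapper]
                        both wrappers call __machine_check_exception()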
7 changes: 6 additions & 1 deletion arch/powerpc/lib/code-patching.c
@@ -228,6 +228,11 @@ bool is_offset_in_branch_range(long offset)
 	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
@@ -280,7 +285,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
 	offset = offset - (unsigned long)addr;
 
 	/* Check we can represent the target in the instruction format */
-	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+	if (!is_offset_in_cond_branch_range(offset))
 		return 1;
 
 	/* Mask out the flags and target, so they don't step on each other. */
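The +/-32 KB bound in the new helper comes from the bc instruction's 14-bit, word-aligned BD displacement field (a signed 16-bit value whose low two bits must be zero). A userspace copy for illustration (same body as the helper above; the main() wrapper is my own):

    #include <stdbool.h>
    #include <stdio.h>

    static bool is_offset_in_cond_branch_range(long offset)
    {
            return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
    }

    int main(void)
    {
            printf("%d\n", is_offset_in_cond_branch_range(0x7ffc));   /* 1: max forward  */
            printf("%d\n", is_offset_in_cond_branch_range(-0x8000));  /* 1: max backward */
            printf("%d\n", is_offset_in_cond_branch_range(0x8000));   /* 0: too far      */
            printf("%d\n", is_offset_in_cond_branch_range(6));        /* 0: not aligned  */
            return 0;
    }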
33 changes: 21 additions & 12 deletions arch/powerpc/net/bpf_jit.h
@@ -24,16 +24,30 @@
 #define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |			\
-				     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest)							\
+	do {								\
+		long offset = (long)(dest) - (ctx->idx * 4);		\
+		if (!is_offset_in_branch_range(offset)) {		\
+			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);	\
+			return -ERANGE;					\
+		}							\
+		EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));		\
+	} while (0)
+
 /* blr; (unconditional 'branch' with link) to absolute address */
 #define PPC_BL_ABS(dest)	EMIT(PPC_INST_BL |			\
 				     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |	\
-					     (((cond) & 0x3ff) << 16) |	\
-					     (((dest) - (ctx->idx * 4)) &	\
-					      0xfffc))
+#define PPC_BCC_SHORT(cond, dest)					\
+	do {								\
+		long offset = (long)(dest) - (ctx->idx * 4);		\
+		if (!is_offset_in_cond_branch_range(offset)) {		\
+			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);	\
+			return -ERANGE;					\
+		}							\
+		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));	\
+	} while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)		do {					\
 		if ((int)(uintptr_t)(i) >= -32768 &&			\
@@ -78,11 +92,6 @@
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-	return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP. If code size differs
@@ -91,7 +100,7 @@ static inline bool is_nearbranch(int offset)
  * state.
  */
 #define PPC_BCC(cond, dest)	do {					\
-		if (is_nearbranch((dest) - (ctx->idx * 4))) {		\
+		if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {	\
 			PPC_BCC_SHORT(cond, dest);			\
 			EMIT(PPC_RAW_NOP());				\
 		} else {						\
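Two notes on this file, as hedged readings rather than quoted source. First, the else arm of PPC_BCC (collapsed in the view above) uses the classic far-branch idiom: emit the condition with its true/false sense inverted as a short branch over the next instruction, then a PPC_JMP whose reach is +/-32 MB. Second, because the reworked macros return -ERANGE out of the enclosing emit function, bpf_jit_build_body() can now fail, which is exactly what the bpf_jit_comp.c hunk at the end of this commit checks for before falling back to the interpreter.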
8 changes: 4 additions & 4 deletions arch/powerpc/net/bpf_jit64.h
@@ -16,18 +16,18 @@
  * with our redzone usage.
  *
  *		[	prev sp		] <-------------
- *		[   nv gpr save area	] 6*8		|
+ *		[   nv gpr save area	] 5*8		|
  *		[    tail_call_cnt	] 8		|
- *		[    local_tmp_var	] 8		|
+ *		[    local_tmp_var	] 16		|
  *		fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[	frame header	] 32/112	|
  * sp (r1) --->	[	stack pointer	] --------------
  */
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE	(6*8)
+#define BPF_PPC_STACK_SAVE	(5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS	16
+#define BPF_PPC_STACK_LOCALS	24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
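For the bookkeeping above: the save area is five 8-byte non-volatile GPRs (BPF_REG_6 through BPF_REG_10; the old 6*8 over-counted by one), and BPF_PPC_STACK_LOCALS is tail_call_cnt (8) plus the enlarged local_tmp_var (16), i.e. 24 bytes. The extra temporary space is what the stf-barrier fallback sequence added elsewhere in this series uses for scratch state (a hedged reading, not commit text).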
6 changes: 5 additions & 1 deletion arch/powerpc/net/bpf_jit_comp.c
@@ -210,7 +210,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	/* Now build the prologue, body code & epilogue for real. */
 	cgctx.idx = 0;
 	bpf_jit_build_prologue(code_base, &cgctx);
-	bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
+	if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+		bpf_jit_binary_free(bpf_hdr);
+		fp = org_fp;
+		goto out_addrs;
+	}
 	bpf_jit_build_epilogue(code_base, &cgctx);
 
 	if (bpf_jit_enable > 1)
