powerpc/64: Rename soft_enabled to irq_soft_mask
Rename paca->soft_enabled to paca->irq_soft_mask, as it is no longer
used as a flag for interrupt state but as a mask.

Signed-off-by: Madhavan Srinivasan <maddy@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Madhavan Srinivasan authored and Michael Ellerman committed Jan 19, 2018
1 parent 01417c6 commit 4e26bc4
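
As an editorial illustration of the flag-to-mask change described in the commit message, the following stand-alone C sketch contrasts the two models. It is not part of the commit; only the IRQS_ENABLED/IRQS_DISABLED values are taken from the asm/hw_irq.h hunk below, everything else is hypothetical.

/*
 * Stand-alone sketch (not kernel code) of the semantic change.
 * IRQS_ENABLED/IRQS_DISABLED mirror the values in asm/hw_irq.h below.
 */
#include <stdio.h>

#define IRQS_ENABLED	0
#define IRQS_DISABLED	1	/* the standard Linux soft-disable bit */

/* Old model: paca->soft_enabled was a boolean flag, compared for equality. */
static int flag_irqs_disabled(unsigned char soft_enabled)
{
	return soft_enabled == IRQS_DISABLED;
}

/*
 * New model: paca->irq_soft_mask is a bit mask, tested with AND so that
 * further mask bits can be added later without breaking this test.
 */
static int mask_irqs_disabled(unsigned char irq_soft_mask)
{
	return (irq_soft_mask & IRQS_DISABLED) != 0;
}

int main(void)
{
	unsigned char state = IRQS_DISABLED;

	printf("flag view: %d, mask view: %d\n",
	       flag_irqs_disabled(state), mask_irqs_disabled(state));
	return 0;
}

The mask interpretation is what the bit-wise tests against IRQS_DISABLED (the andi. instructions in the hunks below) rely on.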
Showing 19 changed files with 74 additions and 81 deletions.
4 changes: 2 additions & 2 deletions arch/powerpc/include/asm/exception-64s.h
@@ -432,7 +432,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
mflr r9; /* Get LR, later save to stack */ \
ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
std r9,_LINK(r1); \
lbz r10,PACASOFTIRQEN(r13); \
lbz r10,PACAIRQSOFTMASK(r13); \
mfspr r11,SPRN_XER; /* save XER in stackframe */ \
std r10,SOFTE(r1); \
std r11,_XER(r1); \
@@ -498,7 +498,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
#define SOFTEN_VALUE_0xea0 PACA_IRQ_EE

#define __SOFTEN_TEST(h, vec) \
lbz r10,PACASOFTIRQEN(r13); \
lbz r10,PACAIRQSOFTMASK(r13); \
andi. r10,r10,IRQS_DISABLED; \
li r10,SOFTEN_VALUE_##vec; \
bne masked_##h##interrupt
56 changes: 31 additions & 25 deletions arch/powerpc/include/asm/hw_irq.h
@@ -29,7 +29,7 @@
#define PACA_IRQ_HMI 0x20

/*
* flags for paca->soft_enabled
* flags for paca->irq_soft_mask
*/
#define IRQS_ENABLED 0
#define IRQS_DISABLED 1
@@ -49,46 +49,52 @@ extern void unknown_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long soft_enabled_return(void)
static inline notrace unsigned long irq_soft_mask_return(void)
{
unsigned long flags;

asm volatile(
"lbz %0,%1(13)"
: "=r" (flags)
: "i" (offsetof(struct paca_struct, soft_enabled)));
: "i" (offsetof(struct paca_struct, irq_soft_mask)));

return flags;
}

/*
* The "memory" clobber acts as both a compiler barrier
* for the critical section and as a clobber because
* we changed paca->soft_enabled
* we changed paca->irq_soft_mask
*/
static inline notrace void soft_enabled_set(unsigned long enable)
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* mask must always include LINUX bit if any are set, and
* interrupts don't get replayed until the Linux interrupt is
* unmasked. This could be changed to replay partial unmasks
* in future, which would allow Linux masks to nest inside
* other masks, among other things. For now, be very dumb and
* simple.
* The irq mask must always include the STD bit if any are set.
*
* and interrupts don't get replayed until the standard
* interrupt (local_irq_disable()) is unmasked.
*
* Other masks must only provide additional masking beyond
* the standard, and they are also not replayed until the
* standard interrupt becomes unmasked.
*
* This could be changed, but it will require partial
* unmasks to be replayed, among other things. For now, take
* the simple approach.
*/
WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

asm volatile(
"stb %0,%1(13)"
:
: "r" (enable),
"i" (offsetof(struct paca_struct, soft_enabled))
: "r" (mask),
"i" (offsetof(struct paca_struct, irq_soft_mask))
: "memory");
}

static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
unsigned long flags;

@@ -99,7 +105,7 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)
asm volatile(
"lbz %0,%1(13); stb %2,%1(13)"
: "=&r" (flags)
: "i" (offsetof(struct paca_struct, soft_enabled)),
: "i" (offsetof(struct paca_struct, irq_soft_mask)),
"r" (mask)
: "memory");

@@ -108,12 +114,12 @@ static inline notrace unsigned long soft_enabled_set_return(unsigned long mask)

static inline unsigned long arch_local_save_flags(void)
{
return soft_enabled_return();
return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
soft_enabled_set(IRQS_DISABLED);
irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);
@@ -125,7 +131,7 @@ static inline void arch_local_irq_enable(void)

static inline unsigned long arch_local_irq_save(void)
{
return soft_enabled_set_return(IRQS_DISABLED);
return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -146,13 +152,13 @@ static inline bool arch_irqs_disabled(void)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
flags = soft_enabled_set_return(IRQS_DISABLED);\
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \
trace_hardirqs_off(); \
#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
flags = irq_soft_mask_set_return(IRQS_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) \
trace_hardirqs_off(); \
} while(0)

static inline bool lazy_irq_pending(void)
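
The comment added to irq_soft_mask_set() in the hunk above states an invariant: any non-zero mask must include the standard IRQS_DISABLED bit, and the new WARN_ON() enforces it. A rough stand-alone C model of the accessors and that invariant, with the PACA access replaced by a plain variable and the WARN_ON by an assert, might look as follows; it is a sketch, not the kernel implementation.

/* Stand-alone model (not kernel code) of the irq_soft_mask accessors. */
#include <assert.h>

#define IRQS_ENABLED	0
#define IRQS_DISABLED	1

static unsigned char irq_soft_mask;		/* models paca->irq_soft_mask */

static void model_irq_soft_mask_set(unsigned char mask)
{
	/* Mirrors WARN_ON(mask && !(mask & IRQS_DISABLED)) above. */
	assert(!(mask && !(mask & IRQS_DISABLED)));
	irq_soft_mask = mask;
}

static unsigned char model_irq_soft_mask_set_return(unsigned char mask)
{
	unsigned char flags = irq_soft_mask;	/* the lbz/stb pair, as one step */

	model_irq_soft_mask_set(mask);
	return flags;
}

/*
 * arch_local_irq_save() and arch_local_irq_disable() in the hunk above
 * reduce to calls like these:
 */
int main(void)
{
	unsigned char flags = model_irq_soft_mask_set_return(IRQS_DISABLED);

	/* ... critical section with interrupts soft-masked ... */

	/* Simplified: the real arch_local_irq_restore() may also replay
	 * interrupts that arrived while masked (see kernel/irq.c below). */
	model_irq_soft_mask_set(flags);
	return 0;
}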
12 changes: 6 additions & 6 deletions arch/powerpc/include/asm/irqflags.h
@@ -47,14 +47,14 @@
* be clobbered.
*/
#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACASOFTIRQEN(r13); \
lbz __rA,PACAIRQSOFTMASK(r13); \
lbz __rB,PACAIRQHAPPENED(r13); \
andi. __rA,__rA,IRQS_DISABLED;\
li __rA,IRQS_DISABLED; \
andi. __rA,__rA,IRQS_DISABLED; \
li __rA,IRQS_DISABLED; \
ori __rB,__rB,PACA_IRQ_HARD_DIS; \
stb __rB,PACAIRQHAPPENED(r13); \
bne 44f; \
stb __rA,PACASOFTIRQEN(r13); \
stb __rA,PACAIRQSOFTMASK(r13); \
TRACE_DISABLE_INTS; \
44:

@@ -64,9 +64,9 @@

#define RECONCILE_IRQ_STATE(__rA, __rB) \
lbz __rA,PACAIRQHAPPENED(r13); \
li __rB,IRQS_DISABLED; \
li __rB,IRQS_DISABLED; \
ori __rA,__rA,PACA_IRQ_HARD_DIS; \
stb __rB,PACASOFTIRQEN(r13); \
stb __rB,PACAIRQSOFTMASK(r13); \
stb __rA,PACAIRQHAPPENED(r13)
#endif
#endif
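
For readers less fluent in powerpc assembly, the CONFIG_TRACE_IRQFLAGS flavour of RECONCILE_IRQ_STATE above corresponds roughly to the C below. This is a sketch only: the helper names are invented, and the PACA_IRQ_HARD_DIS value is an assumption rather than something shown in this diff.

/*
 * Rough C rendering (not kernel code) of the CONFIG_TRACE_IRQFLAGS
 * variant of RECONCILE_IRQ_STATE above.
 */
#include <stdio.h>

#define IRQS_ENABLED		0
#define IRQS_DISABLED		1
#define PACA_IRQ_HARD_DIS	0x01	/* assumed value, for the sketch only */

static unsigned char irq_soft_mask;	/* paca->irq_soft_mask */
static unsigned char irq_happened;	/* paca->irq_happened */

static void trace_disable_ints(void)	/* stands in for TRACE_DISABLE_INTS */
{
	printf("hardirqs off\n");
}

static void reconcile_irq_state(void)
{
	/* Record that interrupts are now hard-disabled ... */
	irq_happened |= PACA_IRQ_HARD_DIS;

	/* ... and if we were not already soft-masked, mask and trace it. */
	if (!(irq_soft_mask & IRQS_DISABLED)) {
		irq_soft_mask = IRQS_DISABLED;
		trace_disable_ints();
	}
}

int main(void)
{
	reconcile_irq_state();
	return 0;
}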
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/kvm_ppc.h
@@ -873,7 +873,7 @@ static inline void kvmppc_fix_ee_before_entry(void)

/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
soft_enabled_set(IRQS_ENABLED);
irq_soft_mask_set(IRQS_ENABLED);
#endif
}

2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/paca.h
@@ -159,7 +159,7 @@ struct paca_struct {
u64 saved_r1; /* r1 save for RTAS calls or PM */
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 soft_enabled; /* irq soft-enable flag */
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/asm-offsets.c
@@ -178,7 +178,7 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
OFFSET(PACASOFTIRQEN, paca_struct, soft_enabled);
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
#ifdef CONFIG_PPC_BOOK3S
OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
12 changes: 6 additions & 6 deletions arch/powerpc/kernel/entry_64.S
@@ -129,7 +129,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
* is correct
*/
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
lbz r10,PACASOFTIRQEN(r13)
lbz r10,PACAIRQSOFTMASK(r13)
1: tdnei r10,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
@@ -781,7 +781,7 @@ restore:
* are about to re-enable interrupts
*/
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
lbz r6,PACAIRQSOFTMASK(r13)
andi. r5,r5,IRQS_DISABLED
bne .Lrestore_irq_off

@@ -806,7 +806,7 @@ restore:
.Lrestore_no_replay:
TRACE_ENABLE_INTS
li r0,IRQS_ENABLED
stb r0,PACASOFTIRQEN(r13);
stb r0,PACAIRQSOFTMASK(r13);

/*
* Final return path. BookE is handled in a different file
@@ -913,8 +913,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1:
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
/* The interrupt should not have soft enabled. */
lbz r7,PACASOFTIRQEN(r13)
1: tdnei r7,IRQS_DISABLED
lbz r7,PACAIRQSOFTMASK(r13)
1: tdeqi r7,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
b .Ldo_restore
@@ -1034,7 +1034,7 @@ _GLOBAL(enter_rtas)
/* There is no way it is acceptable to get here with interrupts enabled,
* check it with the asm equivalent of WARN_ON
*/
lbz r0,PACASOFTIRQEN(r13)
lbz r0,PACAIRQSOFTMASK(r13)
1: tdeqi r0,IRQS_ENABLED
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
10 changes: 5 additions & 5 deletions arch/powerpc/kernel/exceptions-64e.S
@@ -139,7 +139,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfspr r10,SPRN_ESR
SPECIAL_EXC_STORE(r10,ESR)

lbz r10,PACASOFTIRQEN(r13)
lbz r10,PACAIRQSOFTMASK(r13)
SPECIAL_EXC_STORE(r10,SOFTE)
ld r10,_NIP(r1)
SPECIAL_EXC_STORE(r10,CSRR0)
@@ -206,7 +206,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

lbz r6,PACASOFTIRQEN(r13)
lbz r6,PACAIRQSOFTMASK(r13)
ld r5,SOFTE(r1)

/* Interrupts had better not already be enabled... */
@@ -216,7 +216,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
bne 1f

TRACE_ENABLE_INTS
stb r5,PACASOFTIRQEN(r13)
stb r5,PACAIRQSOFTMASK(r13)
1:
/*
* Restore PACAIRQHAPPENED rather than setting it based on
@@ -351,7 +351,7 @@ ret_from_mc_except:
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n) \
lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
lbz r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */ \
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
bne masked_interrupt_book3e_##n

@@ -397,7 +397,7 @@ exc_##n##_common: \
mfspr r8,SPRN_XER; /* save XER in stackframe */ \
ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \
lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \
lbz r11,PACASOFTIRQEN(r13); /* get current IRQ softe */ \
lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \
ld r12,exception_marker@toc(r2); \
li r0,0; \
std r3,GPR10(r1); /* save r10 to stackframe */ \
6 changes: 3 additions & 3 deletions arch/powerpc/kernel/head_64.S
@@ -766,7 +766,7 @@ _GLOBAL(pmac_secondary_start)
* in the PACA when doing hotplug)
*/
li r0,IRQS_DISABLED
stb r0,PACASOFTIRQEN(r13)
stb r0,PACAIRQSOFTMASK(r13)
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)

@@ -823,7 +823,7 @@ __secondary_start:
* in the PACA when doing hotplug)
*/
li r7,IRQS_DISABLED
stb r7,PACASOFTIRQEN(r13)
stb r7,PACAIRQSOFTMASK(r13)
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)

@@ -990,7 +990,7 @@ start_here_common:
* in the PACA when doing hotplug)
*/
li r0,IRQS_DISABLED
stb r0,PACASOFTIRQEN(r13)
stb r0,PACAIRQSOFTMASK(r13)
li r0,PACA_IRQ_HARD_DIS
stb r0,PACAIRQHAPPENED(r13)

2 changes: 1 addition & 1 deletion arch/powerpc/kernel/idle_book3e.S
@@ -48,7 +48,7 @@ _GLOBAL(\name)
addi r1,r1,128
#endif
li r0,IRQS_ENABLED
stb r0,PACASOFTIRQEN(r13)
stb r0,PACAIRQSOFTMASK(r13)

/* Interrupts will make use return to LR, so get something we want
* in there
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/idle_power4.S
@@ -55,7 +55,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
#endif /* CONFIG_TRACE_IRQFLAGS */

li r0,IRQS_ENABLED
stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
stb r0,PACAIRQSOFTMASK(r13) /* we'll hard-enable shortly */
BEGIN_FTR_SECTION
DSSALL
sync
23 changes: 5 additions & 18 deletions arch/powerpc/kernel/irq.c
@@ -225,22 +225,9 @@ notrace void arch_local_irq_restore(unsigned long mask)
unsigned int replay;

/* Write the new soft-enabled value */
soft_enabled_set(mask);
if (mask) {
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* mask must always include LINUX bit if any
* are set, and interrupts don't get replayed until
* the Linux interrupt is unmasked. This could be
* changed to replay partial unmasks in future,
* which would allow Linux masks to nest inside
* other masks, among other things. For now, be very
* dumb and simple.
*/
WARN_ON(!(mask & IRQS_DISABLED));
#endif
irq_soft_mask_set(mask);
if (mask)
return;
}

/*
* From this point onward, we can take interrupts, preempt,
@@ -285,7 +272,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
}
#endif /* CONFIG_TRACE_IRQFLAGS */

soft_enabled_set(IRQS_DISABLED);
irq_soft_mask_set(IRQS_DISABLED);
trace_hardirqs_off();

/*
@@ -297,7 +284,7 @@ notrace void arch_local_irq_restore(unsigned long mask)

/* We can soft-enable now */
trace_hardirqs_on();
soft_enabled_set(IRQS_ENABLED);
irq_soft_mask_set(IRQS_ENABLED);

/*
* And replay if we have to. This will return with interrupts
@@ -372,7 +359,7 @@ bool prep_irq_for_idle(void)
* of entering the low power state.
*/
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
soft_enabled_set(IRQS_ENABLED);
irq_soft_mask_set(IRQS_ENABLED);

/* Tell the caller to enter the low power state */
return true;
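
The kernel/irq.c hunk above removes the WARN_ON and its comment from arch_local_irq_restore(), since that check now lives in irq_soft_mask_set(). The remaining control flow is, very roughly, the stand-alone model below; hard-disabling, the lazy-pending checks and the real interrupt replay are elided, so treat it as a sketch only.

/*
 * Heavily simplified model (not kernel code) of the restore path shown
 * in kernel/irq.c above.
 */
#include <stdio.h>

#define IRQS_ENABLED	0
#define IRQS_DISABLED	1

static unsigned char irq_soft_mask;	/* paca->irq_soft_mask */
static unsigned char irq_happened;	/* paca->irq_happened */

static void model_arch_local_irq_restore(unsigned char mask)
{
	/* Write the new soft-mask value; the sanity WARN_ON now lives in
	 * irq_soft_mask_set() itself. */
	irq_soft_mask = mask;
	if (mask)
		return;			/* still masked: nothing more to do */

	if (!irq_happened)
		return;			/* nothing arrived while masked (simplified) */

	/* Something was caught while soft-masked: mask again ... */
	irq_soft_mask = IRQS_DISABLED;
	/* ... hard-disable and re-check elided ... */
	irq_soft_mask = IRQS_ENABLED;	/* "We can soft-enable now" */
	printf("would replay deferred interrupt(s): %#x\n",
	       (unsigned int)irq_happened);
	irq_happened = 0;
}

int main(void)
{
	irq_happened = 0x04;	/* pretend an interrupt arrived while masked */
	model_arch_local_irq_restore(IRQS_ENABLED);
	return 0;
}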