sched: Extract the basic add/sub preempt_count modifiers
Rewrite the preempt_count macros in order to extract the 3 basic
preempt_count value modifiers:

  __preempt_count_add()
  __preempt_count_sub()

and the new:

  __preempt_count_dec_and_test()

And since we're at it anyway, replace the unconventional
$op_preempt_count names with the more conventional preempt_count_$op.

Since these basic operators are equivalent to the previous _notrace()
variants, do away with the _notrace() versions.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-ewbpdbupy9xpsjhg960zwbv8@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra authored and Ingo Molnar committed Sep 25, 2013
1 parent 0102874 commit bdb4380
Showing 10 changed files with 113 additions and 103 deletions.
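
Before diving into the hunks, a minimal sketch of the renaming and the new fast path (illustrative usage only, not a hunk from this commit):

  /* Before: unconventional $op_preempt_count names, plus _notrace variants. */
  add_preempt_count(HARDIRQ_OFFSET);
  sub_preempt_count(HARDIRQ_OFFSET);
  add_preempt_count_notrace(PREEMPT_ACTIVE);

  /* After: conventional preempt_count_$op names; the double-underscore
   * basic modifiers replace the _notrace variants outright. */
  preempt_count_add(HARDIRQ_OFFSET);
  preempt_count_sub(HARDIRQ_OFFSET);
  __preempt_count_add(PREEMPT_ACTIVE);
  if (preempt_count_dec_and_test())       /* new: decrement and test in one step */
          preempt_schedule();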
5 changes: 2 additions & 3 deletions arch/mips/mm/init.c
@@ -124,7 +124,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 
 	BUG_ON(Page_dcache_dirty(page));
 
-	inc_preempt_count();
+	pagefault_disable();
 	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
 #ifdef CONFIG_MIPS_MT_SMTC
 	idx += FIX_N_COLOURS * smp_processor_id() +
@@ -193,8 +193,7 @@ void kunmap_coherent(void)
 	write_c0_entryhi(old_ctx);
 	EXIT_CRITICAL(flags);
 #endif
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
4 changes: 2 additions & 2 deletions arch/x86/kernel/traps.c
@@ -88,7 +88,7 @@ static inline void conditional_sti(struct pt_regs *regs)
 
 static inline void preempt_conditional_sti(struct pt_regs *regs)
 {
-	inc_preempt_count();
+	preempt_count_inc();
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
@@ -103,7 +103,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
 {
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_disable();
-	dec_preempt_count();
+	preempt_count_dec();
 }
 
 static int __kprobes
35 changes: 35 additions & 0 deletions include/asm-generic/preempt.h
@@ -65,4 +65,39 @@ static __always_inline bool test_preempt_need_resched(void)
 	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
 }
 
+/*
+ * The various preempt_count add/sub methods
+ */
+
+static __always_inline void __preempt_count_add(int val)
+{
+	*preempt_count_ptr() += val;
+}
+
+static __always_inline void __preempt_count_sub(int val)
+{
+	*preempt_count_ptr() -= val;
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	return !--*preempt_count_ptr();
+}
+
+/*
+ * Returns true when we need to resched -- even if we can not.
+ */
+static __always_inline bool need_resched(void)
+{
+	return unlikely(test_preempt_need_resched());
+}
+
+/*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+static __always_inline bool should_resched(void)
+{
+	return unlikely(!*preempt_count_ptr());
+}
+
 #endif /* __ASM_PREEMPT_H */
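
Folding the final decrement and the zero test into __preempt_count_dec_and_test() is what lets preempt_enable() get by with a single conditional (see include/linux/preempt.h below). A stand-alone C model of the semantics, a userspace sketch rather than kernel code:

  #include <stdio.h>
  #include <stdbool.h>

  static int count;                       /* stand-in for the per-task preempt count */

  static bool dec_and_test(void)          /* models __preempt_count_dec_and_test() */
  {
          return !--count;                /* true only when the count reaches zero */
  }

  int main(void)
  {
          count = 2;                      /* two nested preempt_disable() sections */
          printf("%d\n", dec_and_test()); /* 0: inner enable, still non-preemptible */
          printf("%d\n", dec_and_test()); /* 1: outermost enable, OK to reschedule */
          return 0;
  }

Keeping these as __always_inline in asm-generic also leaves room for an architecture to supply a more efficient implementation of the same operations.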
8 changes: 4 additions & 4 deletions include/linux/hardirq.h
@@ -33,7 +33,7 @@ extern void rcu_nmi_exit(void);
 #define __irq_enter()					\
 	do {						\
 		account_irq_enter_time(current);	\
-		add_preempt_count(HARDIRQ_OFFSET);	\
+		preempt_count_add(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
 	} while (0)
 
@@ -49,7 +49,7 @@ extern void irq_enter(void);
 	do {						\
 		trace_hardirq_exit();			\
 		account_irq_exit_time(current);		\
-		sub_preempt_count(HARDIRQ_OFFSET);	\
+		preempt_count_sub(HARDIRQ_OFFSET);	\
 	} while (0)
 
 /*
@@ -62,7 +62,7 @@ extern void irq_exit(void);
 		lockdep_off();					\
 		ftrace_nmi_enter();				\
 		BUG_ON(in_nmi());				\
-		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		rcu_nmi_enter();				\
 		trace_hardirq_enter();				\
 	} while (0)
@@ -72,7 +72,7 @@ extern void irq_exit(void);
 		trace_hardirq_exit();				\
 		rcu_nmi_exit();					\
 		BUG_ON(!in_nmi());				\
-		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		ftrace_nmi_exit();				\
 		lockdep_on();					\
 	} while (0)
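
The offset arguments work because the preempt count packs several nesting counters into disjoint bit fields of one word, so a single add can bump the hardirq depth without touching the preemption depth. A rough stand-alone model (the shift values and field widths here are illustrative assumptions, not the kernel's actual layout):

  #include <stdio.h>

  #define PREEMPT_SHIFT  0                /* assumed layout, for illustration only */
  #define SOFTIRQ_SHIFT  8
  #define HARDIRQ_SHIFT  16

  #define PREEMPT_OFFSET (1 << PREEMPT_SHIFT)
  #define SOFTIRQ_OFFSET (1 << SOFTIRQ_SHIFT)
  #define HARDIRQ_OFFSET (1 << HARDIRQ_SHIFT)

  int main(void)
  {
          int count = 0;

          count += PREEMPT_OFFSET;        /* preempt_disable() */
          count += HARDIRQ_OFFSET;        /* __irq_enter() */
          count += HARDIRQ_OFFSET;        /* a nested interrupt */
          printf("hardirq depth %d, preempt depth %d\n",
                 (count >> HARDIRQ_SHIFT) & 0xff, count & 0xff);
          count -= HARDIRQ_OFFSET;        /* __irq_exit() */
          printf("hardirq depth %d\n", (count >> HARDIRQ_SHIFT) & 0xff);
          return 0;
  }

This prints "hardirq depth 2, preempt depth 1" and then "hardirq depth 1": each field nests independently under the same add/sub primitives.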
106 changes: 48 additions & 58 deletions include/linux/preempt.h
@@ -18,97 +18,86 @@
 #include <asm/preempt.h>
 
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
-extern void add_preempt_count(int val);
-extern void sub_preempt_count(int val);
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
 #else
-# define add_preempt_count(val)	do { *preempt_count_ptr() += (val); } while (0)
-# define sub_preempt_count(val)	do { *preempt_count_ptr() -= (val); } while (0)
+#define preempt_count_add(val)	__preempt_count_add(val)
+#define preempt_count_sub(val)	__preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
 #endif
 
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
-	if (unlikely(!*preempt_count_ptr())) \
-		preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
-
-void preempt_schedule_context(void);
-
-#define preempt_check_resched_context() \
-do { \
-	if (unlikely(!*preempt_count_ptr())) \
-		preempt_schedule_context(); \
-} while (0)
-#else
-
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-
-#define preempt_check_resched()		do { } while (0)
-#define preempt_check_resched_context()	do { } while (0)
-
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
+
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
 
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
-	inc_preempt_count(); \
+	preempt_count_inc(); \
 	barrier(); \
 } while (0)
 
 #define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
-	dec_preempt_count(); \
+	preempt_count_dec(); \
 } while (0)
 
-#define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#ifdef CONFIG_PREEMPT
+asmlinkage void preempt_schedule(void);
 #define preempt_enable() \
 do { \
-	preempt_enable_no_resched(); \
-	preempt_check_resched(); \
+	barrier(); \
+	if (unlikely(preempt_count_dec_and_test())) \
+		preempt_schedule(); \
 } while (0)
 
-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val)			\
-	do { *preempt_count_ptr() += (val); } while (0)
-#define sub_preempt_count_notrace(val)			\
-	do { *preempt_count_ptr() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#define preempt_check_resched() \
+do { \
+	if (should_resched()) \
+		preempt_schedule(); \
+} while (0)
 
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif
 
 #define preempt_disable_notrace() \
 do { \
-	inc_preempt_count_notrace(); \
+	__preempt_count_inc(); \
 	barrier(); \
 } while (0)
 
 #define preempt_enable_no_resched_notrace() \
 do { \
 	barrier(); \
-	dec_preempt_count_notrace(); \
+	__preempt_count_dec(); \
 } while (0)
 
-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifdef CONFIG_CONTEXT_TRACKING
+asmlinkage void preempt_schedule_context(void);
+#else
+#define preempt_schedule_context() preempt_schedule()
+#endif
+
 #define preempt_enable_notrace() \
 do { \
-	preempt_enable_no_resched_notrace(); \
-	preempt_check_resched_context(); \
+	barrier(); \
+	if (unlikely(__preempt_count_dec_and_test())) \
+		preempt_schedule_context(); \
 } while (0)
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
@@ -118,10 +107,11 @@ do { \
  * that can cause faults and scheduling migrate into our preempt-protected
  * region.
  */
-#define preempt_disable()		barrier()
+#define preempt_disable()			barrier()
 #define sched_preempt_enable_no_resched()	barrier()
-#define preempt_enable_no_resched()	barrier()
-#define preempt_enable()		barrier()
+#define preempt_enable_no_resched()		barrier()
+#define preempt_enable()			barrier()
+#define preempt_check_resched()			do { } while (0)
 
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
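
One detail worth noting: the CONFIG_DEBUG_PREEMPT definition of preempt_count_dec_and_test() above relies on a GNU C statement expression, ({ ... }), which evaluates to its last expression, here the should_resched() result sampled after the traced decrement. A stand-alone illustration of the construct (gcc/clang extension; the helpers are simplified stand-ins, not the kernel's):

  #include <stdio.h>

  static int count = 2;

  static void count_sub(int val) { count -= val; }        /* stands in for preempt_count_sub() */
  static int should_resched(void) { return count == 0; }  /* resched allowed at zero */

  /* Same shape as the CONFIG_DEBUG_PREEMPT definition above. */
  #define count_dec_and_test() ({ count_sub(1); should_resched(); })

  int main(void)
  {
          printf("%d\n", count_dec_and_test());   /* 0: count dropped to 1 */
          printf("%d\n", count_dec_and_test());   /* 1: count reached 0 */
          return 0;
  }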
5 changes: 0 additions & 5 deletions include/linux/sched.h
@@ -2409,11 +2409,6 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
-static inline int need_resched(void)
-{
-	return unlikely(test_preempt_need_resched());
-}
-
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
8 changes: 2 additions & 6 deletions include/linux/uaccess.h
@@ -15,7 +15,7 @@
  */
 static inline void pagefault_disable(void)
 {
-	inc_preempt_count();
+	preempt_count_inc();
 	/*
 	 * make sure to have issued the store before a pagefault
 	 * can hit.
@@ -30,11 +30,7 @@ static inline void pagefault_enable(void)
 	 * the pagefault handler again.
 	 */
 	barrier();
-	dec_preempt_count();
-	/*
-	 * make sure we do..
-	 */
-	barrier();
+	preempt_count_dec();
 	preempt_check_resched();
 }
 
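
For callers nothing changes: pagefault_disable() still makes a fault fail fast instead of sleeping, and pagefault_enable() may now reschedule immediately via preempt_check_resched(). A typical calling pattern, sketched for illustration rather than taken from this commit:

  pagefault_disable();                    /* faults must not sleep from here on */
  left = __copy_from_user_inatomic(dst, src, len);
  pagefault_enable();                     /* drop the count, then maybe reschedule */
  if (left)                               /* nonzero means the copy faulted */
          return -EFAULT;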
2 changes: 1 addition & 1 deletion kernel/context_tracking.c
@@ -111,7 +111,7 @@ void context_tracking_user_enter(void)
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-void __sched notrace preempt_schedule_context(void)
+asmlinkage void __sched notrace preempt_schedule_context(void)
 {
 	enum ctx_state prev_ctx;
 
29 changes: 12 additions & 17 deletions kernel/sched/core.c
@@ -2219,7 +2219,7 @@ notrace unsigned long get_parent_ip(unsigned long addr)
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))
 
-void __kprobes add_preempt_count(int val)
+void __kprobes preempt_count_add(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
 	/*
@@ -2228,7 +2228,7 @@ void __kprobes add_preempt_count(int val)
 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
 		return;
 #endif
-	add_preempt_count_notrace(val);
+	__preempt_count_add(val);
 #ifdef CONFIG_DEBUG_PREEMPT
 	/*
 	 * Spinlock count overflowing soon?
@@ -2239,9 +2239,9 @@ void __kprobes add_preempt_count(int val)
 	if (preempt_count() == val)
 		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
-EXPORT_SYMBOL(add_preempt_count);
+EXPORT_SYMBOL(preempt_count_add);
 
-void __kprobes sub_preempt_count(int val)
+void __kprobes preempt_count_sub(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
 	/*
@@ -2259,9 +2259,9 @@ void __kprobes sub_preempt_count(int val)
 
 	if (preempt_count() == val)
 		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
-	sub_preempt_count_notrace(val);
+	__preempt_count_sub(val);
 }
-EXPORT_SYMBOL(sub_preempt_count);
+EXPORT_SYMBOL(preempt_count_sub);
 
 #endif
 
@@ -2525,9 +2525,9 @@ asmlinkage void __sched notrace preempt_schedule(void)
 		return;
 
 	do {
-		add_preempt_count_notrace(PREEMPT_ACTIVE);
+		__preempt_count_add(PREEMPT_ACTIVE);
 		__schedule();
-		sub_preempt_count_notrace(PREEMPT_ACTIVE);
+		__preempt_count_sub(PREEMPT_ACTIVE);
 
 		/*
 		 * Check again in case we missed a preemption opportunity
@@ -2554,11 +2554,11 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		add_preempt_count(PREEMPT_ACTIVE);
+		__preempt_count_add(PREEMPT_ACTIVE);
 		local_irq_enable();
 		__schedule();
 		local_irq_disable();
-		sub_preempt_count(PREEMPT_ACTIVE);
+		__preempt_count_sub(PREEMPT_ACTIVE);
 
 		/*
 		 * Check again in case we missed a preemption opportunity
@@ -3798,16 +3798,11 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-static inline int should_resched(void)
-{
-	return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
-}
-
 static void __cond_resched(void)
 {
-	add_preempt_count(PREEMPT_ACTIVE);
+	__preempt_count_add(PREEMPT_ACTIVE);
 	__schedule();
-	sub_preempt_count(PREEMPT_ACTIVE);
+	__preempt_count_sub(PREEMPT_ACTIVE);
 }
 
 int __sched _cond_resched(void)
(The diff for the remaining changed file did not load and is not shown.)
