Commit

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Three fixes for scheduler crashes, each triggers in relatively rare,
  hardware environment dependent situations"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Rework sched_fair time accounting
  math64: Add mul_u64_u32_shr()
  sched: Remove PREEMPT_NEED_RESCHED from generic code
  sched: Initialize power_orig for overlapping groups
Linus Torvalds committed Dec 17, 2013
2 parents 1070d5a + 9dbdb15 commit dd05080
Showing 8 changed files with 126 additions and 107 deletions.
arch/x86/Kconfig: 1 addition, 0 deletions

@@ -26,6 +26,7 @@ config X86
 	select HAVE_AOUT if X86_32
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING
+	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select HAVE_IDE
 	select HAVE_OPROFILE
arch/x86/include/asm/preempt.h: 11 additions, 0 deletions

@@ -7,6 +7,12 @@

 DECLARE_PER_CPU(int, __preempt_count);

+/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
 /*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
 	__this_cpu_add_4(__preempt_count, -val);
 }

+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
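The comments added above are the heart of the trick, and the arithmetic is easy to miss, so here is a minimal user-space sketch of the inverted-bit folding (all names are illustrative stand-ins, not the kernel's API). Because the need-resched bit is kept set while no reschedule is wanted, the counter can only decrement to zero when the preempt count is zero and the bit has been cleared, so a single decrement-and-test answers "may we preempt?" and "must we reschedule?" at once:

#include <stdbool.h>
#include <stdio.h>

#define NEED_RESCHED_BIT 0x80000000u /* stand-in for PREEMPT_NEED_RESCHED */

/* "Enabled" state: count 0 plus the inverted bit set (no resched wanted). */
static unsigned int count = NEED_RESCHED_BIT;

static void mock_preempt_disable(void)  { count++; }
static void mock_set_need_resched(void) { count &= ~NEED_RESCHED_BIT; }

/* One decrement, one zero test: true only when no preempt count is held
 * AND the inverted bit is clear, i.e. a reschedule is both possible and
 * wanted. */
static bool mock_preempt_enable(void)
{
	return --count == 0;
}

int main(void)
{
	mock_preempt_disable();   /* count = NEED_RESCHED_BIT | 1 */
	mock_set_need_resched();  /* count = 1                    */
	printf("reschedule: %d\n", mock_preempt_enable()); /* prints 1 */
	return 0;
}

On x86 that test is exactly what the GEN_UNARY_RMWcc("decl", ...) above emits: one decl on the per-cpu counter, with the zero flag feeding the caller's branch.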
include/asm-generic/preempt.h: 11 additions, 24 deletions

@@ -3,25 +3,18 @@

 #include <linux/thread_info.h>

-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED	(0)
+
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+	return current_thread_info()->preempt_count;
 }

 static __always_inline int *preempt_count_ptr(void)
 {
 	return &current_thread_info()->preempt_count;
 }

-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
 	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
 } while (0)

-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
 static __always_inline void set_preempt_need_resched(void)
 {
-	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
 }

 static __always_inline void clear_preempt_need_resched(void)
 {
-	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
 }

 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+	return false;
 }

 /*
@@ -81,15 +63,20 @@ static __always_inline void __preempt_count_sub(int val)

 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return !--*preempt_count_ptr();
+	/*
+	 * Because of load-store architectures cannot do per-cpu atomic
+	 * operations; we cannot use PREEMPT_NEED_RESCHED because it might get
+	 * lost.
+	 */
+	return !--*preempt_count_ptr() && tif_need_resched();
 }

 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!*preempt_count_ptr());
+	return unlikely(!preempt_count() && tif_need_resched());
 }

 #ifdef CONFIG_PREEMPT
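The new comment in __preempt_count_dec_and_test() is terse; the race it alludes to is worth spelling out. On a load-store architecture, an or/and on the per-cpu counter compiles to separate load, modify, and store instructions, and an interrupt can fire in between. A hypothetical interleaving (illustrative code, not from the kernel):

/* Why a flag folded into preempt_count can be lost without a single
 * atomic read-modify-write memory operation. */
void lost_update(volatile unsigned int *count, unsigned int bit)
{
	unsigned int r = *count; /* 1. load                                 */
	                         /* 2. IRQ fires here and clears `bit` in   */
	                         /*    *count to request a reschedule       */
	r |= bit;                /* 3. modify the stale local copy          */
	*count = r;              /* 4. store: the IRQ's update is wiped out */
}

Hence the generic code keeps need-resched purely in the thread_info flags and tests tif_need_resched() explicitly, paying the second test that the x86 version avoids.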
include/linux/math64.h: 30 additions, 0 deletions

@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }

+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+	u32 ah, al;
+	u64 ret;
+
+	al = a;
+	ah = a >> 32;
+
+	ret = ((u64)al * mul) >> shift;
+	if (ah)
+		ret += ((u64)ah * mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
 #endif /* _LINUX_MATH64_H */
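A quick sanity check of the new helper's two paths. This user-space sketch copies the fallback out of the diff onto stdint types and compares it against the __int128 route on a value with both 32-bit halves non-zero (illustrative test code, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* The non-__int128 fallback from the diff, restated with stdint types. */
static uint64_t mul_shr_fallback(uint64_t a, uint32_t mul, unsigned int shift)
{
	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint64_t ret = ((uint64_t)al * mul) >> shift;

	if (ah)
		ret += ((uint64_t)ah * mul) << (32 - shift);
	return ret;
}

int main(void)
{
	uint64_t a = 0x100000001ULL; /* 2^32 + 1: both halves non-zero */

	/* (3 * (2^32 + 1)) >> 1 = 0x180000001 */
	printf("fallback: %#llx\n",
	       (unsigned long long)mul_shr_fallback(a, 3, 1));
#ifdef __SIZEOF_INT128__
	printf("__int128: %#llx\n",
	       (unsigned long long)(((unsigned __int128)a * 3) >> 1));
#endif
	return 0;
}

Both print 0x180000001. Note the fallback is only valid for shift values up to 32 (it shifts the high partial product left by 32 - shift), which the scheduler arithmetic this helper was added for respects.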
include/linux/sched.h: 2 additions, 3 deletions

@@ -440,8 +440,6 @@ struct task_cputime {
 	.sum_exec_runtime = 0,				\
 }

-#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
-
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
 #else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
 struct uts_namespace;

 struct load_weight {
-	unsigned long weight, inv_weight;
+	unsigned long weight;
+	u32 inv_weight;
 };

 struct sched_avg {
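Narrowing inv_weight to u32 dovetails with the mul_u64_u32_shr() helper added above: inv_weight is a 32-bit fixed-point inverse of weight, so dividing a runtime delta by a task's weight becomes a multiply and a shift. A sketch of the idea (names and values illustrative; the real consumer is the sched_fair rework merged here, whose diff did not load on this page):

#include <stdint.h>
#include <stdio.h>

#define WMULT_SHIFT 32 /* assumed fixed-point width, matching the shift */

int main(void)
{
	uint64_t delta = 6000000; /* e.g. a runtime delta in ns */
	uint32_t weight = 3;

	/* Precompute the inverse once: inv_weight ~ 2^32 / weight. */
	uint32_t inv_weight = (uint32_t)((1ULL << WMULT_SHIFT) / weight);

	/* These sample values fit in 64 bits; the general case is what
	 * mul_u64_u32_shr() is for, since the product can reach 96 bits. */
	uint64_t approx = (delta * (uint64_t)inv_weight) >> WMULT_SHIFT;

	printf("%llu ~ %llu\n", (unsigned long long)approx,
	       (unsigned long long)(delta / weight));
	return 0;
}

A u32 inverse loses nothing, since scheduler weights are far smaller than 2^32, and it matches the u32 multiplicand the new helper takes.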
init/Kconfig: 6 additions, 0 deletions

@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK
 config ARCH_SUPPORTS_NUMA_BALANCING
 	bool

+#
+# For architectures that know their GCC __int128 support is sound
+#
+config ARCH_SUPPORTS_INT128
+	bool
+
 # For architectures that (ab)use NUMA to represent different memory regions
 # all cpu-local but of different latencies, such as SuperH.
 #
kernel/sched/core.c: 1 addition, 0 deletions

@@ -5112,6 +5112,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 * die on a /0 trap.
 		 */
 		sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+		sg->sgp->power_orig = sg->sgp->power;

 		/*
 		 * Make sure the first group of this domain contains the
(The diff for the eighth changed file did not load.)
