
Commit f6d13da

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "A couple of regression fixes mostly hitting virtualized setups, but
  also some bare metal systems"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/x86/tsc: Initialize multiplier to 0
  sched/clock: Fixup early initialization
  sched/preempt/x86: Fix voluntary preempt for x86
  Revert "sched: Fix sleep time double accounting in enqueue entity"
Linus Torvalds committed Jan 25, 2014
2 parents: d4a63a8 + 5e3c1af; commit: f6d13da
Showing 4 changed files with 43 additions and 25 deletions.
arch/x86/kernel/tsc.c: 1 addition & 1 deletion
@@ -180,7 +180,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
 
 static void cyc2ns_data_init(struct cyc2ns_data *data)
 {
-	data->cyc2ns_mul = 1U << CYC2NS_SCALE_FACTOR;
+	data->cyc2ns_mul = 0;
 	data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
 	data->cyc2ns_offset = 0;
 	data->__count = 0;
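
For context: the cyc2ns machinery converts TSC cycles to nanoseconds with a fixed-point multiply and shift, ns = (cycles * cyc2ns_mul) >> cyc2ns_shift, plus an offset that keeps the clock continuous across frequency changes. Starting the multiplier at 0 makes every pre-calibration reading collapse to the offset (initially 0) instead of a value scaled off whatever count the TSC accumulated during early boot. A minimal user-space sketch of the conversion; the scale factor and the 3 GHz calibration figure are illustrative, not taken from the kernel:

    #include <stdint.h>
    #include <stdio.h>

    #define SCALE_SHIFT 10  /* illustrative stand-in for CYC2NS_SCALE_FACTOR */

    struct cyc2ns_sketch {
        uint32_t mul;    /* ns per cycle, scaled by 2^SCALE_SHIFT */
        uint32_t shift;
        uint64_t offset; /* preserves continuity when mul changes */
    };

    static uint64_t cyc2ns(const struct cyc2ns_sketch *d, uint64_t cycles)
    {
        return ((cycles * d->mul) >> d->shift) + d->offset;
    }

    int main(void)
    {
        /* Before calibration: mul == 0, so any cycle count reads as 0. */
        struct cyc2ns_sketch early = { .mul = 0, .shift = SCALE_SHIFT, .offset = 0 };

        /* After calibrating a hypothetical 3 GHz TSC:
         * mul = (1000000 << SCALE_SHIFT) / 3000000 kHz ~= 341,
         * i.e. about 0.333 ns per cycle (341/1024). */
        struct cyc2ns_sketch calib = { .mul = 341, .shift = SCALE_SHIFT, .offset = 0 };

        printf("early:      %llu ns\n", (unsigned long long)cyc2ns(&early, 123456789));
        printf("calibrated: %llu ns\n", (unsigned long long)cyc2ns(&calib, 123456789));
        return 0;
    }

With the old initial value of 1U << CYC2NS_SCALE_FACTOR, the same pre-calibration read would have returned (cycles * 2^10) >> 10 = cycles, a bogus raw-cycle value that then got carried around by the continuity logic.
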
include/linux/preempt.h: 0 additions & 5 deletions
@@ -134,7 +134,6 @@ do { \
 #undef preempt_check_resched
 #endif
 
-#ifdef CONFIG_PREEMPT
 #define preempt_set_need_resched() \
 do { \
 	set_preempt_need_resched(); \
@@ -144,10 +143,6 @@ do { \
 	if (tif_need_resched()) \
 		set_preempt_need_resched(); \
 } while (0)
-#else
-#define preempt_set_need_resched() do { } while (0)
-#define preempt_fold_need_resched() do { } while (0)
-#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
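
With the #ifdef gone, preempt_set_need_resched() and preempt_fold_need_resched() exist for every preemption model, which the x86 voluntary-preempt path needs: x86 caches TIF_NEED_RESCHED as an inverted bit inside its per-CPU preempt count, and that cached bit has to be re-folded from the thread flag even when CONFIG_PREEMPT is off, because cond_resched()/should_resched() are the preemption points there. A rough model of the folding idea; the names and layout imitate the kernel's scheme but are simplified:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model: NEED_RESCHED lives as an *inverted* high bit in the preempt
     * count, so count == 0 means "preemptible AND reschedule needed" and
     * a single test covers both conditions. */
    #define NEED_RESCHED_BIT 0x80000000u

    static unsigned int preempt_count = NEED_RESCHED_BIT; /* bit set: no resched */
    static bool tif_need_resched_flag;                    /* thread-info flag */

    static void set_preempt_need_resched(void)
    {
        preempt_count &= ~NEED_RESCHED_BIT; /* clearing the inverted bit arms it */
    }

    /* Needed by all preemption models: resynchronize the cached bit with
     * the authoritative thread flag. */
    static void preempt_fold_need_resched(void)
    {
        if (tif_need_resched_flag)
            set_preempt_need_resched();
    }

    static bool should_resched(void)
    {
        return preempt_count == 0;
    }

    int main(void)
    {
        tif_need_resched_flag = true;   /* scheduler marked the task */
        preempt_fold_need_resched();    /* fold it into the count */
        printf("should_resched: %d\n", should_resched());
        return 0;
    }

When the macros compiled to no-ops on CONFIG_PREEMPT=n kernels, the cached bit could go stale and voluntary preemption points stopped firing, which is what the 'Fix voluntary preempt for x86' change addresses.
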
kernel/sched/clock.c: 41 additions & 12 deletions
@@ -77,35 +77,50 @@ __read_mostly int sched_clock_running;
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static struct static_key __sched_clock_stable = STATIC_KEY_INIT;
+static int __sched_clock_stable_early;
 
 int sched_clock_stable(void)
 {
-	if (static_key_false(&__sched_clock_stable))
-		return false;
-	return true;
+	return static_key_false(&__sched_clock_stable);
 }
 
-void set_sched_clock_stable(void)
+static void __set_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
-		static_key_slow_dec(&__sched_clock_stable);
+		static_key_slow_inc(&__sched_clock_stable);
+}
+
+void set_sched_clock_stable(void)
+{
+	__sched_clock_stable_early = 1;
+
+	smp_mb(); /* matches sched_clock_init() */
+
+	if (!sched_clock_running)
+		return;
+
+	__set_sched_clock_stable();
 }
 
 static void __clear_sched_clock_stable(struct work_struct *work)
 {
 	/* XXX worry about clock continuity */
 	if (sched_clock_stable())
-		static_key_slow_inc(&__sched_clock_stable);
+		static_key_slow_dec(&__sched_clock_stable);
 }
 
 static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
 
 void clear_sched_clock_stable(void)
 {
-	if (keventd_up())
-		schedule_work(&sched_clock_work);
-	else
-		__clear_sched_clock_stable(&sched_clock_work);
+	__sched_clock_stable_early = 0;
+
+	smp_mb(); /* matches sched_clock_init() */
+
+	if (!sched_clock_running)
+		return;
+
+	schedule_work(&sched_clock_work);
 }
 
 struct sched_clock_data {
@@ -140,6 +155,20 @@ void sched_clock_init(void)
 	}
 
 	sched_clock_running = 1;
+
+	/*
+	 * Ensure that it is impossible to not do a static_key update.
+	 *
+	 * Either {set,clear}_sched_clock_stable() must see sched_clock_running
+	 * and do the update, or we must see their __sched_clock_stable_early
+	 * and do the update, or both.
+	 */
+	smp_mb(); /* matches {set,clear}_sched_clock_stable() */
+
+	if (__sched_clock_stable_early)
+		__set_sched_clock_stable();
+	else
+		__clear_sched_clock_stable(NULL);
 }
 
 /*
@@ -340,7 +369,7 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  */
 u64 cpu_clock(int cpu)
 {
-	if (static_key_false(&__sched_clock_stable))
+	if (!sched_clock_stable())
 		return sched_clock_cpu(cpu);
 
 	return sched_clock();
@@ -355,7 +384,7 @@ u64 cpu_clock(int cpu)
  */
 u64 local_clock(void)
 {
-	if (static_key_false(&__sched_clock_stable))
+	if (!sched_clock_stable())
 		return sched_clock_cpu(raw_smp_processor_id());
 
 	return sched_clock();
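
The barrier dance added above is the classic store-buffering pattern: each side publishes its own flag, issues a full barrier, then reads the other side's flag, which guarantees that at least one side observes the other and performs the static-key update; the comment block in sched_clock_init() spells this out. A user-space rendering of the same guarantee using C11 atomics; the names are illustrative stand-ins for the kernel variables:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int stable_early;  /* plays __sched_clock_stable_early */
    static atomic_int clock_running; /* plays sched_clock_running */

    static void apply_update(bool stable) { (void)stable; /* static_key inc/dec */ }

    /* Caller side, like set_sched_clock_stable(). */
    static void set_stable(void)
    {
        atomic_store_explicit(&stable_early, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* the smp_mb() */
        if (!atomic_load_explicit(&clock_running, memory_order_relaxed))
            return; /* too early: init will pick the flag up itself */
        apply_update(true);
    }

    /* Init side, like sched_clock_init(). */
    static void clock_init(void)
    {
        atomic_store_explicit(&clock_running, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst); /* the smp_mb() */
        /* Either this load sees the early flag, or the setter's load saw
         * clock_running and applied the update itself; possibly both. */
        apply_update(atomic_load_explicit(&stable_early,
                                          memory_order_relaxed) != 0);
    }

    int main(void)
    {
        /* Single-threaded demo; the guarantee matters when these two
         * race on different CPUs. */
        clock_init();
        set_stable();
        return 0;
    }

Note that both sides may legitimately perform the update, which is why the guards inside __set_sched_clock_stable() and __clear_sched_clock_stable() stay: they make the key transition idempotent rather than relying on exclusivity.
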
kernel/sched/fair.c: 1 addition & 7 deletions
@@ -2360,13 +2360,7 @@ static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
 		}
 		wakeup = 0;
 	} else {
-		/*
-		 * Task re-woke on same cpu (or else migrate_task_rq_fair()
-		 * would have made count negative); we must be careful to avoid
-		 * double-accounting blocked time after synchronizing decays.
-		 */
-		se->avg.last_runnable_update += __synchronize_entity_decay(se)
-							<< 20;
+		__synchronize_entity_decay(se);
 	}
 
 	/* migrated tasks did not contribute to our blocked load */
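
For the arithmetic behind the reverted line: __synchronize_entity_decay() returns how many decay periods elapsed while the entity was blocked, one PELT decay period is 2^20 ns (roughly 1 ms), and last_runnable_update is kept in nanoseconds, hence periods << 20 converts the period count back to time. A tiny sketch to make the unit conversion concrete; the period count is made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t periods = 3;        /* e.g. ~3 ms spent blocked */
        uint64_t ns = periods << 20; /* 3 * 1048576 ns ~= 3.15 ms */
        printf("%llu periods -> %llu ns\n",
               (unsigned long long)periods, (unsigned long long)ns);
        return 0;
    }

Skipping the timestamp ahead by that amount was meant to avoid counting the blocked window twice, but it also let unrelated activity on the CPU leak into the entity's load average, which is why the plain decay is restored here.
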
