Commit d14f009
Rik van Riel authored and Ingo Molnar committed Feb 3, 2011
1 parent 3e3036d commit d14f009
Showing 6 changed files with 91 additions and 72 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
---
-refs/heads/master: 2c13c919d9e9a3db9896143a501f83dcbbe1ced4
+refs/heads/master: ac53db596cc08ecb8040cfb6f71ae40c6f2041c4
trunk/include/linux/sched.h: 2 changes (0 additions, 2 deletions)
@@ -1942,8 +1942,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);

-extern unsigned int sysctl_sched_compat_yield;
-
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;

trunk/kernel/sched.c: 2 changes (1 addition, 1 deletion)
@@ -324,7 +324,7 @@ struct cfs_rq {
* 'curr' points to currently running entity on this cfs_rq.
* It is set to NULL otherwise (i.e when none are currently running).
*/
-	struct sched_entity *curr, *next, *last;
+	struct sched_entity *curr, *next, *last, *skip;

unsigned int nr_spread_over;

trunk/kernel/sched_debug.c: 2 changes (1 addition, 1 deletion)
@@ -179,7 +179,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)

raw_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
-		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
+		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);
if (last)
max_vruntime = last->vruntime;
trunk/kernel/sched_fair.c: 148 changes (88 additions, 60 deletions)
@@ -68,14 +68,6 @@ static unsigned int sched_nr_latency = 8;
*/
unsigned int sysctl_sched_child_runs_first __read_mostly;

-/*
- * sys_sched_yield() compat mode
- *
- * This option switches the agressive yield implementation of the
- * old scheduler back on.
- */
-unsigned int __read_mostly sysctl_sched_compat_yield;
-
/*
* SCHED_OTHER wake-up granularity.
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
@@ -419,7 +411,7 @@ static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

-static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *left = cfs_rq->rb_leftmost;

@@ -429,6 +421,17 @@ static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
return rb_entry(left, struct sched_entity, run_node);
}

+static struct sched_entity *__pick_next_entity(struct sched_entity *se)
+{
+	struct rb_node *next = rb_next(&se->run_node);
+
+	if (!next)
+		return NULL;
+
+	return rb_entry(next, struct sched_entity, run_node);
+}
+
+#ifdef CONFIG_SCHED_DEBUG
static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
@@ -443,7 +446,6 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/

-#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -1017,13 +1019,27 @@ static void __clear_buddies_next(struct sched_entity *se)
}
}

+static void __clear_buddies_skip(struct sched_entity *se)
+{
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+		if (cfs_rq->skip == se)
+			cfs_rq->skip = NULL;
+		else
+			break;
+	}
+}
+
static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
if (cfs_rq->last == se)
__clear_buddies_last(se);

if (cfs_rq->next == se)
__clear_buddies_next(se);

+	if (cfs_rq->skip == se)
+		__clear_buddies_skip(se);
}

static void
@@ -1099,7 +1115,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
return;

if (cfs_rq->nr_running > 1) {
-		struct sched_entity *se = __pick_next_entity(cfs_rq);
+		struct sched_entity *se = __pick_first_entity(cfs_rq);
s64 delta = curr->vruntime - se->vruntime;

if (delta < 0)
@@ -1143,20 +1159,40 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

+/*
+ * Pick the next process, keeping these things in mind, in this order:
+ * 1) keep things fair between processes/task groups
+ * 2) pick the "next" process, since someone really wants that to run
+ * 3) pick the "last" process, for cache locality
+ * 4) do not run the "skip" process, if something else is available
+ */
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
-	struct sched_entity *se = __pick_next_entity(cfs_rq);
+	struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *left = se;

-	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
-		se = cfs_rq->next;
+	/*
+	 * Avoid running the skip buddy, if running something else can
+	 * be done without getting too unfair.
+	 */
+	if (cfs_rq->skip == se) {
+		struct sched_entity *second = __pick_next_entity(se);
+		if (second && wakeup_preempt_entity(second, left) < 1)
+			se = second;
+	}

+	/*
+	 * Prefer last buddy, try to return the CPU to a preempted task.
+	 */
if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
se = cfs_rq->last;

+	/*
+	 * Someone really wants this to run. If it's not unfair, run it.
+	 */
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+		se = cfs_rq->next;
+
clear_buddies(cfs_rq, se);

	return se;
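
The ordering comment above reads as a small decision procedure: start from the fairest (leftmost) entity, step past it if it is the skip buddy and the runner-up is not too unfair, then let the last and next buddies override in turn. A stand-alone toy model of that order (illustrative only, not kernel code: entities are reduced to bare vruntime values, the wakeup_preempt_entity() granularity check to a plain comparison, and all names here are invented):

/* Toy model of pick_next_entity()'s selection order. */
#include <stdio.h>

struct toy_entity {
	const char *name;
	long long vruntime;
};

/* Stand-in for "wakeup_preempt_entity(se, left) < 1": in this toy,
 * running 'se' instead of 'left' is fair enough only when its
 * vruntime does not exceed left's (no wakeup granularity slack). */
static int fair_enough(const struct toy_entity *se, const struct toy_entity *left)
{
	return se && se->vruntime <= left->vruntime;
}

static const struct toy_entity *
toy_pick(const struct toy_entity *leftmost, const struct toy_entity *second,
	 const struct toy_entity *skip, const struct toy_entity *last,
	 const struct toy_entity *next)
{
	const struct toy_entity *se = leftmost;		/* 1) fairness first */

	if (skip == se && fair_enough(second, leftmost))
		se = second;				/* 4) avoid the skip buddy */

	if (fair_enough(last, leftmost))
		se = last;				/* 3) cache locality */

	if (fair_enough(next, leftmost))
		se = next;				/* 2) "next" really wants the CPU */

	return se;
}

int main(void)
{
	struct toy_entity a = { "A", 100 }, b = { "B", 100 };

	/* A yielded: it is leftmost but also the skip buddy, so the
	 * equally fair B is picked instead. */
	printf("picked: %s\n", toy_pick(&a, &b, &a, NULL, NULL)->name);
	return 0;
}

With equal vruntimes the yielding task steps aside; once it has fallen far enough behind, fairness (rule 1) puts it back on the CPU.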
@@ -1333,52 +1369,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}

-/*
- * sched_yield() support is very simple - we dequeue and enqueue.
- *
- * If compat_yield is turned on then we requeue to the end of the tree.
- */
-static void yield_task_fair(struct rq *rq)
-{
-	struct task_struct *curr = rq->curr;
-	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
-	struct sched_entity *rightmost, *se = &curr->se;
-
-	/*
-	 * Are we the only task in the tree?
-	 */
-	if (unlikely(rq->nr_running == 1))
-		return;
-
-	clear_buddies(cfs_rq, se);
-
-	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
-		update_rq_clock(rq);
-		/*
-		 * Update run-time statistics of the 'current'.
-		 */
-		update_curr(cfs_rq);
-
-		return;
-	}
-	/*
-	 * Find the rightmost entry in the rbtree:
-	 */
-	rightmost = __pick_last_entity(cfs_rq);
-	/*
-	 * Already in the rightmost position?
-	 */
-	if (unlikely(!rightmost || entity_before(rightmost, se)))
-		return;
-
-	/*
-	 * Minimally necessary key value to be last in the tree:
-	 * Upon rescheduling, sched_class::put_prev_task() will place
-	 * 'current' within the tree based on its new key value.
-	 */
-	se->vruntime = rightmost->vruntime + 1;
-}
-
#ifdef CONFIG_SMP

static void task_waking_fair(struct rq *rq, struct task_struct *p)
@@ -1849,6 +1839,14 @@ static void set_next_buddy(struct sched_entity *se)
}
}

+static void set_skip_buddy(struct sched_entity *se)
+{
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->skip = se;
+	}
+}
+
/*
* Preempt the current task with a newly woken task if needed:
*/
@@ -1947,6 +1945,36 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
}
}

+/*
+ * sched_yield() is very simple
+ *
+ * The magic of dealing with the ->skip buddy is in pick_next_entity.
+ */
+static void yield_task_fair(struct rq *rq)
+{
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *se = &curr->se;
+
+	/*
+	 * Are we the only task in the tree?
+	 */
+	if (unlikely(rq->nr_running == 1))
+		return;
+
+	clear_buddies(cfs_rq, se);
+
+	if (curr->policy != SCHED_BATCH) {
+		update_rq_clock(rq);
+		/*
+		 * Update run-time statistics of the 'current'.
+		 */
+		update_curr(cfs_rq);
+	}
+
+	set_skip_buddy(se);
+}
+
#ifdef CONFIG_SMP
/**************************************************
* Fair scheduling class load-balancing methods:
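For contrast with the implementation this replaces, the userspace API is unchanged: sched_yield() still just asks the scheduler to run something else, but the caller is now marked as the skip buddy instead of being requeued at the far right of the tree. A minimal caller, as a sketch (assumes two CPU-bound threads pinned to one CPU so they actually contend; build with -lpthread):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *spin(void *arg)
{
	for (int i = 0; i < 5; i++) {
		printf("thread %ld: pass %d\n", (long)arg, i);
		sched_yield();	/* step aside; pick_next_entity() skips us */
	}
	return NULL;
}

int main(void)
{
	cpu_set_t set;
	pthread_t t1, t2;

	/* Pin the whole process to CPU 0 so the two threads contend. */
	CPU_ZERO(&set);
	CPU_SET(0, &set);
	sched_setaffinity(0, sizeof(set), &set);

	pthread_create(&t1, NULL, spin, (void *)1L);
	pthread_create(&t2, NULL, spin, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}
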
trunk/kernel/sysctl.c: 7 changes (0 additions, 7 deletions)
@@ -360,13 +360,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_rt_handler,
},
-	{
-		.procname	= "sched_compat_yield",
-		.data		= &sysctl_sched_compat_yield,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
#ifdef CONFIG_SCHED_AUTOGROUP
{
.procname = "sched_autogroup_enabled",
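With this ctl_table entry removed, /proc/sys/kernel/sched_compat_yield disappears from procfs. A quick existence probe, as a sketch (it only reports whether the running kernel still exposes the knob):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_compat_yield", "r");

	if (f) {
		printf("sched_compat_yield is still present\n");
		fclose(f);
	} else {
		printf("sched_compat_yield has been removed\n");
	}
	return 0;
}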
