---
yaml
---
r: 158336
b: refs/heads/master
c: 3f029d3
h: refs/heads/master
v: v3
Gregory Haskins authored and Ingo Molnar committed Aug 2, 2009
1 parent 5d36b2e commit 95a6b49
Showing 4 changed files with 62 additions and 54 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: c3a2ae3d93c0f10d29c071f599764d00b8de00cb
refs/heads/master: 3f029d3c6d62068d59301d90c18dbde8ee402107
1 change: 0 additions & 1 deletion trunk/include/linux/sched.h
@@ -1047,7 +1047,6 @@ struct sched_class {
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
int (*needs_post_schedule) (struct rq *this_rq);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

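With needs_post_schedule() gone, a scheduling class exposes at most two balancing hooks around a context switch, and either of them may be left NULL. Below is a minimal stand-alone sketch of that shape; the toy_* names and the dummy hook are hypothetical and only model the pattern, they are not kernel code.

#include <stddef.h>
#include <stdio.h>

struct toy_rq;                        /* stand-in for struct rq */
struct toy_task;                      /* stand-in for struct task_struct */

/* The two optional hooks that remain after this patch. */
struct toy_sched_class {
	void (*pre_schedule)(struct toy_rq *rq, struct toy_task *prev);
	void (*post_schedule)(struct toy_rq *rq);
};

static void toy_post_schedule(struct toy_rq *rq)
{
	(void)rq;
	puts("class-specific post-switch balancing");
}

/* A class fills in only what it needs; unused hooks stay NULL. */
static const struct toy_sched_class toy_rt_like_class = {
	.post_schedule = toy_post_schedule,
};

int main(void)
{
	/* The core checks each hook for NULL before calling it. */
	if (toy_rt_like_class.pre_schedule)
		toy_rt_like_class.pre_schedule(NULL, NULL);
	if (toy_rt_like_class.post_schedule)
		toy_rt_like_class.post_schedule(NULL);
	return 0;
}
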
82 changes: 50 additions & 32 deletions trunk/kernel/sched.c
Expand Up @@ -616,6 +616,7 @@ struct rq {

unsigned char idle_at_tick;
/* For active balancing */
int post_schedule;
int active_balance;
int push_cpu;
/* cpu of this runqueue: */
@@ -2839,17 +2840,11 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
static int finish_task_switch(struct rq *rq, struct task_struct *prev)
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
__releases(rq->lock)
{
struct mm_struct *mm = rq->prev_mm;
long prev_state;
int post_schedule = 0;

#ifdef CONFIG_SMP
if (current->sched_class->needs_post_schedule)
post_schedule = current->sched_class->needs_post_schedule(rq);
#endif

rq->prev_mm = NULL;

@@ -2880,10 +2875,44 @@ static int finish_task_switch(struct rq *rq, struct task_struct *prev)
kprobe_flush_task(prev);
put_task_struct(prev);
}
}

#ifdef CONFIG_SMP

/* assumes rq->lock is held */
static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
{
if (prev->sched_class->pre_schedule)
prev->sched_class->pre_schedule(rq, prev);
}

/* rq->lock is NOT held, but preemption is disabled */
static inline void post_schedule(struct rq *rq)
{
if (rq->post_schedule) {
unsigned long flags;

spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
spin_unlock_irqrestore(&rq->lock, flags);

rq->post_schedule = 0;
}
}

#else

return post_schedule;
static inline void pre_schedule(struct rq *rq, struct task_struct *p)
{
}

static inline void post_schedule(struct rq *rq)
{
}

#endif

/**
* schedule_tail - first thing a freshly forked thread must call.
* @prev: the thread we just switched away from.
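
The post_schedule() helper added above runs with preemption disabled but without rq->lock held, and it only retakes the lock when rq->post_schedule was set earlier, while the lock was still held at pick time. The sketch below models that flag-gated pattern in plain user-space C, with a counter standing in for the spinlock; the toy_* names are illustrative assumptions, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct toy_rq;

struct toy_class {
	void (*post_schedule)(struct toy_rq *rq);
};

struct toy_rq {
	bool post_schedule;               /* set by the class while it holds the lock */
	const struct toy_class *curr_class;
	int lock_acquisitions;            /* how often the lock had to be retaken */
};

static void rt_like_post_schedule(struct toy_rq *rq)
{
	(void)rq;
	puts("pushing overloaded tasks to other runqueues");
}

/* Mirrors the shape of the helper above: the lock is NOT held on entry. */
static void toy_post_schedule(struct toy_rq *rq)
{
	if (rq->post_schedule) {
		rq->lock_acquisitions++;  /* spin_lock_irqsave() in the real code */
		if (rq->curr_class->post_schedule)
			rq->curr_class->post_schedule(rq);
		/* spin_unlock_irqrestore() in the real code */
		rq->post_schedule = false;
	}
}

int main(void)
{
	static const struct toy_class rt_like = {
		.post_schedule = rt_like_post_schedule,
	};
	struct toy_rq rq = { .post_schedule = false, .curr_class = &rt_like };

	toy_post_schedule(&rq);           /* flag clear: lock is never retaken */

	rq.post_schedule = true;          /* class found pushable work at pick time */
	toy_post_schedule(&rq);           /* flag set: relock once, run the hook */

	printf("lock retaken %d time(s)\n", rq.lock_acquisitions);
	return 0;
}

Running the model shows the lock being retaken exactly once, for the pass that actually has work to do, which is the point of carrying the decision in rq->post_schedule.
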
@@ -2892,14 +2921,14 @@ asmlinkage void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
struct rq *rq = this_rq();
int post_schedule;

post_schedule = finish_task_switch(rq, prev);
finish_task_switch(rq, prev);

#ifdef CONFIG_SMP
if (post_schedule)
current->sched_class->post_schedule(rq);
#endif
/*
* FIXME: do we need to worry about rq being invalidated by the
* task_switch?
*/
post_schedule(rq);

#ifdef __ARCH_WANT_UNLOCKED_CTXSW
/* In this case, finish_task_switch does not reenable preemption */
@@ -2913,7 +2942,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
* context_switch - switch to the new MM and the new
* thread's register state.
*/
static inline int
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
{
@@ -2960,7 +2989,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
* CPUs since it called schedule(), thus the 'rq' on its stack
* frame will be invalid.
*/
return finish_task_switch(this_rq(), prev);
finish_task_switch(this_rq(), prev);
}

/*
@@ -5371,7 +5400,6 @@ asmlinkage void __sched schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
int post_schedule = 0;
struct rq *rq;
int cpu;

@@ -5403,10 +5431,7 @@ asmlinkage void __sched schedule(void)
switch_count = &prev->nvcsw;
}

#ifdef CONFIG_SMP
if (prev->sched_class->pre_schedule)
prev->sched_class->pre_schedule(rq, prev);
#endif
pre_schedule(rq, prev);

if (unlikely(!rq->nr_running))
idle_balance(cpu, rq);
@@ -5422,25 +5447,17 @@ asmlinkage void __sched schedule(void)
rq->curr = next;
++*switch_count;

post_schedule = context_switch(rq, prev, next); /* unlocks the rq */
context_switch(rq, prev, next); /* unlocks the rq */
/*
* the context switch might have flipped the stack from under
* us, hence refresh the local variables.
*/
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else {
#ifdef CONFIG_SMP
if (current->sched_class->needs_post_schedule)
post_schedule = current->sched_class->needs_post_schedule(rq);
#endif
} else
spin_unlock_irq(&rq->lock);
}

#ifdef CONFIG_SMP
if (post_schedule)
current->sched_class->post_schedule(rq);
#endif
post_schedule(rq);

if (unlikely(reacquire_kernel_lock(current) < 0))
goto need_resched_nonpreemptible;
@@ -9403,6 +9420,7 @@ void __init sched_init(void)
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
31 changes: 11 additions & 20 deletions trunk/kernel/sched_rt.c
@@ -1056,6 +1056,11 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
return p;
}

static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p = _pick_next_task_rt(rq);
@@ -1064,6 +1069,12 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
if (p)
dequeue_pushable_task(rq, p);

/*
* We detect this state here so that we can avoid taking the RQ
* lock again later if there is no need to push
*/
rq->post_schedule = has_pushable_tasks(rq);

return p;
}
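
pick_next_task_rt() now records, while rq->lock is still held, whether any pushable tasks remain, so the later post-schedule pass only pays for a second lock acquisition when there is real work to push. A hedged, stand-alone sketch of that pick-time decision follows; a plain counter replaces the pushable_tasks plist, and the toy names are assumptions for illustration only.

#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	int  nr_pushable;     /* stands in for rq->rt.pushable_tasks */
	bool post_schedule;   /* consumed later by the generic post_schedule() */
};

static bool has_pushable_tasks(const struct toy_rq *rq)
{
	return rq->nr_pushable > 0;
}

/* In the real scheduler this runs with rq->lock already held. */
static void pick_next_task(struct toy_rq *rq)
{
	/*
	 * Decide now, while the lock is held anyway, whether a push pass
	 * will be needed once the context switch has completed.
	 */
	rq->post_schedule = has_pushable_tasks(rq);
}

int main(void)
{
	struct toy_rq idle_rq = { .nr_pushable = 0 };
	struct toy_rq busy_rq = { .nr_pushable = 2 };

	pick_next_task(&idle_rq);
	pick_next_task(&busy_rq);

	printf("idle rq needs post_schedule: %d\n", (int)idle_rq.post_schedule);
	printf("busy rq needs post_schedule: %d\n", (int)busy_rq.post_schedule);
	return 0;
}
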

@@ -1262,11 +1273,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
return lowest_rq;
}

static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
@@ -1466,23 +1472,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
pull_rt_task(rq);
}

/*
* assumes rq->lock is held
*/
static int needs_post_schedule_rt(struct rq *rq)
{
return has_pushable_tasks(rq);
}

static void post_schedule_rt(struct rq *rq)
{
/*
* This is only called if needs_post_schedule_rt() indicates that
* we need to push tasks away
*/
spin_lock_irq(&rq->lock);
push_rt_tasks(rq);
spin_unlock_irq(&rq->lock);
}

/*
@@ -1758,7 +1750,6 @@ static const struct sched_class rt_sched_class = {
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.needs_post_schedule = needs_post_schedule_rt,
.post_schedule = post_schedule_rt,
.task_wake_up = task_wake_up_rt,
.switched_from = switched_from_rt,
