Commit 458168c
---
r: 175583
b: refs/heads/master
c: cd29fe6
h: refs/heads/master
i:
  175581: ca33bbf
  175579: 2c60fb9
  175575: 765a04a
  175567: c135f46
  175551: 58d0d90
v: v3
Peter Zijlstra authored and Ingo Molnar committed Dec 9, 2009
1 parent 04da720 commit 458168c
Showing 4 changed files with 35 additions and 44 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: ab19cb23313733c55e0517607844b86720b35f5f
+refs/heads/master: cd29fe6f2637cc2ccbda5ac65f5332d6bf5fa3c6
2 changes: 1 addition & 1 deletion trunk/include/linux/sched.h
@@ -1102,7 +1102,7 @@ struct sched_class {
 
 	void (*set_curr_task) (struct rq *rq);
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-	void (*task_new) (struct rq *rq, struct task_struct *p);
+	void (*task_fork) (struct task_struct *p);
 
 	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
 			       int running);
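The signature change above is the heart of the cleanup: task_new() took the runqueue and was called at wake-up time with that runqueue already locked, whereas task_fork() takes only the child and is called from the parent's context during fork, with preemption disabled, before the child is visible to the rest of the system. A minimal kernel-style sketch of a class opting in to the new hook (the dummy names are hypothetical, for illustration only; nothing below is part of this commit):

/* Hypothetical scheduling class wiring up the reworked hook. */
static void task_fork_dummy(struct task_struct *p)
{
	/*
	 * Runs in the parent's context with preemption disabled, so
	 * "current" is the parent and p is the freshly copied child;
	 * take any runqueue lock needed here, the caller no longer
	 * holds one for us.
	 */
}

static const struct sched_class dummy_sched_class = {
	/* ... other mandatory methods ... */
	.task_fork	= task_fork_dummy,
};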
47 changes: 18 additions & 29 deletions trunk/kernel/sched.c
@@ -1811,6 +1811,20 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 
 static void calc_load_account_active(struct rq *this_rq);
 
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+	set_task_rq(p, cpu);
+#ifdef CONFIG_SMP
+	/*
+	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+	 * successfully executed on another CPU. We must ensure that updates of
+	 * per-task data have been completed by this moment.
+	 */
+	smp_wmb();
+	task_thread_info(p)->cpu = cpu;
+#endif
+}
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -1967,20 +1981,6 @@ inline int task_curr(const struct task_struct *p)
 	return cpu_curr(task_cpu(p)) == p;
 }
 
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
-	set_task_rq(p, cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
-	 * successfully executed on another CPU. We must ensure that updates of
-	 * per-task data have been completed by this moment.
-	 */
-	smp_wmb();
-	task_thread_info(p)->cpu = cpu;
-#endif
-}
-
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
 				       int oldprio, int running)
@@ -2552,7 +2552,6 @@ static void __sched_fork(struct task_struct *p)
 void sched_fork(struct task_struct *p, int clone_flags)
 {
 	int cpu = get_cpu();
-	unsigned long flags;
 
 	__sched_fork(p);
 
@@ -2586,13 +2585,13 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
+	if (p->sched_class->task_fork)
+		p->sched_class->task_fork(p);
+
 #ifdef CONFIG_SMP
 	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
 #endif
-	local_irq_save(flags);
-	update_rq_clock(cpu_rq(cpu));
 	set_task_cpu(p, cpu);
-	local_irq_restore(flags);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
@@ -2625,17 +2624,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
-
-	if (!p->sched_class->task_new || !current->se.on_rq) {
-		activate_task(rq, p, 0);
-	} else {
-		/*
-		 * Let the scheduling class do new task startup
-		 * management (if any):
-		 */
-		p->sched_class->task_new(rq, p);
-		inc_nr_running(rq);
-	}
+	activate_task(rq, p, 0);
 	trace_sched_wakeup_new(rq, p, 1);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
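Taken together, the sched.c changes move the class callback from wake-up time into sched_fork() and drop the hand-rolled IRQ and rq-clock dance around set_task_cpu(). A condensed view of the resulting fork path, reconstructed from the hunks above (elided lines are summarized in comments; this is not a literal quote of the file):

void sched_fork(struct task_struct *p, int clone_flags)
{
	int cpu = get_cpu();

	__sched_fork(p);
	/* ... priority and sched_class selection ... */

	if (p->sched_class->task_fork)
		p->sched_class->task_fork(p);	/* parent context, preemption off */

#ifdef CONFIG_SMP
	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
#endif
	set_task_cpu(p, cpu);			/* no IRQ/clock dance needed */

	/* ... schedstats and preempt-count setup ... */
	put_cpu();
}

void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	BUG_ON(p->state != TASK_RUNNING);
	update_rq_clock(rq);
	activate_task(rq, p, 0);	/* unconditional: classes now do their
					 * fork-time placement in task_fork() */
	trace_sched_wakeup_new(rq, p, 1);
	check_preempt_curr(rq, p, WF_FORK);
	/* ... SMP woken hook, task_rq_unlock() ... */
}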
28 changes: 15 additions & 13 deletions trunk/kernel/sched_fair.c
@@ -1922,28 +1922,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 }
 
 /*
- * Share the fairness runtime between parent and child, thus the
- * total amount of pressure for CPU stays equal - new tasks
- * get a chance to run but frequent forkers are not allowed to
- * monopolize the CPU. Note: the parent runqueue is locked,
- * the child is not running yet.
+ * called on fork with the child task as argument from the parent's context
+ *  - child not yet on the tasklist
+ *  - preemption disabled
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_fork_fair(struct task_struct *p)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct cfs_rq *cfs_rq = task_cfs_rq(current);
 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
 	int this_cpu = smp_processor_id();
+	struct rq *rq = this_rq();
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
 
-	sched_info_queued(p);
+	if (unlikely(task_cpu(p) != this_cpu))
+		__set_task_cpu(p, this_cpu);
 
 	update_curr(cfs_rq);
+
 	if (curr)
 		se->vruntime = curr->vruntime;
 	place_entity(cfs_rq, se, 1);
 
-	/* 'curr' will be NULL if the child belongs to a different group */
-	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && entity_before(curr, se)) {
+	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
 		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
@@ -1952,7 +1954,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		resched_task(rq->curr);
 	}
 
-	enqueue_task_fair(rq, p, 0);
+	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -2052,7 +2054,7 @@ static const struct sched_class fair_sched_class = {
 
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
-	.task_new		= task_new_fair,
+	.task_fork		= task_fork_fair,
 
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
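task_fork_fair() now takes rq->lock itself, copies the parent's vruntime to the child, pushes the child to the right with place_entity(), and, when sysctl_sched_child_runs_first is set and the parent would still sort first, swaps the two vruntimes (the swap sits in the elided lines just above resched_task() in the first hunk, as in the kernel's implementation). A self-contained sketch of that placement arithmetic, with simplified types; the real entity_before() in sched_fair.c performs the same signed comparison:

#include <stdio.h>

struct sched_entity {
	unsigned long long vruntime;	/* u64 in the kernel */
};

/* Mirrors kernel entity_before(): signed compare tolerates wraparound. */
static int entity_before(const struct sched_entity *a,
			 const struct sched_entity *b)
{
	return (long long)(a->vruntime - b->vruntime) < 0;
}

int main(void)
{
	struct sched_entity parent = { .vruntime = 1000 };	/* "curr" */
	struct sched_entity child = parent;	/* child starts at parent's vruntime */

	child.vruntime += 50;	/* place_entity() pushes the child rightward */

	/* child-runs-first: make sure the child sorts before the parent */
	if (entity_before(&parent, &child)) {
		unsigned long long tmp = parent.vruntime;

		parent.vruntime = child.vruntime;
		child.vruntime = tmp;
	}

	printf("parent vruntime=%llu, child vruntime=%llu\n",
	       parent.vruntime, child.vruntime);
	return 0;
}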
