Commit

---
r: 163011
b: refs/heads/master
c: 0763a66
h: refs/heads/master
i:
  163009: 1374e47
  163007: 21b9d1b
v: v3
Peter Zijlstra authored and Ingo Molnar committed Sep 15, 2009
1 parent 0245a85 commit c6aa321
Showing 5 changed files with 12 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8e6598af3f35629c37249a610cf13e73f70db279
+refs/heads/master: 0763a660a84220cc3900fd32abdd7ad109e2278d
2 changes: 1 addition & 1 deletion trunk/include/linux/sched.h
@@ -1037,7 +1037,7 @@ struct sched_class {
         void (*put_prev_task) (struct rq *rq, struct task_struct *p);

 #ifdef CONFIG_SMP
-        int (*select_task_rq)(struct task_struct *p, int flag, int sync);
+        int (*select_task_rq)(struct task_struct *p, int sd_flag, int sync);

         unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
                         struct rq *busiest, unsigned long max_load_move,
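For orientation, here is a minimal sketch of how this hook is reached. It is not part of this commit and the wrapper shown is illustrative, simplified relative to the kernel's actual call sites; it only shows that callers pass one of the SD_BALANCE_* sched_domain flags (for example SD_BALANCE_WAKE on wakeup, as seen in the hunks below) as the argument renamed to sd_flag:

/*
 * Simplified sketch, not taken from this diff: the core scheduler
 * delegates runqueue selection to the task's scheduling class and
 * hands down an SD_BALANCE_* sched_domain flag as sd_flag.
 */
static inline
int select_task_rq(struct task_struct *p, int sd_flag, int sync)
{
        return p->sched_class->select_task_rq(p, sd_flag, sync);
}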
14 changes: 7 additions & 7 deletions trunk/kernel/sched_fair.c
@@ -1331,15 +1331,15 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
 {
         struct sched_domain *tmp, *sd = NULL;
         int cpu = smp_processor_id();
         int prev_cpu = task_cpu(p);
         int new_cpu = cpu;
         int want_affine = 0;

-        if (flag & SD_BALANCE_WAKE) {
+        if (sd_flag & SD_BALANCE_WAKE) {
                 if (sched_feat(AFFINE_WAKEUPS))
                         want_affine = 1;
                 new_cpu = prev_cpu;
@@ -1368,7 +1368,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                                 break;
                 }

-                switch (flag) {
+                switch (sd_flag) {
                 case SD_BALANCE_WAKE:
                         if (!sched_feat(LB_WAKEUP_UPDATE))
                                 break;
@@ -1392,7 +1392,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                         want_affine = 0;
                 }

-                if (!(tmp->flags & flag))
+                if (!(tmp->flags & sd_flag))
                         continue;

                 sd = tmp;
@@ -1402,12 +1402,12 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                 struct sched_group *group;
                 int weight;

-                if (!(sd->flags & flag)) {
+                if (!(sd->flags & sd_flag)) {
                         sd = sd->child;
                         continue;
                 }

-                group = find_idlest_group(sd, p, cpu, flag);
+                group = find_idlest_group(sd, p, cpu, sd_flag);
                 if (!group) {
                         sd = sd->child;
                         continue;
@@ -1427,7 +1427,7 @@ static int select_task_rq_fair(struct task_struct *p, int flag, int sync)
                 for_each_domain(cpu, tmp) {
                         if (weight <= cpumask_weight(sched_domain_span(tmp)))
                                 break;
-                        if (tmp->flags & flag)
+                        if (tmp->flags & sd_flag)
                                 sd = tmp;
                 }
                 /* while loop will break here if sd == NULL */
2 changes: 1 addition & 1 deletion trunk/kernel/sched_idletask.c
@@ -6,7 +6,7 @@
  */

 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int flag, int sync)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
 {
         return task_cpu(p); /* IDLE tasks as never migrated */
 }
4 changes: 2 additions & 2 deletions trunk/kernel/sched_rt.c
@@ -938,11 +938,11 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);

-static int select_task_rq_rt(struct task_struct *p, int flag, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int sync)
 {
         struct rq *rq = task_rq(p);

-        if (flag != SD_BALANCE_WAKE)
+        if (sd_flag != SD_BALANCE_WAKE)
                 return smp_processor_id();

         /*
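The new name also matches how the argument is used: it is a sched_domain flag bit that gets tested against sched_domain->flags, as in the tmp->flags & sd_flag and sd->flags & sd_flag checks in sched_fair.c above. Below is a small standalone illustration of that matching pattern; struct example_domain, pick_domain() and the EX_BALANCE_* values are made up for the example and are not kernel definitions:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for SD_BALANCE_* bits; values are illustrative only. */
#define EX_BALANCE_EXEC 0x04
#define EX_BALANCE_WAKE 0x10

struct example_domain {
        const char *name;
        int flags;              /* which balance types this domain handles */
};

/*
 * Keep the last domain in the array whose flags include sd_flag,
 * mirroring the "if (tmp->flags & sd_flag) sd = tmp;" pattern above.
 */
static const struct example_domain *
pick_domain(const struct example_domain *doms, size_t n, int sd_flag)
{
        const struct example_domain *sd = NULL;
        size_t i;

        for (i = 0; i < n; i++)
                if (doms[i].flags & sd_flag)
                        sd = &doms[i];
        return sd;
}

int main(void)
{
        const struct example_domain doms[] = {
                { "SMT",  EX_BALANCE_EXEC | EX_BALANCE_WAKE },
                { "NODE", EX_BALANCE_EXEC },
        };
        const struct example_domain *sd = pick_domain(doms, 2, EX_BALANCE_WAKE);

        printf("wake balancing handled by: %s\n", sd ? sd->name : "none");
        return 0;
}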
