Skip to content

Commit

Permalink
sched: clean up __normal_prio() position
Browse files Browse the repository at this point in the history
clean up: move __normal_prio() ahead of normal_prio().

no code changed.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
  • Loading branch information
Ingo Molnar committed Jul 9, 2007
1 parent 71f8bd4 commit 1453118
Showing 1 changed file with 29 additions and 29 deletions.
58 changes: 29 additions & 29 deletions kernel/sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -671,35 +671,6 @@ static inline void resched_task(struct task_struct *p)

#include "sched_stats.h"

/*
* __normal_prio - return the priority that is based on the static
* priority but is modified by bonuses/penalties.
*
* We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
* into the -5 ... 0 ... +5 bonus/penalty range.
*
* We use 25% of the full 0...39 priority range so that:
*
* 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
* 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
*
* Both properties are important to certain workloads.
*/

static inline int __normal_prio(struct task_struct *p)
{
int bonus, prio;

/* Center the bonus around zero: subtracting MAX_BONUS/2 maps the
 * raw CURRENT_BONUS() value into the -5 ... +5 range described above. */
bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;

/* A positive bonus lowers the numeric priority (higher effective
 * priority); a penalty raises it. */
prio = p->static_prio - bonus;
/* Clamp the result into the non-realtime range [MAX_RT_PRIO, MAX_PRIO-1]
 * so a bonus can never push a normal task into RT priority territory. */
if (prio < MAX_RT_PRIO)
prio = MAX_RT_PRIO;
if (prio > MAX_PRIO-1)
prio = MAX_PRIO-1;
return prio;
}

/*
* To aid in avoiding the subversion of "niceness" due to uneven distribution
* of tasks with abnormal "nice" values across CPUs the contribution that
Expand Down Expand Up @@ -802,6 +773,35 @@ enqueue_task_head(struct task_struct *p, struct prio_array *array)
p->array = array;
}

/*
* __normal_prio - return the priority that is based on the static
* priority but is modified by bonuses/penalties.
*
* We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
* into the -5 ... 0 ... +5 bonus/penalty range.
*
* We use 25% of the full 0...39 priority range so that:
*
* 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
* 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
*
* Both properties are important to certain workloads.
*/

static inline int __normal_prio(struct task_struct *p)
{
int bonus, prio;

/* Center the bonus around zero: subtracting MAX_BONUS/2 maps the
 * raw CURRENT_BONUS() value into the -5 ... +5 range described above. */
bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;

/* A positive bonus lowers the numeric priority (higher effective
 * priority); a penalty raises it. */
prio = p->static_prio - bonus;
/* Clamp the result into the non-realtime range [MAX_RT_PRIO, MAX_PRIO-1]
 * so a bonus can never push a normal task into RT priority territory. */
if (prio < MAX_RT_PRIO)
prio = MAX_RT_PRIO;
if (prio > MAX_PRIO-1)
prio = MAX_PRIO-1;
return prio;
}

/*
* Calculate the expected normal priority: i.e. priority
* without taking RT-inheritance into account. Might be
Expand Down

0 comments on commit 1453118

Please sign in to comment.