
Commit c729b41

---
r: 58280
b: refs/heads/master
c: 1453118
h: refs/heads/master
v: v3
Ingo Molnar committed Jul 9, 2007
1 parent f593698 commit c729b41
Showing 2 changed files with 30 additions and 30 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 71f8bd4600521fecb08644072052b85853a5a615
+refs/heads/master: 14531189f0a1071b928586e9e1a89eceac91d95f
58 changes: 29 additions & 29 deletions trunk/kernel/sched.c
@@ -671,35 +671,6 @@ static inline void resched_task(struct task_struct *p)
 
 #include "sched_stats.h"
 
-/*
- * __normal_prio - return the priority that is based on the static
- * priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
- */
-
-static inline int __normal_prio(struct task_struct *p)
-{
-	int bonus, prio;
-
-	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
-
-	prio = p->static_prio - bonus;
-	if (prio < MAX_RT_PRIO)
-		prio = MAX_RT_PRIO;
-	if (prio > MAX_PRIO-1)
-		prio = MAX_PRIO-1;
-	return prio;
-}
-
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
@@ -802,6 +773,35 @@ enqueue_task_head(struct task_struct *p, struct prio_array *array)
 	p->array = array;
 }
 
+/*
+ * __normal_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into the -5 ... 0 ... +5 bonus/penalty range.
+ *
+ * We use 25% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ *
+ * Both properties are important to certain workloads.
+ */
+
+static inline int __normal_prio(struct task_struct *p)
+{
+	int bonus, prio;
+
+	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+
+	prio = p->static_prio - bonus;
+	if (prio < MAX_RT_PRIO)
+		prio = MAX_RT_PRIO;
+	if (prio > MAX_PRIO-1)
+		prio = MAX_PRIO-1;
+	return prio;
+}
+
 /*
  * Calculate the expected normal priority: i.e. priority
  * without taking RT-inheritance into account. Might be
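
Since the diff only moves __normal_prio() (the body is unchanged), the interesting part is the arithmetic the comment describes. Below is a minimal user-space sketch (not kernel code) of the same bonus/clamp logic. The constants mirror the 2.6-era O(1) scheduler (MAX_RT_PRIO = 100, MAX_PRIO = 140, MAX_BONUS = 10); normal_prio() and sleep_frac are illustrative stand-ins, with sleep_frac modeling the sleep_avg / MAX_SLEEP_AVG ratio that the comment says CURRENT_BONUS() scales. Static priority for nice level n is 120 + n, and a numerically lower priority wins the CPU:

#include <stdio.h>

#define MAX_RT_PRIO	100			/* priorities 0..99 are real-time   */
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* 100..139 map to nice -20..+19    */
#define MAX_BONUS	10			/* 25% of the 40-level nice range   */

/* sleep_frac in [0,1] stands in for sleep_avg / MAX_SLEEP_AVG */
static int normal_prio(int static_prio, double sleep_frac)
{
	/* scale the sleep average into the -5 ... 0 ... +5 bonus range */
	int bonus = (int)(sleep_frac * MAX_BONUS) - MAX_BONUS / 2;
	int prio = static_prio - bonus;

	/* clamp into the non-RT priority band, as __normal_prio() does */
	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO - 1)
		prio = MAX_PRIO - 1;
	return prio;
}

int main(void)
{
	/* property 1: interactive nice +19 (static 139) vs nice 0 hog (static 120) */
	printf("nice +19 interactive: %d\n", normal_prio(139, 1.0));	/* 134 */
	printf("nice  0  CPU hog:     %d\n", normal_prio(120, 0.0));	/* 125 */

	/* property 2: nice -20 hog (static 100) vs nice 0 interactive (static 120) */
	printf("nice -20 CPU hog:     %d\n", normal_prio(100, 0.0));	/* 105 */
	printf("nice  0  interactive: %d\n", normal_prio(120, 1.0));	/* 115 */
	return 0;
}

At the extremes the bonus is plus or minus 5, so a fully interactive nice +19 task only reaches priority 134 while a nice 0 CPU hog is penalized to 125; since lower wins, the hog still ranks ahead. Symmetrically, a nice -20 hog (105) stays ahead of a nice 0 interactive task (115). These are exactly the two properties the comment promises.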

