Commit 605e7ba

---
r: 76117
b: refs/heads/master
c: 6b2d770
h: refs/heads/master
i:
  76115: 4cfdeb4
v: v3
Srivatsa Vaddagiri authored and Ingo Molnar committed Jan 25, 2008
1 parent ece13bf commit 605e7ba
Showing 5 changed files with 332 additions and 46 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a183561567b5446d3362b4839bd4f744f4b2af1e
+refs/heads/master: 6b2d7700266b9402e12824e11e0099ae6a4a6a79
4 changes: 4 additions & 0 deletions trunk/include/linux/sched.h
@@ -1453,6 +1453,10 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
extern unsigned int sysctl_sched_min_bal_int_shares;
extern unsigned int sysctl_sched_max_bal_int_shares;
#endif

int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
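kernel/sysctl.c is among the five changed files, but its hunk is not shown in this view. As a sketch only (not the commit's actual hunk), the two new knobs would plausibly be registered the way neighboring sched_* sysctls were in kernels of this era:

#include <linux/sysctl.h>
#include <linux/sched.h>

/* Sketch: kern_table[]-style entries, circa 2.6.24 conventions. */
static struct ctl_table sched_shares_table[] = {
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sched_min_bal_int_shares",
		.data		= &sysctl_sched_min_bal_int_shares,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sched_max_bal_int_shares",
		.data		= &sysctl_sched_max_bal_int_shares,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
#endif
	{ .ctl_name = 0 }
};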
270 changes: 256 additions & 14 deletions trunk/kernel/sched.c
@@ -168,7 +168,43 @@ struct task_group {
struct sched_entity **se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;

/*
* The shares assigned to a task group govern how much of the cpu
* bandwidth is allocated to the group. The more shares a group has,
* the more cpu bandwidth it is allocated.
*
* For example, let's say that there are three task groups, A, B and C,
* which have been assigned shares 1000, 2000 and 3000 respectively.
* Then the cpu bandwidth allocated by the scheduler to task groups
* A, B and C should be:
*
* Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
* Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
* Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
*
* The weight assigned to a task group's schedulable entities on every
* cpu (task_group.se[a_cpu]->load.weight) is derived from the task
* group's shares. For example, let's say that task group A has been
* assigned shares of 1000 and there are two CPUs in a system. The
* group then holds 2 * 1000 = 2000 shares in total, and:
*
* tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
*
* Note: it is not necessary for each of a task group's schedulable
* entities to have the same weight on all CPUs. If the group
* has 2 of its tasks on CPU0 and 1 task on CPU1, a better
* distribution of weight would be:
*
* tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
* tg_A->se[1]->load.weight = 1/3 * 2000 = 667
*
* rebalance_shares() is responsible for distributing a task group's
* shares like this among the group's schedulable entities across
* cpus.
*/
unsigned long shares;

struct rcu_head rcu;
};
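To make the weight split in the comment above concrete, here is a small standalone userspace sketch (plain C, not kernel code) that reproduces the 2-CPU example, using runnable-task counts as a stand-in for per-cpu load:

#include <stdio.h>

int main(void)
{
	unsigned long shares = 1000, ncpus = 2;
	unsigned long load[2] = { 2, 1 };	/* tasks on CPU0, CPU1 */
	unsigned long total_shares = shares * ncpus;	/* 2000 */
	unsigned long total_load = load[0] + load[1];	/* 3 */
	int i;

	for (i = 0; i < 2; i++)
		printf("tg_A->se[%d]->load.weight = %lu\n", i,
		       load[i] * total_shares / total_load);
	/* prints 1333 and 666; integer division truncates where the
	 * comment rounds 2000/3 up to 667 */
	return 0;
}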

@@ -188,6 +224,14 @@ static DEFINE_MUTEX(task_group_mutex);
/* doms_cur_mutex serializes access to doms_cur[] array */
static DEFINE_MUTEX(doms_cur_mutex);

#ifdef CONFIG_SMP
/* kernel thread that runs rebalance_shares() periodically */
static struct task_struct *lb_monitor_task;
static int load_balance_monitor(void *unused);
#endif

static void set_se_shares(struct sched_entity *se, unsigned long shares);

/* Default task group.
* Every task in the system belongs to this group at bootup.
*/
@@ -202,6 +246,8 @@ struct task_group init_task_group = {
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif

#define MIN_GROUP_SHARES 2
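The floor of 2 guards the weight arithmetic: shares of 0 would divide by zero when inv_weight is computed as 2^32/weight in set_se_shares() below, and shares of 1 yields exactly 2^32, which does not fit a 32-bit inv_weight field on 32-bit machines (the "arithmetics problems" the old comment in sched_group_set_shares alluded to). A quick standalone check (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t w;

	/* inv_weight = 2^32 / weight, as in set_se_shares() */
	for (w = 1; w <= 4; w++)
		printf("weight %llu -> inv_weight %llu%s\n",
		       (unsigned long long)w,
		       (unsigned long long)((1ULL << 32) / w),
		       w == 1 ? " (needs 33 bits)" : "");
	return 0;
}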

static int init_task_group_load = INIT_TASK_GROUP_LOAD;

/* return group to which a task belongs */
@@ -6736,6 +6782,21 @@ void __init sched_init_smp(void)
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
sched_init_granularity();

#ifdef CONFIG_FAIR_GROUP_SCHED
if (nr_cpu_ids == 1)
return;

lb_monitor_task = kthread_create(load_balance_monitor, NULL,
"group_balance");
if (!IS_ERR(lb_monitor_task)) {
lb_monitor_task->flags |= PF_NOFREEZE;
wake_up_process(lb_monitor_task);
} else {
printk(KERN_ERR "Could not create load balance monitor thread"
"(error = %ld) \n", PTR_ERR(lb_monitor_task));
}
#endif
}
#else
void __init sched_init_smp(void)
@@ -6988,6 +7049,157 @@ void set_curr_task(int cpu, struct task_struct *p)

#ifdef CONFIG_FAIR_GROUP_SCHED

#ifdef CONFIG_SMP
/*
* Distribute the shares of all task groups among their schedulable
* entities, to reflect the load distribution across cpus.
*/
static int rebalance_shares(struct sched_domain *sd, int this_cpu)
{
struct cfs_rq *cfs_rq;
struct rq *rq = cpu_rq(this_cpu);
cpumask_t sdspan = sd->span;
int balanced = 1;

/* Walk through all the task groups that we have */
for_each_leaf_cfs_rq(rq, cfs_rq) {
int i;
unsigned long total_load = 0, total_shares;
struct task_group *tg = cfs_rq->tg;

/* Gather total task load of this group across cpus */
for_each_cpu_mask(i, sdspan)
total_load += tg->cfs_rq[i]->load.weight;

/* Nothing to do if this group has no load */
if (!total_load)
continue;

/*
* tg->shares represents the number of cpu shares the task group
* is eligible to hold on a single cpu. On N cpus, it is eligible
* to hold N * tg->shares cpu shares in total.
*/
total_shares = tg->shares * cpus_weight(sdspan);

/*
* redistribute total_shares across cpus as per the task load
* distribution.
*/
for_each_cpu_mask(i, sdspan) {
unsigned long local_load, local_shares;

local_load = tg->cfs_rq[i]->load.weight;
local_shares = (local_load * total_shares) / total_load;
if (!local_shares)
local_shares = MIN_GROUP_SHARES;
if (local_shares == tg->se[i]->load.weight)
continue;

spin_lock_irq(&cpu_rq(i)->lock);
set_se_shares(tg->se[i], local_shares);
spin_unlock_irq(&cpu_rq(i)->lock);
balanced = 0;
}
}

return balanced;
}

/*
* How frequently should we rebalance_shares() across cpus?
*
* The more frequently we rebalance shares, the more accurate the
* fairness of cpu bandwidth distribution between task groups is.
* However, a higher frequency also implies increased scheduling
* overhead.
*
* sysctl_sched_min_bal_int_shares represents the minimum interval
* between consecutive calls to rebalance_shares() in the same sched
* domain.
*
* sysctl_sched_max_bal_int_shares represents the maximum interval
* between consecutive calls to rebalance_shares() in the same sched
* domain.
*
* These settings allow for the appropriate trade-off between accuracy
* of fairness and the associated overhead.
*/

/* default: 8ms, units: milliseconds */
const_debug unsigned int sysctl_sched_min_bal_int_shares = 8;

/* default: 128ms, units: milliseconds */
const_debug unsigned int sysctl_sched_max_bal_int_shares = 128;

/* kernel thread that runs rebalance_shares() periodically */
static int load_balance_monitor(void *unused)
{
unsigned int timeout = sysctl_sched_min_bal_int_shares;
struct sched_param schedparm;
int ret;

/*
* We don't want this thread's execution to be limited by the shares
* assigned to the default group (init_task_group). Hence make it run
* as a SCHED_RR RT task at the lowest priority.
*/
schedparm.sched_priority = 1;
ret = sched_setscheduler(current, SCHED_RR, &schedparm);
if (ret)
printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance"
" monitor thread (error = %d) \n", ret);

while (!kthread_should_stop()) {
int i, cpu, balanced = 1;

/* Prevent cpus going down or coming up */
lock_cpu_hotplug();
/* lock out changes to the doms_cur[] array */
lock_doms_cur();
/*
* Enter an rcu read-side critical section to safely walk the rq->sd
* chain on various cpus and to walk the task group list
* (rq->leaf_cfs_rq_list) in rebalance_shares().
*/
rcu_read_lock();

for (i = 0; i < ndoms_cur; i++) {
cpumask_t cpumap = doms_cur[i];
struct sched_domain *sd = NULL, *sd_prev = NULL;

cpu = first_cpu(cpumap);

/* Find the highest domain at which to balance shares */
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
sd_prev = sd;
}

sd = sd_prev;
/* sd == NULL? No load balancing required in this domain */
if (!sd)
continue;

balanced &= rebalance_shares(sd, cpu);
}

rcu_read_unlock();

unlock_doms_cur();
unlock_cpu_hotplug();

if (!balanced)
timeout = sysctl_sched_min_bal_int_shares;
else if (timeout < sysctl_sched_max_bal_int_shares)
timeout *= 2;

msleep_interruptible(timeout);
}

return 0;
}
#endif /* CONFIG_SMP */
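The monitor's sleep interval resets to the minimum whenever any group's shares had to be moved, and doubles toward the maximum while the system stays balanced. A standalone sketch of just that backoff policy (plain C, not kernel code; next_timeout is an illustrative stand-in):

#include <stdio.h>

static unsigned int next_timeout(unsigned int timeout, int balanced,
				 unsigned int min, unsigned int max)
{
	/* mirror of the interval logic in load_balance_monitor() */
	if (!balanced)
		return min;
	if (timeout < max)
		timeout *= 2;
	return timeout;
}

int main(void)
{
	unsigned int t = 8;	/* sysctl_sched_min_bal_int_shares default */
	int i;

	/* an always-balanced system walks 8 -> 16 -> 32 -> 64 -> 128 ms */
	for (i = 0; i < 6; i++) {
		printf("%u ms\n", t);
		t = next_timeout(t, 1, 8, 128);
	}
	return 0;
}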

/* allocate runqueue etc for a new task group */
struct task_group *sched_create_group(void)
{
@@ -7144,47 +7356,77 @@ void sched_move_task(struct task_struct *tsk)
 task_rq_unlock(rq, &flags);
 }

+/* rq->lock to be locked by caller */
 static void set_se_shares(struct sched_entity *se, unsigned long shares)
 {
 struct cfs_rq *cfs_rq = se->cfs_rq;
 struct rq *rq = cfs_rq->rq;
 int on_rq;

-spin_lock_irq(&rq->lock);
+if (!shares)
+shares = MIN_GROUP_SHARES;

 on_rq = se->on_rq;
-if (on_rq)
+if (on_rq) {
 dequeue_entity(cfs_rq, se, 0);
+dec_cpu_load(rq, se->load.weight);
+}

 se->load.weight = shares;
 se->load.inv_weight = div64_64((1ULL<<32), shares);

-if (on_rq)
+if (on_rq) {
 enqueue_entity(cfs_rq, se, 0);
-
-spin_unlock_irq(&rq->lock);
+inc_cpu_load(rq, se->load.weight);
+}
 }

 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 int i;
-
-/*
- * A weight of 0 or 1 can cause arithmetics problems.
- * (The default weight is 1024 - so there's no practical
- * limitation from this.)
- */
-if (shares < 2)
-shares = 2;
+struct cfs_rq *cfs_rq;
+struct rq *rq;

 lock_task_group_list();
 if (tg->shares == shares)
 goto done;

+if (shares < MIN_GROUP_SHARES)
+shares = MIN_GROUP_SHARES;
+
+/*
+ * Prevent any load balance activity (rebalance_shares,
+ * load_balance_fair) from referring to this group first,
+ * by taking it off the rq->leaf_cfs_rq_list on each cpu.
+ */
+for_each_possible_cpu(i) {
+cfs_rq = tg->cfs_rq[i];
+list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+}
+
+/* wait for any ongoing reference to this group to finish */
+synchronize_sched();
+
+/*
+ * Now we are free to modify the group's shares on each cpu
+ * w/o tripping rebalance_shares or load_balance_fair.
+ */
 tg->shares = shares;
-for_each_possible_cpu(i)
+for_each_possible_cpu(i) {
+spin_lock_irq(&cpu_rq(i)->lock);
 set_se_shares(tg->se[i], shares);
+spin_unlock_irq(&cpu_rq(i)->lock);
+}

+/*
+ * Enable load balance activity on this group, by inserting it back on
+ * each cpu's rq->leaf_cfs_rq_list.
+ */
+for_each_possible_cpu(i) {
+rq = cpu_rq(i);
+cfs_rq = tg->cfs_rq[i];
+list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
+}
 done:
 unlock_task_group_list();
 return 0;
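One detail worth spelling out from set_se_shares() above: load.inv_weight caches 2^32/weight so that later divisions by the weight can be done as a multiply and a shift. A standalone illustration of the trick (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t weight = 1024;	/* the default group weight */
	uint64_t inv_weight = (1ULL << 32) / weight;	/* 4194304 */
	uint64_t delta = 3000000;	/* e.g. a runtime delta in ns */

	/* delta / weight vs (delta * inv_weight) >> 32: both print 2929 */
	printf("exact:   %llu\n", (unsigned long long)(delta / weight));
	printf("inverse: %llu\n",
	       (unsigned long long)((delta * inv_weight) >> 32));
	return 0;
}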