---
r: 269155
b: refs/heads/master
c: d3d9dc3
h: refs/heads/master
i:
  269153: ad7de5b
  269151: d0679a7
v: v3
Paul Turner authored and Ingo Molnar committed Aug 14, 2011
1 parent 34ba958 commit 56f457c
Showing 2 changed files with 51 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8cb120d3e41a0464a559d639d519cef563717a4e
+refs/heads/master: d3d9dc3302368269acf94b7381663b93000fe2fe
52 changes: 50 additions & 2 deletions trunk/kernel/sched_fair.c
@@ -970,6 +970,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	se->vruntime = vruntime;
 }
 
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -999,8 +1001,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_running == 1) {
 		list_add_leaf_cfs_rq(cfs_rq);
+		check_enqueue_throttle(cfs_rq);
+	}
 }
 
 static void __clear_buddies_last(struct sched_entity *se)
@@ -1202,6 +1206,8 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 	return se;
 }
 
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+
 static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 {
 	/*
@@ -1211,6 +1217,9 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 	if (prev->on_rq)
 		update_curr(cfs_rq);
 
+	/* throttle cfs_rqs exceeding runtime */
+	check_cfs_rq_runtime(cfs_rq);
+
 	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
@@ -1464,7 +1473,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 	return 0;
 }
 
-static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq)
+static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
@@ -1657,9 +1666,48 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
 
 	return idle;
 }
+
+/*
+ * When a group wakes up we want to make sure that its quota is not already
+ * expired/exceeded, otherwise it may be allowed to steal additional ticks of
+ * runtime as update_curr() throttling cannot trigger until it's on-rq.
+ */
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
+{
+	/* an active group must be handled by the update_curr()->put() path */
+	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
+		return;
+
+	/* ensure the group is not already throttled */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	/* update runtime allocation */
+	account_cfs_rq_runtime(cfs_rq, 0);
+	if (cfs_rq->runtime_remaining <= 0)
+		throttle_cfs_rq(cfs_rq);
+}
+
+/* conditionally throttle active cfs_rq's from put_prev_entity() */
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
+		return;
+
+	/*
+	 * it's possible for a throttled entity to be forced into a running
+	 * state (e.g. set_curr_task), in this case we're finished.
+	 */
+	if (cfs_rq_throttled(cfs_rq))
+		return;
+
+	throttle_cfs_rq(cfs_rq);
+}
 #else
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
 		unsigned long delta_exec) {}
+static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
+static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
 {
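
For readers following along outside the kernel tree, below is a minimal, self-contained sketch of the control flow this patch adds. It models only the two new call sites: the wakeup path (check_enqueue_throttle(), needed because a group can wake with its quota already expired, and update_curr() cannot charge an entity that is not yet on-rq) and the schedule-out path (check_cfs_rq_runtime() from put_prev_entity()). The toy_* types and helpers are hypothetical stand-ins for illustration, not kernel API; the real accounting, quota refresh, and unthrottling live in sched_fair.c.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the bandwidth fields of struct cfs_rq. */
struct toy_cfs_rq {
	bool runtime_enabled;	/* a quota is configured for this group */
	bool throttled;		/* group is currently throttled */
	bool has_curr;		/* an entity of this group is running now */
	long runtime_remaining;	/* quota left in this period (ns) */
};

/* Charge delta_exec against the group's remaining quota. */
static void toy_account_runtime(struct toy_cfs_rq *rq, long delta_exec)
{
	if (!rq->runtime_enabled)
		return;
	rq->runtime_remaining -= delta_exec;
}

static void toy_throttle(struct toy_cfs_rq *rq)
{
	rq->throttled = true;
	printf("throttled, remaining = %ld\n", rq->runtime_remaining);
}

/* Wakeup path, modeled on check_enqueue_throttle(). */
static void toy_check_enqueue_throttle(struct toy_cfs_rq *rq)
{
	/* an actively running group is handled by the put-prev path */
	if (!rq->runtime_enabled || rq->has_curr)
		return;
	if (rq->throttled)
		return;
	/* a zero-size charge stands in for refreshing the allocation */
	toy_account_runtime(rq, 0);
	if (rq->runtime_remaining <= 0)
		toy_throttle(rq);
}

/* Schedule-out path, modeled on check_cfs_rq_runtime(). */
static void toy_check_runtime(struct toy_cfs_rq *rq)
{
	if (!rq->runtime_enabled || rq->runtime_remaining > 0)
		return;
	/* already throttled but forced running (cf. set_curr_task) */
	if (rq->throttled)
		return;
	toy_throttle(rq);
}

int main(void)
{
	struct toy_cfs_rq rq = {
		.runtime_enabled = true,
		.runtime_remaining = 1000,
	};

	toy_account_runtime(&rq, 1500);	/* group overruns its quota... */
	toy_check_runtime(&rq);		/* ...and is throttled on put-prev */

	rq.throttled = false;		/* pretend a new period unthrottled it */
	rq.runtime_remaining = -200;	/* but the balance is already negative */
	toy_check_enqueue_throttle(&rq);	/* the enqueue-time check catches it */

	return 0;
}

The dual check mirrors the patch's intent: a running group is throttled when it is scheduled out, while a group that wakes with an exhausted balance is caught at enqueue time, so it cannot steal extra ticks before update_curr() gets a chance to run.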
