Commit 76781ab

---
r: 121336
b: refs/heads/master
c: 70574a9
h: refs/heads/master
v: v3
Alexey Dobriyan authored and Ingo Molnar committed Nov 28, 2008
1 parent 3b90824 commit 76781ab
Showing 3 changed files with 34 additions and 39 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f1860c34b3ed829ac774647f266abf1074cd58cd
+refs/heads/master: 70574a996fc7a70c5586eb56bd92a544eccf18b6
trunk/kernel/sched.c (67 changes: 33 additions & 34 deletions)
@@ -1581,6 +1581,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)

 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue; this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work well under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -2780,40 +2813,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
-/*
- * double_lock_balance - lock the busiest runqueue; this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work well under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
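The moved function is the classic AB-BA deadlock-avoidance pattern: when two runqueue locks must be held at once, the lock at the lower address is always taken first, and a return value of 1 tells the caller that this_rq->lock was dropped and retaken along the way, so any state observed under it must be revalidated. As an illustration (not part of the commit), here is a minimal user-space sketch of the same ordering idea, assuming POSIX threads; struct rq, double_lock and double_unlock are hypothetical stand-ins, and the lockdep calls (spin_lock_nested, lock_set_subclass) have no user-space analogue here:

/* Hypothetical user-space analogue of double_lock_balance(): take two
 * locks in ascending address order to avoid AB-BA deadlock. Returns 1
 * if this_rq->lock was dropped and retaken, so the caller must
 * revalidate anything it read under that lock. Build with: cc -pthread */
#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int nr_running;			/* state to revalidate after a drop */
};

static int double_lock(struct rq *this_rq, struct rq *busiest)
{
	int ret = 0;

	/* caller already holds this_rq->lock */
	if (pthread_mutex_trylock(&busiest->lock) != 0) {
		if (busiest < this_rq) {
			/* wrong order: back off, then lock in address order */
			pthread_mutex_unlock(&this_rq->lock);
			pthread_mutex_lock(&busiest->lock);
			pthread_mutex_lock(&this_rq->lock);
			ret = 1;	/* this_rq->lock was released */
		} else {
			pthread_mutex_lock(&busiest->lock);
		}
	}
	return ret;
}

static void double_unlock(struct rq *this_rq, struct rq *busiest)
{
	(void)this_rq;			/* kept for symmetry with the kernel API */
	pthread_mutex_unlock(&busiest->lock);
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER, 3 };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	pthread_mutex_lock(&a.lock);
	if (double_lock(&a, &b))
		printf("a.lock was dropped; revalidate a.nr_running\n");
	printf("holding both: a=%d b=%d\n", a.nr_running, b.nr_running);
	double_unlock(&a, &b);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

In the kernel version, spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells lockdep that holding two locks of the same class is intentional, and lock_set_subclass() in double_unlock_balance() resets the subclass of the still-held this_rq->lock back to 0.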
trunk/kernel/sched_rt.c (4 changes: 0 additions & 4 deletions)
@@ -909,10 +909,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static inline void double_unlock_balance(struct rq *this_rq,
-					 struct rq *busiest);
-
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
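The sched_rt.c hunk only drops forward declarations: sched_rt.c is compiled by being #included into sched.c, and once the definitions sit above that include point the declarations are redundant. With old compilers a forward declaration alone was also not enough to let a call to a static inline actually be inlined, since the body had not been seen yet. A toy single-file model of the layout (hypothetical names standing in for the sched.c / sched_rt.c pair):

/* Toy model (hypothetical names) of why the forward declarations can go:
 * kernel/sched.c textually includes sched_rt.c, so once the definition
 * of double_unlock_balance() is moved above the include, callers inside
 * sched_rt.c see the full body and even old compilers can inline it. */
#include <stdio.h>

static inline void double_unlock_balance_toy(void)	/* defined before use */
{
	puts("busiest unlocked");
}

/* --- below stands in for: #include "sched_rt.c" --- */
static void find_lock_lowest_rq_toy(void)
{
	double_unlock_balance_toy();	/* body already visible: inlinable */
}

int main(void)
{
	find_lock_lowest_rq_toy();
	return 0;
}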
