
Commit 7a86603

---
yaml
---
r: 135676
b: refs/heads/master
c: 8f45e2b
h: refs/heads/master
v: v3
Gregory Haskins committed Dec 29, 2008
1 parent 8733403 commit 7a86603
Showing 2 changed files with 45 additions and 8 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7e96fa5875d4a9be18d74d3ca7b90518d05bc426
+refs/heads/master: 8f45e2b516201d1bf681e6026fa5276385def565
51 changes: 44 additions & 7 deletions trunk/kernel/sched.c
@@ -1608,21 +1608,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+#ifdef CONFIG_PREEMPT
+
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ * fair double_lock_balance: Safely acquires both rq->locks in a fair
+ * way at the expense of forcing extra atomic operations in all
+ * invocations.  This assures that the double_lock is acquired using the
+ * same underlying policy as the spinlock_t on this architecture, which
+ * reduces latency compared to the unfair variant below.  However, it
+ * also adds more overhead and therefore may reduce throughput.
  */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	spin_unlock(&this_rq->lock);
+	double_rq_lock(this_rq, busiest);
+
+	return 1;
+}
+
+#else
+/*
+ * Unfair double_lock_balance: Optimizes throughput at the expense of
+ * latency by eliminating extra atomic operations when the locks are
+ * already in proper order on entry.  This favors lower cpu-ids and will
+ * grant the double lock to lower cpus over higher ids under contention,
+ * regardless of entry order into the function.
+ */
+static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(this_rq->lock)
 	__acquires(busiest->lock)
 	__acquires(this_rq->lock)
 {
 	int ret = 0;
 
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
 	if (unlikely(!spin_trylock(&busiest->lock))) {
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
@@ -1635,6 +1656,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
+#endif /* CONFIG_PREEMPT */
+
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+{
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+
+	return _double_lock_balance(this_rq, busiest);
+}
+
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {

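A note for readers of this diff: the change splits double_lock_balance() into a small wrapper plus two _double_lock_balance() helpers selected by CONFIG_PREEMPT, as described in the new comments above. The following is a minimal user-space sketch of the two locking policies, not the kernel code: it uses pthread spinlocks and a hypothetical struct toy_rq in place of struct rq, and it omits the lockdep annotations (spin_lock_nested()) and the irqs_disabled() check that the real code has. The fair variant always drops its own lock and re-acquires both in a fixed address order; the unfair variant tries the second lock opportunistically and only backs off when the address comparison says an ABBA deadlock would otherwise be possible.

/*
 * Standalone sketch (NOT the kernel code): illustrates the two double-lock
 * policies from this commit with pthread spinlocks.  struct toy_rq and the
 * function names are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_rq {
	pthread_spinlock_t lock;
};

/*
 * "Fair" policy (CONFIG_PREEMPT in the patch): always drop our own lock,
 * then take both locks in a fixed (address) order, so acquisition follows
 * the spinlock's own fairness policy for every contender.
 */
static int fair_double_lock(struct toy_rq *this_rq, struct toy_rq *busiest)
{
	pthread_spin_unlock(&this_rq->lock);
	if (this_rq < busiest) {
		pthread_spin_lock(&this_rq->lock);
		pthread_spin_lock(&busiest->lock);
	} else {
		pthread_spin_lock(&busiest->lock);
		pthread_spin_lock(&this_rq->lock);
	}
	return 1;	/* this_rq->lock was dropped and retaken */
}

/*
 * "Unfair" policy (!CONFIG_PREEMPT in the patch): opportunistically try the
 * second lock; only back off and reorder when the address comparison says
 * an ABBA deadlock would otherwise be possible.
 */
static int unfair_double_lock(struct toy_rq *this_rq, struct toy_rq *busiest)
{
	int ret = 0;

	if (pthread_spin_trylock(&busiest->lock) != 0) {
		if (busiest < this_rq) {
			pthread_spin_unlock(&this_rq->lock);
			pthread_spin_lock(&busiest->lock);
			pthread_spin_lock(&this_rq->lock);
			ret = 1;	/* this_rq->lock was dropped and retaken */
		} else {
			pthread_spin_lock(&busiest->lock);
		}
	}
	return ret;
}

int main(void)
{
	struct toy_rq a, b;

	pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);

	/* Caller convention: this_rq (here 'a') is already locked on entry. */
	pthread_spin_lock(&a.lock);
	printf("fair: lock dropped = %d\n", fair_double_lock(&a, &b));
	pthread_spin_unlock(&b.lock);
	pthread_spin_unlock(&a.lock);

	pthread_spin_lock(&a.lock);
	printf("unfair: lock dropped = %d\n", unfair_double_lock(&a, &b));
	pthread_spin_unlock(&b.lock);
	pthread_spin_unlock(&a.lock);

	return 0;
}

In both the patch and this sketch, a return value of 1 signals that this_rq's lock was dropped and retaken along the way, so the caller must re-validate any runqueue state it examined before the call; the kernel wrapper additionally BUG()s if it is ever entered with interrupts enabled.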