sched: Prepare for Core-wide rq->lock
When switching on core-sched, CPUs need to agree which lock to use for
their RQ.

The new rule will be that rq->core_enabled will be toggled while
holding all rq->__locks that belong to a core. This means we need to
double-check the rq->core_enabled value after each lock acquire and
retry if it changed.

This also has implications for those sites that take multiple RQ
locks; they need to be careful that the second lock doesn't end up
being the first lock.

Verify the lock pointer after acquiring the first lock: if the two
rqs are on the same core, holding any one of the core's rq->__lock
instances pins the core state, so the comparison is stable.

While there, change the rq->__lock order to CPU number instead of rq
address; this greatly simplifies the next patch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/YJUNY0dmrJMD/BIm@hirez.programming.kicks-ass.net
Peter Zijlstra committed May 12, 2021 · 1 parent 5cb9eaa · commit d66f1b0
Showing 2 changed files with 63 additions and 33 deletions.
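
The retry loops added below are only sound because of the rule stated in the commit message: rq->core_enabled, and therefore the value returned by rq_lockp(), can only change while every rq->__lock belonging to the core is held. For orientation, here is a rough sketch of what that flip side might look like. It is illustrative only, not part of this commit; sched_core_lock() and its use of cpu_smt_mask() are assumptions drawn from the commit message:

/*
 * Illustrative sketch, NOT part of this commit: the "new rule" says
 * rq->core_enabled may only be toggled while holding the rq->__lock
 * of every sibling CPU in the core. sched_core_lock() is a
 * hypothetical helper based on that description.
 */
static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	/* Take each sibling's rq->__lock; i is the lockdep subclass. */
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

With all sibling __locks held like this, a concurrent locker that sampled rq_lockp() just before the flip will see the pointer change on its re-check and retry, which is exactly what the loops below implement.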
kernel/sched/core.c (46 additions, 2 deletions)
@@ -186,19 +186,63 @@ int sysctl_sched_rt_runtime = 950000;
 
 void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
 {
-	raw_spin_lock_nested(rq_lockp(rq), subclass);
+	raw_spinlock_t *lock;
+
+	if (sched_core_disabled()) {
+		raw_spin_lock_nested(&rq->__lock, subclass);
+		return;
+	}
+
+	for (;;) {
+		lock = rq_lockp(rq);
+		raw_spin_lock_nested(lock, subclass);
+		if (likely(lock == rq_lockp(rq)))
+			return;
+		raw_spin_unlock(lock);
+	}
 }
 
 bool raw_spin_rq_trylock(struct rq *rq)
 {
-	return raw_spin_trylock(rq_lockp(rq));
+	raw_spinlock_t *lock;
+	bool ret;
+
+	if (sched_core_disabled())
+		return raw_spin_trylock(&rq->__lock);
+
+	for (;;) {
+		lock = rq_lockp(rq);
+		ret = raw_spin_trylock(lock);
+		if (!ret || (likely(lock == rq_lockp(rq))))
+			return ret;
+		raw_spin_unlock(lock);
+	}
 }
 
 void raw_spin_rq_unlock(struct rq *rq)
 {
 	raw_spin_unlock(rq_lockp(rq));
 }
 
+#ifdef CONFIG_SMP
+/*
+ * double_rq_lock - safely lock two runqueues
+ */
+void double_rq_lock(struct rq *rq1, struct rq *rq2)
+{
+	lockdep_assert_irqs_disabled();
+
+	if (rq_order_less(rq2, rq1))
+		swap(rq1, rq2);
+
+	raw_spin_rq_lock(rq1);
+	if (rq_lockp(rq1) == rq_lockp(rq2))
+		return;
+
+	raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+}
+#endif
+
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
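
The new double_rq_lock() asserts, rather than merely documents, that interrupts are off. A hypothetical caller sketch; the migration helper and its body are assumptions for illustration, and only double_rq_lock()/double_rq_unlock() come from this patch:

/*
 * Hypothetical caller sketch: migrate a task between two runqueues.
 * Callers must disable interrupts themselves; double_rq_lock() only
 * checks this via lockdep_assert_irqs_disabled(). Because it sorts
 * the pair with rq_order_less(), the caller need not care which
 * runqueue has the lower CPU number.
 */
static void move_task_between(struct rq *src, struct rq *dst,
			      struct task_struct *p)
{
	unsigned long flags;

	local_irq_save(flags);		/* IRQs off before taking rq locks */
	double_rq_lock(src, dst);	/* acquires in CPU-number order */

	deactivate_task(src, p, 0);
	set_task_cpu(p, cpu_of(dst));
	activate_task(dst, p, 0);

	double_rq_unlock(src, dst);
	local_irq_restore(flags);
}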
kernel/sched/sched.h (17 additions, 31 deletions)
@@ -1113,6 +1113,11 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+static inline bool sched_core_disabled(void)
+{
+	return true;
+}
+
 static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 {
 	return &rq->__lock;
@@ -2231,10 +2236,17 @@ unsigned long arch_scale_freq_capacity(int cpu)
 }
 #endif
 
+
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPTION
 
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
+static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
+{
+	return rq1->cpu < rq2->cpu;
+}
+
+extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
+#ifdef CONFIG_PREEMPTION
 
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
@@ -2274,14 +2286,13 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	if (likely(raw_spin_rq_trylock(busiest)))
 		return 0;
 
-	if (rq_lockp(busiest) >= rq_lockp(this_rq)) {
+	if (rq_order_less(this_rq, busiest)) {
 		raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
 		return 0;
 	}
 
 	raw_spin_rq_unlock(this_rq);
-	raw_spin_rq_lock(busiest);
-	raw_spin_rq_lock_nested(this_rq, SINGLE_DEPTH_NESTING);
+	double_rq_lock(this_rq, busiest);
 
 	return 1;
 }
@@ -2333,31 +2344,6 @@ static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
-/*
- * double_rq_lock - safely lock two runqueues
- *
- * Note this does not disable interrupts like task_rq_lock,
- * you need to do so manually before calling.
- */
-static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
-	__acquires(rq1->lock)
-	__acquires(rq2->lock)
-{
-	BUG_ON(!irqs_disabled());
-	if (rq_lockp(rq1) == rq_lockp(rq2)) {
-		raw_spin_rq_lock(rq1);
-		__acquire(rq2->lock);	/* Fake it out ;) */
-	} else {
-		if (rq_lockp(rq1) < rq_lockp(rq2)) {
-			raw_spin_rq_lock(rq1);
-			raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
-		} else {
-			raw_spin_rq_lock(rq2);
-			raw_spin_rq_lock_nested(rq1, SINGLE_DEPTH_NESTING);
-		}
-	}
-}
-
 /*
  * double_rq_unlock - safely unlock two runqueues
  *
@@ -2368,11 +2354,11 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	raw_spin_rq_unlock(rq1);
 	if (rq_lockp(rq1) != rq_lockp(rq2))
 		raw_spin_rq_unlock(rq2);
 	else
 		__release(rq2->lock);
+	raw_spin_rq_unlock(rq1);
 }
 
 extern void set_rq_online (struct rq *rq);
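
Ordering by CPU number instead of lock address keeps the acquisition order stable even though rq_lockp() can change identity at runtime, and it is symmetric in the arguments. A small hypothetical check of that invariant; check_lock_order() is not kernel code:

/*
 * Illustrative sketch: whatever order the caller passes, the pair is
 * sorted with rq_order_less(), so both calls below take the lock of
 * min(cpu_a, cpu_b) first and two CPUs locking the same pair can
 * never deadlock against each other.
 */
static void check_lock_order(int cpu_a, int cpu_b)
{
	struct rq *rq_a = cpu_rq(cpu_a);
	struct rq *rq_b = cpu_rq(cpu_b);
	unsigned long flags;

	local_irq_save(flags);

	double_rq_lock(rq_a, rq_b);	/* lowest CPU number first */
	double_rq_unlock(rq_a, rq_b);

	double_rq_lock(rq_b, rq_a);	/* same order as above */
	double_rq_unlock(rq_b, rq_a);

	local_irq_restore(flags);
}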
