mutex: Move mutex spinning code from sched/core.c back to mutex.c
As mentioned by Ingo, the SCHED_FEAT_OWNER_SPIN scheduler
feature bit was really just an early hack to make mutex
spinning testable both with and without it, so it is no
longer necessary.

This patch removes the SCHED_FEAT_OWNER_SPIN feature bit and
moves the mutex spinning code from kernel/sched/core.c back to
kernel/mutex.c, which is where it belongs.

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Chandramouleeswaran Aswin <aswin@hp.com>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Norton Scott J <scott.norton@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Clark Williams <williams@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1366226594-5506-2-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Waiman Long authored and Ingo Molnar committed Apr 19, 2013
1 parent 8184004 commit 41fcb9f
Showing 4 changed files with 46 additions and 53 deletions.
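For context, mutex_spin_on_owner() is the helper the mutex slow path calls to spin optimistically while the lock owner is running on another CPU. A condensed sketch of the caller side as it looked in kernels of this era (simplified and partly hypothetical; loosely based on __mutex_lock_common() in kernel/mutex.c, with debug and corner-case handling omitted — it is not part of this diff):

	for (;;) {
		struct task_struct *owner;

		/* Snapshot the current owner; it can change at any time. */
		owner = ACCESS_ONCE(lock->owner);

		/*
		 * Spin while the owner keeps running on another CPU; stop
		 * if the owner sleeps, changes, or we need to reschedule.
		 */
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* The lock may now be free: try to take it. */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * The owner released the lock, but somebody else beat us
		 * to it; relax the CPU briefly before retrying.
		 */
		arch_mutex_cpu_relax();
	}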
1 change: 0 additions & 1 deletion include/linux/sched.h
@@ -320,7 +320,6 @@ extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
46 changes: 46 additions & 0 deletions kernel/mutex.c
@@ -95,6 +95,52 @@ void __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * Mutex spinning code migrated from kernel/sched/core.c
+ */
+
+static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
+{
+	if (lock->owner != owner)
+		return false;
+
+	/*
+	 * Ensure we emit the owner->on_cpu dereference _after_ checking
+	 * that lock->owner still matches owner. If that fails, owner might
+	 * point to free()d memory; if it still matches, the rcu_read_lock()
+	 * ensures the memory stays valid.
+	 */
+	barrier();
+
+	return owner->on_cpu;
+}
+
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+static noinline
+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	rcu_read_lock();
+	while (owner_running(lock, owner)) {
+		if (need_resched())
+			break;
+
+		arch_mutex_cpu_relax();
+	}
+	rcu_read_unlock();
+
+	/*
+	 * We break out of the loop above on need_resched() and when the
+	 * owner changed, which is a sign of heavy contention. Return
+	 * success only when lock->owner is NULL.
+	 */
+	return lock->owner == NULL;
+}
+#endif
+
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /**
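The barrier()/RCU pairing in owner_running() is worth spelling out, since it moves across files verbatim: owner is sampled from lock->owner without any lock held, so the task it points to could exit and be freed at any moment. Restated as an annotated sketch (commentary only, not kernel code; the running local is hypothetical):

	rcu_read_lock();			/* task_struct is freed only after an RCU grace period */
	if (lock->owner == owner) {		/* re-check the speculative pointer */
		barrier();			/* keep the ->on_cpu load after the check */
		running = owner->on_cpu;	/* safe: still the owner, and RCU pins the memory */
	}
	rcu_read_unlock();

Note that barrier() is only a compiler barrier; it is the re-check under rcu_read_lock(), not any hardware ordering, that makes the dereference safe.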
45 changes: 0 additions & 45 deletions kernel/sched/core.c
@@ -2997,51 +2997,6 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
-	if (lock->owner != owner)
-		return false;
-
-	/*
-	 * Ensure we emit the owner->on_cpu dereference _after_ checking
-	 * that lock->owner still matches owner. If that fails, owner might
-	 * point to free()d memory; if it still matches, the rcu_read_lock()
-	 * ensures the memory stays valid.
-	 */
-	barrier();
-
-	return owner->on_cpu;
-}
-
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
-{
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
-
-	rcu_read_lock();
-	while (owner_running(lock, owner)) {
-		if (need_resched())
-			break;
-
-		arch_mutex_cpu_relax();
-	}
-	rcu_read_unlock();
-
-	/*
-	 * We break out of the loop above on need_resched() and when the
-	 * owner changed, which is a sign of heavy contention. Return
-	 * success only when lock->owner is NULL.
-	 */
-	return lock->owner == NULL;
-}
-#endif
-
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
7 changes: 0 additions & 7 deletions kernel/sched/features.h
@@ -45,13 +45,6 @@ SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
 
-/*
- * Spin-wait on mutex acquisition when the mutex owner is running on
- * another cpu -- assumes that when the owner is running, it will soon
- * release the lock. Decreases scheduling overhead.
- */
-SCHED_FEAT(OWNER_SPIN, true)
-
 /*
  * Decrement CPU power based on time not spent running tasks
  */
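What made the feature bit "testable" in the first place: with CONFIG_SCHED_DEBUG enabled, any bit declared in kernel/sched/features.h can be flipped at runtime through debugfs (mount point assumed at /sys/kernel/debug). On pre-patch kernels, mutex spinning could therefore be switched off and on like this:

	# disable optimistic mutex spinning
	echo NO_OWNER_SPIN > /sys/kernel/debug/sched_features

	# re-enable it
	echo OWNER_SPIN > /sys/kernel/debug/sched_features

After this patch the runtime knob is gone; spinning is controlled only by the build-time CONFIG_MUTEX_SPIN_ON_OWNER option.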
