Commit d6c53d8

---
r: 362828
b: refs/heads/master
c: 2bd2c92
h: refs/heads/master
v: v3
Waiman Long authored and Ingo Molnar committed Apr 19, 2013
1 parent e3f6933 commit d6c53d8
Showing 3 changed files with 94 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0dc8c730c98a06a4d927f8d08bd0dd6de973b8dd
+refs/heads/master: 2bd2c92cf07cc4a373bf316c75b78ac465fefd35
3 changes: 3 additions & 0 deletions trunk/include/linux/mutex.h
@@ -53,6 +53,9 @@ struct mutex {
 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
 	struct task_struct	*owner;
 #endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+	void			*spin_mlock;	/* Spinner MCS lock */
+#endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;
 	void			*magic;
91 changes: 90 additions & 1 deletion trunk/kernel/mutex.c
@@ -55,6 +55,9 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 	mutex_clear_owner(lock);
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+	lock->spin_mlock = NULL;
+#endif
 
 	debug_mutex_init(lock, name, key);
 }
@@ -107,6 +110,60 @@ EXPORT_SYMBOL(mutex_lock);
 #endif
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * In order to avoid a stampede of mutex spinners from acquiring the mutex
+ * more or less simultaneously, the spinners need to acquire a MCS lock
+ * first before spinning on the owner field.
+ *
+ * We don't inline mspin_lock() so that perf can correctly account for the
+ * time spent in this lock function.
+ */
+struct mspin_node {
+	struct mspin_node *next ;
+	int		  locked;	/* 1 if lock acquired */
+};
+#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))
+
+static noinline
+void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
+{
+	struct mspin_node *prev;
+
+	/* Init node */
+	node->locked = 0;
+	node->next   = NULL;
+
+	prev = xchg(lock, node);
+	if (likely(prev == NULL)) {
+		/* Lock acquired */
+		node->locked = 1;
+		return;
+	}
+	ACCESS_ONCE(prev->next) = node;
+	smp_wmb();
+	/* Wait until the lock holder passes the lock down */
+	while (!ACCESS_ONCE(node->locked))
+		arch_mutex_cpu_relax();
+}
+
+static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
+{
+	struct mspin_node *next = ACCESS_ONCE(node->next);
+
+	if (likely(!next)) {
+		/*
+		 * Release the lock by setting it to NULL
+		 */
+		if (cmpxchg(lock, node, NULL) == node)
+			return;
+		/* Wait until the next pointer is set */
+		while (!(next = ACCESS_ONCE(node->next)))
+			arch_mutex_cpu_relax();
+	}
+	ACCESS_ONCE(next->locked) = 1;
+	smp_wmb();
+}
+
 /*
  * Mutex spinning code migrated from kernel/sched/core.c
  */
@@ -150,6 +207,24 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 	 */
 	return lock->owner == NULL;
 }
+
+/*
+ * Initial check for entering the mutex spinning loop
+ */
+static inline int mutex_can_spin_on_owner(struct mutex *lock)
+{
+	int retval = 1;
+
+	rcu_read_lock();
+	if (lock->owner)
+		retval = lock->owner->on_cpu;
+	rcu_read_unlock();
+	/*
+	 * if lock->owner is not set, the mutex owner may have just acquired
+	 * it and not set the owner yet or the mutex has been released.
+	 */
+	return retval;
+}
 #endif
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -215,26 +290,39 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 *
 	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 	 * to serialize everything.
+	 *
+	 * The mutex spinners are queued up using MCS lock so that only one
+	 * spinner can compete for the mutex. However, if mutex spinning isn't
+	 * going to happen, there is no point in going through the lock/unlock
+	 * overhead.
 	 */
+	if (!mutex_can_spin_on_owner(lock))
+		goto slowpath;
 
 	for (;;) {
 		struct task_struct *owner;
+		struct mspin_node  node;
 
 		/*
 		 * If there's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
+		mspin_lock(MLOCK(lock), &node);
 		owner = ACCESS_ONCE(lock->owner);
-		if (owner && !mutex_spin_on_owner(lock, owner))
+		if (owner && !mutex_spin_on_owner(lock, owner)) {
+			mspin_unlock(MLOCK(lock), &node);
 			break;
+		}
 
 		if ((atomic_read(&lock->count) == 1) &&
 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
 			lock_acquired(&lock->dep_map, ip);
 			mutex_set_owner(lock);
+			mspin_unlock(MLOCK(lock), &node);
 			preempt_enable();
 			return 0;
 		}
+		mspin_unlock(MLOCK(lock), &node);
 
 		/*
 		 * When there's no owner, we might have preempted between the
@@ -253,6 +341,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 */
 		arch_mutex_cpu_relax();
 	}
+slowpath:
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);

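The mspin_lock()/mspin_unlock() pair added above is a classic MCS queue lock: each spinner appends a node to a per-mutex queue, spins only on its own node->locked flag, and the unlocker hands the lock to exactly one successor, so at most one spinner at a time competes for the mutex itself. Below is a minimal user-space sketch of the same idea, assuming C11 atomics (seq_cst ordering for simplicity) in place of the kernel's xchg()/cmpxchg()/ACCESS_ONCE() and smp_wmb(); the mcs_* names are made up for this illustration and are not part of the commit.

/* Illustrative user-space sketch only; not the kernel implementation. */
#include <stdatomic.h>
#include <stddef.h>

struct mcs_node {
	struct mcs_node *_Atomic next;	/* successor in the spinner queue */
	atomic_int locked;		/* becomes 1 when the lock is handed to us */
};

/* The lock word is the tail of the queue; NULL means "no spinner queued". */
typedef struct mcs_node *_Atomic mcs_lock_t;

static void mcs_lock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *prev;

	atomic_init(&node->next, NULL);
	atomic_init(&node->locked, 0);

	/* Atomically append ourselves as the new tail of the queue. */
	prev = atomic_exchange(lock, node);
	if (prev == NULL)
		return;		/* queue was empty: lock acquired */

	/* Link in behind the old tail, then spin on our own flag only. */
	atomic_store(&prev->next, node);
	while (!atomic_load(&node->locked))
		;		/* the kernel uses arch_mutex_cpu_relax() here */
}

static void mcs_unlock(mcs_lock_t *lock, struct mcs_node *node)
{
	struct mcs_node *next = atomic_load(&node->next);

	if (next == NULL) {
		/* No known successor: try to swing the tail back to NULL. */
		struct mcs_node *expected = node;

		if (atomic_compare_exchange_strong(lock, &expected,
						   (struct mcs_node *)NULL))
			return;
		/* A successor is concurrently linking itself in; wait for it. */
		while ((next = atomic_load(&node->next)) == NULL)
			;
	}
	/* Hand the lock directly to the successor. */
	atomic_store(&next->locked, 1);
}

Because each waiter spins on a flag inside its own node (which in the kernel patch lives on the spinner's stack), the handoff writes only to the successor's node rather than to one shared word that every spinner is polling.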

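In the spin loop above, the actual acquisition is the pair atomic_read(&lock->count) == 1 followed by atomic_cmpxchg(&lock->count, 1, 0): by the long-standing convention in kernel/mutex.c the count is 1 when the mutex is unlocked and 0 when it is locked (negative values are used once sleepers may be queued). A user-space analogue of just that trylock step, again sketched with C11 atomics and an illustrative name:

#include <stdatomic.h>
#include <stdbool.h>

/* 1 = unlocked, 0 = locked; covers only the acquire step shown in the diff. */
static bool count_trylock(atomic_int *count)
{
	int expected = 1;

	/* Cheap plain read first, then a single compare-and-swap. */
	return atomic_load(count) == 1 &&
	       atomic_compare_exchange_strong(count, &expected, 0);
}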