diff --git a/[refs] b/[refs]
index 881dee34c0b5..4836ca24edf9 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4d576b57b50a92801e6493e76e5243d6cff193d2
+refs/heads/master: 2496afbf1e50c70f80992656bcb730c8583ddac3
diff --git a/trunk/arch/x86/xen/spinlock.c b/trunk/arch/x86/xen/spinlock.c
index 2f91e5651926..36a5141108df 100644
--- a/trunk/arch/x86/xen/spinlock.c
+++ b/trunk/arch/x86/xen/spinlock.c
@@ -326,8 +326,13 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
 	smp_wmb();		/* make sure no writes get moved after unlock */
 	xl->lock = 0;		/* release lock */
 
-	/* make sure unlock happens before kick */
-	barrier();
+	/*
+	 * Make sure unlock happens before checking for waiting
+	 * spinners. We need a strong barrier to enforce the
+	 * write-read ordering to different memory locations, as the
+	 * CPU makes no implied guarantees about their ordering.
+	 */
+	mb();
 
 	if (unlikely(xl->spinners))
 		xen_spin_unlock_slow(xl);
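
For context on why the patch upgrades barrier() to mb(): the unlock path stores to xl->lock and then loads xl->spinners, and x86 allows a store followed by a load to a different location to be reordered by the CPU (the store can still sit in the store buffer when the load completes), while barrier() only constrains the compiler. Below is a minimal userspace sketch of the same store-then-load pattern, not kernel code; the names fake_lock, fake_spinners, and sketch_unlock are made up for illustration, and __sync_synchronize() stands in for the kernel's mb().

	/*
	 * Hypothetical sketch of the ordering problem the patch addresses:
	 * release a lock word, then check a separate "waiters" word.
	 * Without a full memory barrier between them, the load of
	 * fake_spinners may be satisfied before the store to fake_lock
	 * becomes globally visible, so a waiter could be missed.
	 */
	#include <stdio.h>

	static volatile unsigned char fake_lock;	/* stand-in for xl->lock */
	static volatile unsigned char fake_spinners;	/* stand-in for xl->spinners */

	static void sketch_unlock(void)
	{
		asm volatile("" ::: "memory");	/* compiler-only barrier, like barrier() */
		fake_lock = 0;			/* release the lock word */

		/*
		 * A compiler barrier alone is not enough here: the CPU may
		 * still order the fake_spinners load ahead of the fake_lock
		 * store.  A full fence makes the store visible before the
		 * load executes, matching the patch's use of mb().
		 */
		__sync_synchronize();

		if (fake_spinners)
			printf("would kick waiting spinners\n");
	}

	int main(void)
	{
		fake_lock = 1;
		fake_spinners = 0;
		sketch_unlock();
		return 0;
	}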