s390/spinlock: avoid diagnose loop
The spinlock implementation calls diagnose 0x9c / 0x44 immediately
if the SIGP sense running status reports the target CPU as not running.

The diagnose 0x9c is a hint to the hypervisor to schedule the target
CPU in preference to the source CPU that issued the diagnose. It can
happen that, on return from the diagnose, the target CPU has still not
been scheduled, e.g. if the target logical CPU is on another physical
CPU and the hypervisor chose not to migrate it.

Avoid the immediate repeat of the diagnose instruction; instead, run
the retry loop on the lock value before the next invocation of
diagnose 0x9c.
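
In outline, the patched wait loop issues diagnose 0x9c at most once
before spinning on the lock value, and yields again only after a full
retry round has failed. The following stand-alone sketch illustrates
that control flow; it is not the kernel code: C11 atomics and trivial
stubs stand in for _raw_compare_and_swap(), smp_vcpu_scheduled() (SIGP
sense running) and smp_yield_cpu() (diagnose 0x9c), SPIN_RETRY stands
in for the spin_retry tunable, and ~owner mirrors the kernel's
convention of storing the bitwise complement of the CPU number in the
lock word so that a held lock is never zero.

/*
 * Simplified stand-alone sketch of the patched wait loop. The stubs
 * below are placeholders for the real s390 primitives.
 */
#include <stdatomic.h>

#define SPIN_RETRY 1000 /* stand-in for the spin_retry tunable */

/* Stub for SIGP sense running: pretend the owner is always scheduled. */
static int smp_vcpu_scheduled(unsigned int cpu) { (void)cpu; return 1; }

/* Stub for the diagnose 0x9c hint to the hypervisor. */
static void smp_yield_cpu(unsigned int cpu) { (void)cpu; }

void spin_lock_wait(atomic_uint *lock, unsigned int lockval)
{
        unsigned int owner, zero;
        int count, first_diag = 1;

        while (1) {
                owner = atomic_load(lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        zero = 0;
                        if (atomic_compare_exchange_strong(lock, &zero,
                                                           lockval))
                                return;
                        continue;
                }
                /* First iteration: yield once if the owner is not running. */
                if (first_diag && !smp_vcpu_scheduled(~owner)) {
                        smp_yield_cpu(~owner); /* diagnose 0x9c */
                        first_diag = 0;
                        continue;
                }
                /* Retry loop on the lock value before the next diagnose. */
                count = SPIN_RETRY;
                while (count-- > 0 && atomic_load(lock) == owner)
                        ;
                if (atomic_load(lock) != owner)
                        continue; /* lock changed hands, start over */
                /*
                 * Retry round exhausted: yield again. (The kernel does
                 * this unconditionally only under a second hypervisor
                 * layer, i.e. !MACHINE_IS_LPAR; that check is omitted
                 * in this sketch.)
                 */
                smp_yield_cpu(~owner);
                first_diag = 0;
        }
}

Clearing first_diag in both tails is what breaks the diagnose loop:
once a yield has happened, the sense-running shortcut at the top of
the loop is disabled, so the next diagnose 0x9c can only follow a full
SPIN_RETRY round on the lock value.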

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Martin Schwidefsky committed Nov 27, 2015
1 parent 1a2c584 commit db1c451
1 changed file: arch/s390/lib/spinlock.c (19 additions, 9 deletions)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -41,8 +41,9 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
         unsigned int cpu = SPINLOCK_LOCKVAL;
         unsigned int owner;
-        int count;
+        int count, first_diag;
 
+        first_diag = 1;
         while (1) {
                 owner = ACCESS_ONCE(lp->lock);
                 /* Try to get the lock if it is free. */
@@ -51,9 +52,10 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
                                 return;
                         continue;
                 }
-                /* Check if the lock owner is running. */
-                if (!smp_vcpu_scheduled(~owner)) {
+                /* First iteration: check if the lock owner is running. */
+                if (first_diag && !smp_vcpu_scheduled(~owner)) {
                         smp_yield_cpu(~owner);
+                        first_diag = 0;
                         continue;
                 }
                 /* Loop for a while on the lock value. */
@@ -67,10 +69,13 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
                         continue;
                 /*
                  * For multiple layers of hypervisors, e.g. z/VM + LPAR
-                 * yield the CPU if the lock is still unavailable.
+                 * yield the CPU unconditionally. For LPAR rely on the
+                 * sense running status.
                  */
-                if (!MACHINE_IS_LPAR)
+                if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
                         smp_yield_cpu(~owner);
+                        first_diag = 0;
+                }
         }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -79,9 +84,10 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
         unsigned int cpu = SPINLOCK_LOCKVAL;
         unsigned int owner;
-        int count;
+        int count, first_diag;
 
         local_irq_restore(flags);
+        first_diag = 1;
         while (1) {
                 owner = ACCESS_ONCE(lp->lock);
                 /* Try to get the lock if it is free. */
@@ -92,8 +98,9 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                         local_irq_restore(flags);
                 }
                 /* Check if the lock owner is running. */
-                if (!smp_vcpu_scheduled(~owner)) {
+                if (first_diag && !smp_vcpu_scheduled(~owner)) {
                         smp_yield_cpu(~owner);
+                        first_diag = 0;
                         continue;
                 }
                 /* Loop for a while on the lock value. */
@@ -107,10 +114,13 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
                         continue;
                 /*
                  * For multiple layers of hypervisors, e.g. z/VM + LPAR
-                 * yield the CPU if the lock is still unavailable.
+                 * yield the CPU unconditionally. For LPAR rely on the
+                 * sense running status.
                  */
-                if (!MACHINE_IS_LPAR)
+                if (!MACHINE_IS_LPAR || !smp_vcpu_scheduled(~owner)) {
                         smp_yield_cpu(~owner);
+                        first_diag = 0;
+                }
         }
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
