diff --git a/[refs] b/[refs]
index 989225c5d69f..bd23d4a4a889 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 61e2cd0acc248c14793cefd7e23e209be9e0b70d
+refs/heads/master: 4a7f340c6a75ec5fca23d9c80a59f3f28cc4a61e
diff --git a/trunk/arch/x86/include/asm/spinlock.h b/trunk/arch/x86/include/asm/spinlock.h
index f5695eeb83ff..972c260919a3 100644
--- a/trunk/arch/x86/include/asm/spinlock.h
+++ b/trunk/arch/x86/include/asm/spinlock.h
@@ -49,10 +49,6 @@
  * issues and should be optimal for the uncontended case. Note the tail must be
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
- *
- * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
- * save some instructions and make the code more elegant. There really isn't
- * much between them in performance though, especially as locks are out of line.
  */
 static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
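
For context, the comment retained above describes the ticket-lock fast path: one wide xadd bumps the tail and returns the old word, so the locker learns the current head in the same atomic operation, and the tail must sit in the high part so the increment's carry cannot spill into the head. The following is a minimal user-space sketch of that scheme in C11 atomics, not the kernel's arch_spinlock_t code; it assumes a little-endian layout (as on x86) and borrows the kernel-style trick of overlapping byte and word atomics through a union, which is not strictly portable C11.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative only: head in the low byte, tail in the high byte,
 * mirroring the layout the comment above describes. */
typedef union {
	_Atomic uint16_t head_tail;
	struct {
		_Atomic uint8_t head;
		_Atomic uint8_t tail;
	} tickets;
} ticket_lock_t;

static inline void ticket_lock(ticket_lock_t *l)
{
	/* One wide fetch-add: bump the tail (high byte) and read back the
	 * whole word, learning the current head in the same operation. */
	uint16_t old = atomic_fetch_add_explicit(&l->head_tail, 1u << 8,
						 memory_order_acquire);
	uint8_t my_ticket = (uint8_t)(old >> 8);	/* tail slot we claimed */

	/* Spin until the head catches up with our ticket. */
	while (atomic_load_explicit(&l->tickets.head,
				    memory_order_acquire) != my_ticket)
		;	/* the kernel would cpu_relax() here */
}

static inline void ticket_unlock(ticket_lock_t *l)
{
	/* Advance the head with a byte-wide add, so a wraparound carry can
	 * never contaminate the tail -- the concern the comment calls out. */
	atomic_fetch_add_explicit(&l->tickets.head, 1, memory_order_release);
}

Usage would be a zero-initialized lock (ticket_lock_t lock = { .head_tail = 0 };) with ticket_lock()/ticket_unlock() around the critical section. The byte-wide unlock is the reason the removed comment about sub-2^8 CPU counts and partial registers became obsolete: the layout already guarantees the head increment cannot reach the tail regardless of how the fast path is coded.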