diff --git a/[refs] b/[refs]
index f8378220dba3..f592b51fe182 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 930f8b8bcde30b501fdf00fb7624aefb9bf35f47
+refs/heads/master: 8c40ad02e5b026902b8ce134f895b3b09803db39
diff --git a/trunk/arch/i386/kernel/smp.c b/trunk/arch/i386/kernel/smp.c
index ffc4f65c5189..9bd9637ae692 100644
--- a/trunk/arch/i386/kernel/smp.c
+++ b/trunk/arch/i386/kernel/smp.c
@@ -375,8 +375,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	/*
 	 * i'm not happy about this global shared spinlock in the
 	 * MM hot path, but we'll see how contended it is.
-	 * Temporarily this turns IRQs off, so that lockups are
-	 * detected by the NMI watchdog.
+	 * AK: x86-64 has a faster method that could be ported.
 	 */
 	spin_lock(&tlbstate_lock);
 
@@ -401,7 +400,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 
 	while (!cpus_empty(flush_cpumask))
 		/* nothing. lockup detection does not belong here */
-		mb();
+		cpu_relax();
 
 	flush_mm = NULL;
 	flush_va = 0;
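
For context: the hunk above swaps mb() for cpu_relax() in the loop that waits for other CPUs to acknowledge the TLB-flush IPI. A full memory barrier on every iteration is far heavier than needed just to re-read flush_cpumask; on i386 of this era, cpu_relax() expands to the PAUSE instruction ("rep; nop") with a "memory" clobber, which throttles the busy-wait and is friendlier to SMT siblings. Below is a minimal, hypothetical user-space sketch of that spin-wait pattern, assuming GCC on an x86 target; it is not the kernel code, and the names flush_pending, relax(), and wait_for_flush() are invented for illustration.

	/*
	 * Sketch of the cpu_relax()-style spin-wait, assuming x86/GCC.
	 * All names here are hypothetical, for illustration only.
	 */
	#include <stdatomic.h>

	static atomic_int flush_pending;	/* stands in for flush_cpumask */

	static inline void relax(void)
	{
		/* PAUSE: hint to the CPU that this is a busy-wait loop */
		__asm__ __volatile__("rep; nop" ::: "memory");
	}

	void wait_for_flush(void)
	{
		/*
		 * The atomic load gives the fresh-read guarantee the old
		 * mb() was (ab)used for; relax() only throttles the loop,
		 * as cpu_relax() does in the hunk above.
		 */
		while (atomic_load(&flush_pending))
			relax();
	}

	int main(void)
	{
		atomic_store(&flush_pending, 0);	/* pretend remote CPUs finished */
		wait_for_flush();			/* returns immediately */
		return 0;
	}
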