Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 305491
b: refs/heads/master
c: 7d007d2
h: refs/heads/master
i:
  305489: 4d7ac54
  305487: c72f232
v: v3
  • Loading branch information
Don Zickus authored and Ingo Molnar committed May 14, 2012
1 parent 18665d1 commit 882fbbe
Show file tree
Hide file tree
Showing 2 changed files with 57 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 5d2b86d90f7cc4a41316cef3d41560da6141f45c
refs/heads/master: 7d007d21e539dbecb6942c5734e6649f720982cf
61 changes: 56 additions & 5 deletions trunk/arch/x86/kernel/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
/*
* Some notes on x86 processor bugs affecting SMP operation:
*
Expand Down Expand Up @@ -108,6 +109,8 @@
* about nothing of note with C stepping upwards.
*/

/*
 * CPU that won the race to shut the others down, or -1 when no
 * shutdown is in progress.  Set once via atomic_cmpxchg() by the
 * CPU executing native_stop_other_cpus(); read by the NMI callback
 * so the stopping CPU does not stop itself on a spurious NMI.
 */
static atomic_t stopping_cpu = ATOMIC_INIT(-1);

/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
Expand Down Expand Up @@ -148,6 +151,17 @@ void native_send_call_func_ipi(const struct cpumask *mask)
free_cpumask_var(allbutself);
}

/*
 * NMI handler that forces a CPU into stop_this_cpu().
 *
 * The handler is registered on every CPU, including the one driving
 * the shutdown; that CPU must keep running, so an NMI arriving there
 * is consumed without stopping it.  Always reports NMI_HANDLED so the
 * NMI is not treated as unknown.
 */
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	int this_cpu = raw_smp_processor_id();

	/* Everyone except the initiating CPU parks itself here. */
	if (this_cpu != atomic_read(&stopping_cpu))
		stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
* this function calls the 'stop' function on all other CPUs in the system.
*/
Expand All @@ -171,13 +185,25 @@ static void native_stop_other_cpus(int wait)
/*
* Use an own vector here because smp_call_function
* does lots of things not suitable in a panic situation.
* On most systems we could also use an NMI here,
* but there are a few systems around where NMI
* is problematic so stay with an non NMI for now
* (this implies we cannot stop CPUs spinning with irq off
* currently)
*/

/*
* We start by using the REBOOT_VECTOR irq.
* The irq is treated as a sync point to allow critical
* regions of code on other cpus to release their spin locks
* and re-enable irqs. Jumping straight to an NMI might
* accidentally cause deadlocks with further shutdown/panic
* code. By syncing, we give the cpus up to one second to
* finish their work before we force them off with the NMI.
*/
if (num_online_cpus() > 1) {
/* did someone beat us here? */
if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
return;

/* sync above data before sending IRQ */
wmb();

apic->send_IPI_allbutself(REBOOT_VECTOR);

/*
Expand All @@ -188,7 +214,32 @@ static void native_stop_other_cpus(int wait)
while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}

/* if the REBOOT_VECTOR didn't work, try with the NMI */
if ((num_online_cpus() > 1)) {
if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
NMI_FLAG_FIRST, "smp_stop"))
/* Note: we ignore failures here */
/* Hope the REBOOT_IRQ is good enough */
goto finish;

/* sync above data before sending IRQ */
wmb();

pr_emerg("Shutting down cpus with NMI\n");

apic->send_IPI_allbutself(NMI_VECTOR);

/*
* Don't wait longer than a 10 ms if the caller
* didn't ask us to wait.
*/
timeout = USEC_PER_MSEC * 10;
while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}

finish:
local_irq_save(flags);
disable_local_APIC();
local_irq_restore(flags);
Expand Down

0 comments on commit 882fbbe

Please sign in to comment.