
Commit

---
yaml
---
r: 269035
b: refs/heads/master
c: 9c48f1c
h: refs/heads/master
i:
  269033: 77d5f65
  269031: 6020902
v: v3
Don Zickus authored and Ingo Molnar committed Oct 10, 2011
1 parent aa2738b commit a28449e
Showing 17 changed files with 125 additions and 282 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: c9126b2ee8adb9235941cedbf558d39a9e65642d
refs/heads/master: 9c48f1c629ecfa114850c03f875c6691003214de
20 changes: 0 additions & 20 deletions trunk/arch/x86/include/asm/nmi.h
@@ -22,26 +22,6 @@ void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif

/*
* Define some priorities for the nmi notifier call chain.
*
* Create a local nmi bit that has a higher priority than
* external nmis, because the local ones are more frequent.
*
* Also setup some default high/normal/low settings for
* subsystems to register with. Using 4 bits to separate
* the priorities. This can go a lot higher if need be.
*/

#define NMI_LOCAL_SHIFT 16 /* randomly picked */
#define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT)
#define NMI_HIGH_PRIOR (1ULL << 8)
#define NMI_NORMAL_PRIOR (1ULL << 4)
#define NMI_LOW_PRIOR (1ULL << 0)
#define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
#define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
#define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR)

#define NMI_FLAG_FIRST 1

enum {
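For reference, a minimal sketch of what registration against the new per-type handler lists looks like once these priority macros are gone. The names register_nmi_handler(), NMI_LOCAL, NMI_FLAG_FIRST, NMI_HANDLED and NMI_DONE come from this series; everything prefixed "mydev" is a hypothetical example, not code from this commit:

#include <linux/init.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/nmi.h>		/* register_nmi_handler(), NMI_LOCAL, NMI_HANDLED, ... */

static atomic_t mydev_nmi_pending;	/* hypothetical: set elsewhere when our hardware raises an NMI */

static int mydev_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!atomic_xchg(&mydev_nmi_pending, 0))
		return NMI_DONE;	/* not ours: let the next handler on the NMI_LOCAL list look */

	/* do the minimal, NMI-safe work here */
	return NMI_HANDLED;		/* claimed: the NMI is not reported as unknown */
}

static int __init mydev_nmi_init(void)
{
	/* Passing NMI_FLAG_FIRST instead of 0 is meant to put the handler at the head of the list. */
	return register_nmi_handler(NMI_LOCAL, mydev_nmi_handler, 0, "mydev");
}
early_initcall(mydev_nmi_init);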
2 changes: 1 addition & 1 deletion trunk/arch/x86/include/asm/reboot.h
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1

typedef void (*nmi_shootdown_cb)(int, struct die_args*);
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);

#endif /* _ASM_X86_REBOOT_H */
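Callers of nmi_shootdown_cpus() now get the registers directly rather than a struct die_args to unpack. A sketch of a callback written against the new signature (the callback itself is a hypothetical example; crash_save_cpu() is the existing kexec helper the real kdump callback uses):

#include <linux/kexec.h>	/* crash_save_cpu() */
#include <asm/ptrace.h>		/* struct pt_regs */
#include <asm/reboot.h>		/* nmi_shootdown_cpus(), nmi_shootdown_cb */

static void snapshot_cpu_callback(int cpu, struct pt_regs *regs)
{
	/* regs arrives directly now; no args->regs indirection. */
	crash_save_cpu(regs, cpu);
}

/* ... later, on the crashing CPU: */
/* nmi_shootdown_cpus(snapshot_cpu_callback); */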
27 changes: 5 additions & 22 deletions trunk/arch/x86/kernel/apic/hw_nmi.c
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
}

static int __kprobes
arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = __args;
struct pt_regs *regs;
int cpu;

switch (cmd) {
case DIE_NMI:
break;

default:
return NOTIFY_DONE;
}

regs = args->regs;
cpu = smp_processor_id();

if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
show_regs(regs);
arch_spin_unlock(&lock);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return NOTIFY_STOP;
return NMI_HANDLED;
}

return NOTIFY_DONE;
return NMI_DONE;
}

static __read_mostly struct notifier_block backtrace_notifier = {
.notifier_call = arch_trigger_all_cpu_backtrace_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};

static int __init register_trigger_all_cpu_backtrace(void)
{
register_die_notifier(&backtrace_notifier);
register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
0, "arch_bt");
return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
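The same mechanical conversion repeats in the files below; schematically, with a hypothetical "foo" subsystem standing in for each of them (foo_claimed() is an invented stand-in for the driver-specific "was this NMI mine?" test):

#include <linux/notifier.h>	/* old: notifier_block, NOTIFY_DONE, NOTIFY_STOP */
#include <linux/kdebug.h>	/* old: struct die_args, DIE_NMI, register_die_notifier() */
#include <asm/nmi.h>		/* new: register_nmi_handler(), NMI_HANDLED, NMI_DONE */

static int foo_claimed(struct pt_regs *regs) { return 0; }	/* hypothetical stub */

/* Old shape: a die notifier that filters on the DIE_NMI reason code. */
static int foo_nmi_notify(struct notifier_block *self, unsigned long cmd, void *data)
{
	struct die_args *args = data;

	if (cmd != DIE_NMI)
		return NOTIFY_DONE;
	if (!foo_claimed(args->regs))
		return NOTIFY_DONE;
	return NOTIFY_STOP;
}
/* ... registered with register_die_notifier() and one of the NMI_*_PRIOR priorities. */

/* New shape: a dedicated handler on the NMI_LOCAL list. */
static int foo_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	if (!foo_claimed(regs))
		return NMI_DONE;
	return NMI_HANDLED;
}
/* ... registered with register_nmi_handler(NMI_LOCAL, foo_nmi_handler, 0, "foo"). */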
20 changes: 4 additions & 16 deletions trunk/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
/*
* When NMI is received, print a stack trace.
*/
int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
{
unsigned long real_uv_nmi;
int bid;

if (reason != DIE_NMIUNKNOWN)
return NOTIFY_OK;

if (in_crash_kexec)
/* do nothing if entering the crash kernel */
return NOTIFY_OK;

/*
* Each blade has an MMR that indicates when an NMI has been sent
* to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
}

if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
return NOTIFY_DONE;
return NMI_DONE;

__get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;

@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
dump_stack();
spin_unlock(&uv_nmi_lock);

return NOTIFY_STOP;
return NMI_HANDLED;
}

static struct notifier_block uv_dump_stack_nmi_nb = {
.notifier_call = uv_handle_nmi,
.priority = NMI_LOCAL_LOW_PRIOR - 1,
};

void uv_register_nmi_notifier(void)
{
if (register_die_notifier(&uv_dump_stack_nmi_nb))
if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
printk(KERN_WARNING "UV NMI handler failed to register\n");
}

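The reason != DIE_NMIUNKNOWN test could be dropped because that filtering is now expressed at registration time: with this series, handlers on the NMI_UNKNOWN list are only invoked for NMIs that nothing on the NMI_LOCAL list claimed. A hypothetical sketch of that shape, reusing the stand-in names from the sketch above:

static int foo_unknown_nmi(unsigned int reason, struct pt_regs *regs)
{
	/* Only reached for NMIs that no NMI_LOCAL handler claimed. */
	if (!foo_claimed(regs))		/* hypothetical driver-specific check */
		return NMI_DONE;	/* still unknown; the core code will report it */
	return NMI_HANDLED;
}

/* register_nmi_handler(NMI_UNKNOWN, foo_unknown_nmi, 0, "foo"); */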
20 changes: 7 additions & 13 deletions trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)

static cpumask_var_t mce_inject_cpumask;

static int mce_raise_notify(struct notifier_block *self,
unsigned long val, void *data)
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = (struct die_args *)data;
int cpu = smp_processor_id();
struct mce *m = &__get_cpu_var(injectm);
if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
return NOTIFY_DONE;
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
return NMI_DONE;
cpumask_clear_cpu(cpu, mce_inject_cpumask);
if (m->inject_flags & MCJ_EXCEPTION)
raise_exception(m, args->regs);
raise_exception(m, regs);
else if (m->status)
raise_poll(m);
return NOTIFY_STOP;
return NMI_HANDLED;
}

static struct notifier_block mce_raise_nb = {
.notifier_call = mce_raise_notify,
.priority = NMI_LOCAL_NORMAL_PRIOR,
};

/* Inject mce on current CPU */
static int raise_local(void)
{
@@ -216,7 +209,8 @@ static int inject_init(void)
return -ENOMEM;
printk(KERN_INFO "Machine check injector initialized\n");
mce_chrdev_ops.write = mce_write;
register_die_notifier(&mce_raise_nb);
register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
"mce_notify");
return 0;
}

3 changes: 0 additions & 3 deletions trunk/arch/x86/kernel/cpu/mcheck/mce.c
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)

percpu_inc(mce_exception_count);

if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP)
goto out;
if (!banks)
goto out;

69 changes: 4 additions & 65 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}

struct pmu_nmi_state {
unsigned int marked;
int handled;
};

static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;

if (!atomic_read(&active_events))
return NOTIFY_DONE;

switch (cmd) {
case DIE_NMI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;
default:
return NOTIFY_DONE;
}

handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
return NMI_DONE;

this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
(__this_cpu_read(pmu_nmi.handled) > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
__this_cpu_write(pmu_nmi.handled, handled);
}

return NOTIFY_STOP;
return x86_pmu.handle_irq(regs);
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.notifier_call = perf_event_nmi_handler,
.next = NULL,
.priority = NMI_LOCAL_LOW_PRIOR,
};

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

@@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void)
((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

perf_events_lapic_init();
register_die_notifier(&perf_event_nmi_notifier);
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
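None of the removed bookkeeping is lost: the back-to-back NMI tracking this handler used to do by hand was moved into the common x86 NMI dispatch code added earlier in this series, so the perf handler can simply report how much work it did. Returning the value of x86_pmu.handle_irq() directly works under the return convention assumed throughout this commit: NMI_DONE is 0, NMI_HANDLED is 1, and the dispatcher sums handler return values, treating anything positive as handled. A worked sketch of that convention (handler and counts are hypothetical):

static int pmu_style_handler(unsigned int cmd, struct pt_regs *regs)
{
	int handled = 0;

	handled += 1;	/* hypothetical: first counter overflowed */
	handled += 1;	/* hypothetical: a second counter overflowed in the same NMI */

	/* 2 is returned here; anything > 0 is treated like NMI_HANDLED. */
	return handled ? handled : NMI_DONE;
}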
5 changes: 1 addition & 4 deletions trunk/arch/x86/kernel/crash.c
@@ -32,15 +32,12 @@ int in_crash_kexec;

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct die_args *args)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
struct pt_regs *regs;
#ifdef CONFIG_X86_32
struct pt_regs fixed_regs;
#endif

regs = args->regs;

#ifdef CONFIG_X86_32
if (!user_mode_vm(regs)) {
crash_fixup_ss_esp(&fixed_regs, regs);
60 changes: 45 additions & 15 deletions trunk/arch/x86/kernel/kgdb.c
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)

static int was_in_debug_nmi[NR_CPUS];

static int __kgdb_notify(struct die_args *args, unsigned long cmd)
static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
struct pt_regs *regs = args->regs;

switch (cmd) {
case DIE_NMI:
case NMI_LOCAL:
if (atomic_read(&kgdb_active) != -1) {
/* KGDB CPU roundup */
kgdb_nmicallback(raw_smp_processor_id(), regs);
was_in_debug_nmi[raw_smp_processor_id()] = 1;
touch_nmi_watchdog();
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
break;

case DIE_NMIUNKNOWN:
case NMI_UNKNOWN:
if (was_in_debug_nmi[raw_smp_processor_id()]) {
was_in_debug_nmi[raw_smp_processor_id()] = 0;
return NOTIFY_STOP;
return NMI_HANDLED;
}
return NOTIFY_DONE;
break;
default:
/* do nothing */
break;
}
return NMI_DONE;
}

static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
struct pt_regs *regs = args->regs;

switch (cmd) {
case DIE_DEBUG:
if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)

static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_notify,

/*
* Lowest-prio notifier priority, we want to be notified last:
*/
.priority = NMI_LOCAL_LOW_PRIOR,
};

/**
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
return register_die_notifier(&kgdb_notifier);
int retval;

retval = register_die_notifier(&kgdb_notifier);
if (retval)
goto out;

retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
0, "kgdb");
if (retval)
goto out1;

retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
0, "kgdb");

if (retval)
goto out2;

return retval;

out2:
unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
unregister_die_notifier(&kgdb_notifier);
out:
return retval;
}

static void kgdb_hw_overflow_handler(struct perf_event *event,
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
breakinfo[i].pev = NULL;
}
}
unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
unregister_nmi_handler(NMI_LOCAL, "kgdb");
unregister_die_notifier(&kgdb_notifier);
}

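kgdb ends up on both the NMI_LOCAL and NMI_UNKNOWN lists under the same "kgdb" name, registering with goto-based unwinding and tearing down in the reverse order of registration. A minimal sketch of that pairing for a hypothetical user, with foo_nmi_handler as sketched earlier:

static int __init foo_init(void)
{
	int ret;

	ret = register_nmi_handler(NMI_LOCAL, foo_nmi_handler, 0, "foo");
	if (ret)
		return ret;

	ret = register_nmi_handler(NMI_UNKNOWN, foo_nmi_handler, 0, "foo");
	if (ret)
		unregister_nmi_handler(NMI_LOCAL, "foo");	/* unwind the first registration */
	return ret;
}

static void __exit foo_exit(void)
{
	/* Unregister in reverse order, keyed by the same name string used at registration. */
	unregister_nmi_handler(NMI_UNKNOWN, "foo");
	unregister_nmi_handler(NMI_LOCAL, "foo");
}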