
Commit

---
yaml
---
r: 324014
b: refs/heads/master
c: e57dbaf
h: refs/heads/master
v: v3
Borislav Petkov committed Sep 17, 2012
1 parent 6d3eec7 commit b7d4ede
Showing 6 changed files with 47 additions and 193 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: f1f652447694f92beff8a534d94b36ea441c939a
refs/heads/master: e57dbaf77f372ac461b5b0b353c65efae9739a00
1 change: 1 addition & 0 deletions trunk/arch/x86/Kconfig
@@ -871,6 +871,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS

config X86_MCE
bool "Machine Check / overheating reporting"
default y
---help---
Machine Check support allows the processor to notify the
kernel if it detects a problem (e.g. overheating, data corruption).
8 changes: 0 additions & 8 deletions trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -78,7 +78,6 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
}

static cpumask_var_t mce_inject_cpumask;
static DEFINE_MUTEX(mce_inject_mutex);

static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
@@ -195,11 +194,7 @@ static void raise_mce(struct mce *m)
put_online_cpus();
} else
#endif
{
preempt_disable();
raise_local();
preempt_enable();
}
}

/* Error injection interface */
@@ -230,10 +225,7 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
* so do it a jiffie or two later everywhere.
*/
schedule_timeout(2);

mutex_lock(&mce_inject_mutex);
raise_mce(&m);
mutex_unlock(&mce_inject_mutex);
return usize;
}

12 changes: 0 additions & 12 deletions trunk/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -28,18 +28,6 @@ extern int mce_ser;

extern struct mce_bank *mce_banks;

#ifdef CONFIG_X86_MCE_INTEL
unsigned long mce_intel_adjust_timer(unsigned long interval);
void mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
#else
# define mce_intel_adjust_timer mce_adjust_timer_default
static inline void mce_intel_cmci_poll(void) { }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
#endif

void mce_timer_kick(unsigned long interval);

#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
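
Aside on the mce-internal.h hunk above: it shows a common kernel header pattern where, with the optional feature (here CONFIG_X86_MCE_INTEL) compiled out, the header supplies empty static inline stubs or a #define alias so call sites never need #ifdefs. Below is a minimal, standalone sketch of that pattern only; CONFIG_FEATURE_FOO and foo_poll() are invented names for illustration and are not part of this commit.

/*
 * Standalone sketch of the "stub when compiled out" pattern from the
 * mce-internal.h hunk. CONFIG_FEATURE_FOO and foo_poll() are invented
 * names used purely for illustration.
 */
#include <stdio.h>

/* #define CONFIG_FEATURE_FOO */        /* pretend this comes from Kconfig */

#ifdef CONFIG_FEATURE_FOO
void foo_poll(void);                    /* real implementation lives elsewhere */
#else
static inline void foo_poll(void) { }   /* feature off: empty stub, no #ifdef at call sites */
#endif

int main(void)
{
	foo_poll();                     /* builds and runs either way */
	printf("called foo_poll()\n");
	return 0;
}
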
84 changes: 25 additions & 59 deletions trunk/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1266,14 +1266,6 @@ static unsigned long check_interval = 5 * 60; /* 5 minutes */
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

static unsigned long mce_adjust_timer_default(unsigned long interval)
{
return interval;
}

static unsigned long (*mce_adjust_timer)(unsigned long interval) =
mce_adjust_timer_default;

static void mce_timer_fn(unsigned long data)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
@@ -1284,46 +1276,21 @@ static void mce_timer_fn(unsigned long data)
if (mce_available(__this_cpu_ptr(&cpu_info))) {
machine_check_poll(MCP_TIMESTAMP,
&__get_cpu_var(mce_poll_banks));
mce_intel_cmci_poll();
}

/*
* Alert userspace if needed. If we logged an MCE, reduce the
* polling interval, otherwise increase the polling interval.
*/
iv = __this_cpu_read(mce_next_interval);
if (mce_notify_irq()) {
if (mce_notify_irq())
iv = max(iv / 2, (unsigned long) HZ/100);
} else {
else
iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
iv = mce_adjust_timer(iv);
}
__this_cpu_write(mce_next_interval, iv);
/* Might have become 0 after CMCI storm subsided */
if (iv) {
t->expires = jiffies + iv;
add_timer_on(t, smp_processor_id());
}
}

/*
* Ensure that the timer is firing in @interval from now.
*/
void mce_timer_kick(unsigned long interval)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
unsigned long when = jiffies + interval;
unsigned long iv = __this_cpu_read(mce_next_interval);

if (timer_pending(t)) {
if (time_before(when, t->expires))
mod_timer_pinned(t, when);
} else {
t->expires = round_jiffies(when);
add_timer_on(t, smp_processor_id());
}
if (interval < iv)
__this_cpu_write(mce_next_interval, interval);
t->expires = jiffies + iv;
add_timer_on(t, smp_processor_id());
}

/* Must not be called in IRQ context where del_timer_sync() can deadlock */
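
For readers skimming the hunk above: both versions of mce_timer_fn() keep the same adaptive polling scheme. The interval is halved (but not below HZ/100) whenever mce_notify_irq() reports a logged event, and doubled (but capped at check_interval * HZ) otherwise. The following user-space sketch shows just that back-off logic; HZ and check_interval are stubbed with assumed values and the kernel's round_jiffies_relative() rounding is omitted.

/*
 * User-space sketch of the adaptive poll interval in mce_timer_fn():
 * halve the interval on a logged event (floor HZ/100), double it on a
 * quiet pass (ceiling check_interval * HZ). HZ and check_interval are
 * assumed values; round_jiffies_relative() rounding is left out.
 */
#include <stdio.h>

#define HZ 1000UL                                      /* assumption: 1000 jiffies/sec */
static const unsigned long check_interval = 5 * 60;    /* seconds, as in mce.c */

static unsigned long next_interval(unsigned long iv, int event_logged)
{
	if (event_logged)
		return iv / 2 > HZ / 100 ? iv / 2 : HZ / 100;
	return iv * 2 < check_interval * HZ ? iv * 2 : check_interval * HZ;
}

int main(void)
{
	unsigned long iv = check_interval * HZ;    /* start at the slow rate */

	iv = next_interval(iv, 1);                 /* an MCE was logged -> poll sooner */
	printf("after event: %lu jiffies\n", iv);
	iv = next_interval(iv, 0);                 /* quiet pass -> back off again */
	printf("after quiet: %lu jiffies\n", iv);
	return 0;
}
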
@@ -1618,7 +1585,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:
mce_intel_feature_init(c);
mce_adjust_timer = mce_intel_adjust_timer;
break;
case X86_VENDOR_AMD:
mce_amd_feature_init(c);
@@ -1628,28 +1594,23 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
}
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
static void __mcheck_cpu_init_timer(void)
{
unsigned long iv = mce_adjust_timer(check_interval * HZ);
struct timer_list *t = &__get_cpu_var(mce_timer);
unsigned long iv = check_interval * HZ;

__this_cpu_write(mce_next_interval, iv);
setup_timer(t, mce_timer_fn, smp_processor_id());

if (mce_ignore_ce || !iv)
if (mce_ignore_ce)
return;

__this_cpu_write(mce_next_interval, iv);
if (!iv)
return;
t->expires = round_jiffies(jiffies + iv);
add_timer_on(t, smp_processor_id());
}

static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
unsigned int cpu = smp_processor_id();

setup_timer(t, mce_timer_fn, cpu);
mce_start_timer(cpu, t);
}

/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
@@ -2333,33 +2294,38 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
unsigned int cpu = (unsigned long)hcpu;
struct timer_list *t = &per_cpu(mce_timer, cpu);

switch (action & ~CPU_TASKS_FROZEN) {
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
mce_device_create(cpu);
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
if (threshold_cpu_callback)
threshold_cpu_callback(action, cpu);
mce_device_remove(cpu);
mce_intel_hcpu_update(cpu);
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
case CPU_DOWN_PREPARE_FROZEN:
del_timer_sync(t);
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
if (!mce_ignore_ce && check_interval) {
t->expires = round_jiffies(jiffies +
per_cpu(mce_next_interval, cpu));
add_timer_on(t, cpu);
}
smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
mce_start_timer(cpu, t);
break;
}

if (action == CPU_POST_DEAD) {
case CPU_POST_DEAD:
/* intentionally ignoring frozen here */
cmci_rediscover(cpu);
break;
}

return NOTIFY_OK;
}

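
One more aside on the mce_cpu_callback() hunk: the two switch statements shown differ in whether the *_FROZEN suspend/resume variants are listed explicitly or folded into the base cases by masking off CPU_TASKS_FROZEN. The sketch below demonstrates only that masking idiom; the constant values are reproduced as recalled from include/linux/cpu.h of that era and are included purely for the demo, not as authoritative definitions.

/*
 * Sketch of the CPU_TASKS_FROZEN masking idiom from mce_cpu_callback():
 * clearing the frozen bit lets one case label handle both CPU_ONLINE and
 * CPU_ONLINE_FROZEN. Constant values here are illustrative; only the
 * masking pattern matters.
 */
#include <stdio.h>

#define CPU_ONLINE        0x0002
#define CPU_DOWN_PREPARE  0x0005
#define CPU_TASKS_FROZEN  0x0010
#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)

static const char *classify(unsigned long action)
{
	switch (action & ~CPU_TASKS_FROZEN) {   /* fold *_FROZEN into the base case */
	case CPU_ONLINE:
		return "online";
	case CPU_DOWN_PREPARE:
		return "down-prepare";
	default:
		return "other";
	}
}

int main(void)
{
	printf("%s\n", classify(CPU_ONLINE));          /* online */
	printf("%s\n", classify(CPU_ONLINE_FROZEN));   /* online (frozen bit masked off) */
	return 0;
}
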
