x86: Unify fixup_irqs() for 32-bit and 64-bit kernels
There is no reason to carry two different fixup_irqs() implementations
for 32-bit and 64-bit kernels. Unify them on the superior 64-bit
version, which takes desc->lock, masks each interrupt while its
affinity is rewritten, and skips interrupts that have no action or
whose affinity already matches cpu_online_mask.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Gary Hade <garyhade@us.ibm.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
LKML-Reference: <20091026230001.562512739@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Suresh Siddha authored and Ingo Molnar committed Nov 2, 2009
1 parent: 6f9b410 · commit: 7a7732b
Showing 3 changed files with 59 additions and 103 deletions.
arch/x86/kernel/irq.c (59 additions, 0 deletions)

@@ -276,3 +276,62 @@ void smp_generic_interrupt(struct pt_regs *regs)
 }
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
+void fixup_irqs(void)
+{
+        unsigned int irq;
+        static int warned;
+        struct irq_desc *desc;
+
+        for_each_irq_desc(irq, desc) {
+                int break_affinity = 0;
+                int set_affinity = 1;
+                const struct cpumask *affinity;
+
+                if (!desc)
+                        continue;
+                if (irq == 2)
+                        continue;
+
+                /* interrupts are disabled at this point */
+                spin_lock(&desc->lock);
+
+                affinity = desc->affinity;
+                if (!irq_has_action(irq) ||
+                    cpumask_equal(affinity, cpu_online_mask)) {
+                        spin_unlock(&desc->lock);
+                        continue;
+                }
+
+                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+                        break_affinity = 1;
+                        affinity = cpu_all_mask;
+                }
+
+                if (desc->chip->mask)
+                        desc->chip->mask(irq);
+
+                if (desc->chip->set_affinity)
+                        desc->chip->set_affinity(irq, affinity);
+                else if (!(warned++))
+                        set_affinity = 0;
+
+                if (desc->chip->unmask)
+                        desc->chip->unmask(irq);
+
+                spin_unlock(&desc->lock);
+
+                if (break_affinity && set_affinity)
+                        printk("Broke affinity for irq %i\n", irq);
+                else if (!set_affinity)
+                        printk("Cannot set affinity for irq %i\n", irq);
+        }
+
+        /* That doesn't seem sufficient. Give it 1ms. */
+        local_irq_enable();
+        mdelay(1);
+        local_irq_disable();
+}
+#endif
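
For context, here is a sketch of where the unified routine is invoked. The caller below is an assumption based on the CPU-offline path of this era (cpu_disable_common() in arch/x86/kernel/smpboot.c) and is not part of this commit; names and ordering are illustrative only.

/*
 * Hedged sketch, not from this commit: fixup_irqs() is assumed to run
 * on the dying CPU, with interrupts disabled, after that CPU has been
 * cleared from cpu_online_mask, so the loop above can retarget every
 * IRQ still pointing at it.
 */
void cpu_disable_common(void)
{
        int cpu = smp_processor_id();

        remove_siblinginfo(cpu);        /* drop this CPU's topology links */

        /* It is now safe to remove this processor from the online map. */
        lock_vector_lock();
        remove_cpu_from_maps(cpu);      /* clears the cpu_online_mask bit */
        unlock_vector_lock();

        fixup_irqs();                   /* reroute IRQs away from this CPU */
}

Because the CPU is already gone from cpu_online_mask by the time fixup_irqs() runs, the cpumask_any_and() test in the loop correctly detects affinities left pointing only at offline CPUs.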
arch/x86/kernel/irq_32.c (0 additions, 45 deletions)

@@ -211,48 +211,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 
         return true;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
-void fixup_irqs(void)
-{
-        unsigned int irq;
-        struct irq_desc *desc;
-
-        for_each_irq_desc(irq, desc) {
-                const struct cpumask *affinity;
-
-                if (!desc)
-                        continue;
-                if (irq == 2)
-                        continue;
-
-                affinity = desc->affinity;
-                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-                        printk("Breaking affinity for irq %i\n", irq);
-                        affinity = cpu_all_mask;
-                }
-                if (desc->chip->set_affinity)
-                        desc->chip->set_affinity(irq, affinity);
-                else if (desc->action)
-                        printk_once("Cannot set affinity for irq %i\n", irq);
-        }
-
-#if 0
-        barrier();
-        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
-           [note the nop - the interrupt-enable boundary on x86 is two
-           instructions from sti] - to flush out pending hardirqs and
-           IPIs. After this point nothing is supposed to reach this CPU." */
-        __asm__ __volatile__("sti; nop; cli");
-        barrier();
-#else
-        /* That doesn't seem sufficient. Give it 1ms. */
-        local_irq_enable();
-        mdelay(1);
-        local_irq_disable();
-#endif
-}
-#endif
-
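
Aside: the remark quoted in the deleted #if 0 block refers to the x86 STI "interrupt shadow": interrupts become deliverable only after the instruction following sti has executed, so the nop opens exactly a one-instruction window in which pending hardirqs and IPIs can land before cli closes it again. A minimal restatement of that deleted snippet (the function name is illustrative, not kernel API; the "memory" clobber stands in for the original barrier() pair):

static inline void flush_pending_irq_window(void)
{
        /*
         * Shortest possible interrupt window on x86: pending vectors are
         * delivered on the nop, because sti takes effect only after the
         * next instruction completes.
         */
        asm volatile("sti; nop; cli" ::: "memory");
}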

arch/x86/kernel/irq_64.c (0 additions, 58 deletions)

@@ -62,64 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
         return true;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
-void fixup_irqs(void)
-{
-        unsigned int irq;
-        static int warned;
-        struct irq_desc *desc;
-
-        for_each_irq_desc(irq, desc) {
-                int break_affinity = 0;
-                int set_affinity = 1;
-                const struct cpumask *affinity;
-
-                if (!desc)
-                        continue;
-                if (irq == 2)
-                        continue;
-
-                /* interrupts are disabled at this point */
-                spin_lock(&desc->lock);
-
-                affinity = desc->affinity;
-                if (!irq_has_action(irq) ||
-                    cpumask_equal(affinity, cpu_online_mask)) {
-                        spin_unlock(&desc->lock);
-                        continue;
-                }
-
-                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-                        break_affinity = 1;
-                        affinity = cpu_all_mask;
-                }
-
-                if (desc->chip->mask)
-                        desc->chip->mask(irq);
-
-                if (desc->chip->set_affinity)
-                        desc->chip->set_affinity(irq, affinity);
-                else if (!(warned++))
-                        set_affinity = 0;
-
-                if (desc->chip->unmask)
-                        desc->chip->unmask(irq);
-
-                spin_unlock(&desc->lock);
-
-                if (break_affinity && set_affinity)
-                        printk("Broke affinity for irq %i\n", irq);
-                else if (!set_affinity)
-                        printk("Cannot set affinity for irq %i\n", irq);
-        }
-
-        /* That doesn't seem sufficient. Give it 1ms. */
-        local_irq_enable();
-        mdelay(1);
-        local_irq_disable();
-}
-#endif
 
 extern void call_softirq(void);
 
