x86: Unify fixup_irqs() for 32-bit and 64-bit kernels
There is no reason to have different fixup_irqs() for 32-bit and
64-bit kernels. Unify by using the superior 64-bit version for both
the kernels.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Gary Hade <garyhade@us.ibm.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
LKML-Reference: <20091026230001.562512739@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6f9b41006a
commit 7a7732bc0f

3 changed files with 59 additions and 103 deletions
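For context: the per-IRQ affinity masks that fixup_irqs() rewrites are the ones visible from user space under /proc/irq/<N>/smp_affinity. A minimal user-space sketch that reads one of them, assuming a Linux system where IRQ 19 is populated (the IRQ number is illustrative):

/* Minimal sketch: print an IRQ's CPU-affinity bitmask from procfs.
 * IRQ 19 is illustrative; any populated entry under /proc/irq/ works.
 * This is the mask that fixup_irqs() rewrites when every CPU in it
 * has gone offline. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/irq/19/smp_affinity", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("irq 19 smp_affinity: %s", buf);
	fclose(f);
	return 0;
}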
arch/x86/kernel/irq.c
@@ -276,3 +276,62 @@ void smp_generic_interrupt(struct pt_regs *regs)
 }
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
+void fixup_irqs(void)
+{
+	unsigned int irq;
+	static int warned;
+	struct irq_desc *desc;
+
+	for_each_irq_desc(irq, desc) {
+		int break_affinity = 0;
+		int set_affinity = 1;
+		const struct cpumask *affinity;
+
+		if (!desc)
+			continue;
+		if (irq == 2)
+			continue;
+
+		/* interrupt's are disabled at this point */
+		spin_lock(&desc->lock);
+
+		affinity = desc->affinity;
+		if (!irq_has_action(irq) ||
+		    cpumask_equal(affinity, cpu_online_mask)) {
+			spin_unlock(&desc->lock);
+			continue;
+		}
+
+		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+			break_affinity = 1;
+			affinity = cpu_all_mask;
+		}
+
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
+
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, affinity);
+		else if (!(warned++))
+			set_affinity = 0;
+
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
+
+		spin_unlock(&desc->lock);
+
+		if (break_affinity && set_affinity)
+			printk("Broke affinity for irq %i\n", irq);
+		else if (!set_affinity)
+			printk("Cannot set affinity for irq %i\n", irq);
+	}
+
+	/* That doesn't seem sufficient. Give it 1ms. */
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+}
+#endif
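The pivotal test above is cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids, which holds exactly when no CPU in the IRQ's affinity mask is still online; only then is the affinity "broken" and widened to cpu_all_mask. A user-space sketch of the same decision, with plain unsigned long bitmasks standing in for struct cpumask (the mask values are made up):

/* Sketch of the "break affinity" test with plain bitmasks standing in
 * for struct cpumask: if no bit of the IRQ's affinity mask is also set
 * in the online mask, fall back to "all CPUs", as fixup_irqs() above
 * does with cpu_all_mask. */
#include <stdio.h>

int main(void)
{
	unsigned long affinity = 0x0cUL;	/* IRQ allowed on CPUs 2-3 */
	unsigned long online   = 0x03UL;	/* only CPUs 0-1 remain online */

	/* cpumask_any_and() >= nr_cpu_ids <=> the intersection is empty */
	if ((affinity & online) == 0) {
		printf("Breaking affinity %#lx -> all CPUs\n", affinity);
		affinity = ~0UL;		/* stand-in for cpu_all_mask */
	}
	printf("final affinity: %#lx\n", affinity);
	return 0;
}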
arch/x86/kernel/irq_32.c
@@ -211,48 +211,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 
 	return true;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
-void fixup_irqs(void)
-{
-	unsigned int irq;
-	struct irq_desc *desc;
-
-	for_each_irq_desc(irq, desc) {
-		const struct cpumask *affinity;
-
-		if (!desc)
-			continue;
-		if (irq == 2)
-			continue;
-
-		affinity = desc->affinity;
-		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-			printk("Breaking affinity for irq %i\n", irq);
-			affinity = cpu_all_mask;
-		}
-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, affinity);
-		else if (desc->action)
-			printk_once("Cannot set affinity for irq %i\n", irq);
-	}
-
-#if 0
-	barrier();
-	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
-	   [note the nop - the interrupt-enable boundary on x86 is two
-	   instructions from sti] - to flush out pending hardirqs and
-	   IPIs. After this point nothing is supposed to reach this CPU." */
-	__asm__ __volatile__("sti; nop; cli");
-	barrier();
-#else
-	/* That doesn't seem sufficient. Give it 1ms. */
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-#endif
-}
-#endif
-
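One behavioral difference worth noting: the removed 32-bit version rate-limited its failure message with printk_once(), while the unified version gets the same once-only warning from a static counter, because !(warned++) is true only on the first call. A small user-space sketch of that idiom:

/* Once-only warning via a static counter, mirroring the unified
 * fixup_irqs(): !(warned++) is true the first time only. */
#include <stdio.h>

static void warn_once(int irq)
{
	static int warned;

	if (!(warned++))
		printf("Cannot set affinity for irq %i\n", irq);
}

int main(void)
{
	warn_once(4);
	warn_once(5);	/* silent: warned is already non-zero */
	return 0;
}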
arch/x86/kernel/irq_64.c
@@ -62,64 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 
 	return true;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
-void fixup_irqs(void)
-{
-	unsigned int irq;
-	static int warned;
-	struct irq_desc *desc;
-
-	for_each_irq_desc(irq, desc) {
-		int break_affinity = 0;
-		int set_affinity = 1;
-		const struct cpumask *affinity;
-
-		if (!desc)
-			continue;
-		if (irq == 2)
-			continue;
-
-		/* interrupt's are disabled at this point */
-		spin_lock(&desc->lock);
-
-		affinity = desc->affinity;
-		if (!irq_has_action(irq) ||
-		    cpumask_equal(affinity, cpu_online_mask)) {
-			spin_unlock(&desc->lock);
-			continue;
-		}
-
-		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-			break_affinity = 1;
-			affinity = cpu_all_mask;
-		}
-
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
-
-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, affinity);
-		else if (!(warned++))
-			set_affinity = 0;
-
-		if (desc->chip->unmask)
-			desc->chip->unmask(irq);
-
-		spin_unlock(&desc->lock);
-
-		if (break_affinity && set_affinity)
-			printk("Broke affinity for irq %i\n", irq);
-		else if (!set_affinity)
-			printk("Cannot set affinity for irq %i\n", irq);
-	}
-
-	/* That doesn't seem sufficient. Give it 1ms. */
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-}
-#endif
 
 extern void call_softirq(void);
 
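On x86 the CPU-offline path invokes fixup_irqs(), so the unified code above can be exercised from user space by offlining a CPU through sysfs. A sketch, assuming root privileges and that cpu1 exists and supports hotplug (both are assumptions about the running system):

/* Exercising the hotplug path: offlining a CPU makes the kernel
 * migrate IRQs away from it via fixup_irqs(). Requires root; cpu1
 * is illustrative and must be hotpluggable on this system. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/online", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("0", f);	/* the kernel runs fixup_irqs() on this transition */
	fclose(f);
	return 0;
}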