Mirror of https://github.com/adulau/aha.git, synced 2024-12-27 19:26:25 +00:00
generic: sparse irqs: use irq_desc() together with dyn_array, instead of irq_desc[]
Add CONFIG_HAVE_SPARSE_IRQ so a condensed array can be used, and get rid of irq_desc[] array assumptions.

Preallocate 32 irq_desc entries; irq_desc() will try to get more when they run out.

( No change in functionality is expected anywhere, except the odd build failure where we missed a code site or where a crossing commit introduces new irq_desc[] usage. )

v2: according to Eric, change get_irq_desc() to irq_desc()

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: bfea1238be · Commit: 08678b0841
28 changed files with 405 additions and 188 deletions
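The conversion applied throughout the diff below follows one mechanical pattern; a condensed before/after sketch (the field and flag are taken from the patch purely for illustration):

	/* before: index the static irq_desc[] array directly */
	irq_desc[irq].status |= IRQ_LEVEL;

	/* after: look the descriptor up once, then work through the pointer */
	struct irq_desc *desc = irq_to_desc(irq);

	desc->status |= IRQ_LEVEL;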
@@ -105,3 +105,7 @@ config HAVE_CLK

config HAVE_DYN_ARRAY
	def_bool n

config HAVE_SPARSE_IRQ
	def_bool n

@@ -34,6 +34,7 @@ config X86
	select HAVE_GENERIC_DMA_COHERENT if X86_32
	select HAVE_EFFICIENT_UNALIGNED_ACCESS
	select HAVE_DYN_ARRAY
	select HAVE_SPARSE_IRQ if X86_64

config ARCH_DEFCONFIG
	string

@@ -345,6 +345,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
	struct irq_pin_list *entry = irq_2_pin + irq;
	unsigned int apicid_value;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, cpumask, cpu_online_map);
	if (cpus_empty(tmp))
@@ -365,7 +366,8 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
			break;
		entry = irq_2_pin + entry->next;
	}
	irq_desc[irq].affinity = cpumask;
	desc = irq_to_desc(irq);
	desc->affinity = cpumask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

@ -475,10 +477,12 @@ static inline void balance_irq(int cpu, int irq)
|
|||
static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
|
||||
{
|
||||
int i, j;
|
||||
struct irq_desc *desc;
|
||||
|
||||
for_each_online_cpu(i) {
|
||||
for (j = 0; j < nr_irqs; j++) {
|
||||
if (!irq_desc[j].action)
|
||||
desc = irq_to_desc(j);
|
||||
if (!desc->action)
|
||||
continue;
|
||||
/* Is it a significant load ? */
|
||||
if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
|
||||
|
@ -505,6 +509,7 @@ static void do_irq_balance(void)
|
|||
unsigned long tmp_cpu_irq;
|
||||
unsigned long imbalance = 0;
|
||||
cpumask_t allowed_mask, target_cpu_mask, tmp;
|
||||
struct irq_desc *desc;
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
int package_index;
|
||||
|
@ -515,7 +520,8 @@ static void do_irq_balance(void)
|
|||
for (j = 0; j < nr_irqs; j++) {
|
||||
unsigned long value_now, delta;
|
||||
/* Is this an active IRQ or balancing disabled ? */
|
||||
if (!irq_desc[j].action || irq_balancing_disabled(j))
|
||||
desc = irq_to_desc(j);
|
||||
if (!desc->action || irq_balancing_disabled(j))
|
||||
continue;
|
||||
if (package_index == i)
|
||||
IRQ_DELTA(package_index, j) = 0;
|
||||
|
@ -609,7 +615,8 @@ tryanotherirq:
|
|||
selected_irq = -1;
|
||||
for (j = 0; j < nr_irqs; j++) {
|
||||
/* Is this an active IRQ? */
|
||||
if (!irq_desc[j].action)
|
||||
desc = irq_to_desc(j);
|
||||
if (!desc->action)
|
||||
continue;
|
||||
if (imbalance <= IRQ_DELTA(max_loaded, j))
|
||||
continue;
|
||||
|
@ -682,10 +689,12 @@ static int balanced_irq(void *unused)
|
|||
int i;
|
||||
unsigned long prev_balance_time = jiffies;
|
||||
long time_remaining = balanced_irq_interval;
|
||||
struct irq_desc *desc;
|
||||
|
||||
/* push everything to CPU 0 to give us a starting point. */
|
||||
for (i = 0 ; i < nr_irqs ; i++) {
|
||||
irq_desc[i].pending_mask = cpumask_of_cpu(0);
|
||||
desc = irq_to_desc(i);
|
||||
desc->pending_mask = cpumask_of_cpu(0);
|
||||
set_pending_irq(i, cpumask_of_cpu(0));
|
||||
}
|
||||
|
||||
|
@ -1254,13 +1263,16 @@ static struct irq_chip ioapic_chip;
|
|||
|
||||
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
|
||||
trigger == IOAPIC_LEVEL) {
|
||||
irq_desc[irq].status |= IRQ_LEVEL;
|
||||
desc->status |= IRQ_LEVEL;
|
||||
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
||||
handle_fasteoi_irq, "fasteoi");
|
||||
} else {
|
||||
irq_desc[irq].status &= ~IRQ_LEVEL;
|
||||
desc->status &= ~IRQ_LEVEL;
|
||||
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
||||
handle_edge_irq, "edge");
|
||||
}
|
||||
|
@ -2027,6 +2039,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
|
|||
static inline void init_IO_APIC_traps(void)
|
||||
{
|
||||
int irq;
|
||||
struct irq_desc *desc;
|
||||
|
||||
/*
|
||||
* NOTE! The local APIC isn't very good at handling
|
||||
|
@ -2048,9 +2061,11 @@ static inline void init_IO_APIC_traps(void)
|
|||
*/
|
||||
if (irq < 16)
|
||||
make_8259A_irq(irq);
|
||||
else
|
||||
else {
|
||||
desc = irq_to_desc(irq);
|
||||
/* Strange. Oh, well.. */
|
||||
irq_desc[irq].chip = &no_irq_chip;
|
||||
desc->chip = &no_irq_chip;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2089,7 +2104,10 @@ static struct irq_chip lapic_chip __read_mostly = {
|
|||
|
||||
static void lapic_register_intr(int irq, int vector)
|
||||
{
|
||||
irq_desc[irq].status &= ~IRQ_LEVEL;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
desc->status &= ~IRQ_LEVEL;
|
||||
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
|
||||
"edge");
|
||||
set_intr_gate(vector, interrupt[irq]);
|
||||
|
@ -2556,6 +2574,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
unsigned int dest;
|
||||
cpumask_t tmp;
|
||||
int vector;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -2575,7 +2594,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
|
||||
|
||||
write_msi_msg(irq, &msg);
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
@ -2649,6 +2669,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
{
|
||||
unsigned int dest;
|
||||
cpumask_t tmp;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -2659,7 +2680,8 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
dest = cpu_mask_to_apicid(mask);
|
||||
|
||||
target_ht_irq(irq, dest);
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -345,6 +345,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
|
|||
unsigned long flags;
|
||||
unsigned int dest;
|
||||
cpumask_t tmp;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -361,9 +362,10 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
|
|||
*/
|
||||
dest = SET_APIC_LOGICAL_ID(dest);
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&ioapic_lock, flags);
|
||||
__target_IO_APIC_irq(irq, dest, cfg->vector);
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc->affinity = mask;
|
||||
spin_unlock_irqrestore(&ioapic_lock, flags);
|
||||
}
|
||||
#endif
|
||||
|
@ -933,14 +935,17 @@ static struct irq_chip ir_ioapic_chip;
|
|||
|
||||
static void ioapic_register_intr(int irq, unsigned long trigger)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
if (trigger)
|
||||
irq_desc[irq].status |= IRQ_LEVEL;
|
||||
desc->status |= IRQ_LEVEL;
|
||||
else
|
||||
irq_desc[irq].status &= ~IRQ_LEVEL;
|
||||
desc->status &= ~IRQ_LEVEL;
|
||||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
if (irq_remapped(irq)) {
|
||||
irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
|
||||
desc->status |= IRQ_MOVE_PCNTXT;
|
||||
if (trigger)
|
||||
set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
|
||||
handle_fasteoi_irq,
|
||||
|
@ -1596,10 +1601,10 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
|
|||
static void migrate_ioapic_irq(int irq, cpumask_t mask)
|
||||
{
|
||||
struct irq_cfg *cfg = irq_cfg + irq;
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc;
|
||||
cpumask_t tmp, cleanup_mask;
|
||||
struct irte irte;
|
||||
int modify_ioapic_rte = desc->status & IRQ_LEVEL;
|
||||
int modify_ioapic_rte;
|
||||
unsigned int dest;
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -1616,6 +1621,8 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
|
|||
cpus_and(tmp, cfg->domain, mask);
|
||||
dest = cpu_mask_to_apicid(tmp);
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
modify_ioapic_rte = desc->status & IRQ_LEVEL;
|
||||
if (modify_ioapic_rte) {
|
||||
spin_lock_irqsave(&ioapic_lock, flags);
|
||||
__target_IO_APIC_irq(irq, dest, cfg->vector);
|
||||
|
@ -1637,12 +1644,13 @@ static void migrate_ioapic_irq(int irq, cpumask_t mask)
|
|||
cfg->move_in_progress = 0;
|
||||
}
|
||||
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc->affinity = mask;
|
||||
}
|
||||
|
||||
static int migrate_irq_remapped_level(int irq)
|
||||
{
|
||||
int ret = -1;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
mask_IO_APIC_irq(irq);
|
||||
|
||||
|
@ -1658,11 +1666,11 @@ static int migrate_irq_remapped_level(int irq)
|
|||
}
|
||||
|
||||
/* everthing is clear. we have right of way */
|
||||
migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);
|
||||
migrate_ioapic_irq(irq, desc->pending_mask);
|
||||
|
||||
ret = 0;
|
||||
irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
|
||||
cpus_clear(irq_desc[irq].pending_mask);
|
||||
desc->status &= ~IRQ_MOVE_PENDING;
|
||||
cpus_clear(desc->pending_mask);
|
||||
|
||||
unmask:
|
||||
unmask_IO_APIC_irq(irq);
|
||||
|
@ -1674,7 +1682,7 @@ static void ir_irq_migration(struct work_struct *work)
|
|||
int irq;
|
||||
|
||||
for (irq = 0; irq < nr_irqs; irq++) {
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
if (desc->status & IRQ_MOVE_PENDING) {
|
||||
unsigned long flags;
|
||||
|
||||
|
@ -1686,8 +1694,7 @@ static void ir_irq_migration(struct work_struct *work)
|
|||
continue;
|
||||
}
|
||||
|
||||
desc->chip->set_affinity(irq,
|
||||
irq_desc[irq].pending_mask);
|
||||
desc->chip->set_affinity(irq, desc->pending_mask);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
}
|
||||
|
@ -1698,9 +1705,11 @@ static void ir_irq_migration(struct work_struct *work)
|
|||
*/
|
||||
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
|
||||
{
|
||||
if (irq_desc[irq].status & IRQ_LEVEL) {
|
||||
irq_desc[irq].status |= IRQ_MOVE_PENDING;
|
||||
irq_desc[irq].pending_mask = mask;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (desc->status & IRQ_LEVEL) {
|
||||
desc->status |= IRQ_MOVE_PENDING;
|
||||
desc->pending_mask = mask;
|
||||
migrate_irq_remapped_level(irq);
|
||||
return;
|
||||
}
|
||||
|
@ -1725,7 +1734,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
|
|||
if (irq >= nr_irqs)
|
||||
continue;
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
cfg = irq_cfg + irq;
|
||||
spin_lock(&desc->lock);
|
||||
if (!cfg->move_cleanup_count)
|
||||
|
@ -1791,7 +1800,7 @@ static void ack_apic_level(unsigned int irq)
|
|||
irq_complete_move(irq);
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
/* If we are moving the irq we need to mask it */
|
||||
if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
|
||||
if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
|
||||
do_unmask_irq = 1;
|
||||
mask_IO_APIC_irq(irq);
|
||||
}
|
||||
|
@ -1868,6 +1877,7 @@ static struct irq_chip ir_ioapic_chip __read_mostly = {
|
|||
static inline void init_IO_APIC_traps(void)
|
||||
{
|
||||
int irq;
|
||||
struct irq_desc *desc;
|
||||
|
||||
/*
|
||||
* NOTE! The local APIC isn't very good at handling
|
||||
|
@ -1889,9 +1899,11 @@ static inline void init_IO_APIC_traps(void)
|
|||
*/
|
||||
if (irq < 16)
|
||||
make_8259A_irq(irq);
|
||||
else
|
||||
else {
|
||||
desc = irq_to_desc(irq);
|
||||
/* Strange. Oh, well.. */
|
||||
irq_desc[irq].chip = &no_irq_chip;
|
||||
desc->chip = &no_irq_chip;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1926,7 +1938,10 @@ static struct irq_chip lapic_chip __read_mostly = {
|
|||
|
||||
static void lapic_register_intr(int irq)
|
||||
{
|
||||
irq_desc[irq].status &= ~IRQ_LEVEL;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
desc->status &= ~IRQ_LEVEL;
|
||||
set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
|
||||
"edge");
|
||||
}
|
||||
|
@ -2402,6 +2417,7 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
struct msi_msg msg;
|
||||
unsigned int dest;
|
||||
cpumask_t tmp;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -2421,7 +2437,8 @@ static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
|
||||
|
||||
write_msi_msg(irq, &msg);
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
|
@ -2435,6 +2452,7 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
unsigned int dest;
|
||||
cpumask_t tmp, cleanup_mask;
|
||||
struct irte irte;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -2469,7 +2487,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
cfg->move_in_progress = 0;
|
||||
}
|
||||
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -2543,7 +2562,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
|
|||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
if (irq_remapped(irq)) {
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
/*
|
||||
* irq migration in process context
|
||||
*/
|
||||
|
@ -2655,6 +2674,7 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
|
|||
struct msi_msg msg;
|
||||
unsigned int dest;
|
||||
cpumask_t tmp;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -2674,7 +2694,8 @@ static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
|
|||
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
|
||||
|
||||
dmar_msi_write(irq, &msg);
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
@ -2731,6 +2752,7 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
struct irq_cfg *cfg = irq_cfg + irq;
|
||||
unsigned int dest;
|
||||
cpumask_t tmp;
|
||||
struct irq_desc *desc;
|
||||
|
||||
cpus_and(tmp, mask, cpu_online_map);
|
||||
if (cpus_empty(tmp))
|
||||
|
@ -2743,7 +2765,8 @@ static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
|
|||
dest = cpu_mask_to_apicid(tmp);
|
||||
|
||||
target_ht_irq(irq, dest, cfg->vector);
|
||||
irq_desc[irq].affinity = mask;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -224,7 +224,7 @@ unsigned int do_IRQ(struct pt_regs *regs)
|
|||
struct pt_regs *old_regs;
|
||||
/* high bit used in ret_from_ code */
|
||||
int overflow, irq = ~regs->orig_ax;
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (unlikely((unsigned)irq >= nr_irqs)) {
|
||||
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
|
||||
|
@ -273,15 +273,16 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
|
||||
if (i < nr_irqs) {
|
||||
unsigned any_count = 0;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
|
||||
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
#ifndef CONFIG_SMP
|
||||
any_count = kstat_irqs(i);
|
||||
#else
|
||||
for_each_online_cpu(j)
|
||||
any_count |= kstat_cpu(j).irqs[i];
|
||||
#endif
|
||||
action = irq_desc[i].action;
|
||||
action = desc->action;
|
||||
if (!action && !any_count)
|
||||
goto skip;
|
||||
seq_printf(p, "%3d: ",i);
|
||||
|
@ -291,8 +292,8 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
|
||||
#endif
|
||||
seq_printf(p, " %8s", irq_desc[i].chip->name);
|
||||
seq_printf(p, "-%-8s", irq_desc[i].name);
|
||||
seq_printf(p, " %8s", desc->chip->name);
|
||||
seq_printf(p, "-%-8s", desc->name);
|
||||
|
||||
if (action) {
|
||||
seq_printf(p, " %s", action->name);
|
||||
|
@ -302,7 +303,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
|
||||
seq_putc(p, '\n');
|
||||
skip:
|
||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
} else if (i == nr_irqs) {
|
||||
seq_printf(p, "NMI: ");
|
||||
for_each_online_cpu(j)
|
||||
|
@ -398,17 +399,20 @@ void fixup_irqs(cpumask_t map)
|
|||
|
||||
for (irq = 0; irq < nr_irqs; irq++) {
|
||||
cpumask_t mask;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (irq == 2)
|
||||
continue;
|
||||
|
||||
cpus_and(mask, irq_desc[irq].affinity, map);
|
||||
desc = irq_to_desc(irq);
|
||||
cpus_and(mask, desc->affinity, map);
|
||||
if (any_online_cpu(mask) == NR_CPUS) {
|
||||
printk("Breaking affinity for irq %i\n", irq);
|
||||
mask = map;
|
||||
}
|
||||
if (irq_desc[irq].chip->set_affinity)
|
||||
irq_desc[irq].chip->set_affinity(irq, mask);
|
||||
else if (irq_desc[irq].action && !(warned++))
|
||||
if (desc->chip->set_affinity)
|
||||
desc->chip->set_affinity(irq, mask);
|
||||
else if (desc->action && !(warned++))
|
||||
printk("Cannot set affinity for irq %i\n", irq);
|
||||
}
|
||||
|
||||
|
|
|
@ -83,15 +83,16 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
|
||||
if (i < nr_irqs) {
|
||||
unsigned any_count = 0;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
|
||||
spin_lock_irqsave(&irq_desc[i].lock, flags);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
#ifndef CONFIG_SMP
|
||||
any_count = kstat_irqs(i);
|
||||
#else
|
||||
for_each_online_cpu(j)
|
||||
any_count |= kstat_cpu(j).irqs[i];
|
||||
#endif
|
||||
action = irq_desc[i].action;
|
||||
action = desc->action;
|
||||
if (!action && !any_count)
|
||||
goto skip;
|
||||
seq_printf(p, "%3d: ",i);
|
||||
|
@ -101,8 +102,8 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
|
||||
#endif
|
||||
seq_printf(p, " %8s", irq_desc[i].chip->name);
|
||||
seq_printf(p, "-%-8s", irq_desc[i].name);
|
||||
seq_printf(p, " %8s", desc->chip->name);
|
||||
seq_printf(p, "-%-8s", desc->name);
|
||||
|
||||
if (action) {
|
||||
seq_printf(p, " %s", action->name);
|
||||
|
@ -111,7 +112,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
}
|
||||
seq_putc(p, '\n');
|
||||
skip:
|
||||
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
} else if (i == nr_irqs) {
|
||||
seq_printf(p, "NMI: ");
|
||||
for_each_online_cpu(j)
|
||||
|
@ -228,37 +229,39 @@ void fixup_irqs(cpumask_t map)
|
|||
cpumask_t mask;
|
||||
int break_affinity = 0;
|
||||
int set_affinity = 1;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (irq == 2)
|
||||
continue;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
/* interrupt's are disabled at this point */
|
||||
spin_lock(&irq_desc[irq].lock);
|
||||
spin_lock(&desc->lock);
|
||||
|
||||
if (!irq_has_action(irq) ||
|
||||
cpus_equal(irq_desc[irq].affinity, map)) {
|
||||
spin_unlock(&irq_desc[irq].lock);
|
||||
cpus_equal(desc->affinity, map)) {
|
||||
spin_unlock(&desc->lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
cpus_and(mask, irq_desc[irq].affinity, map);
|
||||
cpus_and(mask, desc->affinity, map);
|
||||
if (cpus_empty(mask)) {
|
||||
break_affinity = 1;
|
||||
mask = map;
|
||||
}
|
||||
|
||||
if (irq_desc[irq].chip->mask)
|
||||
irq_desc[irq].chip->mask(irq);
|
||||
if (desc->chip->mask)
|
||||
desc->chip->mask(irq);
|
||||
|
||||
if (irq_desc[irq].chip->set_affinity)
|
||||
irq_desc[irq].chip->set_affinity(irq, mask);
|
||||
if (desc->chip->set_affinity)
|
||||
desc->chip->set_affinity(irq, mask);
|
||||
else if (!(warned++))
|
||||
set_affinity = 0;
|
||||
|
||||
if (irq_desc[irq].chip->unmask)
|
||||
irq_desc[irq].chip->unmask(irq);
|
||||
if (desc->chip->unmask)
|
||||
desc->chip->unmask(irq);
|
||||
|
||||
spin_unlock(&irq_desc[irq].lock);
|
||||
spin_unlock(&desc->lock);
|
||||
|
||||
if (break_affinity && set_affinity)
|
||||
printk("Broke affinity for irq %i\n", irq);
|
||||
|
|
|
@ -143,9 +143,11 @@ void __init init_ISA_irqs(void)
|
|||
init_8259A(0);
|
||||
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
irq_desc[i].status = IRQ_DISABLED;
|
||||
irq_desc[i].action = NULL;
|
||||
irq_desc[i].depth = 1;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
|
||||
desc->status = IRQ_DISABLED;
|
||||
desc->action = NULL;
|
||||
desc->depth = 1;
|
||||
|
||||
if (i < 16) {
|
||||
/*
|
||||
|
@ -157,7 +159,7 @@ void __init init_ISA_irqs(void)
|
|||
/*
|
||||
* 'high' PCI IRQs filled in on demand
|
||||
*/
|
||||
irq_desc[i].chip = &no_irq_chip;
|
||||
desc->chip = &no_irq_chip;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -484,10 +484,11 @@ static void disable_cobalt_irq(unsigned int irq)
|
|||
static unsigned int startup_cobalt_irq(unsigned int irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
spin_lock_irqsave(&cobalt_lock, flags);
|
||||
if ((irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
|
||||
irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
|
||||
if ((desc->status & (IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING)))
|
||||
desc->status &= ~(IRQ_DISABLED | IRQ_INPROGRESS | IRQ_WAITING);
|
||||
enable_cobalt_irq(irq);
|
||||
spin_unlock_irqrestore(&cobalt_lock, flags);
|
||||
return 0;
|
||||
|
@ -506,9 +507,10 @@ static void ack_cobalt_irq(unsigned int irq)
|
|||
static void end_cobalt_irq(unsigned int irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
spin_lock_irqsave(&cobalt_lock, flags);
|
||||
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
|
||||
if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
|
||||
enable_cobalt_irq(irq);
|
||||
spin_unlock_irqrestore(&cobalt_lock, flags);
|
||||
}
|
||||
|
@ -626,7 +628,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
|
|||
|
||||
spin_unlock_irqrestore(&i8259A_lock, flags);
|
||||
|
||||
desc = irq_desc + realirq;
|
||||
desc = irq_to_desc(realirq);
|
||||
|
||||
/*
|
||||
* handle this 'virtual interrupt' as a Cobalt one now.
|
||||
|
@ -662,27 +664,29 @@ void init_VISWS_APIC_irqs(void)
|
|||
int i;
|
||||
|
||||
for (i = 0; i < CO_IRQ_APIC0 + CO_APIC_LAST + 1; i++) {
|
||||
irq_desc[i].status = IRQ_DISABLED;
|
||||
irq_desc[i].action = 0;
|
||||
irq_desc[i].depth = 1;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
|
||||
desc->status = IRQ_DISABLED;
|
||||
desc->action = 0;
|
||||
desc->depth = 1;
|
||||
|
||||
if (i == 0) {
|
||||
irq_desc[i].chip = &cobalt_irq_type;
|
||||
desc->chip = &cobalt_irq_type;
|
||||
}
|
||||
else if (i == CO_IRQ_IDE0) {
|
||||
irq_desc[i].chip = &cobalt_irq_type;
|
||||
desc->chip = &cobalt_irq_type;
|
||||
}
|
||||
else if (i == CO_IRQ_IDE1) {
|
||||
irq_desc[i].chip = &cobalt_irq_type;
|
||||
desc->chip = &cobalt_irq_type;
|
||||
}
|
||||
else if (i == CO_IRQ_8259) {
|
||||
irq_desc[i].chip = &piix4_master_irq_type;
|
||||
desc->chip = &piix4_master_irq_type;
|
||||
}
|
||||
else if (i < CO_IRQ_APIC0) {
|
||||
irq_desc[i].chip = &piix4_virtual_irq_type;
|
||||
desc->chip = &piix4_virtual_irq_type;
|
||||
}
|
||||
else if (IS_CO_APIC(i)) {
|
||||
irq_desc[i].chip = &cobalt_irq_type;
|
||||
desc->chip = &cobalt_irq_type;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1483,7 +1483,7 @@ static void disable_local_vic_irq(unsigned int irq)
|
|||
* the interrupt off to another CPU */
|
||||
static void before_handle_vic_irq(unsigned int irq)
|
||||
{
|
||||
irq_desc_t *desc = irq_desc + irq;
|
||||
irq_desc_t *desc = irq_to_desc(irq);
|
||||
__u8 cpu = smp_processor_id();
|
||||
|
||||
_raw_spin_lock(&vic_irq_lock);
|
||||
|
@ -1518,7 +1518,7 @@ static void before_handle_vic_irq(unsigned int irq)
|
|||
/* Finish the VIC interrupt: basically mask */
|
||||
static void after_handle_vic_irq(unsigned int irq)
|
||||
{
|
||||
irq_desc_t *desc = irq_desc + irq;
|
||||
irq_desc_t *desc = irq_to_desc(irq);
|
||||
|
||||
_raw_spin_lock(&vic_irq_lock);
|
||||
{
|
||||
|
|
|
@ -1058,7 +1058,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
|
|||
|
||||
if (!is_out) {
|
||||
int irq = gpio_to_irq(gpio);
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
/* This races with request_irq(), set_irq_type(),
|
||||
* and set_irq_wake() ... but those are "rare".
|
||||
|
|
|
@ -123,7 +123,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
|
|||
irqnr = asic->irq_base +
|
||||
(ASIC3_GPIOS_PER_BANK * bank)
|
||||
+ i;
|
||||
desc = irq_desc + irqnr;
|
||||
desc = irq_to_desc(irqnr);
|
||||
desc->handle_irq(irqnr, desc);
|
||||
if (asic->irq_bothedge[bank] & bit)
|
||||
asic3_irq_flip_edge(asic, base,
|
||||
|
@ -136,7 +136,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
|
|||
for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
|
||||
/* They start at bit 4 and go up */
|
||||
if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
|
||||
desc = irq_desc + asic->irq_base + i;
|
||||
desc = irq_to_desc(asic->irq_base + i);
|
||||
desc->handle_irq(asic->irq_base + i,
|
||||
desc);
|
||||
}
|
||||
|
|
|
@ -112,7 +112,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc)
|
|||
/* Run irq handler */
|
||||
pr_debug("got IRQ %d\n", irqpin);
|
||||
irq = ei->irq_start + irqpin;
|
||||
desc = &irq_desc[irq];
|
||||
desc = irq_to_desc(irq);
|
||||
desc->handle_irq(irq, desc);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -298,7 +298,8 @@ struct pci_port_ops dino_port_ops = {
|
|||
|
||||
static void dino_disable_irq(unsigned int irq)
|
||||
{
|
||||
struct dino_device *dino_dev = irq_desc[irq].chip_data;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct dino_device *dino_dev = desc->chip_data;
|
||||
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
|
||||
|
||||
DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, irq);
|
||||
|
@ -310,7 +311,8 @@ static void dino_disable_irq(unsigned int irq)
|
|||
|
||||
static void dino_enable_irq(unsigned int irq)
|
||||
{
|
||||
struct dino_device *dino_dev = irq_desc[irq].chip_data;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct dino_device *dino_dev = desc->chip_data;
|
||||
int local_irq = gsc_find_local_irq(irq, dino_dev->global_irq, DINO_LOCAL_IRQS);
|
||||
u32 tmp;
|
||||
|
||||
|
|
|
@ -346,10 +346,10 @@ static int __init eisa_probe(struct parisc_device *dev)
|
|||
}
|
||||
|
||||
/* Reserve IRQ2 */
|
||||
irq_desc[2].action = &irq2_action;
|
||||
irq_to_desc(2)->action = &irq2_action;
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
irq_desc[i].chip = &eisa_interrupt_type;
|
||||
irq_to_desc(i)->chip = &eisa_interrupt_type;
|
||||
}
|
||||
|
||||
EISA_bus = 1;
|
||||
|
|
|
@ -108,7 +108,8 @@ int gsc_find_local_irq(unsigned int irq, int *global_irqs, int limit)
|
|||
|
||||
static void gsc_asic_disable_irq(unsigned int irq)
|
||||
{
|
||||
struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct gsc_asic *irq_dev = desc->chip_data;
|
||||
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
|
||||
u32 imr;
|
||||
|
||||
|
@ -123,7 +124,8 @@ static void gsc_asic_disable_irq(unsigned int irq)
|
|||
|
||||
static void gsc_asic_enable_irq(unsigned int irq)
|
||||
{
|
||||
struct gsc_asic *irq_dev = irq_desc[irq].chip_data;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct gsc_asic *irq_dev = desc->chip_data;
|
||||
int local_irq = gsc_find_local_irq(irq, irq_dev->global_irq, 32);
|
||||
u32 imr;
|
||||
|
||||
|
@ -159,12 +161,14 @@ static struct hw_interrupt_type gsc_asic_interrupt_type = {
|
|||
int gsc_assign_irq(struct hw_interrupt_type *type, void *data)
|
||||
{
|
||||
static int irq = GSC_IRQ_BASE;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (irq > GSC_IRQ_MAX)
|
||||
return NO_IRQ;
|
||||
|
||||
irq_desc[irq].chip = type;
|
||||
irq_desc[irq].chip_data = data;
|
||||
desc = irq_to_desc(irq);
|
||||
desc->chip = type;
|
||||
desc->chip_data = data;
|
||||
return irq++;
|
||||
}
|
||||
|
||||
|
|
|
@ -619,7 +619,9 @@ iosapic_set_irt_data( struct vector_info *vi, u32 *dp0, u32 *dp1)
|
|||
|
||||
static struct vector_info *iosapic_get_vector(unsigned int irq)
|
||||
{
|
||||
return irq_desc[irq].chip_data;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
return desc->chip_data;
|
||||
}
|
||||
|
||||
static void iosapic_disable_irq(unsigned int irq)
|
||||
|
|
|
@ -363,7 +363,9 @@ int superio_fixup_irq(struct pci_dev *pcidev)
|
|||
#endif
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
irq_desc[i].chip = &superio_interrupt_type;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
|
||||
desc->chip = &superio_interrupt_type;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -233,15 +233,18 @@ static struct hw_interrupt_type hd64465_ss_irq_type = {
|
|||
*/
|
||||
static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
|
||||
DPRINTK("hs_map_irq(sock=%d irq=%d)\n", sp->number, irq);
|
||||
|
||||
if (irq >= HS_NUM_MAPPED_IRQS)
|
||||
return;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
hs_mapped_irq[irq].sock = sp;
|
||||
/* insert ourselves as the irq controller */
|
||||
hs_mapped_irq[irq].old_handler = irq_desc[irq].chip;
|
||||
irq_desc[irq].chip = &hd64465_ss_irq_type;
|
||||
hs_mapped_irq[irq].old_handler = desc->chip;
|
||||
desc->chip = &hd64465_ss_irq_type;
|
||||
}
|
||||
|
||||
|
||||
|
@ -250,13 +253,16 @@ static void hs_map_irq(hs_socket_t *sp, unsigned int irq)
|
|||
*/
|
||||
static void hs_unmap_irq(hs_socket_t *sp, unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc;
|
||||
|
||||
DPRINTK("hs_unmap_irq(sock=%d irq=%d)\n", sp->number, irq);
|
||||
|
||||
if (irq >= HS_NUM_MAPPED_IRQS)
|
||||
return;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
/* restore the original irq controller */
|
||||
irq_desc[irq].chip = hs_mapped_irq[irq].old_handler;
|
||||
desc->chip = hs_mapped_irq[irq].old_handler;
|
||||
}
|
||||
|
||||
/*============================================================*/
|
||||
|
|
|
@ -125,7 +125,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
|
|||
|
||||
BUG_ON(irq == -1);
|
||||
#ifdef CONFIG_SMP
|
||||
irq_desc[irq].affinity = cpumask_of_cpu(cpu);
|
||||
irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
|
||||
#endif
|
||||
|
||||
__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
|
||||
|
@ -139,8 +139,10 @@ static void init_evtchn_cpu_bindings(void)
|
|||
#ifdef CONFIG_SMP
|
||||
int i;
|
||||
/* By default all event channels notify CPU#0. */
|
||||
for (i = 0; i < nr_irqs; i++)
|
||||
irq_desc[i].affinity = cpumask_of_cpu(0);
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
desc->affinity = cpumask_of_cpu(0);
|
||||
}
|
||||
#endif
|
||||
|
||||
memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
|
||||
|
|
|
@ -152,6 +152,10 @@ struct irq_chip {
|
|||
* @name: flow handler name for /proc/interrupts output
|
||||
*/
|
||||
struct irq_desc {
|
||||
unsigned int irq;
|
||||
#ifdef CONFIG_HAVE_SPARSE_IRQ
|
||||
struct irq_desc *next;
|
||||
#endif
|
||||
irq_flow_handler_t handle_irq;
|
||||
struct irq_chip *chip;
|
||||
struct msi_desc *msi_desc;
|
||||
|
@ -179,9 +183,9 @@ struct irq_desc {
|
|||
const char *name;
|
||||
} ____cacheline_internodealigned_in_smp;
|
||||
|
||||
#ifdef CONFIG_HAVE_DYN_ARRAY
|
||||
extern struct irq_desc *irq_desc;
|
||||
#else
|
||||
extern struct irq_desc *irq_to_desc(unsigned int irq);
|
||||
#ifndef CONFIG_HAVE_DYN_ARRAY
|
||||
/* could be removed if we get rid of all irq_desc reference */
|
||||
extern struct irq_desc irq_desc[NR_IRQS];
|
||||
#endif
|
||||
|
||||
|
@ -249,7 +253,10 @@ extern int no_irq_affinity;
|
|||
|
||||
static inline int irq_balancing_disabled(unsigned int irq)
|
||||
{
|
||||
return irq_desc[irq].status & IRQ_NO_BALANCING_MASK;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
return desc->status & IRQ_NO_BALANCING_MASK;
|
||||
}
|
||||
|
||||
/* Handle irq action chains: */
|
||||
|
@ -281,7 +288,7 @@ extern unsigned int __do_IRQ(unsigned int irq);
|
|||
*/
|
||||
static inline void generic_handle_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
|
||||
desc->handle_irq(irq, desc);
|
||||
|
@ -325,7 +332,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
|
|||
static inline void __set_irq_handler_unlocked(int irq,
|
||||
irq_flow_handler_t handler)
|
||||
{
|
||||
irq_desc[irq].handle_irq = handler;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
desc->handle_irq = handler;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -359,7 +369,7 @@ extern void destroy_irq(unsigned int irq);
|
|||
/* Test to see if a driver has successfully requested an irq */
|
||||
static inline int irq_has_action(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
return desc->action != NULL;
|
||||
}
|
||||
|
||||
|
@ -374,10 +384,10 @@ extern int set_irq_chip_data(unsigned int irq, void *data);
|
|||
extern int set_irq_type(unsigned int irq, unsigned int type);
|
||||
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
|
||||
|
||||
#define get_irq_chip(irq) (irq_desc[irq].chip)
|
||||
#define get_irq_chip_data(irq) (irq_desc[irq].chip_data)
|
||||
#define get_irq_data(irq) (irq_desc[irq].handler_data)
|
||||
#define get_irq_msi(irq) (irq_desc[irq].msi_desc)
|
||||
#define get_irq_chip(irq) (irq_to_desc(irq)->chip)
|
||||
#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
|
||||
#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
|
||||
#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
|
||||
|
||||
#endif /* CONFIG_GENERIC_HARDIRQS */
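The accessor surface for drivers is unchanged by this conversion; only the macro bodies above now route through irq_to_desc(). A minimal illustrative sketch (hypothetical driver helpers, not taken from this patch):

	/* stash per-IRQ driver data; ends up in irq_to_desc(irq)->chip_data */
	static void example_save_priv(unsigned int irq, void *priv)
	{
		set_irq_chip_data(irq, priv);
	}

	/* read it back; get_irq_chip_data() now expands to irq_to_desc(irq)->chip_data */
	static void *example_get_priv(unsigned int irq)
	{
		return get_irq_chip_data(irq);
	}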
|
||||
|
||||
|
|
|
@ -39,7 +39,7 @@ unsigned long probe_irq_on(void)
|
|||
* flush such a longstanding irq before considering it as spurious.
|
||||
*/
|
||||
for (i = nr_irqs-1; i > 0; i--) {
|
||||
desc = irq_desc + i;
|
||||
desc = irq_to_desc(i);
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
|
||||
|
@ -69,7 +69,7 @@ unsigned long probe_irq_on(void)
|
|||
* happened in the previous stage, it may have masked itself)
|
||||
*/
|
||||
for (i = nr_irqs-1; i > 0; i--) {
|
||||
desc = irq_desc + i;
|
||||
desc = irq_to_desc(i);
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
|
||||
|
@ -92,7 +92,7 @@ unsigned long probe_irq_on(void)
|
|||
for (i = 0; i < nr_irqs; i++) {
|
||||
unsigned int status;
|
||||
|
||||
desc = irq_desc + i;
|
||||
desc = irq_to_desc(i);
|
||||
spin_lock_irq(&desc->lock);
|
||||
status = desc->status;
|
||||
|
||||
|
@ -131,7 +131,7 @@ unsigned int probe_irq_mask(unsigned long val)
|
|||
|
||||
mask = 0;
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
struct irq_desc *desc = irq_desc + i;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
unsigned int status;
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
|
@ -174,7 +174,7 @@ int probe_irq_off(unsigned long val)
|
|||
int i, irq_found = 0, nr_irqs = 0;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++) {
|
||||
struct irq_desc *desc = irq_desc + i;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
unsigned int status;
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
|
|
|
@ -33,7 +33,7 @@ void dynamic_irq_init(unsigned int irq)
|
|||
}
|
||||
|
||||
/* Ensure we don't have left over values from a previous use of this irq */
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status = IRQ_DISABLED;
|
||||
desc->chip = &no_irq_chip;
|
||||
|
@ -65,7 +65,7 @@ void dynamic_irq_cleanup(unsigned int irq)
|
|||
return;
|
||||
}
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
if (desc->action) {
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
@ -100,7 +100,7 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
|
|||
if (!chip)
|
||||
chip = &no_irq_chip;
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
irq_chip_set_defaults(chip);
|
||||
desc->chip = chip;
|
||||
|
@ -126,7 +126,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
if (type == IRQ_TYPE_NONE)
|
||||
return 0;
|
||||
|
||||
|
@ -155,7 +155,7 @@ int set_irq_data(unsigned int irq, void *data)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->handler_data = data;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
@ -180,7 +180,7 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
|
|||
"Trying to install msi data for IRQ%d\n", irq);
|
||||
return -EINVAL;
|
||||
}
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->msi_desc = entry;
|
||||
if (entry)
|
||||
|
@ -198,9 +198,10 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
|
|||
*/
|
||||
int set_irq_chip_data(unsigned int irq, void *data)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc;
|
||||
unsigned long flags;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
if (irq >= nr_irqs || !desc->chip) {
|
||||
printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
|
||||
return -EINVAL;
|
||||
|
@ -219,8 +220,9 @@ EXPORT_SYMBOL(set_irq_chip_data);
|
|||
*/
|
||||
static void default_enable(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
desc->chip->unmask(irq);
|
||||
desc->status &= ~IRQ_MASKED;
|
||||
}
|
||||
|
@ -237,7 +239,10 @@ static void default_disable(unsigned int irq)
|
|||
*/
|
||||
static unsigned int default_startup(unsigned int irq)
|
||||
{
|
||||
irq_desc[irq].chip->enable(irq);
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
desc->chip->enable(irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -247,8 +252,9 @@ static unsigned int default_startup(unsigned int irq)
|
|||
*/
|
||||
static void default_shutdown(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
desc->chip->mask(irq);
|
||||
desc->status |= IRQ_MASKED;
|
||||
}
|
||||
|
@ -551,7 +557,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
|
|||
return;
|
||||
}
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
|
||||
if (!handle)
|
||||
handle = handle_bad_irq;
|
||||
|
@ -616,7 +622,7 @@ void __init set_irq_noprobe(unsigned int irq)
|
|||
return;
|
||||
}
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status |= IRQ_NOPROBE;
|
||||
|
@ -634,7 +640,7 @@ void __init set_irq_probe(unsigned int irq)
|
|||
return;
|
||||
}
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status &= ~IRQ_NOPROBE;
|
||||
|
|
|
@ -18,6 +18,14 @@
|
|||
|
||||
#include "internals.h"
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
|
||||
/*
|
||||
* lockdep: we want to handle all irq_desc locks as a single lock-class:
|
||||
*/
|
||||
static struct lock_class_key irq_desc_lock_class;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* handle_bad_irq - handle spurious and unhandled irqs
|
||||
* @irq: the interrupt number
|
||||
|
@ -51,7 +59,8 @@ int nr_irqs = NR_IRQS;
|
|||
EXPORT_SYMBOL_GPL(nr_irqs);
|
||||
|
||||
#ifdef CONFIG_HAVE_DYN_ARRAY
|
||||
static struct irq_desc irq_desc_init __initdata = {
|
||||
static struct irq_desc irq_desc_init = {
|
||||
.irq = -1U,
|
||||
.status = IRQ_DISABLED,
|
||||
.chip = &no_irq_chip,
|
||||
.handle_irq = handle_bad_irq,
|
||||
|
@ -62,6 +71,27 @@ static struct irq_desc irq_desc_init __initdata = {
|
|||
#endif
|
||||
};
|
||||
|
||||
|
||||
static void init_one_irq_desc(struct irq_desc *desc)
|
||||
{
|
||||
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_SPARSE_IRQ
|
||||
static int nr_irq_desc = 32;
|
||||
|
||||
static int __init parse_nr_irq_desc(char *arg)
|
||||
{
|
||||
if (arg)
|
||||
nr_irq_desc = simple_strtoul(arg, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
early_param("nr_irq_desc", parse_nr_irq_desc);
|
||||
|
||||
static void __init init_work(void *data)
|
||||
{
|
||||
struct dyn_array *da = data;
|
||||
|
@ -71,12 +101,83 @@ static void __init init_work(void *data)
|
|||
desc = *da->name;
|
||||
|
||||
for (i = 0; i < *da->nr; i++)
|
||||
memcpy(&desc[i], &irq_desc_init, sizeof(struct irq_desc));
|
||||
init_one_irq_desc(&desc[i]);
|
||||
|
||||
for (i = 1; i < *da->nr; i++)
|
||||
desc[i-1].next = &desc[i];
|
||||
}
|
||||
|
||||
struct irq_desc *irq_desc;
|
||||
static struct irq_desc *sparse_irqs;
|
||||
DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
|
||||
|
||||
extern int after_bootmem;
|
||||
extern void *__alloc_bootmem_nopanic(unsigned long size,
|
||||
unsigned long align,
|
||||
unsigned long goal);
|
||||
struct irq_desc *irq_to_desc(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc, *desc_pri;
|
||||
int i;
|
||||
int count = 0;
|
||||
|
||||
BUG_ON(irq == -1U);
|
||||
|
||||
desc_pri = desc = &sparse_irqs[0];
|
||||
while (desc) {
|
||||
if (desc->irq == irq)
|
||||
return desc;
|
||||
|
||||
if (desc->irq == -1U) {
|
||||
desc->irq = irq;
|
||||
return desc;
|
||||
}
|
||||
desc_pri = desc;
|
||||
desc = desc->next;
|
||||
count++;
|
||||
}
|
||||
|
||||
/*
|
||||
* we run out of pre-allocate ones, allocate more
|
||||
*/
|
||||
printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
|
||||
|
||||
if (after_bootmem)
|
||||
desc = kzalloc(sizeof(struct irq_desc)*nr_irq_desc, GFP_ATOMIC);
|
||||
else
|
||||
desc = __alloc_bootmem_nopanic(sizeof(struct irq_desc)*nr_irq_desc, PAGE_SIZE, 0);
|
||||
|
||||
if (!desc)
|
||||
panic("please boot with nr_irq_desc= %d\n", count * 2);
|
||||
|
||||
for (i = 0; i < nr_irq_desc; i++)
|
||||
init_one_irq_desc(&desc[i]);
|
||||
|
||||
for (i = 1; i < nr_irq_desc; i++)
|
||||
desc[i-1].next = &desc[i];
|
||||
|
||||
desc->irq = irq;
|
||||
desc_pri->next = desc;
|
||||
|
||||
return desc;
|
||||
}
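	/*
	 * Hypothetical caller sketch, not part of this patch: with the default
	 * nr_irq_desc=32 preallocation, the first lookup of an IRQ number that
	 * is not yet in the chain claims a free slot, and once every slot is
	 * used irq_to_desc() links in another batch of nr_irq_desc descriptors,
	 * so in the sparse configuration the lookup below never returns NULL.
	 */
	static void __init example_touch_irq(unsigned int irq)
	{
		struct irq_desc *desc = irq_to_desc(irq);	/* allocates on demand */

		desc->status |= IRQ_DISABLED;			/* safe to dereference */
	}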
|
||||
#else
|
||||
static void __init init_work(void *data)
|
||||
{
|
||||
struct dyn_array *da = data;
|
||||
int i;
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = *da->name;
|
||||
|
||||
for (i = 0; i < *da->nr; i++)
|
||||
init_one_irq_desc(&desc[i]);
|
||||
|
||||
}
|
||||
static struct irq_desc *irq_desc;
|
||||
DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
|
||||
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
|
||||
|
@ -85,12 +186,23 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
|
|||
.chip = &no_irq_chip,
|
||||
.handle_irq = handle_bad_irq,
|
||||
.depth = 1,
|
||||
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
|
||||
.lock = __SPIN_LOCK_UNLOCKED(sparse_irqs->lock),
|
||||
#ifdef CONFIG_SMP
|
||||
.affinity = CPU_MASK_ALL
|
||||
#endif
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_HAVE_SPARSE_IRQ
|
||||
struct irq_desc *irq_to_desc(unsigned int irq)
|
||||
{
|
||||
if (irq < nr_irqs)
|
||||
return &irq_desc[irq];
|
||||
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -99,7 +211,10 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
|
|||
*/
|
||||
static void ack_bad(unsigned int irq)
|
||||
{
|
||||
print_irq_desc(irq, irq_desc + irq);
|
||||
struct irq_desc *desc;
|
||||
|
||||
desc = irq_to_desc(irq);
|
||||
print_irq_desc(irq, desc);
|
||||
ack_bad_irq(irq);
|
||||
}
|
||||
|
||||
|
@ -196,7 +311,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
|
|||
*/
|
||||
unsigned int __do_IRQ(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irqaction *action;
|
||||
unsigned int status;
|
||||
|
||||
|
@ -287,19 +402,16 @@ out:
|
|||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
|
||||
/*
|
||||
* lockdep: we want to handle all irq_desc locks as a single lock-class:
|
||||
*/
|
||||
static struct lock_class_key irq_desc_lock_class;
|
||||
|
||||
void early_init_irq_lock_class(void)
|
||||
{
|
||||
#ifndef CONFIG_HAVE_DYN_ARRAY
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_irqs; i++)
|
||||
lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
|
||||
}
|
||||
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ cpumask_t irq_default_affinity = CPU_MASK_ALL;
|
|||
*/
|
||||
void synchronize_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
unsigned int status;
|
||||
|
||||
if (irq >= nr_irqs)
|
||||
|
@ -64,7 +64,7 @@ EXPORT_SYMBOL(synchronize_irq);
|
|||
*/
|
||||
int irq_can_set_affinity(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
|
||||
!desc->chip->set_affinity)
|
||||
|
@ -81,7 +81,7 @@ int irq_can_set_affinity(unsigned int irq)
|
|||
*/
|
||||
int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (!desc->chip->set_affinity)
|
||||
return -EINVAL;
|
||||
|
@ -111,14 +111,16 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
|
|||
int irq_select_affinity(unsigned int irq)
|
||||
{
|
||||
cpumask_t mask;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (!irq_can_set_affinity(irq))
|
||||
return 0;
|
||||
|
||||
cpus_and(mask, cpu_online_map, irq_default_affinity);
|
||||
|
||||
irq_desc[irq].affinity = mask;
|
||||
irq_desc[irq].chip->set_affinity(irq, mask);
|
||||
desc = irq_to_desc(irq);
|
||||
desc->affinity = mask;
|
||||
desc->chip->set_affinity(irq, mask);
|
||||
|
||||
set_balance_irq_affinity(irq, mask);
|
||||
return 0;
|
||||
|
@ -140,7 +142,7 @@ int irq_select_affinity(unsigned int irq)
|
|||
*/
|
||||
void disable_irq_nosync(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
unsigned long flags;
|
||||
|
||||
if (irq >= nr_irqs)
|
||||
|
@ -169,7 +171,7 @@ EXPORT_SYMBOL(disable_irq_nosync);
|
|||
*/
|
||||
void disable_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (irq >= nr_irqs)
|
||||
return;
|
||||
|
@ -211,7 +213,7 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
|
|||
*/
|
||||
void enable_irq(unsigned int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
unsigned long flags;
|
||||
|
||||
if (irq >= nr_irqs)
|
||||
|
@ -225,7 +227,7 @@ EXPORT_SYMBOL(enable_irq);
|
|||
|
||||
static int set_irq_wake_real(unsigned int irq, unsigned int on)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
int ret = -ENXIO;
|
||||
|
||||
if (desc->chip->set_wake)
|
||||
|
@ -248,7 +250,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
|
|||
*/
|
||||
int set_irq_wake(unsigned int irq, unsigned int on)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -288,12 +290,13 @@ EXPORT_SYMBOL(set_irq_wake);
|
|||
*/
|
||||
int can_request_irq(unsigned int irq, unsigned long irqflags)
|
||||
{
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irqaction *action;
|
||||
|
||||
if (irq >= nr_irqs || irq_desc[irq].status & IRQ_NOREQUEST)
|
||||
if (irq >= nr_irqs || desc->status & IRQ_NOREQUEST)
|
||||
return 0;
|
||||
|
||||
action = irq_desc[irq].action;
|
||||
action = desc->action;
|
||||
if (action)
|
||||
if (irqflags & action->flags & IRQF_SHARED)
|
||||
action = NULL;
|
||||
|
@ -349,7 +352,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
|
|||
*/
|
||||
int setup_irq(unsigned int irq, struct irqaction *new)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irqaction *old, **p;
|
||||
const char *old_name = NULL;
|
||||
unsigned long flags;
|
||||
|
@ -518,7 +521,7 @@ void free_irq(unsigned int irq, void *dev_id)
|
|||
if (irq >= nr_irqs)
|
||||
return;
|
||||
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
p = &desc->action;
|
||||
for (;;) {
|
||||
|
@ -615,6 +618,7 @@ int request_irq(unsigned int irq, irq_handler_t handler,
|
|||
{
|
||||
struct irqaction *action;
|
||||
int retval;
|
||||
struct irq_desc *desc;
|
||||
|
||||
#ifdef CONFIG_LOCKDEP
|
||||
/*
|
||||
|
@ -632,7 +636,8 @@ int request_irq(unsigned int irq, irq_handler_t handler,
|
|||
return -EINVAL;
|
||||
if (irq >= nr_irqs)
|
||||
return -EINVAL;
|
||||
if (irq_desc[irq].status & IRQ_NOREQUEST)
|
||||
desc = irq_to_desc(irq);
|
||||
if (desc->status & IRQ_NOREQUEST)
|
||||
return -EINVAL;
|
||||
if (!handler)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -3,18 +3,18 @@
|
|||
|
||||
void set_pending_irq(unsigned int irq, cpumask_t mask)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status |= IRQ_MOVE_PENDING;
|
||||
irq_desc[irq].pending_mask = mask;
|
||||
desc->pending_mask = mask;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
void move_masked_irq(int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
cpumask_t tmp;
|
||||
|
||||
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
|
||||
|
@ -30,7 +30,7 @@ void move_masked_irq(int irq)
|
|||
|
||||
desc->status &= ~IRQ_MOVE_PENDING;
|
||||
|
||||
if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
|
||||
if (unlikely(cpus_empty(desc->pending_mask)))
|
||||
return;
|
||||
|
||||
if (!desc->chip->set_affinity)
|
||||
|
@ -38,7 +38,7 @@ void move_masked_irq(int irq)
|
|||
|
||||
assert_spin_locked(&desc->lock);
|
||||
|
||||
cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
|
||||
cpus_and(tmp, desc->pending_mask, cpu_online_map);
|
||||
|
||||
/*
|
||||
* If there was a valid mask to work with, please
|
||||
|
@ -55,12 +55,12 @@ void move_masked_irq(int irq)
|
|||
if (likely(!cpus_empty(tmp))) {
|
||||
desc->chip->set_affinity(irq,tmp);
|
||||
}
|
||||
cpus_clear(irq_desc[irq].pending_mask);
|
||||
cpus_clear(desc->pending_mask);
|
||||
}
|
||||
|
||||
void move_native_irq(int irq)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (likely(!(desc->status & IRQ_MOVE_PENDING)))
|
||||
return;
|
||||
|
|
|
@ -19,7 +19,7 @@ static struct proc_dir_entry *root_irq_dir;
|
|||
|
||||
static int irq_affinity_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + (long)m->private;
|
||||
struct irq_desc *desc = irq_to_desc((long)m->private);
|
||||
cpumask_t *mask = &desc->affinity;
|
||||
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
|
@ -43,7 +43,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
|
|||
cpumask_t new_value;
|
||||
int err;
|
||||
|
||||
if (!irq_desc[irq].chip->set_affinity || no_irq_affinity ||
|
||||
if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
|
||||
irq_balancing_disabled(irq))
|
||||
return -EIO;
|
||||
|
||||
|
@ -132,20 +132,20 @@ static const struct file_operations default_affinity_proc_fops = {
|
|||
static int irq_spurious_read(char *page, char **start, off_t off,
|
||||
int count, int *eof, void *data)
|
||||
{
|
||||
struct irq_desc *d = &irq_desc[(long) data];
|
||||
struct irq_desc *desc = irq_to_desc((long) data);
|
||||
return sprintf(page, "count %u\n"
|
||||
"unhandled %u\n"
|
||||
"last_unhandled %u ms\n",
|
||||
d->irq_count,
|
||||
d->irqs_unhandled,
|
||||
jiffies_to_msecs(d->last_unhandled));
|
||||
desc->irq_count,
|
||||
desc->irqs_unhandled,
|
||||
jiffies_to_msecs(desc->last_unhandled));
|
||||
}
|
||||
|
||||
#define MAX_NAMELEN 128
|
||||
|
||||
static int name_unique(unsigned int irq, struct irqaction *new_action)
|
||||
{
|
||||
struct irq_desc *desc = irq_desc + irq;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
struct irqaction *action;
|
||||
unsigned long flags;
|
||||
int ret = 1;
|
||||
|
@ -165,8 +165,9 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
|
|||
void register_handler_proc(unsigned int irq, struct irqaction *action)
|
||||
{
|
||||
char name [MAX_NAMELEN];
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (!irq_desc[irq].dir || action->dir || !action->name ||
|
||||
if (!desc->dir || action->dir || !action->name ||
|
||||
!name_unique(irq, action))
|
||||
return;
|
||||
|
||||
|
@ -174,7 +175,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
|
|||
snprintf(name, MAX_NAMELEN, "%s", action->name);
|
||||
|
||||
/* create /proc/irq/1234/handler/ */
|
||||
action->dir = proc_mkdir(name, irq_desc[irq].dir);
|
||||
action->dir = proc_mkdir(name, desc->dir);
|
||||
}
|
||||
|
||||
#undef MAX_NAMELEN
|
||||
|
@ -185,25 +186,24 @@ void register_irq_proc(unsigned int irq)
|
|||
{
|
||||
char name [MAX_NAMELEN];
|
||||
struct proc_dir_entry *entry;
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
|
||||
if (!root_irq_dir ||
|
||||
(irq_desc[irq].chip == &no_irq_chip) ||
|
||||
irq_desc[irq].dir)
|
||||
if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
|
||||
return;
|
||||
|
||||
memset(name, 0, MAX_NAMELEN);
|
||||
sprintf(name, "%d", irq);
|
||||
|
||||
/* create /proc/irq/1234 */
|
||||
irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
|
||||
desc->dir = proc_mkdir(name, root_irq_dir);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* create /proc/irq/<irq>/smp_affinity */
|
||||
proc_create_data("smp_affinity", 0600, irq_desc[irq].dir,
|
||||
proc_create_data("smp_affinity", 0600, desc->dir,
|
||||
&irq_affinity_proc_fops, (void *)(long)irq);
|
||||
#endif
|
||||
|
||||
entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir);
|
||||
entry = create_proc_entry("spurious", 0444, desc->dir);
|
||||
if (entry) {
|
||||
entry->data = (void *)(long)irq;
|
||||
entry->read_proc = irq_spurious_read;
|
||||
|
@ -214,8 +214,10 @@ void register_irq_proc(unsigned int irq)
|
|||
|
||||
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
|
||||
{
|
||||
if (action->dir)
|
||||
remove_proc_entry(action->dir->name, irq_desc[irq].dir);
|
||||
if (action->dir) {
|
||||
struct irq_desc *desc = irq_to_desc(irq);
|
||||
remove_proc_entry(action->dir->name, desc->dir);
|
||||
}
|
||||
}
|
||||
|
||||
void register_default_affinity_proc(void)
|
||||
|
|
|
@ -36,7 +36,7 @@ static void resend_irqs(unsigned long arg)
|
|||
while (!bitmap_empty(irqs_resend, nr_irqs)) {
|
||||
irq = find_first_bit(irqs_resend, nr_irqs);
|
||||
clear_bit(irq, irqs_resend);
|
||||
desc = irq_desc + irq;
|
||||
desc = irq_to_desc(irq);
|
||||
local_irq_disable();
|
||||
desc->handle_irq(irq, desc);
|
||||
local_irq_enable();
|
||||
|
|
|
@ -92,11 +92,12 @@ static int misrouted_irq(int irq)
|
|||
int ok = 0;
|
||||
|
||||
for (i = 1; i < nr_irqs; i++) {
|
||||
struct irq_desc *desc = irq_desc + i;
|
||||
struct irq_desc *desc;
|
||||
|
||||
if (i == irq) /* Already tried */
|
||||
continue;
|
||||
|
||||
desc = irq_to_desc(i);
|
||||
if (try_one_irq(i, desc))
|
||||
ok = 1;
|
||||
}
|
||||
|
@ -108,7 +109,7 @@ static void poll_spurious_irqs(unsigned long dummy)
|
|||
{
|
||||
int i;
|
||||
for (i = 1; i < nr_irqs; i++) {
|
||||
struct irq_desc *desc = irq_desc + i;
|
||||
struct irq_desc *desc = irq_to_desc(i);
|
||||
unsigned int status;
|
||||
|
||||
/* Racy but it doesn't matter */
|
||||
|
|