linux/arch/i386/kernel/bios32.c:
 o added back the compatibility setting of the correct IRQ in PCI_INTERRUPT_LINE
   - makes the ne2k-pci driver happy and working without Alan's patches;
     also satisfies scanpci (condensed fragment below)
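
   In short, the bios32.c change boils down to this fragment (taken from
   the attached patch below; the surrounding PCI fixup code is omitted):

        /* after the PCI->APIC IRQ transform has picked an irq for the
         * device, write it back into config space, so that drivers and
         * tools (e.g. scanpci) that read PCI_INTERRUPT_LINE see the IRQ
         * that is actually used: */
        if (irq < NR_IRQS)
                pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
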
linux/arch/i386/kernel/io_apic.c:
 o changed the disable_edge_ioapic_irq routine so that nothing gets through
   while the IRQ is disabled
 o switched from level to edge handling for all IRQs that use the
   dest_LowestPrio delivery_mode:
   - According to the Intel Architecture Software Developer's Manual,
     Vol. 3, Chapter 7, page 7-26ff.:

         Delivery Mode
         000 (Fixed)           Deliver ..<snip>..
                               The fixed interrupt is treated as an
                               edge-triggered interrupt even if programmed otherwise.
                               ^^^^^^^^^^^^^^^^^^^^^^^^
         001 (Lowest Priority) Same as fixed mode, ..<snip>..
   - the Realtek PCI NE2000 clone now replies to flood pings without any
     problems (fragments below)
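
   Condensed, the two io_apic.c changes look like this (fragments lifted
   from the attached patch, not complete functions):

        /* when an IO-APIC routing entry is programmed with
         * delivery_mode == dest_LowestPrio, register the edge handler
         * for that IRQ, since fixed/lowest-priority delivery is treated
         * as edge triggered anyway: */
        irq_desc[irq].handler = &ioapic_edge_irq_type;

        /* disable_edge_ioapic_irq() now really shuts the pin up instead
         * of only masking it logically - switch to fixed delivery,
         * clear the destination and set the mask bit: */
        entry.delivery_mode = dest_Fixed;
        entry.dest.logical.logical_dest = 0x0;
        entry.mask = 1;
        io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
        io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
        io_apic_sync();
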
linux/arch/i386/kernel/irq.c:
 o added a printout of all unused/disabled interrupts to /proc/interrupts
 o disable interrupts in free_irq() and probe_irq_off() if no action is
   attached any more
   - saves my machine from IRQ storms caused by my RIVA128 card
     (fragments below)
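
   The irq.c changes, condensed to the two essential fragments (again
   taken from the attached patch):

        /* get_irq_list(): print a line for every IRQ, not only for the
         * ones with a registered action, and tag disabled ones: */
        if (irq_desc[i].status & IRQ_DISABLED)
                p += sprintf(p, " disabled");

        /* free_irq(): once the last action is gone, really disable the
         * IRQ instead of leaving it armed (probe_irq_off() does the
         * same for all IRQs without an action): */
        if (!irq_desc[irq].action) {
                irq_desc[irq].handler->disable(irq);
                irq_desc[irq].status = IRQ_DISABLED;
        }
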
Any comments??
--
»»»» Harald Hoyer ««»» mailto:HarryH@Royal.Net ««»» http://hot.spotline.de ««««
···············································································
Are we running light with overbyte?

[attachment: irq_patch.diff]

Index: linux/arch/i386/kernel/bios32.c
===================================================================
RCS file: /home/saturn/Develop/linux/arch/i386/kernel/bios32.c,v
retrieving revision 1.1.1.22
diff -u -r1.1.1.22 bios32.c
--- bios32.c    1998/06/08 07:27:29    1.1.1.22
+++ bios32.c    1998/06/17 13:23:18
@@ -1026,6 +1026,9 @@
                                 dev->bus->number, PCI_SLOT(dev->devfn), pin, irq);
                         dev->irq = irq;
                 }
+                if(irq < NR_IRQS)
+                        pci_write_config_byte(dev,
+                                              PCI_INTERRUPT_LINE, irq);
         }
 }
 #endif
Index: linux/arch/i386/kernel/io_apic.c
===================================================================
RCS file: /home/saturn/Develop/linux/arch/i386/kernel/io_apic.c,v
retrieving revision 1.1.1.15
diff -u -r1.1.1.15 io_apic.c
--- io_apic.c    1998/06/08 07:36:45    1.1.1.15
+++ io_apic.c    1998/06/17 13:24:04
@@ -205,6 +205,256 @@
 }
 
+#ifdef __SMP__
+
+/*
+ * In the SMP+IOAPIC case it might happen that there are an unspecified
+ * number of pending IRQ events unhandled. These cases are very rare,
+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
+ * better to do it this way as thus we do not have to be aware of
+ * 'pending' interrupts in the IRQ path, except at this point.
+ */
+static inline void self_IPI (unsigned int irq)
+{
+        irq_desc_t *desc = irq_desc + irq;
+
+        if (desc->events && !desc->ipi) {
+                desc->ipi = 1;
+                send_IPI(APIC_DEST_SELF, IO_APIC_VECTOR(irq));
+        }
+}
+
+/*
+ * Edge triggered needs to resend any interrupt
+ * that was delayed.
+ */
+static void enable_edge_ioapic_irq(unsigned int irq)
+{
+        int pin = irq_2_pin[irq];
+        struct IO_APIC_route_entry entry;
+
+        self_IPI(irq);
+        if (pin != -1) {
+                *(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
+                *(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
+                entry.delivery_mode = dest_LowestPrio;
+                entry.dest.logical.logical_dest = 0xff;
+                entry.mask = 0;
+                io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
+                io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+        }
+}
+
+static void disable_edge_ioapic_irq(unsigned int irq)
+{
+        int pin = irq_2_pin[irq];
+        struct IO_APIC_route_entry entry;
+
+        if (pin != -1) {
+                *(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
+                *(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
+                entry.delivery_mode = dest_Fixed;
+                entry.dest.logical.logical_dest = 0x0;
+                entry.mask = 1;
+                io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
+                io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+                io_apic_sync();
+        }
+}
+
+/*
+ * Level triggered interrupts can just be masked
+ */
+static void enable_level_ioapic_irq(unsigned int irq)
+{
+        unmask_IO_APIC_irq(irq);
+}
+
+static void disable_level_ioapic_irq(unsigned int irq)
+{
+        mask_IO_APIC_irq(irq);
+}
+
+/*
+ * Enter and exit the irq handler context..
+ */
+static inline void enter_ioapic_irq(int cpu)
+{
+        hardirq_enter(cpu);
+        while (test_bit(0,&global_irq_lock)) barrier();
+}
+
+static inline void exit_ioapic_irq(int cpu)
+{
+        hardirq_exit(cpu);
+        release_irqlock(cpu);
+}
+
+static void do_edge_ioapic_IRQ(unsigned int irq, int cpu, struct pt_regs * regs)
+{
+        irq_desc_t *desc = irq_desc + irq;
+        struct irqaction * action;
+
+        spin_lock(&irq_controller_lock);
+
+        /*
+         * Edge triggered IRQs can be acked immediately
+         * and do not need to be masked.
+         */
+        ack_APIC_irq();
+        desc->ipi = 0;
+        desc->events = 1;
+
+        /*
+         * If the irq is disabled for whatever reason, we cannot
+         * use the action we have..
+         */
+        action = NULL;
+        if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+                action = desc->action;
+                desc->status = IRQ_INPROGRESS;
+                desc->events = 0;
+        }
+
+        spin_unlock(&irq_controller_lock);
+
+        /*
+         * If there is no IRQ handler or it was disabled, exit early.
+         */
+
+        if (!action)
+                return;
+
+
+        enter_ioapic_irq(cpu);
+
+        /*
+         * Edge triggered interrupts need to remember
+         * pending events..
+         */
+        for (;;) {
+                int pending;
+
+                handle_IRQ_event(irq, regs);
+
+                spin_lock(&irq_controller_lock);
+                pending = desc->events;
+                desc->events = 0;
+                if (!pending)
+                        break;
+                spin_unlock(&irq_controller_lock);
+        }
+        desc->status &= IRQ_DISABLED;
+        spin_unlock(&irq_controller_lock);
+
+        exit_ioapic_irq(cpu);
+}
+
+static void do_level_ioapic_IRQ (unsigned int irq, int cpu,
+                                 struct pt_regs * regs)
+{
+        irq_desc_t *desc = irq_desc + irq;
+        struct irqaction * action;
+
+        spin_lock(&irq_controller_lock);
+        /*
+         * In the level triggered case we first disable the IRQ
+         * in the IO-APIC, then we 'early ACK' the IRQ, then we
+         * handle it and enable the IRQ when finished.
+         *
+         * disable has to happen before the ACK, to avoid IRQ storms.
+         * So this all has to be within the spinlock.
+         */
+        mask_IO_APIC_irq(irq);
+
+        desc->ipi = 0;
+
+        /*
+         * If the irq is disabled for whatever reason, we must
+         * not enter the irq action.
+         */
+        action = NULL;
+        if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+                action = desc->action;
+                desc->status = IRQ_INPROGRESS;
+        }
+
+        ack_APIC_irq();
+        spin_unlock(&irq_controller_lock);
+
+        /* Exit early if we had no action or it was disabled */
+        if (!action)
+                return;
+
+
+        enter_ioapic_irq(cpu);
+
+        handle_IRQ_event(irq, regs);
+
+        spin_lock(&irq_controller_lock);
+        desc->status &= ~IRQ_INPROGRESS;
+        if (!desc->status)
+                unmask_IO_APIC_irq(irq);
+        spin_unlock(&irq_controller_lock);
+
+        exit_ioapic_irq(cpu);
+}
+
+/*
+ * Level and edge triggered IO-APIC interrupts need different handling,
+ * so we use two separate irq descriptors. edge triggered IRQs can be
+ * handled with the level-triggered descriptor, but that one has slightly
+ * more overhead. Level-triggered interrupts cannot be handled with the
+ * edge-triggered handler, without risking IRQ storms and other ugly
+ * races.
+ */
+
+static struct hw_interrupt_type ioapic_edge_irq_type = {
+        "IO-APIC-edge",
+        do_edge_ioapic_IRQ,
+        enable_edge_ioapic_irq,
+        disable_edge_ioapic_irq
+};
+
+static struct hw_interrupt_type ioapic_level_irq_type = {
+        "IO-APIC-level",
+        do_level_ioapic_IRQ,
+        enable_level_ioapic_irq,
+        disable_level_ioapic_irq
+};
+
+void init_IO_APIC_traps(void)
+{
+        int i;
+        /*
+         * NOTE! The local APIC isn't very good at handling
+         * multiple interrupts at the same interrupt level.
+         * As the interrupt level is determined by taking the
+         * vector number and shifting that right by 4, we
+         * want to spread these out a bit so that they don't
+         * all fall in the same interrupt level
+         *
+         * also, we've got to be careful not to trash gate
+         * 0x80, because int 0x80 is hm, kindof importantish ;)
+         */
+        for (i = 0; i < NR_IRQS ; i++) {
+                if ((IO_APIC_VECTOR(i) <= 0xfe)  /* HACK */ &&
+                    (IO_APIC_IRQ(i))) {
+                        if (IO_APIC_irq_trigger(i))
+                                irq_desc[i].handler = &ioapic_level_irq_type;
+                        else
+                                irq_desc[i].handler = &ioapic_edge_irq_type;
+                        /*
+                         * disable it in the 8259A:
+                         */
+                        cached_irq_mask |= 1 << i;
+                        if (i < 16)
+                                set_8259A_irq_mask(i);
+                }
+        }
+}
+#endif
+
 /*
  * support for broken MP BIOSes, enables hand-redirection of PIRQ0-7 to
  * specific CPU-side IRQs.
@@ -572,6 +822,9 @@
 
                 io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
                 io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
+
+                /* dest_LowestPrio are always edge triggered */
+                irq_desc[irq].handler = &ioapic_edge_irq_type;
         }
 
         if (!first_notcon)
@@ -599,6 +852,9 @@
 
         io_apic_write(0x10+2*irq, *(((int *)&entry)+0));
         io_apic_write(0x11+2*irq, *(((int *)&entry)+1));
+
+        /* dest_LowestPrio are always edge triggered */
+        irq_desc[irq].handler = &ioapic_edge_irq_type;
 }
 
 /*
@@ -903,228 +1159,6 @@
         return 0;
 }
 
-#ifdef __SMP__
-
-/*
- * In the SMP+IOAPIC case it might happen that there are an unspecified
- * number of pending IRQ events unhandled. These cases are very rare,
- * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
- * better to do it this way as thus we do not have to be aware of
- * 'pending' interrupts in the IRQ path, except at this point.
- */
-static inline void self_IPI (unsigned int irq)
-{
-        irq_desc_t *desc = irq_desc + irq;
-
-        if (desc->events && !desc->ipi) {
-                desc->ipi = 1;
-                send_IPI(APIC_DEST_SELF, IO_APIC_VECTOR(irq));
-        }
-}
-
-/*
- * Edge triggered needs to resend any interrupt
- * that was delayed.
- */
-static void enable_edge_ioapic_irq(unsigned int irq)
-{
-        self_IPI(irq);
-        enable_IO_APIC_irq(irq);
-}
-
-static void disable_edge_ioapic_irq(unsigned int irq)
-{
-        disable_IO_APIC_irq(irq);
-}
-
-/*
- * Level triggered interrupts can just be masked
- */
-static void enable_level_ioapic_irq(unsigned int irq)
-{
-        unmask_IO_APIC_irq(irq);
-}
-
-static void disable_level_ioapic_irq(unsigned int irq)
-{
-        mask_IO_APIC_irq(irq);
-}
-
-/*
- * Enter and exit the irq handler context..
- */
-static inline void enter_ioapic_irq(int cpu)
-{
-        hardirq_enter(cpu);
-        while (test_bit(0,&global_irq_lock)) barrier();
-}
-
-static inline void exit_ioapic_irq(int cpu)
-{
-        hardirq_exit(cpu);
-        release_irqlock(cpu);
-}
-
-static void do_edge_ioapic_IRQ(unsigned int irq, int cpu, struct pt_regs * regs)
-{
-        irq_desc_t *desc = irq_desc + irq;
-        struct irqaction * action;
-
-        spin_lock(&irq_controller_lock);
-
-        /*
-         * Edge triggered IRQs can be acked immediately
-         * and do not need to be masked.
-         */
-        ack_APIC_irq();
-        desc->ipi = 0;
-        desc->events = 1;
-
-        /*
-         * If the irq is disabled for whatever reason, we cannot
-         * use the action we have..
-         */
-        action = NULL;
-        if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-                action = desc->action;
-                desc->status = IRQ_INPROGRESS;
-                desc->events = 0;
-        }
-        spin_unlock(&irq_controller_lock);
-
-        /*
-         * If there is no IRQ handler or it was disabled, exit early.
-         */
-        if (!action)
-                return;
-
-        enter_ioapic_irq(cpu);
-
-        /*
-         * Edge triggered interrupts need to remember
-         * pending events..
-         */
-        for (;;) {
-                int pending;
-
-                handle_IRQ_event(irq, regs);
-
-                spin_lock(&irq_controller_lock);
-                pending = desc->events;
-                desc->events = 0;
-                if (!pending)
-                        break;
-                spin_unlock(&irq_controller_lock);
-        }
-        desc->status &= IRQ_DISABLED;
-        spin_unlock(&irq_controller_lock);
-
-        exit_ioapic_irq(cpu);
-}
-
-static void do_level_ioapic_IRQ (unsigned int irq, int cpu,
-                                 struct pt_regs * regs)
-{
-        irq_desc_t *desc = irq_desc + irq;
-        struct irqaction * action;
-
-        spin_lock(&irq_controller_lock);
-        /*
-         * In the level triggered case we first disable the IRQ
-         * in the IO-APIC, then we 'early ACK' the IRQ, then we
-         * handle it and enable the IRQ when finished.
-         *
-         * disable has to happen before the ACK, to avoid IRQ storms.
-         * So this all has to be within the spinlock.
-         */
-        mask_IO_APIC_irq(irq);
-
-        desc->ipi = 0;
-
-        /*
-         * If the irq is disabled for whatever reason, we must
-         * not enter the irq action.
-         */
-        action = NULL;
-        if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
-                action = desc->action;
-                desc->status = IRQ_INPROGRESS;
-        }
-
-        ack_APIC_irq();
-        spin_unlock(&irq_controller_lock);
-
-        /* Exit early if we had no action or it was disabled */
-        if (!action)
-                return;
-
-        enter_ioapic_irq(cpu);
-
-        handle_IRQ_event(irq, regs);
-
-        spin_lock(&irq_controller_lock);
-        desc->status &= ~IRQ_INPROGRESS;
-        if (!desc->status)
-                unmask_IO_APIC_irq(irq);
-        spin_unlock(&irq_controller_lock);
-
-        exit_ioapic_irq(cpu);
-}
-
-/*
- * Level and edge triggered IO-APIC interrupts need different handling,
- * so we use two separate irq descriptors. edge triggered IRQs can be
- * handled with the level-triggered descriptor, but that one has slightly
- * more overhead. Level-triggered interrupts cannot be handled with the
- * edge-triggered handler, without risking IRQ storms and other ugly
- * races.
- */
-
-static struct hw_interrupt_type ioapic_edge_irq_type = {
-        "IO-APIC-edge",
-        do_edge_ioapic_IRQ,
-        enable_edge_ioapic_irq,
-        disable_edge_ioapic_irq
-};
-
-static struct hw_interrupt_type ioapic_level_irq_type = {
-        "IO-APIC-level",
-        do_level_ioapic_IRQ,
-        enable_level_ioapic_irq,
-        disable_level_ioapic_irq
-};
-
-void init_IO_APIC_traps(void)
-{
-        int i;
-        /*
-         * NOTE! The local APIC isn't very good at handling
-         * multiple interrupts at the same interrupt level.
-         * As the interrupt level is determined by taking the
-         * vector number and shifting that right by 4, we
-         * want to spread these out a bit so that they don't
-         * all fall in the same interrupt level
-         *
-         * also, we've got to be careful not to trash gate
-         * 0x80, because int 0x80 is hm, kindof importantish ;)
-         */
-        for (i = 0; i < NR_IRQS ; i++) {
-                if ((IO_APIC_VECTOR(i) <= 0xfe)  /* HACK */ &&
-                    (IO_APIC_IRQ(i))) {
-                        if (IO_APIC_irq_trigger(i))
-                                irq_desc[i].handler = &ioapic_level_irq_type;
-                        else
-                                irq_desc[i].handler = &ioapic_edge_irq_type;
-                        /*
-                         * disable it in the 8259A:
-                         */
-                        cached_irq_mask |= 1 << i;
-                        if (i < 16)
-                                set_8259A_irq_mask(i);
-                }
-        }
-}
-#endif
 
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
Index: linux/arch/i386/kernel/irq.c
===================================================================
RCS file: /home/saturn/Develop/linux/arch/i386/kernel/irq.c,v
retrieving revision 1.1.1.37
diff -u -r1.1.1.37 irq.c
--- irq.c    1998/06/08 07:27:31    1.1.1.37
+++ irq.c    1998/06/17 13:25:45
@@ -286,8 +286,7 @@
 
         for (i = 0 ; i < NR_IRQS ; i++) {
                 action = irq_desc[i].action;
-                if (!action)
-                        continue;
+
                 p += sprintf(p, "%3d: ",i);
 #ifndef __SMP__
                 p += sprintf(p, "%10u ", kstat_irqs(i));
@@ -297,11 +296,18 @@
                                 kstat.irqs[cpu_logical_map(j)][i]);
 #endif
                 p += sprintf(p, " %14s", irq_desc[i].handler->typename);
-                p += sprintf(p, "  %s", action->name);
-
-                for (action=action->next; action; action = action->next) {
-                        p += sprintf(p, ", %s", action->name);
+                if (action)
+                {
+                        p += sprintf(p, "  %s", action->name);
+
+                        for (action=action->next; action;
+                             action = action->next) {
+                                p += sprintf(p, ", %s", action->name);
+                        }
                 }
+                if(irq_desc[i].status & IRQ_DISABLED)
+                        p += sprintf(p, " disabled");
+
                 *p++ = '\n';
         }
         p += sprintf(p, "NMI: %10u\n", atomic_read(&nmi_counter));
@@ -894,8 +900,10 @@
                 /* Found it - now free it */
                 *p = action->next;
                 kfree(action);
-                if (!irq_desc[irq].action)
+                if (!irq_desc[irq].action) {
                         irq_desc[irq].handler->disable(irq);
+                        irq_desc[irq].status = IRQ_DISABLED;
+                }
                 goto out;
         }
         printk("Trying to free free IRQ%d\n",irq);
@@ -966,6 +974,12 @@
         if (irq_found == -1)
                 irq_found = 0;
 out:
+        for (i = NR_IRQS-1; i > 0; i--) {
+                if (!irq_desc[i].action) {
+                        irq_desc[i].status = IRQ_DISABLED;
+                        irq_desc[i].handler->disable(i);
+                }
+        }
         spin_unlock_irq(&irq_controller_lock);
         return irq_found;
 }