diff -ur linux-2.5.59-mjb1/arch/i386/Kconfig linux-2.5.59-numaq-mjb1/arch/i386/Kconfig
--- linux-2.5.59-mjb1/arch/i386/Kconfig	Sun Jan 26 23:16:19 2003
+++ linux-2.5.59-numaq-mjb1/arch/i386/Kconfig	Mon Jan 27 10:59:46 2003
@@ -363,6 +363,11 @@
 	depends on MK8 || MPENTIUM4
 	default y
 
+config X86_CMOV
+	bool
+	depends on M686 || MPENTIUMII || MPENTIUMIII || MPENTIUM4 || MK8 || MCRUSOE
+	default y
+
 config HUGETLB_PAGE
 	bool "Huge TLB Page Support"
 	help
diff -ur linux-2.5.59-mjb1/arch/i386/kernel/apic.c linux-2.5.59-numaq-mjb1/arch/i386/kernel/apic.c
--- linux-2.5.59-mjb1/arch/i386/kernel/apic.c	Sun Jan 26 23:14:51 2003
+++ linux-2.5.59-numaq-mjb1/arch/i386/kernel/apic.c	Sun Jan 26 14:23:22 2003
@@ -1038,7 +1038,8 @@
  *   interrupt as well. Thus we cannot inline the local irq ... ]
  */
 
-void smp_apic_timer_interrupt(struct pt_regs regs)
+struct pt_regs *smp_apic_timer_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_apic_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 
@@ -1058,14 +1059,16 @@
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
 	irq_enter();
-	smp_local_timer_interrupt(&regs);
+	smp_local_timer_interrupt(regs);
 	irq_exit();
+	return regs;
 }
 
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-asmlinkage void smp_spurious_interrupt(void)
+struct pt_regs *smp_spurious_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_spurious_interrupt(struct pt_regs *regs)
 {
 	unsigned long v;
 
@@ -1083,13 +1086,15 @@
 	printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
 			smp_processor_id());
 	irq_exit();
+	return regs;
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
-asmlinkage void smp_error_interrupt(void)
+struct pt_regs *smp_error_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_error_interrupt(struct pt_regs *regs)
 {
 	unsigned long v, v1;
 
@@ -1114,6 +1119,7 @@
 	printk (KERN_INFO "APIC error on CPU%d: %02lx(%02lx)\n",
 	        smp_processor_id(), v , v1);
 	irq_exit();
+	return regs;
 }
 
 /*
diff -ur linux-2.5.59-mjb1/arch/i386/kernel/cpu/mcheck/p4.c linux-2.5.59-numaq-mjb1/arch/i386/kernel/cpu/mcheck/p4.c
--- linux-2.5.59-mjb1/arch/i386/kernel/cpu/mcheck/p4.c	Sun Jan 26 23:14:51 2003
+++ linux-2.5.59-numaq-mjb1/arch/i386/kernel/cpu/mcheck/p4.c	Sun Jan 26 23:23:08 2003
@@ -61,9 +61,11 @@
 /* Thermal interrupt handler for this CPU setup */
 static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;
 
-asmlinkage void smp_thermal_interrupt(struct pt_regs regs)
+struct pt_regs *smp_thermal_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_thermal_interrupt(struct pt_regs *regs)
 {
 	irq_enter();
-	vendor_thermal_interrupt(&regs);
+	vendor_thermal_interrupt(regs);
 	irq_exit();
+	return regs;
 }
diff -ur linux-2.5.59-mjb1/arch/i386/kernel/entry.S linux-2.5.59-numaq-mjb1/arch/i386/kernel/entry.S
--- linux-2.5.59-mjb1/arch/i386/kernel/entry.S	Sun Jan 26 23:16:19 2003
+++ linux-2.5.59-numaq-mjb1/arch/i386/kernel/entry.S	Mon Jan 27 11:14:08 2003
@@ -138,8 +138,6 @@
 	.long 1b,2b;	\
 .previous
 
-
-
 ENTRY(lcall7)
 	pushfl			# We get a different stack layout with call
 				# gates, which has to be cleaned up later..
@@ -388,55 +386,76 @@
 vector=vector+1
 .endr
 
-	ALIGN
-common_interrupt:
-	SAVE_ALL
-
-	GET_THREAD_INFO(%ebx)
-	movl TI_IRQ_STACK(%ebx),%ecx
-	movl TI_TASK(%ebx),%edx
-	movl %esp,%eax
-	leal (THREAD_SIZE-4)(%ecx),%esi	# %ecx+THREAD_SIZE is next stack
-					# -4 keeps us in the right one
-	testl %ecx,%ecx			# is there a valid irq_stack?
-
-	# switch to the irq stack
-#ifdef CONFIG_X86_HAVE_CMOV
-	cmovnz %esi,%esp
+# let's play optimizing compiler...
+#ifdef CONFIG_X86_CMOV
+#define COND_MOVE	cmovnz %esi,%esp;
 #else
-	jz 1f
-	mov %esi,%esp
+#define COND_MOVE	\
+	jz 1f;		\
+	mov %esi,%esp;	\
 1:
 #endif
-
-	# update the task pointer in the irq stack
-	GET_THREAD_INFO(%esi)
-	movl %edx,TI_TASK(%esi)
-
-	# update the preempt count in the irq stack
-	movl TI_PRE_COUNT(%ebx),%ecx
-	movl %ecx,TI_PRE_COUNT(%esi)
-
-	call do_IRQ
-	movl %eax,%esp			# potentially restore non-irq stack
-
-	# copy flags from the irq stack back into the task's thread_info
-	# %esi is saved over the do_IRQ call and contains the irq stack
-	# thread_info pointer
-	# %ebx contains the original thread_info pointer
-	movl TI_FLAGS(%esi),%eax
-	movl $0,TI_FLAGS(%esi)
-	LOCK orl %eax,TI_FLAGS(%ebx)
+
+# These macros will switch you to and from the per-cpu interrupt stack.
+# They take the pt_regs arg and move it from the normal place on the
+# stack to %eax.  Any handler function can retrieve it using regparm(1).
+# The handlers are expected to return the stack to switch back to in
+# the same register.
+#
+# This means that the irq handlers need to return their pt_regs arg.
+#
+# SWITCH_TO_IRQSTACK clobbers %ebx, %ecx, %edx, %esi
+# old stack in %eax
+
+#define SWITCH_TO_IRQSTACK	\
+	GET_THREAD_INFO(%ebx);	\
+	movl TI_IRQ_STACK(%ebx),%ecx;	\
+	movl TI_TASK(%ebx),%edx;	\
+	movl %esp,%eax;		\
+	\
+	/* %ecx+THREAD_SIZE is next stack; -4 keeps us in the right one */ \
+	leal (THREAD_SIZE-4)(%ecx),%esi;	\
+	\
+	/* is there a valid irq_stack? */	\
+	testl %ecx,%ecx;	\
+	COND_MOVE;		\
+	\
+	/* update the task pointer in the irq stack */	\
+	GET_THREAD_INFO(%esi);	\
+	movl %edx,TI_TASK(%esi);	\
+	\
+	/* update the preempt count in the irq stack */	\
+	movl TI_PRE_COUNT(%ebx),%ecx;	\
+	movl %ecx,TI_PRE_COUNT(%esi);
+
+# copy flags from the irq stack back into the task's thread_info
+# %esi is saved over the irq handler call and contains the irq stack
+# thread_info pointer
+# %eax was returned from the handler, as described above
+# %ebx contains the original thread_info pointer
+
+#define RESTORE_FROM_IRQSTACK	\
+	movl %eax,%esp;		\
+	movl TI_FLAGS(%esi),%eax;	\
+	movl $0,TI_FLAGS(%esi);	\
+	LOCK orl %eax,TI_FLAGS(%ebx);
+
 	ALIGN
+common_interrupt:
+	SAVE_ALL
+	SWITCH_TO_IRQSTACK
+	call do_IRQ
+	RESTORE_FROM_IRQSTACK
 	jmp ret_from_intr
 
 #define BUILD_INTERRUPT(name, nr)	\
 ENTRY(name)				\
-	pushl $nr-256;			\
+	pushl $nr-256;			\
 	SAVE_ALL			\
-	call smp_/**/name;		\
+	SWITCH_TO_IRQSTACK		\
+	call smp_/**/name;		\
+	RESTORE_FROM_IRQSTACK		\
 	jmp ret_from_intr;
 
 /* The include is where all of the SMP etc. interrupts come from */
diff -ur linux-2.5.59-mjb1/arch/i386/kernel/smp.c linux-2.5.59-numaq-mjb1/arch/i386/kernel/smp.c
--- linux-2.5.59-mjb1/arch/i386/kernel/smp.c	Sun Jan 26 23:16:19 2003
+++ linux-2.5.59-numaq-mjb1/arch/i386/kernel/smp.c	Sun Jan 26 14:37:09 2003
@@ -305,7 +305,8 @@
  * 2) Leave the mm if we are in the lazy tlb mode.
  */
 
-asmlinkage void smp_invalidate_interrupt (void)
+struct pt_regs *smp_invalidate_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_invalidate_interrupt(struct pt_regs *regs)
 {
 	unsigned long cpu;
 
@@ -336,6 +337,7 @@
 
 out:
 	put_cpu_no_resched();
+	return regs;
 }
 
 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
@@ -598,12 +600,17 @@
  * all the work is done automatically when
 * we return from the interrupt.
 */
-asmlinkage void smp_reschedule_interrupt(void)
+
+struct pt_regs *smp_reschedule_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
+	return regs;
 }
 
-asmlinkage void smp_call_function_interrupt(struct pt_regs regs)
+
+struct pt_regs *smp_call_function_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
+struct pt_regs *smp_call_function_interrupt(struct pt_regs *regs)
 {
 	void (*func) (void *info, struct pt_regs *) = (void (*)(void *, struct pt_regs*))call_data->func;
 	void *info = call_data->info;
@@ -620,12 +627,13 @@
 	 * At this point the info structure may be out of scope unless wait==1
 	 */
 	irq_enter();
-	(*func)(info, &regs);
+	(*func)(info, regs);
 	irq_exit();
 	if (wait) {
 		mb();
 		atomic_inc(&call_data->finished);
 	}
+	return regs;
 }
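
A note on the handler convention above, for anyone adding vectors against it: entry.S now hands each interrupt handler its pt_regs pointer in %eax (hence the regparm(1) attribute) and switches %esp to whatever pointer the handler returns. A minimal sketch of the shape every handler takes -- the handler name here is hypothetical and not part of the patch, and struct pt_regs, irq_enter() and irq_exit() are assumed from the usual kernel headers:

/* sketch only: a do-nothing handler following the new convention */
struct pt_regs *smp_example_interrupt(struct pt_regs *regs) __attribute__((regparm(1)));
struct pt_regs *smp_example_interrupt(struct pt_regs *regs)
{
	irq_enter();
	/* ... actual work goes here, with regs passed along as a pointer ... */
	irq_exit();

	/* hand back the stack to switch to; RESTORE_FROM_IRQSTACK moves
	 * this value straight into %esp */
	return regs;
}

Returning regs unchanged resumes on the stack the interrupt arrived on; the value only exists so that common_interrupt and BUILD_INTERRUPT can undo the irq-stack switch without remembering the old %esp themselves.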
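For readers who would rather see the entry.S macros in C, here is a rough model of the bookkeeping SWITCH_TO_IRQSTACK and RESTORE_FROM_IRQSTACK perform. The struct and function names are invented stand-ins for illustration (the real code works on TI_* asm offsets and raw %esp, which C cannot express), so treat this as a description, not an implementation:

/* stand-in for the thread_info fields the asm touches via TI_* offsets */
struct ti_model {
	void *task;			/* TI_TASK */
	unsigned long flags;		/* TI_FLAGS */
	int preempt_count;		/* TI_PRE_COUNT */
	struct ti_model *irq_stack;	/* TI_IRQ_STACK, NULL if none */
};

void irqstack_model(struct ti_model *task_ti)		/* %ebx */
{
	struct ti_model *irq_ti = task_ti->irq_stack;	/* %ecx */
	struct ti_model *on_ti;				/* %esi */
	unsigned long pending;

	/* SWITCH_TO_IRQSTACK: if there is an irq stack, point %esp at
	 * its top (irq_ti + THREAD_SIZE - 4, via cmovnz or jz/mov),
	 * then mirror the task pointer and preempt count onto whichever
	 * thread_info we are now running on */
	on_ti = irq_ti ? irq_ti : task_ti;
	on_ti->task = task_ti->task;
	on_ti->preempt_count = task_ti->preempt_count;

	/* ... handler runs here; %eax carries pt_regs in, stack out ... */

	/* RESTORE_FROM_IRQSTACK: back on the returned stack, move any
	 * work flags raised meanwhile over to the task's thread_info */
	pending = on_ti->flags;
	on_ti->flags = 0;
	task_ti->flags |= pending;	/* LOCK orl in the real code */
}

The flags handoff is why %esi has to survive the handler call: work flagged against the irq stack's thread_info while the handler ran would otherwise be lost once that stack is abandoned, so it is folded back into the task's flags before ret_from_intr checks them.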