diff -urP linux-2.5.62/arch/alpha/kernel/process.c linux-2.5.62_patched/arch/alpha/kernel/process.c
--- linux-2.5.62/arch/alpha/kernel/process.c	Mon Feb 17 23:56:54 2003
+++ linux-2.5.62_patched/arch/alpha/kernel/process.c	Sat Feb 22 02:02:17 2003
@@ -155,10 +155,7 @@
 	struct halt_info args;
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
-#ifdef CONFIG_SMP
-	smp_call_function(common_shutdown_1, &args, 1, 0);
-#endif
-	common_shutdown_1(&args);
+	on_each_cpu(common_shutdown_1, &args, 1, 0);
 }
 
 void
diff -urP linux-2.5.62/arch/alpha/kernel/smp.c linux-2.5.62_patched/arch/alpha/kernel/smp.c
--- linux-2.5.62/arch/alpha/kernel/smp.c	Mon Feb 17 23:56:10 2003
+++ linux-2.5.62_patched/arch/alpha/kernel/smp.c	Sat Feb 22 02:02:17 2003
@@ -899,10 +899,8 @@
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (smp_call_function(ipi_imb, NULL, 1, 1))
+	if (on_each_cpu(ipi_imb, NULL, 1, 1))
 		printk(KERN_CRIT "smp_imb: timed out\n");
-
-	imb();
 }
 
 static void
@@ -916,11 +914,9 @@
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors.  */
-	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
 	}
-
-	tbia();
 }
 
 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -938,6 +934,8 @@
 void
 flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		flush_tlb_current(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -948,6 +946,7 @@
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -955,6 +954,8 @@
 	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 struct flush_tlb_page_struct {
@@ -981,6 +982,8 @@
 	struct flush_tlb_page_struct data;
 	struct mm_struct *mm = vma->vm_mm;
 
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		flush_tlb_current_page(mm, vma, addr);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -991,6 +994,7 @@
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -1002,6 +1006,8 @@
 	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 void
@@ -1030,6 +1036,8 @@
 	if ((vma->vm_flags & VM_EXEC) == 0)
 		return;
 
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		__load_new_mm_context(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -1040,6 +1048,7 @@
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -1047,6 +1056,8 @@
 	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK
diff -urP linux-2.5.62/arch/i386/kernel/cpuid.c linux-2.5.62_patched/arch/i386/kernel/cpuid.c
--- linux-2.5.62/arch/i386/kernel/cpuid.c	Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/arch/i386/kernel/cpuid.c	Sat Feb 22 02:02:17 2003
@@ -44,8 +44,6 @@
 #include
 #include
 
-#ifdef CONFIG_SMP
-
 struct cpuid_command {
 	int cpu;
 	u32 reg;
@@ -64,24 +62,12 @@
 {
 	struct cpuid_command cmd;
 
-	if ( cpu == smp_processor_id() ) {
-		cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
-	} else {
-		cmd.cpu = cpu;
-		cmd.reg = reg;
-		cmd.data = data;
-
-		smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
-	}
-}
-#else /* ! CONFIG_SMP */
-
-static inline void do_cpuid(int cpu, u32 reg, u32 *data)
-{
-	cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
+	cmd.cpu = cpu;
+	cmd.reg = reg;
+	cmd.data = data;
+
+	on_each_cpu(cpuid_smp_cpuid, &cmd, 1, 1);
 }
-
-#endif /* ! CONFIG_SMP */
 
 static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
 {
diff -urP linux-2.5.62/arch/i386/kernel/io_apic.c linux-2.5.62_patched/arch/i386/kernel/io_apic.c
--- linux-2.5.62/arch/i386/kernel/io_apic.c	Mon Feb 17 23:56:10 2003
+++ linux-2.5.62_patched/arch/i386/kernel/io_apic.c	Sat Feb 22 02:02:17 2003
@@ -1376,8 +1376,7 @@
 
 void print_all_local_APICs (void)
 {
-	smp_call_function(print_local_APIC, NULL, 1, 1);
-	print_local_APIC(NULL);
+	on_each_cpu(print_local_APIC, NULL, 1, 1);
 }
 
 void /*__init*/ print_PIC(void)
@@ -1855,8 +1854,7 @@
 	 */
 	printk(KERN_INFO "activating NMI Watchdog ...");
 
-	smp_call_function(enable_NMI_through_LVT0, NULL, 1, 1);
-	enable_NMI_through_LVT0(NULL);
+	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
 
 	printk(" done.\n");
 }
diff -urP linux-2.5.62/arch/i386/kernel/ldt.c linux-2.5.62_patched/arch/i386/kernel/ldt.c
--- linux-2.5.62/arch/i386/kernel/ldt.c	Mon Feb 17 23:56:25 2003
+++ linux-2.5.62_patched/arch/i386/kernel/ldt.c	Sat Feb 22 02:02:17 2003
@@ -55,13 +55,13 @@
 	wmb();
 	if (reload) {
+		preempt_disable();
 		load_LDT(pc);
 #ifdef CONFIG_SMP
-		preempt_disable();
 		if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
 			smp_call_function(flush_ldt, 0, 1, 1);
-		preempt_enable();
 #endif
+		preempt_enable();
 	}
 	if (oldsize) {
 		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -urP linux-2.5.62/arch/i386/kernel/microcode.c linux-2.5.62_patched/arch/i386/kernel/microcode.c
--- linux-2.5.62/arch/i386/kernel/microcode.c	Mon Feb 17 23:56:02 2003
+++ linux-2.5.62_patched/arch/i386/kernel/microcode.c	Sat Feb 22 02:02:17 2003
@@ -183,11 +183,8 @@
 	int i, error = 0, err;
 	struct microcode *m;
 
-	if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
-		printk(KERN_ERR "microcode: IPI timeout, giving up\n");
+	if (on_each_cpu(do_update_one, NULL, 1, 1) != 0)
 		return -EIO;
-	}
-	do_update_one(NULL);
 
 	for (i=0; iactive_mm && atomic_read(&mm->mm_users) == 1))
+	{
+		local_finish_flush_tlb_mm(mm);
 		return;
+	}
 
 	/*
 	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
@@ -226,7 +226,7 @@
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
 }
 
 /*
diff -urP linux-2.5.62/arch/mips64/kernel/smp.c linux-2.5.62_patched/arch/mips64/kernel/smp.c
--- linux-2.5.62/arch/mips64/kernel/smp.c	Mon Feb 17 23:56:27 2003
+++ linux-2.5.62_patched/arch/mips64/kernel/smp.c	Sat Feb 22 02:02:17 2003
@@ -195,8 +195,7 @@
 
 void flush_tlb_all(void)
 {
-	smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
-	_flush_tlb_all();
+	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -219,6 +218,8 @@
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
+
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
 	} else {
@@ -228,6 +229,8 @@
 			CPU_CONTEXT(i, mm) = 0;
 	}
 	_flush_tlb_mm(mm);
+
+	preempt_enable();
 }
 
 struct flush_tlb_data {
@@ -246,6 +249,8 @@
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end)
 {
+	preempt_disable();
+
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd;
 
@@ -260,6 +265,8 @@
 			CPU_CONTEXT(i, mm) = 0;
 	}
 	_flush_tlb_range(mm, start, end);
+
+	preempt_enable();
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -271,6 +278,8 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	preempt_disable();
+
 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd;
 
@@ -284,5 +293,7 @@
 			CPU_CONTEXT(i, vma->vm_mm) = 0;
 	}
 	_flush_tlb_page(vma, page);
+
+	preempt_enable();
 }
 
diff -urP linux-2.5.62/arch/parisc/kernel/cache.c linux-2.5.62_patched/arch/parisc/kernel/cache.c
--- linux-2.5.62/arch/parisc/kernel/cache.c	Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/arch/parisc/kernel/cache.c	Sat Feb 22 02:02:17 2003
@@ -39,8 +39,7 @@
 void
 flush_data_cache(void)
 {
-	smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
-	flush_data_cache_local();
+	on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
 }
 #endif
 
diff -urP linux-2.5.62/arch/parisc/kernel/irq.c linux-2.5.62_patched/arch/parisc/kernel/irq.c
--- linux-2.5.62/arch/parisc/kernel/irq.c	Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/arch/parisc/kernel/irq.c	Sat Feb 22 02:02:17 2003
@@ -61,20 +61,17 @@
 
 static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;	/* protect IRQ regions */
 
-#ifdef CONFIG_SMP
 static void cpu_set_eiem(void *info)
 {
 	set_eiem((unsigned long) info);
 }
-#endif
 
 static inline void disable_cpu_irq(void *unused, int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
 	cpu_eiem &= ~eirr_bit;
-	set_eiem(cpu_eiem);
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 static void enable_cpu_irq(void *unused, int irq)
@@ -83,8 +80,7 @@
 	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
 
 	cpu_eiem |= eirr_bit;
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
-	set_eiem(cpu_eiem);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 /* mask and disable are the same at the CPU level
@@ -100,8 +96,7 @@
 	** handle *any* unmasked pending interrupts.
 	** ie We don't need to check for pending interrupts here.
 	*/
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
-	set_eiem(cpu_eiem);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 /*
diff -urP linux-2.5.62/arch/parisc/kernel/smp.c linux-2.5.62_patched/arch/parisc/kernel/smp.c
--- linux-2.5.62/arch/parisc/kernel/smp.c	Mon Feb 17 23:57:18 2003
+++ linux-2.5.62_patched/arch/parisc/kernel/smp.c	Sat Feb 22 02:02:17 2003
@@ -401,7 +401,7 @@
 __setup("maxcpus=", maxcpus);
 
 /*
- * Flush all other CPU's tlb and then mine.  Do this with smp_call_function()
+ * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
 * as we want to ensure all TLB's flushed before proceeding.
 */
@@ -410,8 +410,7 @@
 void
 smp_flush_tlb_all(void)
 {
-	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-	flush_tlb_all_local();
+	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
 }
 
diff -urP linux-2.5.62/arch/parisc/mm/init.c linux-2.5.62_patched/arch/parisc/mm/init.c
--- linux-2.5.62/arch/parisc/mm/init.c	Mon Feb 17 23:56:14 2003
+++ linux-2.5.62_patched/arch/parisc/mm/init.c	Sat Feb 22 02:02:17 2003
@@ -974,8 +974,7 @@
 		do_recycle++;
 	}
 	spin_unlock(&sid_lock);
-	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-	flush_tlb_all_local();
+	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
 	if (do_recycle) {
 		spin_lock(&sid_lock);
 		recycle_sids(recycle_ndirty,recycle_dirty_array);
diff -urP linux-2.5.62/arch/ppc/kernel/temp.c linux-2.5.62_patched/arch/ppc/kernel/temp.c
--- linux-2.5.62/arch/ppc/kernel/temp.c	Mon Feb 17 23:56:16 2003
+++ linux-2.5.62_patched/arch/ppc/kernel/temp.c	Sat Feb 22 02:02:17 2003
@@ -194,10 +194,7 @@
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
 
-#ifdef CONFIG_SMP
-	smp_call_function(tau_timeout, NULL, 1, 0);
-#endif
-	tau_timeout(NULL);
+	on_each_cpu(tau_timeout, NULL, 1, 0);
 }
 
 /*
@@ -239,10 +236,7 @@
 	tau_timer.expires = jiffies + shrink_timer;
 	add_timer(&tau_timer);
 
-#ifdef CONFIG_SMP
-	smp_call_function(TAU_init_smp, NULL, 1, 0);
-#endif
-	TAU_init_smp(NULL);
+	on_each_cpu(TAU_init_smp, NULL, 1, 0);
 
 	printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
diff -urP linux-2.5.62/arch/s390/kernel/smp.c linux-2.5.62_patched/arch/s390/kernel/smp.c
--- linux-2.5.62/arch/s390/kernel/smp.c	Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/arch/s390/kernel/smp.c	Sat Feb 22 02:02:17 2003
@@ -228,8 +228,7 @@
 void machine_restart_smp(char * __unused)
 {
 	cpu_restart_map = cpu_online_map;
-	smp_call_function(do_machine_restart, NULL, 0, 0);
-	do_machine_restart(NULL);
+	on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
 
 static void do_machine_halt(void * __unused)
@@ -247,8 +246,7 @@
 
 void machine_halt_smp(void)
 {
-	smp_call_function(do_machine_halt, NULL, 0, 0);
-	do_machine_halt(NULL);
+	on_each_cpu(do_machine_halt, NULL, 0, 0);
 }
 
 static void do_machine_power_off(void * __unused)
@@ -266,8 +264,7 @@
 
 void machine_power_off_smp(void)
 {
-	smp_call_function(do_machine_power_off, NULL, 0, 0);
-	do_machine_power_off(NULL);
+	on_each_cpu(do_machine_power_off, NULL, 0, 0);
 }
 
 /*
@@ -339,8 +336,7 @@
 
 void smp_ptlb_all(void)
 {
-	smp_call_function(smp_ptlb_callback, NULL, 0, 1);
-	local_flush_tlb();
+	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 
 /*
@@ -400,8 +396,7 @@
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 1 << bit;
 	parms.andvals[cr] = 0xFFFFFFFF;
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_set_bit(cr, bit);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
@@ -414,8 +409,7 @@
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 0x00000000;
 	parms.andvals[cr] = ~(1 << bit);
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_clear_bit(cr, bit);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
diff -urP linux-2.5.62/arch/s390x/kernel/smp.c linux-2.5.62_patched/arch/s390x/kernel/smp.c
--- linux-2.5.62/arch/s390x/kernel/smp.c	Mon Feb 17 23:56:15 2003
+++ linux-2.5.62_patched/arch/s390x/kernel/smp.c	Sat Feb 22 02:02:17 2003
@@ -227,8 +227,7 @@
 void machine_restart_smp(char * __unused)
 {
 	cpu_restart_map = cpu_online_map;
-	smp_call_function(do_machine_restart, NULL, 0, 0);
-	do_machine_restart(NULL);
+	on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
 
 static void do_machine_halt(void * __unused)
@@ -246,8 +245,7 @@
 
 void machine_halt_smp(void)
 {
-	smp_call_function(do_machine_halt, NULL, 0, 0);
-	do_machine_halt(NULL);
+	on_each_cpu(do_machine_halt, NULL, 0, 0);
 }
 
 static void do_machine_power_off(void * __unused)
@@ -265,8 +263,7 @@
 
 void machine_power_off_smp(void)
 {
-	smp_call_function(do_machine_power_off, NULL, 0, 0);
-	do_machine_power_off(NULL);
+	on_each_cpu(do_machine_power_off, NULL, 0, 0);
 }
 
 /*
@@ -383,8 +380,7 @@
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 1 << bit;
 	parms.andvals[cr] = -1L;
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_set_bit(cr, bit);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
 /*
@@ -397,8 +393,7 @@
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 0;
 	parms.andvals[cr] = ~(1L << bit);
-	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
-	__ctl_clear_bit(cr, bit);
+	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
 }
 
diff -urP linux-2.5.62/arch/x86_64/kernel/bluesmoke.c linux-2.5.62_patched/arch/x86_64/kernel/bluesmoke.c
--- linux-2.5.62/arch/x86_64/kernel/bluesmoke.c	Mon Feb 17 23:56:55 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/bluesmoke.c	Sat Feb 22 02:02:18 2003
@@ -111,11 +111,7 @@
 {
 	u32 low, high;
 	int i;
-	unsigned int *cpu = info;
-	BUG_ON (*cpu != smp_processor_id());
-
-	preempt_disable();
 	for (i=0; isize = mincount;
 	wmb();
 	if (reload) {
+		preempt_disable();
 		load_LDT(pc);
 #ifdef CONFIG_SMP
-		preempt_disable();
 		if (current->mm->cpu_vm_mask != (1< PAGE_SIZE)
diff -urP linux-2.5.62/arch/x86_64/kernel/msr.c linux-2.5.62_patched/arch/x86_64/kernel/msr.c
--- linux-2.5.62/arch/x86_64/kernel/msr.c	Mon Feb 17 23:55:57 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/msr.c	Sat Feb 22 02:02:18 2003
@@ -120,36 +120,28 @@
 {
 	struct msr_command cmd;
 
-	if ( cpu == smp_processor_id() ) {
-		return wrmsr_eio(reg, eax, edx);
-	} else {
-		cmd.cpu = cpu;
-		cmd.reg = reg;
-		cmd.data[0] = eax;
-		cmd.data[1] = edx;
-
-		smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
-		return cmd.err;
-	}
+	cmd.cpu = cpu;
+	cmd.reg = reg;
+	cmd.data[0] = eax;
+	cmd.data[1] = edx;
+
+	on_each_cpu(msr_smp_wrmsr, &cmd, 1, 1);
+	return cmd.err;
 }
 
 static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
 {
 	struct msr_command cmd;
 
-	if ( cpu == smp_processor_id() ) {
-		return rdmsr_eio(reg, eax, edx);
-	} else {
-		cmd.cpu = cpu;
-		cmd.reg = reg;
-
-		smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
-
-		*eax = cmd.data[0];
-		*edx = cmd.data[1];
+	cmd.cpu = cpu;
+	cmd.reg = reg;
+
+	on_each_cpu(msr_smp_rdmsr, &cmd, 1, 1);
+
+	*eax = cmd.data[0];
+	*edx = cmd.data[1];
 
-		return cmd.err;
-	}
+	return cmd.err;
 }
 
 #else /* ! CONFIG_SMP */
diff -urP linux-2.5.62/arch/x86_64/kernel/smp.c linux-2.5.62_patched/arch/x86_64/kernel/smp.c
--- linux-2.5.62/arch/x86_64/kernel/smp.c	Mon Feb 17 23:56:09 2003
+++ linux-2.5.62_patched/arch/x86_64/kernel/smp.c	Sat Feb 22 02:02:18 2003
@@ -328,7 +328,7 @@
 	preempt_enable();
 }
 
-static inline void do_flush_tlb_all_local(void)
+void do_flush_tlb_all(void* info)
 {
 	unsigned long cpu = smp_processor_id();
 
@@ -337,16 +337,9 @@
 		leave_mm(cpu);
 }
 
-static void flush_tlb_all_ipi(void* info)
-{
-	do_flush_tlb_all_local();
-}
-
 void flush_tlb_all(void)
 {
-	smp_call_function (flush_tlb_all_ipi,0,1,1);
-
-	do_flush_tlb_all_local();
+	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
 }
 
 void smp_kdb_stop(void)
diff -urP linux-2.5.62/arch/x86_64/mm/pageattr.c linux-2.5.62_patched/arch/x86_64/mm/pageattr.c
--- linux-2.5.62/arch/x86_64/mm/pageattr.c	Mon Feb 17 23:56:14 2003
+++ linux-2.5.62_patched/arch/x86_64/mm/pageattr.c	Sat Feb 22 02:02:18 2003
@@ -122,11 +122,8 @@
 }
 
 static inline void flush_map(unsigned long address)
-{
-#ifdef CONFIG_SMP
-	smp_call_function(flush_kernel_map, (void *)address, 1, 1);
-#endif
-	flush_kernel_map((void *)address);
+{
+	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
 }
 
 struct deferred_page {
diff -urP linux-2.5.62/drivers/char/agp/agp.h linux-2.5.62_patched/drivers/char/agp/agp.h
--- linux-2.5.62/drivers/char/agp/agp.h	Mon Feb 17 23:56:13 2003
+++ linux-2.5.62_patched/drivers/char/agp/agp.h	Sat Feb 22 02:02:18 2003
@@ -34,24 +34,10 @@
 
 #define PFX "agpgart: "
 
-#ifdef CONFIG_SMP
-static void ipi_handler(void *null)
-{
-	flush_agp_cache();
-}
-
-static void __attribute__((unused)) global_cache_flush(void)
-{
-	if (smp_call_function(ipi_handler, NULL, 1, 1) != 0)
-		panic(PFX "timed out waiting for the other CPUs!\n");
-	flush_agp_cache();
-}
-#else
 static inline void global_cache_flush(void)
 {
-	flush_agp_cache();
+	on_each_cpu(flush_agp_cache, NULL, 1, 1);
 }
-#endif /* !CONFIG_SMP */
 
 enum aper_size_type {
 	U8_APER_SIZE,
diff -urP linux-2.5.62/drivers/s390/char/sclp.c linux-2.5.62_patched/drivers/s390/char/sclp.c
--- linux-2.5.62/drivers/s390/char/sclp.c	Mon Feb 17 23:56:20 2003
+++ linux-2.5.62_patched/drivers/s390/char/sclp.c	Sat Feb 22 02:02:18 2003
@@ -481,8 +481,7 @@
 do_machine_quiesce(void)
 {
 	cpu_quiesce_map = cpu_online_map;
-	smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
-	do_load_quiesce_psw(NULL);
+	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
 }
 #else
 static void
diff -urP linux-2.5.62/drivers/s390/net/iucv.c linux-2.5.62_patched/drivers/s390/net/iucv.c
--- linux-2.5.62/drivers/s390/net/iucv.c	Mon Feb 17 23:55:52 2003
+++ linux-2.5.62_patched/drivers/s390/net/iucv.c	Sat Feb 22 02:02:18 2003
@@ -617,10 +617,7 @@
 	ulong b2f0_result = 0x0deadbeef;
 
 	iucv_debug(1, "entering");
-	if (smp_processor_id() == 0)
-		iucv_declare_buffer_cpu0(&b2f0_result);
-	else
-		smp_call_function(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
+	on_each_cpu(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
 	iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
 	if (b2f0_result == 0x0deadbeef)
 		b2f0_result = 0xaa;
@@ -639,10 +636,7 @@
 {
 	iucv_debug(1, "entering");
 	if (declare_flag) {
-		if (smp_processor_id() == 0)
-			iucv_retrieve_buffer_cpu0(0);
-		else
-			smp_call_function(iucv_retrieve_buffer_cpu0, 0, 0, 1);
+		on_each_cpu(iucv_retrieve_buffer_cpu0, 0, 0, 1);
 		declare_flag = 0;
 	}
 	iucv_debug(1, "exiting");
diff -urP linux-2.5.62/fs/buffer.c linux-2.5.62_patched/fs/buffer.c
--- linux-2.5.62/fs/buffer.c	Mon Feb 17 23:56:17 2003
+++ linux-2.5.62_patched/fs/buffer.c	Sat Feb 22 02:02:18 2003
@@ -1404,10 +1404,7 @@
 
 static void invalidate_bh_lrus(void)
 {
-	preempt_disable();
-	invalidate_bh_lru(NULL);
-	smp_call_function(invalidate_bh_lru, NULL, 1, 1);
-	preempt_enable();
+	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
 
 void set_bh_page(struct buffer_head *bh,
diff -urP linux-2.5.62/include/asm-alpha/agp.h linux-2.5.62_patched/include/asm-alpha/agp.h
--- linux-2.5.62/include/asm-alpha/agp.h	Mon Feb 17 23:55:50 2003
+++ linux-2.5.62_patched/include/asm-alpha/agp.h	Sat Feb 22 02:02:18 2003
@@ -8,6 +8,10 @@
 #define map_page_into_agp(page)
 #define unmap_page_from_agp(page)
 #define flush_agp_mappings()
-#define flush_agp_cache() mb()
+
+static void flush_agp_cache(void* info)
+{
+	mb();
+}
 
 #endif
diff -urP linux-2.5.62/include/asm-i386/agp.h linux-2.5.62_patched/include/asm-i386/agp.h
--- linux-2.5.62/include/asm-i386/agp.h	Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/include/asm-i386/agp.h	Sat Feb 22 02:02:18 2003
@@ -18,6 +18,9 @@
 /* Could use CLFLUSH here if the cpu supports it. But then it would
    need to be called for each cacheline of the whole page so it may not be
    worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+static void flush_agp_cache(void *info)
+{
+	__asm__ __volatile__ ("wbinvd": : :"memory");
+}
 
 #endif
diff -urP linux-2.5.62/include/asm-ia64/agp.h linux-2.5.62_patched/include/asm-ia64/agp.h
--- linux-2.5.62/include/asm-ia64/agp.h	Mon Feb 17 23:56:15 2003
+++ linux-2.5.62_patched/include/asm-ia64/agp.h	Sat Feb 22 02:02:18 2003
@@ -16,7 +16,14 @@
 #define map_page_into_agp(page)		/* nothing */
 #define unmap_page_from_agp(page)	/* nothing */
 #define flush_agp_mappings()		/* nothing */
-#define flush_agp_cache()		mb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+   need to be called for each cacheline of the whole page so it may not be
+   worth it. Would need a page for it. */
+static void flush_agp_cache(void* info)
+{
+	mb();
+}
 
 /* Page-protection value to be used for AGP memory mapped into kernel space. */
 #define PAGE_AGP	PAGE_KERNEL
diff -urP linux-2.5.62/include/asm-parisc/cacheflush.h linux-2.5.62_patched/include/asm-parisc/cacheflush.h
--- linux-2.5.62/include/asm-parisc/cacheflush.h	Mon Feb 17 23:57:01 2003
+++ linux-2.5.62_patched/include/asm-parisc/cacheflush.h	Sat Feb 22 02:02:18 2003
@@ -25,16 +25,10 @@
 
 extern void flush_cache_all_local(void);
 
-#ifdef CONFIG_SMP
 static inline void flush_cache_all(void)
 {
-	smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
-	flush_cache_all_local();
+	on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
 }
-#else
-#define flush_cache_all flush_cache_all_local
-#endif
-
 
 /* The following value needs to be tuned and probably scaled with the
  * cache size.
diff -urP linux-2.5.62/include/asm-sparc64/agp.h linux-2.5.62_patched/include/asm-sparc64/agp.h
--- linux-2.5.62/include/asm-sparc64/agp.h	Mon Feb 17 23:55:55 2003
+++ linux-2.5.62_patched/include/asm-sparc64/agp.h	Sat Feb 22 02:02:18 2003
@@ -6,6 +6,13 @@
 #define map_page_into_agp(page)
 #define unmap_page_from_agp(page)
 #define flush_agp_mappings()
-#define flush_agp_cache() mb()
+
+/* Could use CLFLUSH here if the cpu supports it. But then it would
+   need to be called for each cacheline of the whole page so it may not be
+   worth it. Would need a page for it. */
+static void flush_agp_cache(void* info)
+{
+	mb();
+}
 
 #endif
diff -urP linux-2.5.62/include/asm-x86_64/agp.h linux-2.5.62_patched/include/asm-x86_64/agp.h
--- linux-2.5.62/include/asm-x86_64/agp.h	Mon Feb 17 23:55:49 2003
+++ linux-2.5.62_patched/include/asm-x86_64/agp.h	Sat Feb 22 02:02:18 2003
@@ -18,6 +18,9 @@
 /* Could use CLFLUSH here if the cpu supports it. But then it would
    need to be called for each cacheline of the whole page so it may not be
    worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+static void flush_agp_cache(void* info)
+{
+	__asm__ __volatile__ ("wbinvd": : :"memory");
+}
 
 #endif
diff -urP linux-2.5.62/include/linux/smp.h linux-2.5.62_patched/include/linux/smp.h
--- linux-2.5.62/include/linux/smp.h	Mon Feb 17 23:56:16 2003
+++ linux-2.5.62_patched/include/linux/smp.h	Sat Feb 22 02:06:57 2003
@@ -10,9 +10,10 @@
 
 #ifdef CONFIG_SMP
 
+#include
 #include
 #include
-#include
+#include
 #include
 #include
 
@@ -54,6 +55,31 @@
 				int retry, int wait);
 
 /*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+			      int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+
+	if(num_online_cpus() == 1)
+		goto only_one;
+
+	ret = smp_call_function(func, info, retry, wait);
+	if(ret != 0)
+		printk(KERN_ERR "%p: IPI timeout, giving up\n",
+		       __builtin_return_address(0));
+
+only_one:
+	func(info);
+	preempt_enable();
+
+	return ret;
+}
+
+/*
 * True once the per process idle is forked
 */
 extern int smp_threads_ready;
@@ -96,6 +122,7 @@
 #define hard_smp_processor_id()			0
 #define smp_threads_ready			1
 #define smp_call_function(func,info,retry,wait)	({ 0; })
+#define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
 static inline void smp_send_reschedule_all(void) { }
 #define cpu_online_map				1
diff -urP linux-2.5.62/mm/slab.c linux-2.5.62_patched/mm/slab.c
--- linux-2.5.62/mm/slab.c	Mon Feb 17 23:56:45 2003
+++ linux-2.5.62_patched/mm/slab.c	Sat Feb 22 02:02:18 2003
@@ -1116,12 +1116,16 @@
 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 {
 	check_irq_on();
+	preempt_disable();
+
 	local_irq_disable();
 	func(arg);
 	local_irq_enable();
 
 	if (smp_call_function(func, arg, 1, 1))
 		BUG();
+
+	preempt_enable();
 }
 
 static void free_block (kmem_cache_t* cachep, void** objpp, int len);
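
A minimal sketch of the conversion pattern the hunks above apply, for reference only; the function names drain_local_counters and drain_all_counters below are hypothetical examples, not code from this patch. Callers that used to pair smp_call_function() with a manual local call, often wrapped in #ifdef CONFIG_SMP, collapse into a single on_each_cpu() call, which also keeps preemption disabled around the local invocation.

/*
 * Hypothetical before/after example of the on_each_cpu() conversion.
 * The callback and its callers are illustrative only.
 */
#include <linux/smp.h>

static void drain_local_counters(void *info)
{
	/* per-CPU work; on_each_cpu() runs this on every online CPU */
}

/* Old pattern: IPI the other CPUs, then call the function locally by hand. */
static void drain_all_counters_old(void)
{
#ifdef CONFIG_SMP
	smp_call_function(drain_local_counters, NULL, 1, 1);
#endif
	drain_local_counters(NULL);
}

/* New pattern: one call covers remote and local CPUs, with preemption
 * disabled around the local invocation. */
static void drain_all_counters_new(void)
{
	on_each_cpu(drain_local_counters, NULL, 1, 1);
}

On UP builds the !CONFIG_SMP macro added in include/linux/smp.h reduces on_each_cpu() to a direct func(info) call, so no #ifdef is needed at the call site.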