Re: [PATCH][2.5][3/14] smp_call_function_on_cpu - ia64

From: Zwane Mwaikambo (zwane@holomorphy.com)
Date: Fri Feb 14 2003 - 07:40:53 EST


I had to add a one-liner to fix SMP kernels on UP boxen, where we were
returning -EINVAL when num_cpus == 0.
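
For context, a minimal caller sketch (illustrative only, not part of the patch;
example_func and example_caller are made-up names): on a UP box the online map
minus the local CPU is empty, so the call should simply return 0 rather than
fail with -EINVAL.

static void example_func(void *info)
{
	/* must be fast and non-blocking */
}

static int example_caller(void)
{
	/*
	 * On UP, cpu_online_map contains only the local CPU; once the local
	 * CPU is masked out the effective mask is empty and the call is a
	 * successful no-op (returns 0).
	 */
	return smp_call_function_on_cpu(example_func, NULL, 1, cpu_online_map);
}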

Index: linux-2.5.60/arch/ia64/kernel/smp.c
===================================================================
RCS file: /build/cvsroot/linux-2.5.60/arch/ia64/kernel/smp.c,v
retrieving revision 1.1.1.1
diff -u -r1.1.1.1 smp.c
--- linux-2.5.60/arch/ia64/kernel/smp.c 10 Feb 2003 22:15:36 -0000 1.1.1.1
+++ linux-2.5.60/arch/ia64/kernel/smp.c 14 Feb 2003 12:18:27 -0000
@@ -230,29 +230,33 @@
 }
 
 /*
- * Run a function on another CPU
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> Currently unused.
- * <wait> If true, wait until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
+ * smp_call_function_on_cpu - Runs func on all processors in the mask (the calling CPU is excluded)
  *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ * @mask: The bitmask of CPUs to call the function on
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute func or have executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
  */
 
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
- int wait)
+int smp_call_function_on_cpu (void (*func) (void *info), void *info, int wait,
+ unsigned long mask)
 {
         struct call_data_struct data;
- int cpus = 1;
+ int num_cpus, cpu, i;
 
- if (cpuid == smp_processor_id()) {
- printk("%s: trying to call self\n", __FUNCTION__);
- return -EBUSY;
+ cpu = get_cpu();
+ mask &= ~(1UL << cpu);
+ num_cpus = hweight64(mask);
+ if (num_cpus == 0) {
+ put_cpu_no_resched();
+ return 0;
         }
-
         data.func = func;
         data.info = info;
         atomic_set(&data.started, 0);
@@ -264,73 +268,56 @@
 
         call_data = &data;
         mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_single(cpuid, IPI_CALL_FUNC);
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_online(i) && ((1UL << i) & mask))
+ send_IPI_single(i, IPI_CALL_FUNC);
+ }
 
         /* Wait for response */
- while (atomic_read(&data.started) != cpus)
+ while (atomic_read(&data.started) != num_cpus)
                 barrier();
 
         if (wait)
- while (atomic_read(&data.finished) != cpus)
+ while (atomic_read(&data.finished) != num_cpus)
                         barrier();
         call_data = NULL;
 
         spin_unlock_bh(&call_lock);
+ put_cpu_no_resched();
         return 0;
 }
 
+/* This is here for API compatibility reasons, please remove in 2.7 */
+int smp_call_function_single (int cpuid, void (*func) (void *info), void *info,
+ int retry, int wait)
+{
+ return smp_call_function_on_cpu(func, info, wait, 1UL << cpuid);
+}
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
  */
 
 /*
- * [SUMMARY] Run a function on all other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code.
+ * smp_call_function - Runs func on all other processors
  *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: unused
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute func or have executed it.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
+
 int
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
- struct call_data_struct data;
- int cpus = num_online_cpus()-1;
-
- if (!cpus)
- return 0;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- spin_lock(&call_lock);
-
- call_data = &data;
- mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
- send_IPI_allbutself(IPI_CALL_FUNC);
-
- /* Wait for response */
- while (atomic_read(&data.started) != cpus)
- barrier();
-
- if (wait)
- while (atomic_read(&data.finished) != cpus)
- barrier();
- call_data = NULL;
-
- spin_unlock(&call_lock);
- return 0;
+ return smp_call_function_on_cpu(func, info, wait, cpu_online_map);
 }
 
 void
-
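
Just to illustrate the compatibility wrapper above (example values only; CPU 3
and example_func are arbitrary), an old-style single-CPU call maps onto the new
mask-based interface like so:

	/* old interface, kept for compatibility */
	smp_call_function_single(3, example_func, NULL, 0, 1);

	/* equivalent call through the new interface */
	smp_call_function_on_cpu(example_func, NULL, 1, 1UL << 3);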