Index: linux-2.5.59-bk/arch/ppc64/kernel/smp.c
===================================================================
RCS file: /build/cvsroot/linux-2.5.59-bk/arch/ppc64/kernel/smp.c,v
retrieving revision 1.1.1.1
diff -u -r1.1.1.1 smp.c
--- linux-2.5.59-bk/arch/ppc64/kernel/smp.c 9 Feb 2003 09:08:33 -0000 1.1.1.1
+++ linux-2.5.59-bk/arch/ppc64/kernel/smp.c 9 Feb 2003 09:23:30 -0000
@@ -430,7 +430,7 @@
void smp_send_stop(void)
{
- smp_call_function(stop_this_cpu, NULL, 1, 0);
+ smp_call_function(stop_this_cpu, NULL, 0);
}
/*
@@ -458,7 +458,6 @@
* [SUMMARY] Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
* <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
* <wait> If true, wait (atomically) until function has completed on other CPUs.
* [RETURNS] 0 on success, else a negative status code. Does not return until
* remote CPUs are nearly ready to execute <<func>> or have executed it.
@@ -466,8 +465,7 @@
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
- int wait)
+int smp_call_function (void (*func) (void *info), void *info, int wait)
{
struct call_data_struct data;
int ret = -1, cpus = num_online_cpus()-1;
@@ -525,6 +523,95 @@
out:
HMT_medium();
spin_unlock(&call_lock);
+ return ret;
+}
+
+/*
+ * smp_call_function_on_cpu - Runs func on all processors in the mask
+ *
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
+ * @mask: The bitmask of CPUs on which to call the function
+ *
+ * Returns 0 on success, else a negative status code. Does not return until
+ * remote CPUs are nearly ready to execute func or have executed it.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+
+int smp_call_function_on_cpu (void (*func) (void *info), void *info, int wait,
+ unsigned long mask)
+{
+ struct call_data_struct data;
+ int ret = 0, cpu, i, num_cpus = hweight64(mask);
+ unsigned long timeout;
+
+ if (num_cpus == 0)
+ return -EINVAL;
+
+ cpu = get_cpu();
+ if ((1UL << cpu) & mask) {
+ put_cpu_no_resched();
+ return -EINVAL;
+ }
+
+ data.func = func;
+ data.info = info;
+ atomic_set(&data.started, 0);
+ data.wait = wait;
+ if (wait)
+ atomic_set(&data.finished, 0);
+
+ spin_lock(&call_lock);
+ call_data = &data;
+ wmb();
+ /* Send a message to all other CPUs and wait for them to respond */
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_online(i) && ((1UL << i) & mask))
+ smp_message_pass(i, PPC_MSG_CALL_FUNCTION, 0, 0);
+ }
+
+ /* Wait for response */
+ timeout = SMP_CALL_TIMEOUT;
+ while (atomic_read(&data.started) != num_cpus) {
+ HMT_low();
+ if (--timeout == 0) {
+ if (debugger)
+ debugger(0);
+ printk("smp_call_function_on_cpu on cpu %d: other cpus "
+ "not responding (%d)\n", cpu,
+ atomic_read(&data.started));
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (wait) {
+ timeout = SMP_CALL_TIMEOUT;
+ while (atomic_read(&data.finished) != num_cpus) {
+ HMT_low();
+ if (--timeout == 0) {
+ if (debugger)
+ debugger(0);
+ printk("smp_call_function_on_cpu on cpu %d: other "
+ "cpus not finishing (%d/%d)\n",
+ cpu,
+ atomic_read(&data.finished),
+ atomic_read(&data.started));
+ ret = -EIO;
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ HMT_medium();
+ spin_unlock(&call_lock);
+ put_cpu_no_resched();
return ret;
}
-
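
For reference, a minimal sketch of how a caller might use the changed
smp_call_function() and the new smp_call_function_on_cpu(); the
say_hello() handler, the example() wrapper and the CPU mask below are
hypothetical, purely for illustration. Both calls must be made with
interrupts enabled and outside interrupt context, and the calling CPU
must not be part of the mask passed to smp_call_function_on_cpu().

	/* hypothetical IPI handler: must be fast and non-blocking */
	static void say_hello(void *info)
	{
		printk("hello from cpu %d\n", smp_processor_id());
	}

	static int example(void)
	{
		int rc;

		/* run on every other online CPU and wait for completion */
		rc = smp_call_function(say_hello, NULL, 1);
		if (rc)
			return rc;

		/* run only on CPUs 1 and 2 (hypothetical mask), no wait */
		return smp_call_function_on_cpu(say_hello, NULL, 0,
						(1UL << 1) | (1UL << 2));
	}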