
From: Li Shaohua <shaohua.li@intel.com>

After a CPU is booted but before it is officially up (i.e. before it is
set in the online map and has interrupts enabled), the CPU may receive a
broadcast IPI.  Once it comes up it will handle that stale interrupt,
which can cause an oops if it is a smp-call-function interrupt.  This is
quite likely in the CPU hotplug case, but can hardly happen at boot time.
This patch replaces the broadcast IPI with send_IPI_mask(), just as the
clustered mode already does.
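
A minimal sketch of the pattern the patch applies (all names are taken
from the patch itself; locking and the surrounding call_data setup are
omitted):

	cpumask_t mask = cpu_online_map;	/* snapshot the online map */

	cpu_clear(smp_processor_id(), mask);	/* never IPI ourselves */

	/*
	 * Only CPUs that were online when the snapshot was taken receive
	 * the IPI, so a CPU that has booted but is not yet online cannot
	 * pick up a stale CALL_FUNCTION_VECTOR.
	 */
	send_IPI_mask(mask, CALL_FUNCTION_VECTOR);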

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Cc: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 arch/i386/kernel/smp.c   |   15 ++++++++-------
 arch/x86_64/kernel/smp.c |    4 +++-
 2 files changed, 11 insertions(+), 8 deletions(-)

diff -puN arch/i386/kernel/smp.c~broadcast-ipi-race-condition-on-cpu-hotplug arch/i386/kernel/smp.c
--- 25/arch/i386/kernel/smp.c~broadcast-ipi-race-condition-on-cpu-hotplug	Fri Apr 29 16:28:43 2005
+++ 25-akpm/arch/i386/kernel/smp.c	Fri Apr 29 16:30:12 2005
@@ -494,13 +494,9 @@ struct call_data_struct {
 static struct call_data_struct * call_data;
 
 /*
- * this function sends a 'generic call function' IPI to all other CPUs
+ * This function sends a 'generic call function' IPI to all other CPUs
  * in the system.
- */
-
-int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-			int wait)
-/*
+ *
  * [SUMMARY] Run a function on all other CPUs.
  * <func> The function to run. This must be fast and non-blocking.
  * <info> An arbitrary pointer to pass to the function.
@@ -512,9 +508,12 @@ int smp_call_function (void (*func) (voi
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
+int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
+			int wait)
 {
 	struct call_data_struct data;
 	int cpus = num_online_cpus()-1;
+	cpumask_t mask;
 
 	if (!cpus)
 		return 0;
@@ -531,10 +530,12 @@ int smp_call_function (void (*func) (voi
 
 	spin_lock(&call_lock);
 	call_data = &data;
+	mask = cpu_online_map;
+	cpu_clear(smp_processor_id(), mask);
 	mb();
 	
 	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+	send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
 	/* Wait for response */
 	while (atomic_read(&data.started) != cpus)
diff -puN arch/x86_64/kernel/smp.c~broadcast-ipi-race-condition-on-cpu-hotplug arch/x86_64/kernel/smp.c
--- 25/arch/x86_64/kernel/smp.c~broadcast-ipi-race-condition-on-cpu-hotplug	Fri Apr 29 16:28:43 2005
+++ 25-akpm/arch/x86_64/kernel/smp.c	Fri Apr 29 16:28:43 2005
@@ -292,10 +292,12 @@ static void __smp_call_function (void (*
 {
 	struct call_data_struct data;
 	int cpus = num_online_cpus()-1;
+	cpumask_t mask = cpu_online_map;
 
 	if (!cpus)
 		return;
 
+	cpu_clear(smp_processor_id(), mask);
 	data.func = func;
 	data.info = info;
 	atomic_set(&data.started, 0);
@@ -306,7 +308,7 @@ static void __smp_call_function (void (*
 	call_data = &data;
 	wmb();
 	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+	send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
 	/* Wait for response */
 	while (atomic_read(&data.started) != cpus)
_
