Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--  arch/powerpc/kernel/smp.c | 78
1 file changed, 56 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc670..fe04b4a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -18,7 +18,7 @@
#undef DEBUG
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -33,7 +33,7 @@
#include <linux/topology.h>
#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -70,6 +70,10 @@
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
+
+/* State of each CPU during hotplug phases */
+static DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
#else
static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x) (idle_thread_array[(x)])
@@ -104,12 +108,25 @@ int __devinit smp_generic_kick_cpu(int nr)
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start.
*/
- paca[nr].cpu_start = 1;
- smp_mb();
+ if (!paca[nr].cpu_start) {
+ paca[nr].cpu_start = 1;
+ smp_mb();
+ return 0;
+ }
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /*
+ * OK, it's not there, so it might be soft-unplugged; let's
+ * try to bring it back
+ */
+ per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
+ smp_wmb();
+ smp_send_reschedule(nr);
+#endif /* CONFIG_HOTPLUG_CPU */
return 0;
}
-#endif
+#endif /* CONFIG_PPC64 */
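A note on ordering in the replug path just added: the state store is published with smp_wmb() before the wake-up IPI is sent. The following is a reading of the patch sketched as a comment, not verbatim kernel text:

/*
 * Replug ordering in smp_generic_kick_cpu():
 *
 *   per_cpu(cpu_state, nr) = CPU_UP_PREPARE;   publish the new state
 *   smp_wmb();                                 order it before ...
 *   smp_send_reschedule(nr);                   ... the wake-up IPI
 *
 * A parked CPU polling cpu_state will observe CPU_UP_PREPARE once
 * the IPI wakes it; see generic_check_cpu_restart() near the end
 * of this patch.
 */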
static irqreturn_t call_function_action(int irq, void *data)
{
@@ -170,7 +187,7 @@ int smp_request_message_ipi(int virq, int msg)
return 1;
}
#endif
- err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
+ err = request_irq(virq, smp_ipi_action[msg], IRQF_PERCPU,
smp_ipi_name[msg], 0);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
@@ -197,19 +214,18 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
+ /*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+ smp_mb();
message[msg] = 1;
- mb();
+ /*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}
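The comment above states a contract: every cause_ipi implementation must issue a full barrier before raising the interrupt. A minimal sketch of a compliant callback, assuming an MMIO doorbell whose address travels in info->data (the name example_cause_ipi and the doorbell itself are invented for illustration):

static void example_cause_ipi(int cpu, unsigned long data)
{
	smp_mb();	/* order the messages update before the doorbell */
	out_be32((u32 __iomem *)data, 1);	/* hypothetical doorbell write */
}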
-void smp_muxed_ipi_resend(void)
-{
- struct cpu_messages *info = &__get_cpu_var(ipi_message);
-
- if (info->messages)
- smp_ops->cause_ipi(smp_processor_id(), info->data);
-}
-
irqreturn_t smp_ipi_demux(void)
{
struct cpu_messages *info = &__get_cpu_var(ipi_message);
@@ -218,7 +234,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */
do {
- all = xchg_local(&info->messages, 0);
+ all = xchg(&info->messages, 0);
#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
@@ -238,15 +254,26 @@ irqreturn_t smp_ipi_demux(void)
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
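For the __BIG_ENDIAN test in smp_ipi_demux() above: message[msg] = 1 sets the least significant bit of byte msg within the 32-bit messages word, and big-endian PowerPC puts byte 0 in the most significant position, which gives the bit positions below (assuming the conventional PPC_MSG_* numbering 0 through 3):

/*
 * Byte-to-bit mapping behind (1 << (24 - 8 * msg)):
 *
 *   PPC_MSG_CALL_FUNCTION    (0) -> byte 0 -> bit 24
 *   PPC_MSG_RESCHEDULE       (1) -> byte 1 -> bit 16
 *   PPC_MSG_CALL_FUNC_SINGLE (2) -> byte 2 -> bit  8
 *   PPC_MSG_DEBUGGER_BREAK   (3) -> byte 3 -> bit  0
 */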
+static inline void do_message_pass(int cpu, int msg)
+{
+ if (smp_ops->message_pass)
+ smp_ops->message_pass(cpu, msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ else
+ smp_muxed_ipi_message_pass(cpu, msg);
+#endif
+}
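do_message_pass() centralises the choice between a platform-supplied per-message IPI hook and the generic muxed path. Two hypothetical smp_ops configurations exercising each branch (field values are invented; only the shape of struct smp_ops_t is assumed):

/* Direct path: the platform delivers one IPI per message type. */
struct smp_ops_t direct_ipi_ops = {
	.message_pass	= direct_message_pass,	/* invented callback */
};

/* Muxed path: no message_pass, so the generic code packs messages
 * into one word and fires a single IPI via cause_ipi. */
struct smp_ops_t muxed_ipi_ops = {
	.message_pass	= NULL,
	.cause_ipi	= example_cause_ipi,	/* see the sketch above */
};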
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
- smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+ do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+ do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -254,7 +281,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
unsigned int cpu;
for_each_cpu(cpu, mask)
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+ do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -268,7 +295,7 @@ void smp_send_debugger_break(void)
for_each_online_cpu(cpu)
if (cpu != me)
- smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
@@ -303,6 +330,10 @@ struct thread_info *current_set[NR_CPUS];
static void __devinit smp_store_cpu_info(int id)
{
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ per_cpu(next_tlbcam_idx, id)
+ = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
+#endif
}
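On the Freescale Book3E hunk above: the NENTRY field of TLB1CFG reports the number of TLBCAM (TLB1) entries, so the expression seeds each CPU's next_tlbcam_idx with the highest valid entry index. A worked example with an assumed 64-entry TLB1:

/*
 * Assumed example: mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY == 64,
 * so per_cpu(next_tlbcam_idx, id) starts at 63, the last valid
 * TLB1 entry index for that core.
 */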
void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -350,8 +381,6 @@ void __devinit smp_prepare_boot_cpu(void)
}
#ifdef CONFIG_HOTPLUG_CPU
-/* State of each CPU during hotplug phases */
-static DEFINE_PER_CPU(int, cpu_state) = { 0 };
int generic_cpu_disable(void)
{
@@ -399,6 +428,11 @@ void generic_set_cpu_dead(unsigned int cpu)
{
per_cpu(cpu_state, cpu) = CPU_DEAD;
}
+
+int generic_check_cpu_restart(unsigned int cpu)
+{
+ return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
+}
#endif
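To show how the two new helpers are meant to be consumed, here is a minimal sketch of a platform cpu_die() loop pairing with smp_generic_kick_cpu(); platform_cpu_die() is an invented name, and the low-power wait is reduced to cpu_relax():

static void platform_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	generic_set_cpu_dead(cpu);		/* advertise CPU_DEAD */
	while (!generic_check_cpu_restart(cpu))
		cpu_relax();	/* woken by smp_generic_kick_cpu()'s IPI */
	/* from here the CPU re-enters secondary startup */
}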
struct create_idle {