author     Paul Mackerras <paulus@samba.org>                  2011-05-25 23:34:12 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2011-06-20 11:21:32 +1000
commit     9ca980dce523760ce04a798470d36fd5aa596b78 (patch)
tree       bef37cf5113f56a8e77d6bebf2f57a9df9ae8c21 /arch/powerpc/kernel/smp.c
parent     7ac87abb8166b99584149fcfb2efef5773a078e9 (diff)
powerpc: Avoid extra indirect function call in sending IPIs
On many platforms (including pSeries), smp_ops->message_pass is always
smp_muxed_ipi_message_pass.  This changes arch/powerpc/kernel/smp.c so
that if smp_ops->message_pass is NULL, it calls
smp_muxed_ipi_message_pass directly.  This means that a platform doesn't
need to set both .message_pass and .cause_ipi, only one of them.

It is a slight performance improvement in that it gets rid of an
indirect function call at the expense of a predictable conditional
branch.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
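To illustrate what this buys a platform port: a board that uses the muxed IPI
mechanism can now register only a .cause_ipi callback and leave .message_pass
unset, relying on the fallback added in the diff below.  The sketch that
follows is hypothetical and not part of this commit; the example_* names are
invented, and the cause_ipi signature is assumed to match this kernel version.

/*
 * Hypothetical platform code (illustration only, not from this commit):
 * with the new fallback in do_message_pass(), a platform on the muxed IPI
 * path only needs to provide .cause_ipi; .message_pass can stay NULL.
 */
static void example_cause_ipi(int cpu, unsigned long data)
{
	/* platform-specific doorbell or interrupt-controller write here */
}

static struct smp_ops_t example_smp_ops = {
	/* .message_pass deliberately left NULL:
	 * smp.c now falls back to smp_muxed_ipi_message_pass() */
	.cause_ipi	= example_cause_ipi,
};

In the common case this trades the indirect call through smp_ops->message_pass
for a well-predicted NULL check in do_message_pass(), as the commit message
notes.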
Diffstat (limited to 'arch/powerpc/kernel/smp.c')
-rw-r--r--  arch/powerpc/kernel/smp.c  18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc670..2975f64 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -238,15 +238,25 @@ irqreturn_t smp_ipi_demux(void)
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
+static inline void do_message_pass(int cpu, int msg)
+{
+ if (smp_ops->message_pass)
+ smp_ops->message_pass(cpu, msg);
+#ifdef CONFIG_PPC_SMP_MUXED_IPI
+ else
+ smp_muxed_ipi_message_pass(cpu, msg);
+#endif
+}
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
- smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
+ do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
void arch_send_call_function_single_ipi(int cpu)
{
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+ do_message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -254,7 +264,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
unsigned int cpu;
for_each_cpu(cpu, mask)
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+ do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
@@ -268,7 +278,7 @@ void smp_send_debugger_break(void)
for_each_online_cpu(cpu)
if (cpu != me)
- smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
+ do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif