author	David Daney <david.daney@cavium.com>	2015-08-03 17:48:43 -0700
committer	Ben Hutchings <ben@decadent.org.uk>	2015-10-13 03:46:02 +0100
commit	a7c4bf9e8f7f847d7a5b33c40efde8541d652c41
tree	15371cc344f350c302bf18a5a41dc621050cf825	/arch
parent	bc230ada485b9bcd42bfe17b8010ecfa21ed526b
MIPS: Make set_pte() SMP safe.
commit 46011e6ea39235e4aca656673c500eac81a07a17 upstream.

On MIPS the GLOBAL bit of the PTE must have the same value in any
aligned pair of PTEs. These pairs of PTEs are referred to as
"buddies". In an SMP system it is possible for two CPUs to be calling
set_pte() on adjacent PTEs at the same time. There is a race between
setting the PTE and a different CPU setting the GLOBAL bit in its
buddy PTE.

This race can be observed when multiple CPUs are executing
vmap()/vfree() at the same time.

Make setting the buddy PTE's GLOBAL bit an atomic operation to close
the race condition.

The case of CONFIG_64BIT_PHYS_ADDR && CONFIG_CPU_MIPS32 is *not*
handled.

Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/10835/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
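For readers who don't write MIPS assembly, here is a minimal userspace sketch in plain C of what the ll/sc loop in the patch below achieves: atomically set the GLOBAL bit in the buddy PTE, but only while the buddy is still none (zero); a non-none buddy must already carry the bit. Everything in the sketch is an illustrative assumption rather than kernel code: the helper name set_buddy_global, the placeholder PAGE_GLOBAL value, the bare unsigned long standing in for a PTE, and GCC's strong compare-and-swap builtin standing in for the hand-rolled ll/sc sequence.

	/* Illustrative sketch only, not kernel code. */
	#include <stdio.h>

	#define PAGE_GLOBAL 0x1UL	/* placeholder; the real _PAGE_GLOBAL is arch-specific */

	static void set_buddy_global(unsigned long *buddy)
	{
		unsigned long expected = 0;

		/*
		 * CAS(buddy, 0, PAGE_GLOBAL): install the GLOBAL bit only if
		 * the buddy is still none. If another CPU installed a PTE in
		 * the meantime, that PTE already carries GLOBAL, so doing
		 * nothing is correct. A plain
		 * "if (pte_none(*buddy)) ... |= _PAGE_GLOBAL;" leaves a
		 * window between the check and the store, which is exactly
		 * the race the patch closes.
		 */
		__atomic_compare_exchange_n(buddy, &expected, PAGE_GLOBAL,
					    0, __ATOMIC_RELAXED,
					    __ATOMIC_RELAXED);
	}

	int main(void)
	{
		unsigned long buddy = 0;

		set_buddy_global(&buddy);
		printf("buddy = %#lx\n", buddy);	/* prints 0x1 */
		return 0;
	}

Note that a generic atomic does not cover every configuration, which is presumably why the CONFIG_64BIT_PHYS_ADDR && CONFIG_CPU_MIPS32 case is left unhandled: there the PTE is wider than the native word, so a single ll/sc pair cannot update it.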
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/include/asm/pgtable.h	31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index b2202a6..95bcedb 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -153,8 +153,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 		 * Make sure the buddy is global too (if it's !none,
 		 * it better already be global)
 		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+		"	.set	push\n"
+		"	.set	noreorder\n"
+		"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+		"	bnez	%[tmp], 2f\n"
+		"	 or	%[tmp], %[tmp], %[global]\n"
+		"	" SC_INSN "	%[tmp], %[buddy]\n"
+		"	beqz	%[tmp], 1b\n"
+		"	 nop\n"
+		"2:\n"
+		"	.set	pop"
+		: [buddy] "+m" (buddy->pte),
+		  [tmp] "=&r" (tmp)
+		: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
 		if (pte_none(*buddy))
 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
 	}
 #endif
 }
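A reading note on the inline assembly above, since MIPS branch delay slots are easy to misread: under .set noreorder the assembler neither reorders instructions nor fills delay slots, so the instruction textually following each branch executes whether or not the branch is taken (the kernel marks such instructions with an extra leading space). The excerpt below is the 64-bit flavour of the loop with comments added purely as a reading aid, not new code:

	1:	lld	%[tmp], %[buddy]	# load-linked: read the buddy PTE, open a reservation
		bnez	%[tmp], 2f		# buddy non-none: GLOBAL must already be set, so done
		 or	%[tmp], %[tmp], %[global]	# delay slot: runs even when branching (result then discarded)
		scd	%[tmp], %[buddy]	# store-conditional: succeeds only if buddy was untouched
		beqz	%[tmp], 1b		# scd leaves 0 in %[tmp] on failure, so retry from 1:
		 nop				# delay slot filler for the backward branch
	2: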