From 7a07d3c3824ca293447d4d9c48c0aeb378120835 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Date: Mon, 6 May 2013 10:51:00 +0000
Subject: powerpc: Fix build errors STRICT_MM_TYPECHECKS

commit 83d5e64b7efa7f39b10ff5e92792e807a720289c upstream.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/powerpc/include/asm/pte-hash64-64k.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc/include/asm/pte-hash64-64k.h')

diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 59247e8..4169eac 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,7 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)	((real_pte_t) { \
-	(e), ((e) & _PAGE_COMBO) ? \
+	(e), (pte_val(e) & _PAGE_COMBO) ? \
 		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
--
cgit v1.1


From f9b211847bfb9c50483c7b62875e9aefbbce6f78 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Date: Wed, 13 Aug 2014 12:32:03 +0530
Subject: powerpc/mm: Use read barrier when creating real_pte

commit 85c1fafd7262e68ad821ee1808686b1392b1167d upstream.

On ppc64 we support 4K hash ptes with a 64K page size. That requires us
to track the hash pte slot information on a per-4K basis. We do that by
storing the slot details in the second half of the pte page. The pte
bit _PAGE_COMBO is used to indicate whether the second half needs to be
looked at while building the real_pte. We need to use a read memory
barrier while doing that, so that the load of hidx is not reordered
w.r.t. the _PAGE_COMBO check. On the store side we already do an lwsync
in __hash_page_4K.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[bwh: Backported to 3.2: include <asm/system.h> to ensure smp_rmb() is
 defined; cell_defconfig fails to build without this]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 arch/powerpc/include/asm/pte-hash64-64k.h | 32 ++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)

(limited to 'arch/powerpc/include/asm/pte-hash64-64k.h')
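The barrier pairing described in the commit message is the classic
message-passing pattern: the store side publishes hidx and only then
sets _PAGE_COMBO, so the load side must not let the hidx read be
reordered ahead of the _PAGE_COMBO check. A minimal userspace sketch of
that pattern in C11 atomics follows; it is illustration only, not
kernel code, and every name in it is a hypothetical stand-in.

    #include <stdatomic.h>

    static unsigned long hidx;      /* stand-in for the second-half slot info */
    static atomic_ulong pte_word;   /* stand-in for the pte itself */
    #define MY_PAGE_COMBO 0x1UL     /* hypothetical flag bit */

    /* Store side: publish hidx before setting the flag.  In the kernel
     * this ordering comes from the lwsync in __hash_page_4K. */
    static void publish_slot(unsigned long slot)
    {
            hidx = slot;
            atomic_store_explicit(&pte_word, MY_PAGE_COMBO,
                                  memory_order_release);
    }

    /* Load side: order the hidx load after the flag check.  The acquire
     * fence plays the role of the smp_rmb() added by this patch. */
    static unsigned long read_slot(void)
    {
            unsigned long v = atomic_load_explicit(&pte_word,
                                                   memory_order_relaxed);
            if (v & MY_PAGE_COMBO) {
                    atomic_thread_fence(memory_order_acquire);
                    return hidx;    /* ordered after the flag check */
            }
            return 0;
    }

Without the fence, the CPU may load hidx speculatively before the flag
check and return a stale value even though _PAGE_COMBO is already set.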
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 4169eac..4ac50bb 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -40,17 +40,39 @@
 #ifndef __ASSEMBLY__
 
+#include <asm/system.h>	/* for smp_rmb() */
+
 /*
  * With 64K pages on hash table, we have a special PTE format that
  * uses a second "half" of the page table to encode sub-page information
  * in order to deal with 64K made of 4K HW pages. Thus we override the
  * generic accessors and iterators here
  */
-#define __real_pte(e,p)	((real_pte_t) { \
-	(e), (pte_val(e) & _PAGE_COMBO) ? \
-		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
-#define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
-	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+	real_pte_t rpte;
+
+	rpte.pte = pte;
+	rpte.hidx = 0;
+	if (pte_val(pte) & _PAGE_COMBO) {
+		/*
+		 * Make sure we order the hidx load against the _PAGE_COMBO
+		 * check. The store side ordering is done in __hash_page_4K
+		 */
+		smp_rmb();
+		rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
+	}
+	return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+	if ((pte_val(rpte.pte) & _PAGE_COMBO))
+		return (rpte.hidx >> (index<<2)) & 0xf;
+	return (pte_val(rpte.pte) >> 12) & 0xf;
+}
+
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_sub_valid(rpte, index) \
 	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
 
--
cgit v1.1
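As background to the first patch's subject line: with the kernel's
STRICT_MM_TYPECHECKS option enabled, pte_t becomes a one-member struct
instead of a bare unsigned long, so an expression like "(e) &
_PAGE_COMBO" no longer compiles and the value must first be unwrapped
with pte_val(). A simplified model of the two configurations (a toy
sketch, not the kernel's actual headers; the flag value is made up):

    /* Toy model of STRICT_MM_TYPECHECKS; not the real asm/page.h. */
    #ifdef STRICT_MM_TYPECHECKS
    typedef struct { unsigned long pte; } pte_t;  /* struct: no bitwise ops */
    #define pte_val(x)  ((x).pte)
    #else
    typedef unsigned long pte_t;                  /* plain integer type */
    #define pte_val(x)  (x)
    #endif

    #define MY_PAGE_COMBO 0x10000000UL            /* hypothetical bit value */

    static unsigned long is_combo(pte_t e)
    {
            /* "e & MY_PAGE_COMBO" fails to build when pte_t is a struct;
             * unwrapping with pte_val() works in both configurations. */
            return pte_val(e) & MY_PAGE_COMBO;
    }

Wrapping the pte in a struct lets the compiler catch accidental mixing
of pte values with plain integers, which is exactly the class of error
the first patch fixes.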