author	Jack Miller <jack@codezen.org>	2011-04-14 22:32:05 +0000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-04-27 13:02:16 +1000
commit	f0aae3238fc1c28b543cbaaa0e7c5d57685f5f89 (patch)
tree	7d2f01cebc6ac4079fc069b96ea823a235cfbe7e
parent	1a51dde139d5305b2592c716c50c005d6ab9624b (diff)
powerpc/book3e: Flush IPROT protected TLB entries leftover by firmware
When we set up the TLB for ourselves on Book3E, we need to flush out any old mappings established by the firmware or bootloader. At present we attempt this with a tlbilx to flush everything, but this will leave behind any entries with the IPROT bit set.

There are several good reasons firmware might establish mappings with IPROT, and in fact ePAPR-compliant firmwares are required to establish their initial mapped area with IPROT.

This patch therefore adds more complex code to scan through the TLB upon entry and flush away any entries that are not our own.

Signed-off-by: Jack Miller <jack@codezen.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/include/asm/mmu-book3e.h	1
-rw-r--r--	arch/powerpc/kernel/exceptions-64e.S	45
2 files changed, 45 insertions, 1 deletion
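For context before reading the diff, here is a small host-side model of the problem the commit message describes (purely illustrative, not part of the patch): a tlbilx-style "invalidate all" skips entries whose IPROT flag is set, while rewriting each slot directly with the VALID bit cleared, which is what the patch does with tlbwe, removes them as well. The names below (struct tlb_entry, tlb_invalidate_all, tlb_overwrite_entry) are invented for this sketch.

/* Illustrative host-side model only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct tlb_entry {
	bool valid;
	bool iprot;	/* invalidate-protected, e.g. set by firmware */
};

/* Models a broadcast invalidate (tlbilx-style): IPROT entries survive. */
static void tlb_invalidate_all(struct tlb_entry *tlb, int n)
{
	for (int i = 0; i < n; i++)
		if (!tlb[i].iprot)
			tlb[i].valid = false;
}

/* Models what the patch does instead: write the slot directly with
 * VALID (and IPROT) cleared, which removes even protected entries. */
static void tlb_overwrite_entry(struct tlb_entry *tlb, int esel)
{
	tlb[esel].valid = false;
	tlb[esel].iprot = false;
}

int main(void)
{
	struct tlb_entry tlb[4] = {
		{ .valid = true, .iprot = true  },	/* firmware mapping */
		{ .valid = true, .iprot = false },
		{ .valid = true, .iprot = false },
		{ .valid = false },
	};

	tlb_invalidate_all(tlb, 4);
	printf("after tlbilx-style flush, entry 0 valid: %d\n", tlb[0].valid);

	for (int i = 0; i < 4; i++)
		tlb_overwrite_entry(tlb, i);
	printf("after per-entry rewrite, entry 0 valid: %d\n", tlb[0].valid);
	return 0;
}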
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 80d68af..ec61e7b 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -43,6 +43,7 @@
#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
#define MAS0_NV(x) ((x) & 0x00000FFF)
+#define MAS0_ESEL_MASK 0x0FFF0000
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
#define MAS0_WQ_COND 0x00001000
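A quick illustration of what the new mask is for (a hedged standalone sketch, not part of the patch): MAS0_ESEL(x) places the entry-select value into the 0x0FFF0000 field of MAS0, and MAS0_ESEL_MASK lets code isolate or replace just that field, which is how the rlwinm/rlwimi pair in the assembly hunk below uses it. The macro definitions are copied from this hunk; everything else is invented for the example.

/* Standalone sketch using the macro definitions from this hunk. */
#include <stdio.h>

#define MAS0_TLBSEL(x)	(((x) << 28) & 0x30000000)
#define MAS0_ESEL(x)	(((x) << 16) & 0x0FFF0000)
#define MAS0_ESEL_MASK	0x0FFF0000

int main(void)
{
	unsigned int esel = 42;
	unsigned int mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(esel);

	/* Extract the ESEL field back out, as the assembly does with
	 * rlwinm, then replace it, as it does with rlwimi. */
	unsigned int old = (mas0 & MAS0_ESEL_MASK) >> 16;
	mas0 = (mas0 & ~MAS0_ESEL_MASK) | MAS0_ESEL(old + 1);

	printf("esel=%u -> mas0=0x%08x\n", old, mas0);
	return 0;
}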
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 8fe0fc2..23bd83b 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -886,8 +886,51 @@ have_hes:
bctr
1: /* We are now running at PAGE_OFFSET, clean the TLB of everything
- * else (XXX we should scan for bolted crap from the firmware too)
+ * else (including IPROTed things left by firmware)
+ * r4 = TLBnCFG
+ * r3 = current address (more or less)
*/
+
+ li r5,0
+ mtspr SPRN_MAS6,r5
+ tlbsx 0,r3
+
+ rlwinm r9,r4,0,TLBnCFG_N_ENTRY
+ rlwinm r10,r4,8,0xff
+ addi r10,r10,-1 /* Get inner loop mask */
+
+ li r3,1
+
+ mfspr r5,SPRN_MAS1
+ rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
+
+ mfspr r6,SPRN_MAS2
+ rldicr r6,r6,0,51 /* Extract EPN */
+
+ mfspr r7,SPRN_MAS0
+ rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */
+
+ rlwinm r8,r7,16,0xfff /* Extract ESEL */
+
+2: add r4,r3,r8
+ and r4,r4,r10
+
+ rlwimi r7,r4,16,MAS0_ESEL_MASK
+
+ mtspr SPRN_MAS0,r7
+ mtspr SPRN_MAS1,r5
+ mtspr SPRN_MAS2,r6
+ tlbwe
+
+ addi r3,r3,1
+ and. r4,r3,r10
+
+ bne 3f
+ addis r6,r6,(1<<30)@h
+3:
+ cmpw r3,r9
+ blt 2b
+
PPC_TLBILX(0,0,0)
sync
isync
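To make the control flow of the new assembly easier to follow, here is a hedged, standalone C sketch of the same walk. It is illustrative only: the variable names, the 64-entry fully associative geometry, and the starting ESEL of 5 are assumptions, not values from the patch. The point is that the loop begins one slot past the entry that tlbsx found for the kernel's own mapping and wraps modulo the associativity, so every slot except our own is rewritten with MAS1[VALID] and MAS1[IPROT] cleared, and the EPN template from MAS2 is advanced by 1GB each time the inner index wraps; the existing PPC_TLBILX that follows still flushes anything else that is not IPROT-protected.

/* Standalone sketch (not kernel code) of the scan loop above. */
#include <stdio.h>

int main(void)
{
	unsigned int n_entries = 64;		/* TLBnCFG[NENTRY], assumed */
	unsigned int assoc_mask = 64 - 1;	/* TLBnCFG[ASSOC] - 1, assumed */
	unsigned int our_esel = 5;		/* slot tlbsx found for us, assumed */
	unsigned long long epn = 0;		/* EPN template from MAS2, assumed */

	for (unsigned int i = 1; i < n_entries; i++) {
		unsigned int esel = (i + our_esel) & assoc_mask;

		/* The real code writes MAS0/MAS1/MAS2 and does tlbwe here,
		 * with MAS1[VALID] and MAS1[IPROT] cleared, so whatever
		 * occupied this slot -- IPROT or not -- is removed. */
		printf("overwrite esel %2u at epn 0x%llx\n", esel, epn);

		if (((i + 1) & assoc_mask) == 0)
			epn += 1ULL << 30;	/* step to the next 1GB of EPNs */
	}
	return 0;
}

Running the sketch shows that the ESEL sequence covers every slot except our_esel, which is why the kernel's own mapping survives the scrub.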