From f67f4ef5fcdfdeeddcb0ed4ab2c85d9bb4185d5f Mon Sep 17 00:00:00 2001
From: Scott Wood
Date: Wed, 22 Jun 2011 11:25:42 +0000
Subject: powerpc/book3e-64: use a separate TLB handler when linear map is bolted

On MMUs such as FSL where we can guarantee the entire linear mapping is
bolted, we don't need to worry about linear TLB misses.  If on top of
that we do a full table walk, we get rid of all recursive TLB faults,
and can dispense with some state saving.  This gains a few percent on
TLB-miss-heavy workloads, and around 50% on a benchmark that had a high
rate of virtual page table faults under the normal handler.

While touching the EX_TLB layout, remove EX_TLB_MMUCR0, EX_TLB_SRR0,
and EX_TLB_SRR1 as they're not used.

[BenH: Fixed build with 64K pages (wsp config)]

Signed-off-by: Scott Wood
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/paca.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 7412676..c1f65f5 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -103,11 +103,12 @@ struct paca_struct {
 #endif /* CONFIG_PPC_STD_MMU_64 */
 
 #ifdef CONFIG_PPC_BOOK3E
-	pgd_t *pgd;			/* Current PGD */
-	pgd_t *kernel_pgd;		/* Kernel PGD */
 	u64 exgen[8] __attribute__((aligned(0x80)));
+	/* Keep pgd in the same cacheline as the start of extlb */
+	pgd_t *pgd __attribute__((aligned(0x80)));	/* Current PGD */
+	pgd_t *kernel_pgd;		/* Kernel PGD */
 	/* We can have up to 3 levels of reentrancy in the TLB miss handler */
-	u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80)));
+	u64 extlb[3][EX_TLB_SIZE / sizeof(u64)];
 	u64 exmc[8];		/* used for machine checks */
 	u64 excrit[8];		/* used for crit interrupts */
 	u64 exdbg[8];		/* used for debug interrupts */
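
As an aside on the layout change above: the alignment moves from extlb to
pgd, and pgd/kernel_pgd now sit immediately before extlb, so the bolted
TLB miss handler's first access pulls the PGD pointer in with the same
128-byte (0x80) cacheline as its save area.  Below is a minimal
user-space sketch of that idea, not kernel code: demo_paca, CACHELINE,
and the extlb inner bound (12) are hypothetical stand-ins for
paca_struct, the real line size define, and EX_TLB_SIZE / sizeof(u64).

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CACHELINE 0x80	/* assumed 128-byte cachelines, as in the patch */

/* Stand-in for the Book3E part of paca_struct: pgd is aligned to a
 * cacheline boundary and extlb follows right behind it, so the two
 * share the first line of that region. */
struct demo_paca {
	uint64_t exgen[8] __attribute__((aligned(CACHELINE)));
	void *pgd __attribute__((aligned(CACHELINE)));	/* current PGD */
	void *kernel_pgd;				/* kernel PGD */
	uint64_t extlb[3][12];				/* TLB-miss save areas */
};

int main(void)
{
	/* Two fields share a cacheline iff their offsets fall in the
	 * same line-size-aligned slot. */
	size_t pgd_line   = offsetof(struct demo_paca, pgd) / CACHELINE;
	size_t extlb_line = offsetof(struct demo_paca, extlb) / CACHELINE;

	printf("pgd   @ 0x%zx (line %zu)\n",
	       offsetof(struct demo_paca, pgd), pgd_line);
	printf("extlb @ 0x%zx (line %zu)\n",
	       offsetof(struct demo_paca, extlb), extlb_line);
	printf("same cacheline: %s\n",
	       pgd_line == extlb_line ? "yes" : "no");
	return 0;
}

With these (assumed) sizes, pgd lands at 0x80 and extlb at 0x90, i.e.
the same line; in the old layout extlb carried the alignment itself, so
touching it did nothing to warm the separately-placed pgd.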