From 74bf4312fff083ab25c3f357cc653ada7995e5f6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:29:18 -0800 Subject: [SPARC64]: Move away from virtual page tables, part 1. We now use the TSB hardware assist features of the UltraSPARC MMUs. SMP is currently knowingly broken, we need to find another place to store the per-cpu base pointers. We hid them away in the TSB base register, and that obviously will not work any more :-) Another known broken case is non-8KB base page size. Also noticed that flush_tlb_all() is not referenced anywhere, only the internal __flush_tlb_all() (local cpu only) is used by the sparc64 port, so we can get rid of flush_tlb_all(). The kernel gets it's own 8KB TSB (swapper_tsb) and each address space gets it's own private 8K TSB. Later we can add code to dynamically increase the size of per-process TSB as the RSS grows. An 8KB TSB is good enough for up to about a 4MB RSS, after which the TSB starts to incur many capacity and conflict misses. We even accumulate OBP translations into the kernel TSB. Another area for refinement is large page size support. We could use a secondary address space TSB to handle those. Signed-off-by: David S. Miller --- arch/sparc64/mm/Makefile | 2 +- arch/sparc64/mm/init.c | 91 +++--------------------------------------------- arch/sparc64/mm/tlb.c | 61 ++------------------------------ arch/sparc64/mm/tsb.c | 84 ++++++++++++++++++++++++++++++++++++++++++++ arch/sparc64/mm/ultra.S | 58 ------------------------------ 5 files changed, 92 insertions(+), 204 deletions(-) create mode 100644 arch/sparc64/mm/tsb.c (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index 9d0960e..e415bf9 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile @@ -5,6 +5,6 @@ EXTRA_AFLAGS := -ansi EXTRA_CFLAGS := -Werror -obj-y := ultra.o tlb.o fault.o init.o generic.o +obj-y := ultra.o tlb.o tsb.o fault.o init.o generic.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e44ee2..da068f6 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -408,8 +408,7 @@ unsigned long prom_virt_to_phys(unsigned long promva, int *error) /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> - * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte - * scheme (also, see rant in inherit_locked_prom_mappings()). + * HI_OBP_ADDRESS range are handled in ktlb.S. */ static inline int in_obp_range(unsigned long vaddr) { @@ -539,75 +538,6 @@ static void __init inherit_prom_mappings(void) prom_printf("done.\n"); } -/* The OBP specifications for sun4u mark 0xfffffffc00000000 and - * upwards as reserved for use by the firmware (I wonder if this - * will be the same on Cheetah...). We use this virtual address - * range for the VPTE table mappings of the nucleus so we need - * to zap them when we enter the PROM. -DaveM - */ -static void __flush_nucleus_vptes(void) -{ - unsigned long prom_reserved_base = 0xfffffffc00000000UL; - int i; - - /* Only DTLB must be checked for VPTE entries. */ - if (tlb_type == spitfire) { - for (i = 0; i < 63; i++) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no cheetah+ - * page size encodings. 
- */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_dtlb_tag(i); - if (((tag & ~(PAGE_MASK)) == 0) && - ((tag & (PAGE_MASK)) >= prom_reserved_base)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - for (i = 0; i < 512; i++) { - unsigned long tag = cheetah_get_dtlb_tag(i, 2); - - if ((tag & ~PAGE_MASK) == 0 && - (tag & PAGE_MASK) >= prom_reserved_base) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_dtlb_data(i, 0x0UL, 2); - } - - if (tlb_type != cheetah_plus) - continue; - - tag = cheetah_get_dtlb_tag(i, 3); - - if ((tag & ~PAGE_MASK) == 0 && - (tag & PAGE_MASK) >= prom_reserved_base) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_dtlb_data(i, 0x0UL, 3); - } - } - } else { - /* Implement me :-) */ - BUG(); - } -} - static int prom_ditlb_set; struct prom_tlb_entry { int tlb_ent; @@ -635,9 +565,6 @@ void prom_world(int enter) : "i" (PSTATE_IE)); if (enter) { - /* Kick out nucleus VPTEs. */ - __flush_nucleus_vptes(); - /* Install PROM world. */ for (i = 0; i < 16; i++) { if (prom_dtlb[i].tlb_ent != -1) { @@ -1039,18 +966,7 @@ out: struct pgtable_cache_struct pgt_quicklists; #endif -/* OK, we have to color these pages. The page tables are accessed - * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S - * code, as well as by PAGE_OFFSET range direct-mapped addresses by - * other parts of the kernel. By coloring, we make sure that the tlbmiss - * fast handlers do not get data from old/garbage dcache lines that - * correspond to an old/stale virtual address (user/kernel) that - * previously mapped the pagetable page while accessing vpte range - * addresses. The idea is that if the vpte color and PAGE_OFFSET range - * color is the same, then when the kernel initializes the pagetable - * using the later address range, accesses with the first address - * range will see the newly initialized data rather than the garbage. - */ +/* XXX We don't need to color these things in the D-cache any longer. */ #ifdef DCACHE_ALIASING_POSSIBLE #define DC_ALIAS_SHIFT 1 #else @@ -1419,6 +1335,9 @@ void kernel_map_pages(struct page *page, int numpages, int enable) kernel_map_range(phys_start, phys_end, (enable ? PAGE_KERNEL : __pgprot(0))); + flush_tsb_kernel_range(PAGE_OFFSET + phys_start, + PAGE_OFFSET + phys_end); + /* we should perform an IPI and flush all tlbs, * but that can deadlock->flush only current cpu. 
*/ diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c index 8b104be..78357cc 100644 --- a/arch/sparc64/mm/tlb.c +++ b/arch/sparc64/mm/tlb.c @@ -25,6 +25,8 @@ void flush_tlb_pending(void) struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); if (mp->tlb_nr) { + flush_tsb_user(mp); + if (CTX_VALID(mp->mm->context)) { #ifdef CONFIG_SMP smp_flush_tlb_pending(mp->mm, mp->tlb_nr, @@ -89,62 +91,3 @@ no_cache_flush: if (nr >= TLB_BATCH_NR) flush_tlb_pending(); } - -void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) -{ - struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); - unsigned long nr = mp->tlb_nr; - long s = start, e = end, vpte_base; - - if (mp->fullmm) - return; - - /* If start is greater than end, that is a real problem. */ - BUG_ON(start > end); - - /* However, straddling the VA space hole is quite normal. */ - s &= PMD_MASK; - e = (e + PMD_SIZE - 1) & PMD_MASK; - - vpte_base = (tlb_type == spitfire ? - VPTE_BASE_SPITFIRE : - VPTE_BASE_CHEETAH); - - if (unlikely(nr != 0 && mm != mp->mm)) { - flush_tlb_pending(); - nr = 0; - } - - if (nr == 0) - mp->mm = mm; - - start = vpte_base + (s >> (PAGE_SHIFT - 3)); - end = vpte_base + (e >> (PAGE_SHIFT - 3)); - - /* If the request straddles the VA space hole, we - * need to swap start and end. The reason this - * occurs is that "vpte_base" is the center of - * the linear page table mapping area. Thus, - * high addresses with the sign bit set map to - * addresses below vpte_base and non-sign bit - * addresses map to addresses above vpte_base. - */ - if (end < start) { - unsigned long tmp = start; - - start = end; - end = tmp; - } - - while (start < end) { - mp->vaddrs[nr] = start; - mp->tlb_nr = ++nr; - if (nr >= TLB_BATCH_NR) { - flush_tlb_pending(); - nr = 0; - } - start += PAGE_SIZE; - } - if (nr) - flush_tlb_pending(); -} diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c new file mode 100644 index 0000000..15e8af5 --- /dev/null +++ b/arch/sparc64/mm/tsb.c @@ -0,0 +1,84 @@ +/* arch/sparc64/mm/tsb.c + * + * Copyright (C) 2006 David S. Miller + */ + +#include +#include +#include +#include +#include + +#define TSB_ENTRY_ALIGNMENT 16 + +struct tsb { + unsigned long tag; + unsigned long pte; +} __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); + +/* We use an 8K TSB for the whole kernel, this allows to + * handle about 4MB of modules and vmalloc mappings without + * incurring many hash conflicts. + */ +#define KERNEL_TSB_SIZE_BYTES 8192 +#define KERNEL_TSB_NENTRIES \ + (KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb)) + +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +static inline unsigned long tsb_hash(unsigned long vaddr) +{ + vaddr >>= PAGE_SHIFT; + return vaddr & (KERNEL_TSB_NENTRIES - 1); +} + +static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context) +{ + if (context == ~0UL) + return 1; + + return (entry->tag == ((vaddr >> 22) | (context << 48))); +} + +/* TSB flushes need only occur on the processor initiating the address + * space modification, not on each cpu the address space has run on. + * Only the TLB flush needs that treatment. 
+ */ + +void flush_tsb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long v; + + for (v = start; v < end; v += PAGE_SIZE) { + struct tsb *ent = &swapper_tsb[tsb_hash(v)]; + + if (tag_compare(ent, v, 0)) { + ent->tag = 0UL; + membar_storeload_storestore(); + } + } +} + +void flush_tsb_user(struct mmu_gather *mp) +{ + struct mm_struct *mm = mp->mm; + struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb; + unsigned long ctx = ~0UL; + int i; + + if (CTX_VALID(mm->context)) + ctx = CTX_HWBITS(mm->context); + + for (i = 0; i < mp->tlb_nr; i++) { + unsigned long v = mp->vaddrs[i]; + struct tsb *ent; + + v &= ~0x1UL; + + ent = &tsb[tsb_hash(v)]; + if (tag_compare(ent, v, ctx)) { + ent->tag = 0UL; + membar_storeload_storestore(); + } + } +} diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index e4c9151..22791f2 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -453,64 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop - .data - -errata32_hwbug: - .xword 0 - - .text - - /* These two are not performance critical... */ - .globl xcall_flush_tlb_all_spitfire -xcall_flush_tlb_all_spitfire: - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - - clr %g2 - clr %g3 -1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4 - and %g4, _PAGE_L, %g5 - brnz,pn %g5, 2f - mov TLB_TAG_ACCESS, %g7 - - stxa %g0, [%g7] ASI_DMMU - membar #Sync - stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS - membar #Sync - - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - -2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4 - and %g4, _PAGE_L, %g5 - brnz,pn %g5, 2f - mov TLB_TAG_ACCESS, %g7 - - stxa %g0, [%g7] ASI_IMMU - membar #Sync - stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS - membar #Sync - - /* Spitfire Errata #32 workaround. */ - sethi %hi(errata32_hwbug), %g4 - stx %g0, [%g4 + %lo(errata32_hwbug)] - -2: add %g2, 1, %g2 - cmp %g2, SPITFIRE_HIGHEST_LOCKED_TLBENT - ble,pt %icc, 1b - sll %g2, 3, %g3 - flush %g6 - retry - - .globl xcall_flush_tlb_all_cheetah -xcall_flush_tlb_all_cheetah: - mov 0x80, %g2 - stxa %g0, [%g2] ASI_DMMU_DEMAP - stxa %g0, [%g2] ASI_IMMU_DEMAP - retry - /* These just get rescheduled to PIL vectors. */ .globl xcall_call_function xcall_call_function: -- cgit v1.1 From 05e28f9de65a38bb0c769080e91b6976e7e1e70c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:13 -0800 Subject: [SPARC64]: No need to D-cache color page tables any longer. Unlike the virtual page tables, the new TSB scheme does not require this ugly hack. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 71 +++++--------------------------------------------- 1 file changed, 6 insertions(+), 65 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index da068f6..936ae1a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -145,6 +145,10 @@ int bigkernel = 0; #define PGT_CACHE_LOW 25 #define PGT_CACHE_HIGH 50 +#ifndef CONFIG_SMP +struct pgtable_cache_struct pgt_quicklists; +#endif + void check_pgt_cache(void) { preempt_disable(); @@ -152,10 +156,8 @@ void check_pgt_cache(void) do { if (pgd_quicklist) free_pgd_slow(get_pgd_fast()); - if (pte_quicklist[0]) - free_pte_slow(pte_alloc_one_fast(NULL, 0)); - if (pte_quicklist[1]) - free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10))); + if (pte_quicklist) + free_pte_slow(pte_alloc_one_fast()); } while (pgtable_cache_size > PGT_CACHE_LOW); } preempt_enable(); @@ -962,67 +964,6 @@ out: spin_unlock(&ctx_alloc_lock); } -#ifndef CONFIG_SMP -struct pgtable_cache_struct pgt_quicklists; -#endif - -/* XXX We don't need to color these things in the D-cache any longer. */ -#ifdef DCACHE_ALIASING_POSSIBLE -#define DC_ALIAS_SHIFT 1 -#else -#define DC_ALIAS_SHIFT 0 -#endif -pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) -{ - struct page *page; - unsigned long color; - - { - pte_t *ptep = pte_alloc_one_fast(mm, address); - - if (ptep) - return ptep; - } - - color = VPTE_COLOR(address); - page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT); - if (page) { - unsigned long *to_free; - unsigned long paddr; - pte_t *pte; - -#ifdef DCACHE_ALIASING_POSSIBLE - set_page_count(page, 1); - ClearPageCompound(page); - - set_page_count((page + 1), 1); - ClearPageCompound(page + 1); -#endif - paddr = (unsigned long) page_address(page); - memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT)); - - if (!color) { - pte = (pte_t *) paddr; - to_free = (unsigned long *) (paddr + PAGE_SIZE); - } else { - pte = (pte_t *) (paddr + PAGE_SIZE); - to_free = (unsigned long *) paddr; - } - -#ifdef DCACHE_ALIASING_POSSIBLE - /* Now free the other one up, adjust cache size. */ - preempt_disable(); - *to_free = (unsigned long) pte_quicklist[color ^ 0x1]; - pte_quicklist[color ^ 0x1] = to_free; - pgtable_cache_size++; - preempt_enable(); -#endif - - return pte; - } - return NULL; -} - void sparc_ultra_dump_itlb(void) { int slot; -- cgit v1.1 From 3c936465249f863f322154ff1aaa628b84ee5750 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:30:27 -0800 Subject: [SPARC64]: Kill pgtable quicklists and use SLAB. Taking a nod from the powerpc port. With the per-cpu caching of both the page allocator and SLAB, the pgtable quicklist scheme becomes relatively silly and primitive. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 936ae1a..7c456af 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -141,26 +141,25 @@ unsigned long sparc64_kern_sec_context __read_mostly; int bigkernel = 0; -/* XXX Tune this... 
*/ -#define PGT_CACHE_LOW 25 -#define PGT_CACHE_HIGH 50 +kmem_cache_t *pgtable_cache __read_mostly; -#ifndef CONFIG_SMP -struct pgtable_cache_struct pgt_quicklists; -#endif +static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) +{ + clear_page(addr); +} -void check_pgt_cache(void) +void pgtable_cache_init(void) { - preempt_disable(); - if (pgtable_cache_size > PGT_CACHE_HIGH) { - do { - if (pgd_quicklist) - free_pgd_slow(get_pgd_fast()); - if (pte_quicklist) - free_pte_slow(pte_alloc_one_fast()); - } while (pgtable_cache_size > PGT_CACHE_LOW); + pgtable_cache = kmem_cache_create("pgtable_cache", + PAGE_SIZE, PAGE_SIZE, + SLAB_HWCACHE_ALIGN | + SLAB_MUST_HWCACHE_ALIGN, + zero_ctor, + NULL); + if (!pgtable_cache) { + prom_printf("pgtable_cache_init(): Could not create!\n"); + prom_halt(); } - preempt_enable(); } #ifdef CONFIG_DEBUG_DCFLUSH @@ -340,7 +339,6 @@ void show_mem(void) nr_swap_pages << (PAGE_SHIFT-10)); printk("%ld pages of RAM\n", num_physpages); printk("%d free pages\n", nr_free_pages()); - printk("%d pages in page table cache\n",pgtable_cache_size); } void mmu_info(struct seq_file *m) -- cgit v1.1 From 56fb4df6da76c35dca22036174e2d1edef83ff1f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 23:24:22 -0800 Subject: [SPARC64]: Elminate all usage of hard-coded trap globals. UltraSPARC has special sets of global registers which are switched to for certain trap types. There is one set for MMU related traps, one set of Interrupt Vector processing, and another set (called the Alternate globals) for all other trap types. For what seems like forever we've hard coded the values in some of these trap registers. Some examples include: 1) Interrupt Vector global %g6 holds current processors interrupt work struct where received interrupts are managed for IRQ handler dispatch. 2) MMU global %g7 holds the base of the page tables of the currently active address space. 3) Alternate global %g6 held the current_thread_info() value. Such hardcoding has resulted in some serious issues in many areas. There are some code sequences where having another register available would help clean up the implementation. Taking traps such as cross-calls from the OBP firmware requires some trick code sequences wherein we have to save away and restore all of the special sets of global registers when we enter/exit OBP. We were also using the IMMU TSB register on SMP to hold the per-cpu area base address, which doesn't work any longer now that we actually use the TSB facility of the cpu. The implementation is pretty straight forward. One tricky bit is getting the current processor ID as that is different on different cpu variants. We use a stub with a fancy calling convention which we patch at boot time. The calling convention is that the stub is branched to and the (PC - 4) to return to is in register %g1. The cpu number is left in %g6. This stub can be invoked by using the __GET_CPUID macro. We use an array of per-cpu trap state to store the current thread and physical address of the current address space's page tables. The TRAP_LOAD_THREAD_REG loads %g6 with the current thread from this table, it uses __GET_CPUID and also clobbers %g1. TRAP_LOAD_IRQ_WORK is used by the interrupt vector processing to load the current processor's IRQ software state into %g6. It also uses __GET_CPUID and clobbers %g1. Finally, TRAP_LOAD_PGD_PHYS loads the physical address base of the current address space's page tables into %g7, it clobbers %g1 and uses __GET_CPUID. 
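As a rough sketch of the calling convention just described (illustration only: the stub label, return sequence, and per-cpu symbol names below are assumptions for explanation, not the kernel's actual identifiers), a caller of the boot-patched cpuid stub would look roughly like:

	ba,pt	%xcc, __get_cpu_id_stub	! hypothetical name for the boot-patched stub
	 rd	%pc, %g1		! delay slot: %g1 = (return PC - 4), per the convention above
	! the stub jumps back via "jmpl %g1 + 0x4, %g0", leaving the cpu number in %g6

A TRAP_LOAD_* macro built on this would then scale the cpu number in %g6 by the size of one per-cpu trap state entry, add the base address of the per-cpu array, and load the current thread pointer (or the PGD physical address) from a fixed offset into %g6 (or %g7), clobbering %g1 as scratch along the way.
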
Many refinements are possible, as well as some tuning, with this stuff in place. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 22791f2..a873948 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -295,12 +295,10 @@ cheetah_patch_cachetlbops: * %g1 address arg 1 (tlb page and range flushes) * %g7 address arg 2 (tlb range flush only) * - * %g6 ivector table, don't touch - * %g2 scratch 1 - * %g3 scratch 2 - * %g4 scratch 3 - * - * TODO: Make xcall TLB range flushes use the tricks above... -DaveM + * %g6 scratch 1 + * %g2 scratch 2 + * %g3 scratch 3 + * %g4 scratch 4 */ .align 32 .globl xcall_flush_tlb_mm -- cgit v1.1 From 09f94287f7260e03bbeab497e743691fafcc22c3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:06 -0800 Subject: [SPARC64]: TSB refinements. Move {init_new,destroy}_context() out of line. Do not put huge pages into the TSB, only base page size translations. There are some clever things we could do here, but for now let's be correct instead of fancy. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 15e8af5..2f84cef 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -8,6 +8,7 @@ #include #include #include +#include #define TSB_ENTRY_ALIGNMENT 16 @@ -82,3 +83,30 @@ void flush_tsb_user(struct mmu_gather *mp) } } } + +int init_new_context(struct task_struct *tsk, struct mm_struct *mm) +{ + unsigned long page = get_zeroed_page(GFP_KERNEL); + + mm->context.sparc64_ctx_val = 0UL; + if (unlikely(!page)) + return -ENOMEM; + + mm->context.sparc64_tsb = (unsigned long *) page; + + return 0; +} + +void destroy_context(struct mm_struct *mm) +{ + free_page((unsigned long) mm->context.sparc64_tsb); + + spin_lock(&ctx_alloc_lock); + + if (CTX_VALID(mm->context)) { + unsigned long nr = CTX_NRBITS(mm->context); + mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); + } + + spin_unlock(&ctx_alloc_lock); +} -- cgit v1.1 From 98c5584cfc47932c4f3ccf5eee2e0bae1447b85e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:20 -0800 Subject: [SPARC64]: Add infrastructure for dynamic TSB sizing. This also cleans up tsb_context_switch(). The assembler routine is now __tsb_context_switch() and the former is an inline function that picks out the bits from the mm_struct and passes it into the assembler code as arguments. setup_tsb_parms() computes the locked TLB entry to map the TSB. Later when we support using the physical address quad load instructions of Cheetah+ and later, we'll simply use the physical address for the TSB register value and set the map virtual and PTE both to zero. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/tsb.c | 109 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 95 insertions(+), 14 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 2f84cef..dfe7144 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -9,13 +9,7 @@ #include #include #include - -#define TSB_ENTRY_ALIGNMENT 16 - -struct tsb { - unsigned long tag; - unsigned long pte; -} __attribute__((aligned(TSB_ENTRY_ALIGNMENT))); +#include /* We use an 8K TSB for the whole kernel, this allows to * handle about 4MB of modules and vmalloc mappings without @@ -27,10 +21,10 @@ struct tsb { extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; -static inline unsigned long tsb_hash(unsigned long vaddr) +static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries) { vaddr >>= PAGE_SHIFT; - return vaddr & (KERNEL_TSB_NENTRIES - 1); + return vaddr & (nentries - 1); } static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context) @@ -51,7 +45,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) unsigned long v; for (v = start; v < end; v += PAGE_SIZE) { - struct tsb *ent = &swapper_tsb[tsb_hash(v)]; + unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); + struct tsb *ent = &swapper_tsb[hash]; if (tag_compare(ent, v, 0)) { ent->tag = 0UL; @@ -63,8 +58,9 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) void flush_tsb_user(struct mmu_gather *mp) { struct mm_struct *mm = mp->mm; - struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb; + struct tsb *tsb = mm->context.tsb; unsigned long ctx = ~0UL; + unsigned long nentries = mm->context.tsb_nentries; int i; if (CTX_VALID(mm->context)) @@ -76,7 +72,7 @@ void flush_tsb_user(struct mmu_gather *mp) v &= ~0x1UL; - ent = &tsb[tsb_hash(v)]; + ent = &tsb[tsb_hash(v, nentries)]; if (tag_compare(ent, v, ctx)) { ent->tag = 0UL; membar_storeload_storestore(); @@ -84,6 +80,83 @@ void flush_tsb_user(struct mmu_gather *mp) } } +static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) +{ + unsigned long tsb_reg, base, tsb_paddr; + unsigned long page_sz, tte; + + mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb); + + base = TSBMAP_BASE; + tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP | + _PAGE_CV | _PAGE_P | _PAGE_W); + tsb_paddr = __pa(mm->context.tsb); + + /* Use the smallest page size that can map the whole TSB + * in one TLB entry. 
+ */ + switch (tsb_bytes) { + case 8192 << 0: + tsb_reg = 0x0UL; +#ifdef DCACHE_ALIASING_POSSIBLE + base += (tsb_paddr & 8192); +#endif + tte |= _PAGE_SZ8K; + page_sz = 8192; + break; + + case 8192 << 1: + tsb_reg = 0x1UL; + tte |= _PAGE_SZ64K; + page_sz = 64 * 1024; + break; + + case 8192 << 2: + tsb_reg = 0x2UL; + tte |= _PAGE_SZ64K; + page_sz = 64 * 1024; + break; + + case 8192 << 3: + tsb_reg = 0x3UL; + tte |= _PAGE_SZ64K; + page_sz = 64 * 1024; + break; + + case 8192 << 4: + tsb_reg = 0x4UL; + tte |= _PAGE_SZ512K; + page_sz = 512 * 1024; + break; + + case 8192 << 5: + tsb_reg = 0x5UL; + tte |= _PAGE_SZ512K; + page_sz = 512 * 1024; + break; + + case 8192 << 6: + tsb_reg = 0x6UL; + tte |= _PAGE_SZ512K; + page_sz = 512 * 1024; + break; + + case 8192 << 7: + tsb_reg = 0x7UL; + tte |= _PAGE_SZ4MB; + page_sz = 4 * 1024 * 1024; + break; + }; + + tsb_reg |= base; + tsb_reg |= (tsb_paddr & (page_sz - 1UL)); + tte |= (tsb_paddr & ~(page_sz - 1UL)); + + mm->context.tsb_reg_val = tsb_reg; + mm->context.tsb_map_vaddr = base; + mm->context.tsb_map_pte = tte; +} + int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { unsigned long page = get_zeroed_page(GFP_KERNEL); @@ -92,14 +165,22 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) if (unlikely(!page)) return -ENOMEM; - mm->context.sparc64_tsb = (unsigned long *) page; + mm->context.tsb = (struct tsb *) page; + setup_tsb_params(mm, PAGE_SIZE); return 0; } void destroy_context(struct mm_struct *mm) { - free_page((unsigned long) mm->context.sparc64_tsb); + free_page((unsigned long) mm->context.tsb); + + /* We can remove these later, but for now it's useful + * to catch any bogus post-destroy_context() references + * to the TSB. + */ + mm->context.tsb = NULL; + mm->context.tsb_reg_val = 0UL; spin_lock(&ctx_alloc_lock); -- cgit v1.1 From bd40791e1d289d807b8580abe1f117e9c62894e4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:31:38 -0800 Subject: [SPARC64]: Dynamically grow TSB in response to RSS growth. As the RSS grows, grow the TSB in order to reduce the likelyhood of hash collisions and thus poor hit rates in the TSB. This definitely needs some serious tuning. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 7 +++ arch/sparc64/mm/tsb.c | 151 +++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 152 insertions(+), 6 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 7c456af..a8119cb 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -246,9 +246,11 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { + struct mm_struct *mm; struct page *page; unsigned long pfn; unsigned long pg_flags; + unsigned long mm_rss; pfn = pte_pfn(pte); if (pfn_valid(pfn) && @@ -270,6 +272,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p put_cpu(); } + + mm = vma->vm_mm; + mm_rss = get_mm_rss(mm); + if (mm_rss >= mm->context.tsb_rss_limit) + tsb_grow(mm, mm_rss, GFP_ATOMIC); } void flush_dcache_page(struct page *page) diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index dfe7144..707af4b 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -10,6 +10,7 @@ #include #include #include +#include /* We use an 8K TSB for the whole kernel, this allows to * handle about 4MB of modules and vmalloc mappings without @@ -146,6 +147,9 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) tte |= _PAGE_SZ4MB; page_sz = 4 * 1024 * 1024; break; + + default: + BUG(); }; tsb_reg |= base; @@ -157,23 +161,158 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) mm->context.tsb_map_pte = tte; } +/* The page tables are locked against modifications while this + * runs. + * + * XXX do some prefetching... + */ +static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, + struct tsb *new_tsb, unsigned long new_size) +{ + unsigned long old_nentries = old_size / sizeof(struct tsb); + unsigned long new_nentries = new_size / sizeof(struct tsb); + unsigned long i; + + for (i = 0; i < old_nentries; i++) { + register unsigned long tag asm("o4"); + register unsigned long pte asm("o5"); + unsigned long v; + unsigned int hash; + + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD)); + + if (!tag || (tag & TSB_TAG_LOCK)) + continue; + + /* We only put base page size PTEs into the TSB, + * but that might change in the future. This code + * would need to be changed if we start putting larger + * page size PTEs into there. + */ + WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS); + + /* The tag holds bits 22 to 63 of the virtual address + * and the context. Clear out the context, and shift + * up to make a virtual address. + */ + v = (tag & ((1UL << 42UL) - 1UL)) << 22UL; + + /* The implied bits of the tag (bits 13 to 21) are + * determined by the TSB entry index, so fill that in. + */ + v |= (i & (512UL - 1UL)) << 13UL; + + hash = tsb_hash(v, new_nentries); + new_tsb[hash].tag = tag; + new_tsb[hash].pte = pte; + } +} + +/* When the RSS of an address space exceeds mm->context.tsb_rss_limit, + * update_mmu_cache() invokes this routine to try and grow the TSB. + * When we reach the maximum TSB size supported, we stick ~0UL into + * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache() + * will not trigger any longer. + * + * The TSB can be anywhere from 8K to 1MB in size, in increasing powers + * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB + * must be 512K aligned. 
+ * + * The idea here is to grow the TSB when the RSS of the process approaches + * the number of entries that the current TSB can hold at once. Currently, + * we trigger when the RSS hits 3/4 of the TSB capacity. + */ +void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) +{ + unsigned long max_tsb_size = 1 * 1024 * 1024; + unsigned long size, old_size; + struct page *page; + struct tsb *old_tsb; + + if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) + max_tsb_size = (PAGE_SIZE << MAX_ORDER); + + for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) { + unsigned long n_entries = size / sizeof(struct tsb); + + n_entries = (n_entries * 3) / 4; + if (n_entries > rss) + break; + } + + page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size)); + if (unlikely(!page)) + return; + + if (size == max_tsb_size) + mm->context.tsb_rss_limit = ~0UL; + else + mm->context.tsb_rss_limit = + ((size / sizeof(struct tsb)) * 3) / 4; + + old_tsb = mm->context.tsb; + old_size = mm->context.tsb_nentries * sizeof(struct tsb); + + if (old_tsb) + copy_tsb(old_tsb, old_size, page_address(page), size); + + mm->context.tsb = page_address(page); + setup_tsb_params(mm, size); + + /* If old_tsb is NULL, we're being invoked for the first time + * from init_new_context(). + */ + if (old_tsb) { + /* Now force all other processors to reload the new + * TSB state. + */ + smp_tsb_sync(mm); + + /* Finally reload it on the local cpu. No further + * references will remain to the old TSB and we can + * thus free it up. + */ + tsb_context_switch(mm); + + free_pages((unsigned long) old_tsb, get_order(old_size)); + } +} + int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { - unsigned long page = get_zeroed_page(GFP_KERNEL); + unsigned long initial_rss; mm->context.sparc64_ctx_val = 0UL; - if (unlikely(!page)) - return -ENOMEM; - mm->context.tsb = (struct tsb *) page; - setup_tsb_params(mm, PAGE_SIZE); + /* copy_mm() copies over the parent's mm_struct before calling + * us, so we need to zero out the TSB pointer or else tsb_grow() + * will be confused and think there is an older TSB to free up. + */ + mm->context.tsb = NULL; + + /* If this is fork, inherit the parent's TSB size. We would + * grow it to that size on the first page fault anyways. + */ + initial_rss = mm->context.tsb_nentries; + if (initial_rss) + initial_rss -= 1; + + tsb_grow(mm, initial_rss, GFP_KERNEL); + + if (unlikely(!mm->context.tsb)) + return -ENOMEM; return 0; } void destroy_context(struct mm_struct *mm) { - free_page((unsigned long) mm->context.tsb); + unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb); + + free_pages((unsigned long) mm->context.tsb, get_order(size)); /* We can remove these later, but for now it's useful * to catch any bogus post-destroy_context() references -- cgit v1.1 From b70c0fa1613c4f69b4a340a0e2bee387560ebbb1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:32:04 -0800 Subject: [SPARC64]: Preload TSB entries from update_mmu_cache(). Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a8119cb..1e8a5a3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -277,6 +277,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p mm_rss = get_mm_rss(mm); if (mm_rss >= mm->context.tsb_rss_limit) tsb_grow(mm, mm_rss, GFP_ATOMIC); + + if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { + struct tsb *tsb; + unsigned long tag; + + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & + (mm->context.tsb_nentries - 1UL)]; + tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL; + tsb_insert(tsb, tag, pte_val(pte)); + } } void flush_dcache_page(struct page *page) -- cgit v1.1 From 4753eb2ac7022b999e5e484f1a5dc001dba22bd3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:32:44 -0800 Subject: [SPARC64]: Fix incorrect TSB lock bit handling. The TSB_LOCK_BIT define is actually a special value shifted down by 32-bits for the assembler code macros. In C code, this isn't what we want. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 707af4b..e60547821 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -184,7 +184,7 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, : "=r" (tag), "=r" (pte) : "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD)); - if (!tag || (tag & TSB_TAG_LOCK)) + if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT))) continue; /* We only put base page size PTEs into the TSB, -- cgit v1.1 From 4da808c352c290d3f762933d44d4ab90c2fd65f3 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:00 -0800 Subject: [SPARC64]: Fix bogus flush instruction usage. Some of the trap code was still assuming that alternate global %g6 was hard coded with current_thread_info(). Let's just consistently flush at KERNBASE when we need a pipeline synchronization. That's locked into the TLB and will always work. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index a873948..269ed57 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -36,9 +36,10 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ mov 0x50, %g3 stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP + sethi %hi(KERNBASE), %g3 + flush %g3 retl - flush %g6 - nop + nop nop nop nop @@ -72,7 +73,8 @@ __flush_tlb_pending: brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o4 + flush %o4 retl wrpr %g7, 0x0, %pstate nop @@ -94,8 +96,10 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ membar #Sync brnz,pt %o3, 1b sub %o3, %o4, %o3 -2: retl - flush %g6 +2: sethi %hi(KERNBASE), %o3 + flush %o3 + retl + nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 @@ -105,7 +109,8 @@ __spitfire_flush_tlb_mm_slow: stxa %g0, [%g3] ASI_IMMU_DEMAP flush %g6 stxa %g2, [%o1] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o1 + flush %o1 retl wrpr %g1, 0, %pstate @@ -181,7 +186,7 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ .previous /* Cheetah specific versions, patched at boot time. 
*/ -__cheetah_flush_tlb_mm: /* 18 insns */ +__cheetah_flush_tlb_mm: /* 19 insns */ rdpr %pstate, %g7 andn %g7, PSTATE_IE, %g2 wrpr %g2, 0x0, %pstate @@ -196,12 +201,13 @@ __cheetah_flush_tlb_mm: /* 18 insns */ stxa %g0, [%g3] ASI_DMMU_DEMAP stxa %g0, [%g3] ASI_IMMU_DEMAP stxa %g2, [%o2] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o2 + flush %o2 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate -__cheetah_flush_tlb_pending: /* 26 insns */ +__cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -225,7 +231,8 @@ __cheetah_flush_tlb_pending: /* 26 insns */ brnz,pt %o1, 1b nop stxa %g2, [%o4] ASI_DMMU - flush %g6 + sethi %hi(KERNBASE), %o4 + flush %o4 wrpr %g0, 0, %tl retl wrpr %g7, 0x0, %pstate @@ -265,14 +272,14 @@ cheetah_patch_cachetlbops: sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 call cheetah_patch_one - mov 18, %o2 + mov 19, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 call cheetah_patch_one - mov 26, %o2 + mov 27, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 -- cgit v1.1 From 3487d1d4414fbfab5d98ec559e6f84f55520cb15 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:25 -0800 Subject: [SPARC64]: Kill PROM locked TLB entry preservation code. It is totally unnecessary complexity. After we take over the trap table, we handle all PROM tlb misses fully. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 295 ++----------------------------------------------- 1 file changed, 10 insertions(+), 285 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1e8a5a3..f4d22cc 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -555,294 +555,12 @@ static void __init inherit_prom_mappings(void) prom_printf("done.\n"); } -static int prom_ditlb_set; -struct prom_tlb_entry { - int tlb_ent; - unsigned long tlb_tag; - unsigned long tlb_data; -}; -struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; - void prom_world(int enter) { - unsigned long pstate; - int i; - if (!enter) set_fs((mm_segment_t) { get_thread_current_ds() }); - if (!prom_ditlb_set) - return; - - /* Make sure the following runs atomically. */ - __asm__ __volatile__("flushw\n\t" - "rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - - if (enter) { - /* Install PROM world. 
*/ - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - } - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_itlb[i].tlb_tag), - "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - } - } - } else { - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); - else - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); - } - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); - else - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); - } - } - } - __asm__ __volatile__("wrpr %0, 0, %%pstate" - : : "r" (pstate)); -} - -void inherit_locked_prom_mappings(int save_p) -{ - int i; - int dtlb_seen = 0; - int itlb_seen = 0; - - /* Fucking losing PROM has more mappings in the TLB, but - * it (conveniently) fails to mention any of these in the - * translations property. The only ones that matter are - * the locked PROM tlb entries, so we impose the following - * irrecovable rule on the PROM, it is allowed 8 locked - * entries in the ITLB and 8 in the DTLB. - * - * Supposedly the upper 16GB of the address space is - * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED - * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface - * used between the client program and the firmware on sun5 - * systems to coordinate mmu mappings is also COMPLETELY - * UNDOCUMENTED!!!!!! Thanks S(t)un! - */ - if (save_p) { - for (i = 0; i < 16; i++) { - prom_itlb[i].tlb_ent = -1; - prom_dtlb[i].tlb_ent = -1; - } - } - if (tlb_type == spitfire) { - int high = sparc64_highest_unlocked_tlb_ent; - for (i = 0; i <= high; i++) { - unsigned long data; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no cheetah+ - * page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - data = spitfire_get_dtlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. 
- */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_dtlb_tag(i); - if (save_p) { - prom_dtlb[dtlb_seen].tlb_ent = i; - prom_dtlb[dtlb_seen].tlb_tag = tag; - prom_dtlb[dtlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - - dtlb_seen++; - if (dtlb_seen > 15) - break; - } - } - - for (i = 0; i < high; i++) { - unsigned long data; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - data = spitfire_get_itlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - tag = spitfire_get_itlb_tag(i); - if (save_p) { - prom_itlb[itlb_seen].tlb_ent = i; - prom_itlb[itlb_seen].tlb_tag = tag; - prom_itlb[itlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - spitfire_put_itlb_data(i, 0x0UL); - - itlb_seen++; - if (itlb_seen > 15) - break; - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - int high = sparc64_highest_unlocked_tlb_ent; - - for (i = 0; i <= high; i++) { - unsigned long data; - - data = cheetah_get_ldtlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - tag = cheetah_get_ldtlb_tag(i); - if (save_p) { - prom_dtlb[dtlb_seen].tlb_ent = i; - prom_dtlb[dtlb_seen].tlb_tag = tag; - prom_dtlb[dtlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - cheetah_put_ldtlb_data(i, 0x0UL); - - dtlb_seen++; - if (dtlb_seen > 15) - break; - } - } - - for (i = 0; i < high; i++) { - unsigned long data; - - data = cheetah_get_litlb_data(i); - if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { - unsigned long tag; - - tag = cheetah_get_litlb_tag(i); - if (save_p) { - prom_itlb[itlb_seen].tlb_ent = i; - prom_itlb[itlb_seen].tlb_tag = tag; - prom_itlb[itlb_seen].tlb_data = data; - } - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - cheetah_put_litlb_data(i, 0x0UL); - - itlb_seen++; - if (itlb_seen > 15) - break; - } - } - } else { - /* Implement me :-) */ - BUG(); - } - if (save_p) - prom_ditlb_set = 1; -} - -/* Give PROM back his world, done during reboots... 
*/ -void prom_reload_locked(void) -{ - int i; - - for (i = 0; i < 16; i++) { - if (prom_dtlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU)); - if (tlb_type == spitfire) - spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - else if (tlb_type == cheetah || tlb_type == cheetah_plus) - cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, - prom_dtlb[i].tlb_data); - } - - if (prom_itlb[i].tlb_ent != -1) { - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : : "r" (prom_itlb[i].tlb_tag), - "r" (TLB_TAG_ACCESS), - "i" (ASI_IMMU)); - if (tlb_type == spitfire) - spitfire_put_itlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - else - cheetah_put_litlb_data(prom_itlb[i].tlb_ent, - prom_itlb[i].tlb_data); - } - } + __asm__ __volatile__("flushw"); } #ifdef DCACHE_ALIASING_POSSIBLE @@ -1066,6 +784,15 @@ void sparc_ultra_dump_dtlb(void) } } +static inline void spitfire_errata32(void) +{ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); +} + extern unsigned long cmdline_memory_size; unsigned long __init bootmem_init(unsigned long *pages_avail) @@ -1375,8 +1102,6 @@ void __init paging_init(void) setup_tba(this_is_starfire); } - inherit_locked_prom_mappings(1); - __flush_tlb_all(); /* Setup bootmem... */ -- cgit v1.1 From a8b900d801697609d1b56cc9c110148c64678068 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:37 -0800 Subject: [SPARC64]: Kill sole argument passed to setup_tba(). No longer used, and move extern declaration to a header file. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index f4d22cc..20e7af5 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1092,15 +1092,8 @@ void __init paging_init(void) inherit_prom_mappings(); - /* Ok, we can use our TLB miss and window trap handlers safely. - * We need to do a quick peek here to see if we are on StarFire - * or not, so setup_tba can setup the IRQ globals correctly (it - * needs to get the hard smp processor id correctly). - */ - { - extern void setup_tba(int); - setup_tba(this_is_starfire); - } + /* Ok, we can use our TLB miss and window trap handlers safely. */ + setup_tba(); __flush_tlb_all(); -- cgit v1.1 From 2f7ee7c63f08b7f883b710a29d91c1891b81b8e1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 31 Jan 2006 18:33:49 -0800 Subject: [SPARC64]: Increase swapper_tsb size to 32K. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index e60547821..1c4e5c2 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -12,14 +12,6 @@ #include #include -/* We use an 8K TSB for the whole kernel, this allows to - * handle about 4MB of modules and vmalloc mappings without - * incurring many hash conflicts. - */ -#define KERNEL_TSB_SIZE_BYTES 8192 -#define KERNEL_TSB_NENTRIES \ - (KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb)) - extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries) -- cgit v1.1 From 9954863975910a1b9372b7d5006a6cba43bdd288 Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Tue, 31 Jan 2006 18:34:34 -0800 Subject: [SPARC64]: Kill swapper_pgd_zero, totally unused. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 20e7af5..2c21d85 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -388,7 +388,6 @@ struct linux_prom_translation { /* Exported for kernel TLB miss handling in ktlb.S */ struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; -unsigned int swapper_pgd_zero __read_mostly; extern unsigned long prom_boot_page; extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); @@ -1088,8 +1087,6 @@ void __init paging_init(void) pud_set(pud_offset(&swapper_pg_dir[0], 0), swapper_low_pmd_dir + (shift / sizeof(pgd_t))); - swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); - inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. */ -- cgit v1.1 From 517af33237ecfc3c8a93b335365fa61e741ceca4 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 1 Feb 2006 15:55:21 -0800 Subject: [SPARC64]: Access TSB with physical addresses when possible. This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 32 +++++++++++++++++ arch/sparc64/mm/tsb.c | 95 ++++++++++++++++++++++++++++++++++---------------- 2 files changed, 97 insertions(+), 30 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 2c21d85..4893f3e 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -39,6 +39,7 @@ #include #include #include +#include extern void device_scan(void); @@ -244,6 +245,16 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c : "g1", "g7"); } +static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte) +{ + unsigned long tsb_addr = (unsigned long) ent; + + if (tlb_type == cheetah_plus) + tsb_addr = __pa(tsb_addr); + + __tsb_insert(tsb_addr, tag, pte); +} + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; @@ -1040,6 +1051,24 @@ unsigned long __init find_ecache_flush_span(unsigned long size) return ~0UL; } +static void __init tsb_phys_patch(void) +{ + struct tsb_phys_patch_entry *p; + + p = &__tsb_phys_patch; + while (p < &__tsb_phys_patch_end) { + unsigned long addr = p->addr; + + *(unsigned int *) addr = p->insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + p++; + } +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1052,6 +1081,9 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; + if (tlb_type == cheetah_plus) + tsb_phys_patch(); + /* Find available physical memory... 
*/ read_obp_memory("available", &pavail[0], &pavail_ents); diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 1c4e5c2..787533f 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -20,12 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries return vaddr & (nentries - 1); } -static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context) +static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context) { - if (context == ~0UL) - return 1; - - return (entry->tag == ((vaddr >> 22) | (context << 48))); + return (tag == ((vaddr >> 22) | (context << 48))); } /* TSB flushes need only occur on the processor initiating the address @@ -41,7 +38,7 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); struct tsb *ent = &swapper_tsb[hash]; - if (tag_compare(ent, v, 0)) { + if (tag_compare(ent->tag, v, 0)) { ent->tag = 0UL; membar_storeload_storestore(); } @@ -52,24 +49,31 @@ void flush_tsb_user(struct mmu_gather *mp) { struct mm_struct *mm = mp->mm; struct tsb *tsb = mm->context.tsb; - unsigned long ctx = ~0UL; unsigned long nentries = mm->context.tsb_nentries; + unsigned long ctx, base; int i; - if (CTX_VALID(mm->context)) - ctx = CTX_HWBITS(mm->context); + if (unlikely(!CTX_VALID(mm->context))) + return; + + ctx = CTX_HWBITS(mm->context); + if (tlb_type == cheetah_plus) + base = __pa(tsb); + else + base = (unsigned long) tsb; + for (i = 0; i < mp->tlb_nr; i++) { unsigned long v = mp->vaddrs[i]; - struct tsb *ent; + unsigned long tag, ent, hash; v &= ~0x1UL; - ent = &tsb[tsb_hash(v, nentries)]; - if (tag_compare(ent, v, ctx)) { - ent->tag = 0UL; - membar_storeload_storestore(); - } + hash = tsb_hash(v, nentries); + ent = base + (hash * sizeof(struct tsb)); + tag = (v >> 22UL) | (ctx << 48UL); + + tsb_flush(ent, tag); } } @@ -84,6 +88,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W); tsb_paddr = __pa(mm->context.tsb); + BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); /* Use the smallest page size that can map the whole TSB * in one TLB entry. @@ -144,13 +149,23 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) BUG(); }; - tsb_reg |= base; - tsb_reg |= (tsb_paddr & (page_sz - 1UL)); - tte |= (tsb_paddr & ~(page_sz - 1UL)); + if (tlb_type == cheetah_plus) { + /* Physical mapping, no locked TLB entry for TSB. 
*/ + tsb_reg |= tsb_paddr; + + mm->context.tsb_reg_val = tsb_reg; + mm->context.tsb_map_vaddr = 0; + mm->context.tsb_map_pte = 0; + } else { + tsb_reg |= base; + tsb_reg |= (tsb_paddr & (page_sz - 1UL)); + tte |= (tsb_paddr & ~(page_sz - 1UL)); + + mm->context.tsb_reg_val = tsb_reg; + mm->context.tsb_map_vaddr = base; + mm->context.tsb_map_pte = tte; + } - mm->context.tsb_reg_val = tsb_reg; - mm->context.tsb_map_vaddr = base; - mm->context.tsb_map_pte = tte; } /* The page tables are locked against modifications while this @@ -168,13 +183,21 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, for (i = 0; i < old_nentries; i++) { register unsigned long tag asm("o4"); register unsigned long pte asm("o5"); - unsigned long v; - unsigned int hash; - - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (&old_tsb[i]), "i" (ASI_NUCLEUS_QUAD_LDD)); + unsigned long v, hash; + + if (tlb_type == cheetah_plus) { + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (__pa(&old_tsb[i])), + "i" (ASI_QUAD_LDD_PHYS)); + } else { + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (&old_tsb[i]), + "i" (ASI_NUCLEUS_QUAD_LDD)); + } if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT))) continue; @@ -198,8 +221,20 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, v |= (i & (512UL - 1UL)) << 13UL; hash = tsb_hash(v, new_nentries); - new_tsb[hash].tag = tag; - new_tsb[hash].pte = pte; + if (tlb_type == cheetah_plus) { + __asm__ __volatile__( + "stxa %0, [%1] %2\n\t" + "stxa %3, [%4] %2" + : /* no outputs */ + : "r" (tag), + "r" (__pa(&new_tsb[hash].tag)), + "i" (ASI_PHYS_USE_EC), + "r" (pte), + "r" (__pa(&new_tsb[hash].pte))); + } else { + new_tsb[hash].tag = tag; + new_tsb[hash].pte = pte; + } } } -- cgit v1.1 From f4e841da30b4bcbb8f1cc20a01157a788ff58b21 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 2 Feb 2006 16:16:24 -0800 Subject: [SPARC64]: Turn off TSB growing for now. There are several tricky races involved with growing the TSB. So just use base-size TSBs for user contexts and we can revisit enabling this later. One part of the SMP problems is that tsb_context_switch() can see partially updated TSB configuration state if tsb_grow() is running in parallel. That's easily solved with a seqlock taken as a writer by tsb_grow() and taken as a reader to capture all the TSB config state in tsb_context_switch(). Then there is flush_tsb_user() running in parallel with a tsb_grow(). In theory we could take the seqlock as a reader there too, and just resample the TSB pointer and reflush but that looks really ugly. Lastly, I believe there is a case with threads that results in a TSB entry lock bit being set spuriously which will cause the next access to that TSB entry to wedge the cpu (since the TSB entry lock bit will never clear). It's either copy_tsb() or some bug elsewhere in the TSB assembly. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 5 ----- arch/sparc64/mm/tsb.c | 11 +---------- 2 files changed, 1 insertion(+), 15 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 4893f3e..1af6330 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -261,7 +261,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p struct page *page; unsigned long pfn; unsigned long pg_flags; - unsigned long mm_rss; pfn = pte_pfn(pte); if (pfn_valid(pfn) && @@ -285,10 +284,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; - mm_rss = get_mm_rss(mm); - if (mm_rss >= mm->context.tsb_rss_limit) - tsb_grow(mm, mm_rss, GFP_ATOMIC); - if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { struct tsb *tsb; unsigned long tag; diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 787533f..2cc8e65 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -310,7 +310,6 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { - unsigned long initial_rss; mm->context.sparc64_ctx_val = 0UL; @@ -319,15 +318,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) * will be confused and think there is an older TSB to free up. */ mm->context.tsb = NULL; - - /* If this is fork, inherit the parent's TSB size. We would - * grow it to that size on the first page fault anyways. - */ - initial_rss = mm->context.tsb_nentries; - if (initial_rss) - initial_rss -= 1; - - tsb_grow(mm, initial_rss, GFP_KERNEL); + tsb_grow(mm, 0, GFP_KERNEL); if (unlikely(!mm->context.tsb)) return -ENOMEM; -- cgit v1.1 From 52bf082f0a6e49e08ed99d4d9518c662dc735c7a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:08:37 -0800 Subject: [SPARC64]: SUN4V hypervisor TLB flush support code. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 224 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 214 insertions(+), 10 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 269ed57..cac58d6 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -15,6 +15,7 @@ #include #include #include +#include /* Basically, most of the Spitfire vs. 
Cheetah madness * has to do with the fact that Cheetah does not support @@ -29,7 +30,8 @@ .text .align 32 .globl __flush_tlb_mm -__flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ +__flush_tlb_mm: /* 18 insns */ + /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ ldxa [%o1] ASI_DMMU, %g2 cmp %g2, %o0 bne,pn %icc, __spitfire_flush_tlb_mm_slow @@ -52,7 +54,7 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */ .align 32 .globl __flush_tlb_pending -__flush_tlb_pending: +__flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 sllx %o1, 3, %o1 @@ -84,7 +86,8 @@ __flush_tlb_pending: .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* %o0=start, %o1=end */ +__flush_tlb_kernel_range: /* 14 insns */ + /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f sethi %hi(PAGE_SIZE), %o4 @@ -100,6 +103,7 @@ __flush_tlb_kernel_range: /* %o0=start, %o1=end */ flush %o3 retl nop + nop __spitfire_flush_tlb_mm_slow: rdpr %pstate, %g1 @@ -252,7 +256,63 @@ __cheetah_flush_dcache_page: /* 11 insns */ nop #endif /* DCACHE_ALIASING_POSSIBLE */ -cheetah_patch_one: + /* Hypervisor specific versions, patched at boot time. */ +__hypervisor_flush_tlb_mm: /* 8 insns */ + mov %o0, %o2 /* ARG2: mmu context */ + mov 0, %o0 /* ARG0: CPU lists unimplemented */ + mov 0, %o1 /* ARG1: CPU lists unimplemented */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + retl + nop + +__hypervisor_flush_tlb_pending: /* 15 insns */ + /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ + sllx %o1, 3, %g1 + mov %o2, %g2 + mov %o0, %g3 +1: sub %g1, (1 << 3), %g1 + ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g3, %o1 /* ARG1: mmu context */ + mov HV_MMU_DMMU, %o2 + andcc %o0, 1, %g0 + movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ + andn %o0, 1, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pt %g1, 1b + nop + retl + nop + +__hypervisor_flush_tlb_kernel_range: /* 14 insns */ + /* %o0=start, %o1=end */ + cmp %o0, %o1 + be,pn %xcc, 2f + sethi %hi(PAGE_SIZE), %g3 + mov %o0, %g1 + sub %o1, %g1, %g2 + sub %g2, %g3, %g2 +1: add %g1, %g2, %o0 /* ARG0: virtual address */ + mov 0, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pt %g2, 1b + sub %g2, %g3, %g2 +2: retl + nop + +#ifdef DCACHE_ALIASING_POSSIBLE + /* XXX Niagara and friends have an 8K cache, so no aliasing is + * XXX possible, but nothing explicit in the Hypervisor API + * XXX guarantees this. 
+ */ +__hypervisor_flush_dcache_page: /* 2 insns */ + retl + nop +#endif + +tlb_patch_one: 1: lduw [%o1], %g1 stw %g1, [%o0] flush %o0 @@ -271,14 +331,14 @@ cheetah_patch_cachetlbops: or %o0, %lo(__flush_tlb_mm), %o0 sethi %hi(__cheetah_flush_tlb_mm), %o1 or %o1, %lo(__cheetah_flush_tlb_mm), %o1 - call cheetah_patch_one + call tlb_patch_one mov 19, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 or %o1, %lo(__cheetah_flush_tlb_pending), %o1 - call cheetah_patch_one + call tlb_patch_one mov 27, %o2 #ifdef DCACHE_ALIASING_POSSIBLE @@ -286,7 +346,7 @@ cheetah_patch_cachetlbops: or %o0, %lo(__flush_dcache_page), %o0 sethi %hi(__cheetah_flush_dcache_page), %o1 or %o1, %lo(__cheetah_flush_dcache_page), %o1 - call cheetah_patch_one + call tlb_patch_one mov 11, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ @@ -309,7 +369,7 @@ cheetah_patch_cachetlbops: */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: +xcall_flush_tlb_mm: /* 18 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -321,9 +381,16 @@ xcall_flush_tlb_mm: stxa %g0, [%g4] ASI_IMMU_DEMAP stxa %g3, [%g2] ASI_DMMU retry + nop + nop + nop + nop + nop + nop + nop .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: +xcall_flush_tlb_pending: /* 20 insns */ /* %g5=context, %g1=nr, %g7=vaddrs[] */ sllx %g1, 3, %g1 mov PRIMARY_CONTEXT, %g4 @@ -348,7 +415,7 @@ xcall_flush_tlb_pending: retry .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: +xcall_flush_tlb_kernel_range: /* 22 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -365,6 +432,12 @@ xcall_flush_tlb_kernel_range: retry nop nop + nop + nop + nop + nop + nop + nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. 
@@ -458,6 +531,76 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop + .globl __hypervisor_xcall_flush_tlb_mm +__hypervisor_xcall_flush_tlb_mm: /* 18 insns */ + /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ + mov %o0, %g2 + mov %o1, %g3 + mov %o2, %g4 + mov %o3, %g1 + mov %o5, %g7 + clr %o0 /* ARG0: CPU lists unimplemented */ + clr %o1 /* ARG1: CPU lists unimplemented */ + mov %g5, %o2 /* ARG2: mmu context */ + mov HV_MMU_ALL, %o3 /* ARG3: flags */ + mov HV_FAST_MMU_DEMAP_CTX, %o5 + ta HV_FAST_TRAP + mov %g2, %o0 + mov %g3, %o1 + mov %g4, %o2 + mov %g1, %o3 + mov %g7, %o5 + membar #Sync + retry + + .globl __hypervisor_xcall_flush_tlb_pending +__hypervisor_xcall_flush_tlb_pending: /* 18 insns */ + /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */ + sllx %g1, 3, %g1 + mov %o0, %g2 + mov %o1, %g3 + mov %o2, %g4 +1: sub %g1, (1 << 3), %g1 + ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g5, %o1 /* ARG1: mmu context */ + mov HV_MMU_DMMU, %o2 + andcc %o0, 1, %g0 + movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pt %g1, 1b + nop + mov %g2, %o0 + mov %g3, %o1 + mov %g4, %o2 + membar #Sync + retry + + .globl __hypervisor_xcall_flush_tlb_kernel_range +__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ + /* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */ + sethi %hi(PAGE_SIZE - 1), %g2 + or %g2, %lo(PAGE_SIZE - 1), %g2 + andn %g1, %g2, %g1 + andn %g7, %g2, %g7 + sub %g7, %g1, %g3 + add %g2, 1, %g2 + sub %g3, %g2, %g3 + mov %o0, %g2 + mov %o1, %g4 + mov %o2, %g5 +1: add %g1, %g3, %o0 /* ARG0: virtual address */ + mov 0, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + ta HV_MMU_UNMAP_ADDR_TRAP + sethi %hi(PAGE_SIZE), %o2 + brnz,pt %g3, 1b + sub %g3, %o2, %g3 + mov %g2, %o0 + mov %g4, %o1 + mov %g5, %o2 + membar #Sync + retry + /* These just get rescheduled to PIL vectors. 
*/ .globl xcall_call_function xcall_call_function: @@ -475,3 +618,64 @@ xcall_capture: retry #endif /* CONFIG_SMP */ + + + .globl hypervisor_patch_cachetlbops +hypervisor_patch_cachetlbops: + save %sp, -128, %sp + + sethi %hi(__flush_tlb_mm), %o0 + or %o0, %lo(__flush_tlb_mm), %o0 + sethi %hi(__hypervisor_flush_tlb_mm), %o1 + or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 + call tlb_patch_one + mov 8, %o2 + + sethi %hi(__flush_tlb_pending), %o0 + or %o0, %lo(__flush_tlb_pending), %o0 + sethi %hi(__hypervisor_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 + call tlb_patch_one + mov 15, %o2 + + sethi %hi(__flush_tlb_kernel_range), %o0 + or %o0, %lo(__flush_tlb_kernel_range), %o0 + sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 + or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 14, %o2 + +#ifdef DCACHE_ALIASING_POSSIBLE + sethi %hi(__flush_dcache_page), %o0 + or %o0, %lo(__flush_dcache_page), %o0 + sethi %hi(__hypervisor_flush_dcache_page), %o1 + or %o1, %lo(__hypervisor_flush_dcache_page), %o1 + call tlb_patch_one + mov 2, %o2 +#endif /* DCACHE_ALIASING_POSSIBLE */ + +#ifdef CONFIG_SMP + sethi %hi(xcall_flush_tlb_mm), %o0 + or %o0, %lo(xcall_flush_tlb_mm), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1 + call tlb_patch_one + mov 18, %o2 + + sethi %hi(xcall_flush_tlb_pending), %o0 + or %o0, %lo(xcall_flush_tlb_pending), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + call tlb_patch_one + mov 18, %o2 + + sethi %hi(xcall_flush_tlb_kernel_range), %o0 + or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 + call tlb_patch_one + mov 22, %o2 +#endif /* CONFIG_SMP */ + + ret + restore -- cgit v1.1 From a43fe0e789f5445f5224511034f410adf11f153b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 4 Feb 2006 03:10:53 -0800 Subject: [SPARC64]: Add some hypervisor tlb_type checks. And more consistently check cheetah{,_plus} instead of assuming anything not spitfire is cheetah{,_plus}. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 1af6330..ab50cd9 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -335,7 +335,7 @@ out: void __kprobes flush_icache_range(unsigned long start, unsigned long end) { - /* Cheetah has coherent I-cache. */ + /* Cheetah and Hypervisor platform cpus have coherent I-cache. */ if (tlb_type == spitfire) { unsigned long kaddr; @@ -372,6 +372,8 @@ void mmu_info(struct seq_file *m) seq_printf(m, "MMU Type\t: Cheetah+\n"); else if (tlb_type == spitfire) seq_printf(m, "MMU Type\t: Spitfire\n"); + else if (tlb_type == hypervisor) + seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n"); else seq_printf(m, "MMU Type\t: ???\n"); @@ -581,7 +583,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end) if (++n >= 512) break; } - } else { + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { start = __pa(start); end = __pa(end); for (va = start; va < end; va += 32) -- cgit v1.1 From 45fec05f805a113372c9a7ff4c653ac749f6921c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Feb 2006 22:27:28 -0800 Subject: [SPARC64]: Sanitize %pstate writes for sun4v. 
If we're just switching between different alternate global sets, nop it out on sun4v. Also, get rid of all of the alternate global save/restore in the OBP CIF trampoline code. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index cac58d6..5dd86ad 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -444,8 +444,15 @@ xcall_flush_tlb_kernel_range: /* 22 insns */ */ .globl xcall_sync_tick xcall_sync_tick: - rdpr %pstate, %g2 + +661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %pil, %g2 wrpr %g0, 15, %pil sethi %hi(109f), %g7 @@ -468,8 +475,15 @@ xcall_sync_tick: */ .globl xcall_report_regs xcall_report_regs: - rdpr %pstate, %g2 + +661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate + .section .gl_2insn_patch, "ax" + .word 661b + nop + nop + .previous + rdpr %pil, %g2 wrpr %g0, 15, %pil sethi %hi(109f), %g7 -- cgit v1.1 From d257d5da39a78b32721ca84b2ba7f461f2f7ed7f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Feb 2006 23:44:37 -0800 Subject: [SPARC64]: Initial sun4v TLB miss handling infrastructure. Things are a little tricky because, unlike sun4u, we have to: 1) do a hypervisor trap to do the TLB load. 2) do the TSB lookup calculations by hand Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ab50cd9..e9aac42 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1050,8 +1050,25 @@ unsigned long __init find_ecache_flush_span(unsigned long size) static void __init tsb_phys_patch(void) { + struct tsb_ldquad_phys_patch_entry *pquad; struct tsb_phys_patch_entry *p; + pquad = &__tsb_ldquad_phys_patch; + while (pquad < &__tsb_ldquad_phys_patch_end) { + unsigned long addr = pquad->addr; + + if (tlb_type == hypervisor) + *(unsigned int *) addr = pquad->sun4v_insn; + else + *(unsigned int *) addr = pquad->sun4u_insn; + wmb(); + __asm__ __volatile__("flush %0" + : /* no outputs */ + : "r" (addr)); + + pquad++; + } + p = &__tsb_phys_patch; while (p < &__tsb_phys_patch_end) { unsigned long addr = p->addr; @@ -1069,6 +1086,7 @@ static void __init tsb_phys_patch(void) /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); +extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; @@ -1078,9 +1096,13 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || + tlb_type == hypervisor) tsb_phys_patch(); + if (tlb_type == hypervisor) + sun4v_patch_tlb_handlers(); + /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); -- cgit v1.1 From df7d6aec96ab98cb182dd5138a85bdc363a9bf0d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 00:00:16 -0800 Subject: [SPARC64]: Rename gl_{1,2}insn_patch --> sun4v_{1,2}insn_patch Signed-off-by: David S. 
Miller --- arch/sparc64/mm/ultra.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 5dd86ad..8c24493 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -447,7 +447,7 @@ xcall_sync_tick: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop @@ -478,7 +478,7 @@ xcall_report_regs: 661: rdpr %pstate, %g2 wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - .section .gl_2insn_patch, "ax" + .section .sun4v_2insn_patch, "ax" .word 661b nop nop -- cgit v1.1 From 481295f982b21b1dbe71cbf41d3a93028fee30d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 21:51:08 -0800 Subject: [SPARC64]: Register per-cpu fault status area with sun4v hypervisor. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index e9aac42..4c95cf3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -40,6 +40,7 @@ #include #include #include +#include extern void device_scan(void); @@ -1083,6 +1084,24 @@ static void __init tsb_phys_patch(void) } } +/* Register this cpu's fault status area with the hypervisor. */ +void __cpuinit sun4v_register_fault_status(void) +{ + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + int cpu = hard_smp_processor_id(); + struct trap_per_cpu *tb = &trap_block[cpu]; + unsigned long pa; + + pa = kern_base + ((unsigned long) tb - KERNBASE); + arg0 = HV_FAST_MMU_FAULT_AREA_CONF; + arg1 = pa; + __asm__ __volatile__("ta %4" + : "=&r" (arg0), "=&r" (arg1) + : "0" (arg0), "1" (arg1), + "i" (HV_FAST_TRAP)); +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1096,12 +1115,17 @@ void __init paging_init(void) unsigned long end_pfn, pages_avail, shift; unsigned long real_end, i; + kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); + sun4v_register_fault_status(); + } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); @@ -1112,9 +1136,6 @@ void __init paging_init(void) pfn_base = phys_base >> PAGE_SHIFT; - kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; - set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); -- cgit v1.1 From 8b11bd12aff76e02cdc2cbc9e439bba88d281223 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 7 Feb 2006 22:13:05 -0800 Subject: [SPARC64]: Patch up mmu context register writes for sun4v. sun4v uses ASI_MMU instead of ASI_DMMU Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 4c95cf3..6504d6e 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -792,15 +792,6 @@ void sparc_ultra_dump_dtlb(void) } } -static inline void spitfire_errata32(void) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); -} - extern unsigned long cmdline_memory_size; unsigned long __init bootmem_init(unsigned long *pages_avail) -- cgit v1.1 From d82ace7dc4073b090a55b9740700e32b9a9ae302 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 02:52:44 -0800 Subject: [SPARC64]: Detect sun4v early in boot process. We look for "SUNW,sun4v" in the 'compatible' property of the root OBP device tree node. Protect every %ver register access, to make sure it is not touched on sun4v, as %ver is hyperprivileged there. Lock kernel TLB entries using hypervisor calls instead of calls into OBP. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 58 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 12 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 6504d6e..e602b85 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -514,6 +514,29 @@ static void __init read_obp_translations(void) } } +static void __init hypervisor_tlb_lock(unsigned long vaddr, + unsigned long pte, + unsigned long mmu) +{ + register unsigned long func asm("%o0"); + register unsigned long arg0 asm("%o1"); + register unsigned long arg1 asm("%o2"); + register unsigned long arg2 asm("%o3"); + register unsigned long arg3 asm("%o4"); + + func = HV_FAST_MMU_MAP_PERM_ADDR; + arg0 = vaddr; + arg1 = 0; + arg2 = pte; + arg3 = mmu; + __asm__ __volatile__("ta 0x80" + : "=&r" (func), "=&r" (arg0), + "=&r" (arg1), "=&r" (arg2), + "=&r" (arg3) + : "0" (func), "1" (arg0), "2" (arg1), + "3" (arg2), "4" (arg3)); +} + static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; @@ -527,19 +550,30 @@ static void __init remap_kernel(void) kern_locked_tte_data = tte_data; - /* Now lock us into the TLBs via OBP. */ - prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); - prom_itlb_load(tlb_ent, tte_data, tte_vaddr); - if (bigkernel) { - tlb_ent -= 1; - prom_dtlb_load(tlb_ent, - tte_data + 0x400000, - tte_vaddr + 0x400000); - prom_itlb_load(tlb_ent, - tte_data + 0x400000, - tte_vaddr + 0x400000); + /* Now lock us into the TLBs via Hypervisor or OBP. 
*/ + if (tlb_type == hypervisor) { + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); + if (bigkernel) { + tte_vaddr += 0x400000; + tte_data += 0x400000; + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU); + hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU); + } + } else { + prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); + prom_itlb_load(tlb_ent, tte_data, tte_vaddr); + if (bigkernel) { + tlb_ent -= 1; + prom_dtlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + prom_itlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + } + sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; } - sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; if (tlb_type == cheetah_plus) { sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | CTX_CHEETAH_PLUS_NUC); -- cgit v1.1 From 618e9ed98aed924a1fc664eb6522db4a5e927043 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 17:21:53 -0800 Subject: [SPARC64]: Hypervisor TSB context switching. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 2cc8e65..6ae2a5a 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -149,7 +149,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) BUG(); }; - if (tlb_type == cheetah_plus) { + if (tlb_type == cheetah_plus || tlb_type == hypervisor) { /* Physical mapping, no locked TLB entry for TSB. */ tsb_reg |= tsb_paddr; @@ -166,6 +166,52 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) mm->context.tsb_map_pte = tte; } + /* Setup the Hypervisor TSB descriptor. */ + if (tlb_type == hypervisor) { + struct hv_tsb_descr *hp = &mm->context.tsb_descr; + + switch (PAGE_SIZE) { + case 8192: + default: + hp->pgsz_idx = HV_PGSZ_IDX_8K; + break; + + case 64 * 1024: + hp->pgsz_idx = HV_PGSZ_IDX_64K; + break; + + case 512 * 1024: + hp->pgsz_idx = HV_PGSZ_IDX_512K; + break; + + case 4 * 1024 * 1024: + hp->pgsz_idx = HV_PGSZ_IDX_4MB; + break; + }; + hp->assoc = 1; + hp->num_ttes = tsb_bytes / 16; + hp->ctx_idx = 0; + switch (PAGE_SIZE) { + case 8192: + default: + hp->pgsz_mask = HV_PGSZ_MASK_8K; + break; + + case 64 * 1024: + hp->pgsz_mask = HV_PGSZ_MASK_64K; + break; + + case 512 * 1024: + hp->pgsz_mask = HV_PGSZ_MASK_512K; + break; + + case 4 * 1024 * 1024: + hp->pgsz_mask = HV_PGSZ_MASK_4MB; + break; + }; + hp->tsb_base = tsb_paddr; + hp->resv = 0; + } } /* The page tables are locked against modifications while this -- cgit v1.1 From 164c220fa3947abbada65329d168f421b461a2a7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 9 Feb 2006 22:57:21 -0800 Subject: [SPARC64]: Fix hypervisor call arg passing. Function goes in %o5, args go in %o0 --> %o5. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index e602b85..7faba33 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -518,11 +518,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, unsigned long pte, unsigned long mmu) { - register unsigned long func asm("%o0"); - register unsigned long arg0 asm("%o1"); - register unsigned long arg1 asm("%o2"); - register unsigned long arg2 asm("%o3"); - register unsigned long arg3 asm("%o4"); + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + register unsigned long arg2 asm("%o2"); + register unsigned long arg3 asm("%o3"); func = HV_FAST_MMU_MAP_PERM_ADDR; arg0 = vaddr; @@ -1112,18 +1112,18 @@ static void __init tsb_phys_patch(void) /* Register this cpu's fault status area with the hypervisor. */ void __cpuinit sun4v_register_fault_status(void) { + register unsigned long func asm("%o5"); register unsigned long arg0 asm("%o0"); - register unsigned long arg1 asm("%o1"); int cpu = hard_smp_processor_id(); struct trap_per_cpu *tb = &trap_block[cpu]; unsigned long pa; pa = kern_base + ((unsigned long) tb - KERNBASE); - arg0 = HV_FAST_MMU_FAULT_AREA_CONF; - arg1 = pa; + func = HV_FAST_MMU_FAULT_AREA_CONF; + arg0 = pa; __asm__ __volatile__("ta %4" - : "=&r" (arg0), "=&r" (arg1) - : "0" (arg0), "1" (arg1), + : "=&r" (func), "=&r" (arg0) + : "0" (func), "1" (arg0), "i" (HV_FAST_TRAP)); } -- cgit v1.1 From 12eaa328f9fb2d3fcb5afb682c762690d05a3cd8 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 10 Feb 2006 15:39:51 -0800 Subject: [SPARC64]: Use ASI_SCRATCHPAD address 0x0 properly. This is where the virtual address of the fault status area belongs. To set it up we don't make a hypervisor call, instead we call OBP's SUNW,set-trap-table with the real address of the fault status area as the second argument. And right before that call we write the virtual address into ASI_SCRATCHPAD vaddr 0x0. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 7faba33..88eb6f6 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1109,24 +1109,6 @@ static void __init tsb_phys_patch(void) } } -/* Register this cpu's fault status area with the hypervisor. */ -void __cpuinit sun4v_register_fault_status(void) -{ - register unsigned long func asm("%o5"); - register unsigned long arg0 asm("%o0"); - int cpu = hard_smp_processor_id(); - struct trap_per_cpu *tb = &trap_block[cpu]; - unsigned long pa; - - pa = kern_base + ((unsigned long) tb - KERNBASE); - func = HV_FAST_MMU_FAULT_AREA_CONF; - arg0 = pa; - __asm__ __volatile__("ta %4" - : "=&r" (func), "=&r" (arg0) - : "0" (func), "1" (arg0), - "i" (HV_FAST_TRAP)); -} - /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1147,10 +1129,8 @@ void __init paging_init(void) tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) { + if (tlb_type == hypervisor) sun4v_patch_tlb_handlers(); - sun4v_register_fault_status(); - } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); -- cgit v1.1 From e92b92571c85dfa1cdc56e88566134c51ae1d12b Mon Sep 17 00:00:00 2001 From: "David S. 
Miller" Date: Sat, 11 Feb 2006 10:19:37 -0800 Subject: [SPARC64]: Handle hypervisor case correctly in copy_tsb(). Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 6ae2a5a..c5dc4b0 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -231,7 +231,13 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, register unsigned long pte asm("o5"); unsigned long v, hash; - if (tlb_type == cheetah_plus) { + if (tlb_type == hypervisor) { + __asm__ __volatile__( + "ldda [%2] %3, %0" + : "=r" (tag), "=r" (pte) + : "r" (__pa(&old_tsb[i])), + "i" (ASI_QUAD_LDD_PHYS_4V)); + } else if (tlb_type == cheetah_plus) { __asm__ __volatile__( "ldda [%2] %3, %0" : "=r" (tag), "=r" (pte) @@ -267,7 +273,8 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, v |= (i & (512UL - 1UL)) << 13UL; hash = tsb_hash(v, new_nentries); - if (tlb_type == cheetah_plus) { + if (tlb_type == cheetah_plus || + tlb_type == hypervisor) { __asm__ __volatile__( "stxa %0, [%1] %2\n\t" "stxa %3, [%4] %2" -- cgit v1.1 From 490384e752a43aa281ed533e9de2da36df25c337 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 14:41:18 -0800 Subject: [SPARC64]: Register kernel TSB with hypervisor. We do this right after we take over the trap table from OBP. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 88eb6f6..92756da 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1109,6 +1109,69 @@ static void __init tsb_phys_patch(void) } } +/* Don't mark as init, we give this to the Hypervisor. */ +static struct hv_tsb_descr ktsb_descr[2]; +extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; + +static void __init sun4v_ktsb_init(void) +{ + unsigned long ktsb_pa; + + ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); + + switch (PAGE_SIZE) { + case 8 * 1024: + default: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K; + break; + + case 64 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K; + break; + + case 512 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K; + break; + + case 4 * 1024 * 1024: + ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; + ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; + break; + }; + + ktsb_descr[0].assoc = 0; + ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; + ktsb_descr[0].ctx_idx = 0; + ktsb_descr[0].tsb_base = ktsb_pa; + ktsb_descr[0].resv = 0; + + /* XXX When we have a kernel large page size TSB, describe + * XXX it in ktsb_descr[1] here. 
+ */ +} + +void __cpuinit sun4v_ktsb_register(void) +{ + register unsigned long func asm("%o5"); + register unsigned long arg0 asm("%o0"); + register unsigned long arg1 asm("%o1"); + unsigned long pa; + + pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); + + func = HV_FAST_MMU_TSB_CTX0; + /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ + arg0 = 1; + arg1 = pa; + __asm__ __volatile__("ta %6" + : "=&r" (func), "=&r" (arg0), "=&r" (arg1) + : "0" (func), "1" (arg0), "2" (arg1), + "i" (HV_FAST_TRAP)); +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); @@ -1129,8 +1192,10 @@ void __init paging_init(void) tlb_type == hypervisor) tsb_phys_patch(); - if (tlb_type == hypervisor) + if (tlb_type == hypervisor) { sun4v_patch_tlb_handlers(); + sun4v_ktsb_init(); + } /* Find available physical memory... */ read_obp_memory("available", &pavail[0], &pavail_ents); @@ -1171,6 +1236,9 @@ void __init paging_init(void) __flush_tlb_all(); + if (tlb_type == hypervisor) + sun4v_ktsb_register(); + /* Setup bootmem... */ pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail); -- cgit v1.1 From c4bce90ea2069e5a87beac806de3090ab32128d5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 11 Feb 2006 21:57:54 -0800 Subject: [SPARC64]: Deal with PTE layout differences in SUN4V. Yes, you heard it right, they changed the PTE layout for SUN4V. Ho hum... This is the simple and inefficient way to support this. It'll get optimized, don't worry. Signed-off-by: David S. Miller --- arch/sparc64/mm/fault.c | 2 +- arch/sparc64/mm/generic.c | 40 ++- arch/sparc64/mm/init.c | 703 +++++++++++++++++++++++++++++++++++++++------- arch/sparc64/mm/tsb.c | 12 +- 4 files changed, 630 insertions(+), 127 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 6f0539a..439a53c 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -137,7 +137,7 @@ static unsigned int get_user_insn(unsigned long tpc) if (!pte_present(pte)) goto out; - pa = (pte_val(pte) & _PAGE_PADDR); + pa = (pte_pfn(pte) << PAGE_SHIFT); pa += (tpc & ~PAGE_MASK); /* Use phys bypass so we don't pollute dtlb/dcache. */ diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c index 580b63d..5fc5c57 100644 --- a/arch/sparc64/mm/generic.c +++ b/arch/sparc64/mm/generic.c @@ -15,15 +15,6 @@ #include #include -static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) -{ - pte_t pte; - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) & - ~(unsigned long)_PAGE_CACHE); - pte_val(pte) |= (((unsigned long)space) << 32); - return pte; -} - /* Remap IO memory, the same way as remap_pfn_range(), but use * the obio memory space. 
* @@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, pte_t entry; unsigned long curend = address + PAGE_SIZE; - entry = mk_pte_io(offset, prot, space); + entry = mk_pte_io(offset, prot, space, PAGE_SIZE); if (!(address & 0xffff)) { - if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) { - entry = mk_pte_io(offset, - __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), - space); + if (PAGE_SIZE < (4 * 1024 * 1024) && + !(address & 0x3fffff) && + !(offset & 0x3ffffe) && + end >= address + 0x400000) { + entry = mk_pte_io(offset, prot, space, + 4 * 1024 * 1024); curend = address + 0x400000; offset += 0x400000; - } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) { - entry = mk_pte_io(offset, - __pgprot(pgprot_val (prot) | _PAGE_SZ512K), - space); + } else if (PAGE_SIZE < (512 * 1024) && + !(address & 0x7ffff) && + !(offset & 0x7fffe) && + end >= address + 0x80000) { + entry = mk_pte_io(offset, prot, space, + 512 * 1024 * 1024); curend = address + 0x80000; offset += 0x80000; - } else if (!(offset & 0xfffe) && end >= address + 0x10000) { - entry = mk_pte_io(offset, - __pgprot(pgprot_val (prot) | _PAGE_SZ64K), - space); + } else if (PAGE_SIZE < (64 * 1024) && + !(offset & 0xfffe) && + end >= address + 0x10000) { + entry = mk_pte_io(offset, prot, space, + 64 * 1024); curend = address + 0x10000; offset += 0x10000; } else diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 92756da..9c2fc23 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly; unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; unsigned long pfn_base __read_mostly; +unsigned long kern_linear_pte_xor __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long __tsb_insert(tsb_addr, tag, pte); } +unsigned long _PAGE_ALL_SZ_BITS __read_mostly; +unsigned long _PAGE_SZBITS __read_mostly; + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; @@ -398,39 +403,9 @@ struct linux_prom_translation { struct linux_prom_translation prom_trans[512] __read_mostly; unsigned int prom_trans_ents __read_mostly; -extern unsigned long prom_boot_page; -extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); -extern int prom_get_mmu_ihandle(void); -extern void register_prom_callbacks(void); - /* Exported for SMP bootup purposes. */ unsigned long kern_locked_tte_data; -/* - * Translate PROM's mapping we capture at boot time into physical address. - * The second parameter is only set from prom_callback() invocations. - */ -unsigned long prom_virt_to_phys(unsigned long promva, int *error) -{ - int i; - - for (i = 0; i < prom_trans_ents; i++) { - struct linux_prom_translation *p = &prom_trans[i]; - - if (promva >= p->virt && - promva < (p->virt + p->size)) { - unsigned long base = p->data & _PAGE_PADDR; - - if (error) - *error = 0; - return base + (promva & (8192 - 1)); - } - } - if (error) - *error = 1; - return 0UL; -} - /* The obp translations are saved based on 8k pagesize, since obp can * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> * HI_OBP_ADDRESS range are handled in ktlb.S. 
@@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, "3" (arg2), "4" (arg3)); } +static unsigned long kern_large_tte(unsigned long paddr); + static void __init remap_kernel(void) { unsigned long phys_page, tte_vaddr, tte_data; @@ -544,9 +521,7 @@ static void __init remap_kernel(void) tte_vaddr = (unsigned long) KERNBASE; phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; - tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | - _PAGE_CP | _PAGE_CV | _PAGE_P | - _PAGE_L | _PAGE_W)); + tte_data = kern_large_tte(phys_page); kern_locked_tte_data = tte_data; @@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void) prom_printf("Remapping the kernel... "); remap_kernel(); prom_printf("done.\n"); - - prom_printf("Registering callbacks... "); - register_prom_callbacks(); - prom_printf("done.\n"); } void prom_world(int enter) @@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end) } #endif /* DCACHE_ALIASING_POSSIBLE */ -/* If not locked, zap it. */ -void __flush_tlb_all(void) -{ - unsigned long pstate; - int i; - - __asm__ __volatile__("flushw\n\t" - "rdpr %%pstate, %0\n\t" - "wrpr %0, %1, %%pstate" - : "=r" (pstate) - : "i" (PSTATE_IE)); - if (tlb_type == spitfire) { - for (i = 0; i < 64; i++) { - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); - spitfire_put_dtlb_data(i, 0x0UL); - } - - /* Spitfire Errata #32 workaround */ - /* NOTE: Always runs on spitfire, so no - * cheetah+ page size encodings. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); - spitfire_put_itlb_data(i, 0x0UL); - } - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - cheetah_flush_dtlb_all(); - cheetah_flush_itlb_all(); - } - __asm__ __volatile__("wrpr %0, 0, %%pstate" - : : "r" (pstate)); -} - /* Caller does TLB context flushing on local CPU if necessary. * The caller also ensures that CTX_VALID(mm->context) is false. * @@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void); static unsigned long last_valid_pfn; pgd_t swapper_pg_dir[2048]; +static void sun4u_pgprot_init(void); +static void sun4v_pgprot_init(void); + void __init paging_init(void) { unsigned long end_pfn, pages_avail, shift; @@ -1188,6 +1105,11 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + if (tlb_type == hypervisor) + sun4v_pgprot_init(); + else + sun4u_pgprot_init(); + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_phys_patch(); @@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end) } } #endif + +/* SUN4U pte bits... 
*/ +#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */ +#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */ +#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */ +#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */ +#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */ +#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */ +#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */ +#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */ +#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */ +#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */ +#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */ +#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */ +#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */ +#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */ +#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */ +#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */ +#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */ +#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */ +#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */ +#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */ +#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */ +#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */ +#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */ +#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */ +#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */ +#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */ +#define _PAGE_W_4U 0x0000000000000002 /* Writable */ + +/* SUN4V pte bits... */ +#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */ +#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */ +#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */ +#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */ +#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */ +#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */ +#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */ +#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */ +#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */ +#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */ +#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */ +#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */ +#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */ +#define _PAGE_W_4V 0x0000000000000040 /* Writable */ +#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */ +#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */ +#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */ +#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */ +#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */ +#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */ +#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */ +#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */ +#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */ +#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */ +#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */ +#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */ + +#if PAGE_SHIFT == 13 +#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V +#elif PAGE_SHIFT == 16 +#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V +#elif 
PAGE_SHIFT == 19 +#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U +#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V +#elif PAGE_SHIFT == 22 +#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U +#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V +#else +#error Wrong PAGE_SHIFT specified +#endif + +#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) +#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) +#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V +#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) +#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U +#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V +#endif + +#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) +#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) +#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) +#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V) +#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R) +#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R) + +pgprot_t PAGE_KERNEL __read_mostly; +EXPORT_SYMBOL(PAGE_KERNEL); + +pgprot_t PAGE_KERNEL_LOCKED __read_mostly; +pgprot_t PAGE_COPY __read_mostly; +pgprot_t PAGE_EXEC __read_mostly; +unsigned long pg_iobits __read_mostly; + +unsigned long _PAGE_IE __read_mostly; +unsigned long _PAGE_E __read_mostly; +unsigned long _PAGE_CACHE __read_mostly; + +static void prot_init_common(unsigned long page_none, + unsigned long page_shared, + unsigned long page_copy, + unsigned long page_readonly, + unsigned long page_exec_bit) +{ + PAGE_COPY = __pgprot(page_copy); + + protection_map[0x0] = __pgprot(page_none); + protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit); + protection_map[0x4] = __pgprot(page_readonly); + protection_map[0x5] = __pgprot(page_readonly); + protection_map[0x6] = __pgprot(page_copy); + protection_map[0x7] = __pgprot(page_copy); + protection_map[0x8] = __pgprot(page_none); + protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit); + protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit); + protection_map[0xc] = __pgprot(page_readonly); + protection_map[0xd] = __pgprot(page_readonly); + protection_map[0xe] = __pgprot(page_shared); + protection_map[0xf] = __pgprot(page_shared); +} + +static void __init sun4u_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U); + PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID | + _PAGE_CACHE_4U | _PAGE_P_4U | + __ACCESS_BITS_4U | __DIRTY_BITS_4U | + _PAGE_EXEC_4U | _PAGE_L_4U); + PAGE_EXEC = __pgprot(_PAGE_EXEC_4U); + + _PAGE_IE = _PAGE_IE_4U; + _PAGE_E = _PAGE_E_4U; + _PAGE_CACHE = _PAGE_CACHE_4U; + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | + __ACCESS_BITS_4U | _PAGE_E_4U); + + kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + 0xfffff80000000000; + kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + _PAGE_SZBITS = _PAGE_SZBITS_4U; + _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | + _PAGE_SZ64K_4U | _PAGE_SZ8K_4U | + _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U); + + + page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U; + 
page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U | + __ACCESS_BITS_4U | _PAGE_EXEC_4U); + + page_exec_bit = _PAGE_EXEC_4U; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +static void __init sun4v_pgprot_init(void) +{ + unsigned long page_none, page_shared, page_copy, page_readonly; + unsigned long page_exec_bit; + + PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID | + _PAGE_CACHE_4V | _PAGE_P_4V | + __ACCESS_BITS_4V | __DIRTY_BITS_4V | + _PAGE_EXEC_4V); + PAGE_KERNEL_LOCKED = PAGE_KERNEL; + PAGE_EXEC = __pgprot(_PAGE_EXEC_4V); + + _PAGE_IE = _PAGE_IE_4V; + _PAGE_E = _PAGE_E_4V; + _PAGE_CACHE = _PAGE_CACHE_4V; + + kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + 0xfffff80000000000; + kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | + __ACCESS_BITS_4V | _PAGE_E_4V); + + _PAGE_SZBITS = _PAGE_SZBITS_4V; + _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V | + _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V | + _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V | + _PAGE_SZ64K_4V | _PAGE_SZ8K_4V); + + page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V; + page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V); + page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V | + __ACCESS_BITS_4V | _PAGE_EXEC_4V); + + page_exec_bit = _PAGE_EXEC_4V; + + prot_init_common(page_none, page_shared, page_copy, page_readonly, + page_exec_bit); +} + +unsigned long pte_sz_bits(unsigned long sz) +{ + if (tlb_type == hypervisor) { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4V; + case 64 * 1024: + return _PAGE_SZ64K_4V; + case 512 * 1024: + return _PAGE_SZ512K_4V; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4V; + }; + } else { + switch (sz) { + case 8 * 1024: + default: + return _PAGE_SZ8K_4U; + case 64 * 1024: + return _PAGE_SZ64K_4U; + case 512 * 1024: + return _PAGE_SZ512K_4U; + case 4 * 1024 * 1024: + return _PAGE_SZ4MB_4U; + }; + } +} + +pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) +{ + pte_t pte; + if (tlb_type == hypervisor) { + pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) & + ~(unsigned long)_PAGE_CACHE_4V); + } else { + pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) & + ~(unsigned long)_PAGE_CACHE_4U); + } + pte_val(pte) |= (((unsigned long)space) << 32); + pte_val(pte) |= pte_sz_bits(page_size); + return pte; +} + +unsigned long pte_present(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_PRESENT_4V : _PAGE_PRESENT_4U)); +} + +unsigned long pte_file(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_FILE_4V : _PAGE_FILE_4U)); +} + +unsigned long pte_read(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_READ_4V : _PAGE_READ_4U)); +} + +unsigned long pte_exec(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_EXEC_4V : _PAGE_EXEC_4U)); +} + +unsigned long pte_write(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? 
+ _PAGE_WRITE_4V : _PAGE_WRITE_4U)); +} + +unsigned long pte_dirty(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U)); +} + +unsigned long pte_young(pte_t pte) +{ + return (pte_val(pte) & + ((tlb_type == hypervisor) ? + _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U)); +} + +pte_t pte_wrprotect(pte_t pte) +{ + unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_WRITE_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_rdprotect(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_READ_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_READ_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkclean(pte_t pte) +{ + unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkold(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_ACCESSED_4V; + + return __pte(pte_val(pte) & ~mask); +} + +pte_t pte_mkyoung(pte_t pte) +{ + unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_R | _PAGE_ACCESSED_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkwrite(pte_t pte) +{ + unsigned long mask = _PAGE_WRITE_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_WRITE_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkdirty(pte_t pte) +{ + unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pte_mkhuge(pte_t pte) +{ + unsigned long mask = _PAGE_SZHUGE_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_SZHUGE_4V; + + return __pte(pte_val(pte) | mask); +} + +pte_t pgoff_to_pte(unsigned long off) +{ + unsigned long bit = _PAGE_FILE_4U; + + if (tlb_type == hypervisor) + bit = _PAGE_FILE_4V; + + return __pte((off << PAGE_SHIFT) | bit); +} + +pgprot_t pgprot_noncached(pgprot_t prot) +{ + unsigned long val = pgprot_val(prot); + unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U; + unsigned long on = _PAGE_E_4U; + + if (tlb_type == hypervisor) { + off = _PAGE_CP_4V | _PAGE_CV_4V; + on = _PAGE_E_4V; + } + + return __pgprot((val & ~off) | on); +} + +pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + unsigned long sz_bits = _PAGE_SZBITS_4U; + + if (tlb_type == hypervisor) + sz_bits = _PAGE_SZBITS_4V; + + return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits); +} + +unsigned long pte_pfn(pte_t pte) +{ + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + return (pte_val(pte) & mask) >> PAGE_SHIFT; +} + +pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) +{ + unsigned long preserve_mask; + unsigned long val; + + preserve_mask = (_PAGE_PADDR_4U | + _PAGE_MODIFIED_4U | + _PAGE_ACCESSED_4U | + _PAGE_CP_4U | + _PAGE_CV_4U | + _PAGE_E_4U | + _PAGE_PRESENT_4U | + _PAGE_SZBITS_4U); + if (tlb_type == hypervisor) + preserve_mask = (_PAGE_PADDR_4V | + _PAGE_MODIFIED_4V | + _PAGE_ACCESSED_4V | + _PAGE_CP_4V | + _PAGE_CV_4V | + _PAGE_E_4V | + _PAGE_PRESENT_4V | + _PAGE_SZBITS_4V); + + val = (pte_val(orig_pte) & preserve_mask); + + return __pte(val | (pgprot_val(new_prot) & ~preserve_mask)); +} + +static unsigned long kern_large_tte(unsigned long paddr) +{ + unsigned long val; + + val = (_PAGE_VALID | _PAGE_SZ4MB_4U | + _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U | + 
_PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U); + if (tlb_type == hypervisor) + val = (_PAGE_VALID | _PAGE_SZ4MB_4V | + _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V | + _PAGE_EXEC_4V | _PAGE_W_4V); + + return val | paddr; +} + +/* + * Translate PROM's mapping we capture at boot time into physical address. + * The second parameter is only set from prom_callback() invocations. + */ +unsigned long prom_virt_to_phys(unsigned long promva, int *error) +{ + unsigned long mask; + int i; + + mask = _PAGE_PADDR_4U; + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + for (i = 0; i < prom_trans_ents; i++) { + struct linux_prom_translation *p = &prom_trans[i]; + + if (promva >= p->virt && + promva < (p->virt + p->size)) { + unsigned long base = p->data & mask; + + if (error) + *error = 0; + return base + (promva & (8192 - 1)); + } + } + if (error) + *error = 1; + return 0UL; +} + +/* XXX We should kill off this ugly thing at some point. XXX */ +unsigned long sun4u_get_pte(unsigned long addr) +{ + pgd_t *pgdp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + unsigned long mask = _PAGE_PADDR_4U; + + if (tlb_type == hypervisor) + mask = _PAGE_PADDR_4V; + + if (addr >= PAGE_OFFSET) + return addr & mask; + + if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS)) + return prom_virt_to_phys(addr, NULL); + + pgdp = pgd_offset_k(addr); + pudp = pud_offset(pgdp, addr); + pmdp = pmd_offset(pudp, addr); + ptep = pte_offset_kernel(pmdp, addr); + + return pte_val(*ptep) & mask; +} + +/* If not locked, zap it. */ +void __flush_tlb_all(void) +{ + unsigned long pstate; + int i; + + __asm__ __volatile__("flushw\n\t" + "rdpr %%pstate, %0\n\t" + "wrpr %0, %1, %%pstate" + : "=r" (pstate) + : "i" (PSTATE_IE)); + if (tlb_type == spitfire) { + for (i = 0; i < 64; i++) { + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. + */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); + spitfire_put_dtlb_data(i, 0x0UL); + } + + /* Spitfire Errata #32 workaround */ + /* NOTE: Always runs on spitfire, so no + * cheetah+ page size encodings. 
+ */ + __asm__ __volatile__("stxa %0, [%1] %2\n\t" + "flush %%g6" + : /* No outputs */ + : "r" (0), + "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + + if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) { + __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" + "membar #Sync" + : /* no outputs */ + : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); + spitfire_put_itlb_data(i, 0x0UL); + } + } + } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { + cheetah_flush_dtlb_all(); + cheetah_flush_itlb_all(); + } + __asm__ __volatile__("wrpr %0, 0, %%pstate" + : : "r" (pstate)); +} diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index c5dc4b0..975242a 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -85,8 +85,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb); base = TSBMAP_BASE; - tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP | - _PAGE_CV | _PAGE_P | _PAGE_W); + tte = pgprot_val(PAGE_KERNEL_LOCKED); tsb_paddr = __pa(mm->context.tsb); BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); @@ -99,55 +98,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) #ifdef DCACHE_ALIASING_POSSIBLE base += (tsb_paddr & 8192); #endif - tte |= _PAGE_SZ8K; page_sz = 8192; break; case 8192 << 1: tsb_reg = 0x1UL; - tte |= _PAGE_SZ64K; page_sz = 64 * 1024; break; case 8192 << 2: tsb_reg = 0x2UL; - tte |= _PAGE_SZ64K; page_sz = 64 * 1024; break; case 8192 << 3: tsb_reg = 0x3UL; - tte |= _PAGE_SZ64K; page_sz = 64 * 1024; break; case 8192 << 4: tsb_reg = 0x4UL; - tte |= _PAGE_SZ512K; page_sz = 512 * 1024; break; case 8192 << 5: tsb_reg = 0x5UL; - tte |= _PAGE_SZ512K; page_sz = 512 * 1024; break; case 8192 << 6: tsb_reg = 0x6UL; - tte |= _PAGE_SZ512K; page_sz = 512 * 1024; break; case 8192 << 7: tsb_reg = 0x7UL; - tte |= _PAGE_SZ4MB; page_sz = 4 * 1024 * 1024; break; default: BUG(); }; + tte |= pte_sz_bits(page_sz); if (tlb_type == cheetah_plus || tlb_type == hypervisor) { /* Physical mapping, no locked TLB entry for TSB. */ -- cgit v1.1 From ff02e0d26f139ad95ec3a7e94f88faccaa180dff Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 17:07:51 -0800 Subject: [SPARC64]: Move PTE field definitions back into asm/pgtable.h Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 84 -------------------------------------------------- 1 file changed, 84 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9c2fc23..81f9f4b 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1334,90 +1334,6 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif -/* SUN4U pte bits... 
*/ -#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */ -#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */ -#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */ -#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */ -#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */ -#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */ -#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */ -#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */ -#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */ -#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */ -#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */ -#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */ -#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */ -#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */ -#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */ -#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */ -#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */ -#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */ -#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */ -#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */ -#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */ -#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */ -#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */ -#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */ -#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */ -#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */ -#define _PAGE_W_4U 0x0000000000000002 /* Writable */ - -/* SUN4V pte bits... */ -#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */ -#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */ -#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */ -#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */ -#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */ -#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */ -#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */ -#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */ -#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */ -#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */ -#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */ -#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */ -#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */ -#define _PAGE_W_4V 0x0000000000000040 /* Writable */ -#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */ -#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */ -#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */ -#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */ -#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */ -#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */ -#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */ -#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */ -#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */ -#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */ -#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */ -#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */ - -#if PAGE_SHIFT == 13 -#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V -#elif PAGE_SHIFT == 16 -#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V -#elif 
PAGE_SHIFT == 19 -#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U -#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V -#elif PAGE_SHIFT == 22 -#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U -#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V -#else -#error Wrong PAGE_SHIFT specified -#endif - -#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) -#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K) -#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V -#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K) -#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U -#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V -#endif - #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U) #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V) #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U) -- cgit v1.1 From cf627156c450cd5a0741b31f55181db3400d4887 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 12 Feb 2006 21:10:07 -0800 Subject: [SPARC64]: Use inline patching for critical PTE operations. This handles the SUN4U vs SUN4V PTE layout differences with near zero performance cost. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 211 +------------------------------------------------ 1 file changed, 3 insertions(+), 208 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 81f9f4b..6f860c3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1502,217 +1502,12 @@ unsigned long pte_sz_bits(unsigned long sz) pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size) { pte_t pte; - if (tlb_type == hypervisor) { - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) & - ~(unsigned long)_PAGE_CACHE_4V); - } else { - pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) & - ~(unsigned long)_PAGE_CACHE_4U); - } + + pte_val(pte) = page | pgprot_val(pgprot_noncached(prot)); pte_val(pte) |= (((unsigned long)space) << 32); pte_val(pte) |= pte_sz_bits(page_size); - return pte; -} - -unsigned long pte_present(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_PRESENT_4V : _PAGE_PRESENT_4U)); -} - -unsigned long pte_file(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_FILE_4V : _PAGE_FILE_4U)); -} - -unsigned long pte_read(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_READ_4V : _PAGE_READ_4U)); -} - -unsigned long pte_exec(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_EXEC_4V : _PAGE_EXEC_4U)); -} - -unsigned long pte_write(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_WRITE_4V : _PAGE_WRITE_4U)); -} - -unsigned long pte_dirty(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? - _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U)); -} - -unsigned long pte_young(pte_t pte) -{ - return (pte_val(pte) & - ((tlb_type == hypervisor) ? 
- _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U)); -} -pte_t pte_wrprotect(pte_t pte) -{ - unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_WRITE_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_rdprotect(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_READ_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_READ_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkclean(pte_t pte) -{ - unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkold(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_ACCESSED_4V; - - return __pte(pte_val(pte) & ~mask); -} - -pte_t pte_mkyoung(pte_t pte) -{ - unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_R | _PAGE_ACCESSED_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkwrite(pte_t pte) -{ - unsigned long mask = _PAGE_WRITE_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_WRITE_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkdirty(pte_t pte) -{ - unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_MODIFIED_4V | _PAGE_W_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pte_mkhuge(pte_t pte) -{ - unsigned long mask = _PAGE_SZHUGE_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_SZHUGE_4V; - - return __pte(pte_val(pte) | mask); -} - -pte_t pgoff_to_pte(unsigned long off) -{ - unsigned long bit = _PAGE_FILE_4U; - - if (tlb_type == hypervisor) - bit = _PAGE_FILE_4V; - - return __pte((off << PAGE_SHIFT) | bit); -} - -pgprot_t pgprot_noncached(pgprot_t prot) -{ - unsigned long val = pgprot_val(prot); - unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U; - unsigned long on = _PAGE_E_4U; - - if (tlb_type == hypervisor) { - off = _PAGE_CP_4V | _PAGE_CV_4V; - on = _PAGE_E_4V; - } - - return __pgprot((val & ~off) | on); -} - -pte_t pfn_pte(unsigned long pfn, pgprot_t prot) -{ - unsigned long sz_bits = _PAGE_SZBITS_4U; - - if (tlb_type == hypervisor) - sz_bits = _PAGE_SZBITS_4V; - - return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits); -} - -unsigned long pte_pfn(pte_t pte) -{ - unsigned long mask = _PAGE_PADDR_4U; - - if (tlb_type == hypervisor) - mask = _PAGE_PADDR_4V; - - return (pte_val(pte) & mask) >> PAGE_SHIFT; -} - -pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) -{ - unsigned long preserve_mask; - unsigned long val; - - preserve_mask = (_PAGE_PADDR_4U | - _PAGE_MODIFIED_4U | - _PAGE_ACCESSED_4U | - _PAGE_CP_4U | - _PAGE_CV_4U | - _PAGE_E_4U | - _PAGE_PRESENT_4U | - _PAGE_SZBITS_4U); - if (tlb_type == hypervisor) - preserve_mask = (_PAGE_PADDR_4V | - _PAGE_MODIFIED_4V | - _PAGE_ACCESSED_4V | - _PAGE_CP_4V | - _PAGE_CV_4V | - _PAGE_E_4V | - _PAGE_PRESENT_4V | - _PAGE_SZBITS_4V); - - val = (pte_val(orig_pte) & preserve_mask); - - return __pte(val | (pgprot_val(new_prot) & ~preserve_mask)); + return pte; } static unsigned long kern_large_tte(unsigned long paddr) -- cgit v1.1 From bf941d6cd62aa2022f0887e25e3d02c389b0bf9b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 13 Feb 2006 18:07:45 -0800 Subject: [SPARC64]: Log faulting vaddr when bogus kernel PC detected. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/fault.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index 439a53c..b97bd05 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -91,12 +91,13 @@ static void __kprobes unhandled_fault(unsigned long address, die_if_kernel("Oops", regs); } -static void bad_kernel_pc(struct pt_regs *regs) +static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) { unsigned long *ksp; printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", regs->tpc); + printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); __asm__("mov %%sp, %0" : "=r" (ksp)); show_stack(current, ksp); unhandled_fault(regs->tpc, current, regs); @@ -280,7 +281,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) (tpc >= MODULES_VADDR && tpc < MODULES_END)) { /* Valid, no problems... */ } else { - bad_kernel_pc(regs); + bad_kernel_pc(regs, address); return; } } -- cgit v1.1 From 1daef08a12157923d90ec7a47ead8a97e0d243cc Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 20:35:10 -0800 Subject: [SPARC64]: Fix comment typo in __flush_tlb_kernel_range. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 8c24493..725f8b3 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -86,7 +86,7 @@ __flush_tlb_pending: /* 26 insns */ .align 32 .globl __flush_tlb_kernel_range -__flush_tlb_kernel_range: /* 14 insns */ +__flush_tlb_kernel_range: /* 16 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f -- cgit v1.1 From de635d833f61ce0f2ad0b3431e6a3323a1c4fed5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 15 Feb 2006 21:01:31 -0800 Subject: [SPARC64]: Fix flush_tsb_user() on SUN4V. Needs to use physical addressing just like cheetah_plus. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 975242a..3c1ff05 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -58,7 +58,7 @@ void flush_tsb_user(struct mmu_gather *mp) ctx = CTX_HWBITS(mm->context); - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(tsb); else base = (unsigned long) tsb; -- cgit v1.1 From 3b3ab2eb9cf07ef1bc7a676c19aab994adb41a87 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 09:54:42 -0800 Subject: [SPARC64]: Use phys tsb address in tsb_insert() in SUN4V. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 6f860c3..0137d3dc 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -252,7 +252,7 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long { unsigned long tsb_addr = (unsigned long) ent; - if (tlb_type == cheetah_plus) + if (tlb_type == cheetah_plus || tlb_type == hypervisor) tsb_addr = __pa(tsb_addr); __tsb_insert(tsb_addr, tag, pte); -- cgit v1.1 From 3f19a84e39619053f117bd5bb9183c5bfea7db45 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 12:03:20 -0800 Subject: [SPARC64]: Set associativity of kernel TSB descriptor correctly. 
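As a side note on the indexing convention the TSB patches above depend on, the hash and tag arithmetic can be modelled in a few lines of userspace C. The toy_* names, the 8K page size and the 512-entry table are assumptions for illustration only, not kernel code:

#include <stdio.h>

#define TOY_PAGE_SHIFT 13UL                 /* assumes 8K base pages */

struct toy_tsb_ent { unsigned long tag, pte; };

static unsigned long toy_tsb_hash(unsigned long vaddr, unsigned long nentries)
{
        return (vaddr >> TOY_PAGE_SHIFT) & (nentries - 1UL);
}

static unsigned long toy_tsb_tag(unsigned long vaddr)
{
        return vaddr >> 22UL;               /* bits above the index become the tag */
}

int main(void)
{
        static struct toy_tsb_ent tsb[512]; /* one 8K TSB worth of entries */
        unsigned long va = 0x0000700000002000UL;

        /* insert: same arithmetic as the update_mmu_cache()/tsb_insert() path */
        tsb[toy_tsb_hash(va, 512)].tag = toy_tsb_tag(va);
        tsb[toy_tsb_hash(va, 512)].pte = 0x1234UL;

        /* flush: recompute the slot, compare tags, invalidate on a match */
        struct toy_tsb_ent *ent = &tsb[toy_tsb_hash(va, 512)];
        if (ent->tag == toy_tsb_tag(va))
                ent->tag = ~0UL;            /* stand-in for the real invalid encoding */
        printf("slot %lu flushed\n", toy_tsb_hash(va, 512));
        return 0;
}

Because the tag only keeps the bits above the index, an all-zero tag can look like a valid mapping of address zero, which is why a later patch in this series introduces an explicit invalid bit.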
It should be 1, not 0. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 0137d3dc..950d580 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1056,7 +1056,7 @@ static void __init sun4v_ktsb_init(void) break; }; - ktsb_descr[0].assoc = 0; + ktsb_descr[0].assoc = 1; ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; ktsb_descr[0].ctx_idx = 0; ktsb_descr[0].tsb_base = ktsb_pa; -- cgit v1.1 From 12e126ad229abc718d05600027fcd5794c1e31e5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 14:40:30 -0800 Subject: [SPARC64]: Check for errors in hypervisor_tlb_lock(). Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 950d580..bd9e320 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -510,6 +510,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, "=&r" (arg3) : "0" (func), "1" (arg0), "2" (arg1), "3" (arg2), "4" (arg3)); + if (arg0 != 0) { + prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " + "errors with %lx\n", vaddr, 0, pte, mmu, arg0); + prom_halt(); + } } static unsigned long kern_large_tte(unsigned long paddr); -- cgit v1.1 From 8b234274418d6d79527c4ac3a72da446ca4cb35f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Feb 2006 18:01:02 -0800 Subject: [SPARC64]: More TLB/TSB handling fixes. The SUN4V convention with non-shared TSBs is that the context bit of the TAG is clear. So we have to choose an "invalid" bit and initialize new TSBs appropriately. Otherwise a zero TAG looks "valid". Make sure, for the window fixup cases, that we use the right global registers and that we don't potentially trample on the live global registers in etrap/rtrap handling (%g2 and %g6) and that we put the missing virtual address properly in %g5. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 4 +++- arch/sparc64/mm/tsb.c | 25 ++++++++++++------------- 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index bd9e320..aa2aec6 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -296,7 +296,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL)]; - tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL; + tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); } } @@ -1110,6 +1110,8 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); + if (tlb_type == hypervisor) sun4v_pgprot_init(); else diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 3c1ff05..353cb06 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -20,9 +20,9 @@ static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries return vaddr & (nentries - 1); } -static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context) +static inline int tag_compare(unsigned long tag, unsigned long vaddr) { - return (tag == ((vaddr >> 22) | (context << 48))); + return (tag == (vaddr >> 22)); } /* TSB flushes need only occur on the processor initiating the address @@ -38,8 +38,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES); struct tsb *ent = &swapper_tsb[hash]; - if (tag_compare(ent->tag, v, 0)) { - ent->tag = 0UL; + if (tag_compare(ent->tag, v)) { + ent->tag = (1UL << TSB_TAG_INVALID_BIT); membar_storeload_storestore(); } } @@ -50,14 +50,9 @@ void flush_tsb_user(struct mmu_gather *mp) struct mm_struct *mm = mp->mm; struct tsb *tsb = mm->context.tsb; unsigned long nentries = mm->context.tsb_nentries; - unsigned long ctx, base; + unsigned long base; int i; - if (unlikely(!CTX_VALID(mm->context))) - return; - - ctx = CTX_HWBITS(mm->context); - if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(tsb); else @@ -71,7 +66,7 @@ void flush_tsb_user(struct mmu_gather *mp) hash = tsb_hash(v, nentries); ent = base + (hash * sizeof(struct tsb)); - tag = (v >> 22UL) | (ctx << 48UL); + tag = (v >> 22UL); tsb_flush(ent, tag); } @@ -243,7 +238,8 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, "i" (ASI_NUCLEUS_QUAD_LDD)); } - if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT))) + if (tag & ((1UL << TSB_TAG_LOCK_BIT) | + (1UL << TSB_TAG_INVALID_BIT))) continue; /* We only put base page size PTEs into the TSB, @@ -315,10 +311,13 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) break; } - page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size)); + page = alloc_pages(gfp_flags, get_order(size)); if (unlikely(!page)) return; + /* Mark all tags as invalid. */ + memset(page_address(page), 0x40, size); + if (size == max_tsb_size) mm->context.tsb_rss_limit = ~0UL; else -- cgit v1.1 From 0f15952ac8641bde1045162ffd4a7b474cc318b0 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Feb 2006 12:43:16 -0800 Subject: [SPARC64]: Export a PAGE_SHARED symbol. For drivers/media/*, noticed by Fabbione. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index aa2aec6..c7aa440 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1353,6 +1353,10 @@ EXPORT_SYMBOL(PAGE_KERNEL); pgprot_t PAGE_KERNEL_LOCKED __read_mostly; pgprot_t PAGE_COPY __read_mostly; + +pgprot_t PAGE_SHARED __read_mostly; +EXPORT_SYMBOL(PAGE_SHARED); + pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; @@ -1367,6 +1371,7 @@ static void prot_init_common(unsigned long page_none, unsigned long page_exec_bit) { PAGE_COPY = __pgprot(page_copy); + PAGE_SHARED = __pgprot(page_shared); protection_map[0x0] = __pgprot(page_none); protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit); -- cgit v1.1 From 9cc3a1ac9a819cadff05ca37bb7f208013a22035 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 20:51:13 -0800 Subject: [SPARC64]: Make use of Niagara 256MB PTEs for kernel mappings. We use a bitmap, one bit for every 256MB of memory. If the bit is set we can use a 256MB PTE for linear mappings, else we have to use a 4MB PTE. SUN4V support is there, and we can very easily add support for Panther cpu 256MB PTEs in the future. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 82 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 69 insertions(+), 13 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index c7aa440..b5869f0 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -45,6 +45,19 @@ extern void device_scan(void); +#define MAX_PHYS_ADDRESS (1UL << 42UL) +#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) +#define KPTE_BITMAP_BYTES \ + ((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8) + +unsigned long kern_linear_pte_xor[2] __read_mostly; + +/* A bitmap, one bit for every 256MB of physical memory. If the bit + * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else + * if set we should use a 256MB page (via kern_linear_pte_xor[1]). + */ +unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; + #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; @@ -119,7 +132,6 @@ unsigned long phys_base __read_mostly; unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; unsigned long pfn_base __read_mostly; -unsigned long kern_linear_pte_xor __read_mostly; /* get_new_mmu_context() uses "cache + 1". 
*/ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -878,6 +890,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) return end_pfn; } +static struct linux_prom64_registers pall[MAX_BANKS] __initdata; +static int pall_ents __initdata; + #ifdef CONFIG_DEBUG_PAGEALLOC static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) { @@ -933,14 +948,41 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, return alloc_bytes; } -static struct linux_prom64_registers pall[MAX_BANKS] __initdata; -static int pall_ents __initdata; - extern unsigned int kvmap_linear_patch[1]; +#endif /* CONFIG_DEBUG_PAGEALLOC */ + +static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) +{ + const unsigned long shift_256MB = 28; + const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL); + const unsigned long size_256MB = (1UL << shift_256MB); + + while (start < end) { + long remains; + + if (start & mask_256MB) { + start = (start + size_256MB) & ~mask_256MB; + continue; + } + + remains = end - start; + while (remains >= size_256MB) { + unsigned long index = start >> shift_256MB; + + __set_bit(index, kpte_linear_bitmap); + + start += size_256MB; + remains -= size_256MB; + } + } +} static void __init kernel_physical_mapping_init(void) { - unsigned long i, mem_alloced = 0UL; + unsigned long i; +#ifdef CONFIG_DEBUG_PAGEALLOC + unsigned long mem_alloced = 0UL; +#endif read_obp_memory("reg", &pall[0], &pall_ents); @@ -949,10 +991,16 @@ static void __init kernel_physical_mapping_init(void) phys_start = pall[i].phys_addr; phys_end = phys_start + pall[i].reg_size; + + mark_kpte_bitmap(phys_start, phys_end); + +#ifdef CONFIG_DEBUG_PAGEALLOC mem_alloced += kernel_map_range(phys_start, phys_end, PAGE_KERNEL); +#endif } +#ifdef CONFIG_DEBUG_PAGEALLOC printk("Allocated %ld bytes for kernel page tables.\n", mem_alloced); @@ -960,8 +1008,10 @@ static void __init kernel_physical_mapping_init(void) flushi(&kvmap_linear_patch[0]); __flush_tlb_all(); +#endif } +#ifdef CONFIG_DEBUG_PAGEALLOC void kernel_map_pages(struct page *page, int numpages, int enable) { unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; @@ -1172,9 +1222,7 @@ void __init paging_init(void) pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail); -#ifdef CONFIG_DEBUG_PAGEALLOC kernel_physical_mapping_init(); -#endif { unsigned long zones_size[MAX_NR_ZONES]; @@ -1413,10 +1461,13 @@ static void __init sun4u_pgprot_init(void) pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U | __ACCESS_BITS_4U | _PAGE_E_4U); - kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ 0xfffff80000000000; - kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U | - _PAGE_P_4U | _PAGE_W_4U); + kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U | + _PAGE_P_4U | _PAGE_W_4U); + + /* XXX Should use 256MB on Panther. 
XXX */ + kern_linear_pte_xor[1] = kern_linear_pte_xor[0]; _PAGE_SZBITS = _PAGE_SZBITS_4U; _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U | @@ -1454,10 +1505,15 @@ static void __init sun4v_pgprot_init(void) _PAGE_E = _PAGE_E_4V; _PAGE_CACHE = _PAGE_CACHE_4V; - kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^ + 0xfffff80000000000; + kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); + + kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^ 0xfffff80000000000; - kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V | - _PAGE_P_4V | _PAGE_W_4V); + kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V | + _PAGE_P_4V | _PAGE_W_4V); pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V | __ACCESS_BITS_4V | _PAGE_E_4V); -- cgit v1.1 From d7744a09504d5ae84edc8289a02254e1f2102410 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 21 Feb 2006 22:31:11 -0800 Subject: [SPARC64]: Create a seperate kernel TSB for 4MB/256MB mappings. It can map all of the linear kernel mappings with zero TSB hash conflicts for systems with 16GB or less ram. In such cases, on SUN4V, once we load up this TSB the first time with all the mappings, we never take a linear kernel mapping TLB miss ever again, the hypervisor handles them all. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b5869f0..2a12313 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly; */ unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; +/* A special kernel TSB for 4MB and 256MB linear mappings. */ +struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; + #define MAX_BANKS 32 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; @@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void) { unsigned long ktsb_pa; + /* First KTSB for PAGE_SIZE mappings. */ ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE); switch (PAGE_SIZE) { @@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void) ktsb_descr[0].tsb_base = ktsb_pa; ktsb_descr[0].resv = 0; - /* XXX When we have a kernel large page size TSB, describe - * XXX it in ktsb_descr[1] here. - */ + /* Second KTSB for 4MB/256MB mappings. */ + ktsb_pa = (kern_base + + ((unsigned long)&swapper_4m_tsb[0] - KERNBASE)); + + ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB; + ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB | + HV_PGSZ_MASK_256MB); + ktsb_descr[1].assoc = 1; + ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES; + ktsb_descr[1].ctx_idx = 0; + ktsb_descr[1].tsb_base = ktsb_pa; + ktsb_descr[1].resv = 0; } void __cpuinit sun4v_ktsb_register(void) @@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void) pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); func = HV_FAST_MMU_TSB_CTX0; - /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */ - arg0 = 1; + arg0 = 2; arg1 = pa; __asm__ __volatile__("ta %6" : "=&r" (func), "=&r" (arg0), "=&r" (arg1) @@ -1160,7 +1172,9 @@ void __init paging_init(void) kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; + /* Invalidate both kernel TSBs. 
*/ memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); + memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb)); if (tlb_type == hypervisor) sun4v_pgprot_init(); -- cgit v1.1 From b2bef4424cb4522f53e34d98d3deb0916478338b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 01:55:55 -0800 Subject: [SPARC64]: Export _PAGE_E and _PAGE_CACHE to modules. SBUS flash driver needs it. Noticed by Fabbione. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 2a12313..16f0db3 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1423,8 +1423,12 @@ pgprot_t PAGE_EXEC __read_mostly; unsigned long pg_iobits __read_mostly; unsigned long _PAGE_IE __read_mostly; + unsigned long _PAGE_E __read_mostly; +EXPORT_SYMBOL(_PAGE_E); + unsigned long _PAGE_CACHE __read_mostly; +EXPORT_SYMBOL(_PAGE_CACHE); static void prot_init_common(unsigned long page_none, unsigned long page_shared, -- cgit v1.1 From a0663a79ad4faebe1db4a56e2e767b120b12333a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 14:19:28 -0800 Subject: [SPARC64]: Fix TLB context allocation with SMT style shared TLBs. The context allocation scheme we use depends upon there being a 1<-->1 mapping from cpu to physical TLB for correctness. Chips like Niagara break this assumption. So what we do is notify all cpus with a cross call when the context version number changes, and if necessary this makes them allocate a valid context for the address space they are running at the time. Stress tested with make -j1024, make -j2048, and make -j4096 kernel builds on a 32-strand, 8 core, T2000 with 16GB of ram. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 16f0db3..ccf083a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -629,17 +629,20 @@ void __flush_dcache_range(unsigned long start, unsigned long end) * let the user have CTX 0 (nucleus) or we ever use a CTX * version of zero (and thus NO_CONTEXT would not be caught * by version mis-match tests in mmu_context.h). + * + * Always invoked with interrupts disabled. */ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; - + int new_version; spin_lock(&ctx_alloc_lock); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); + new_version = 0; if (new_ctx >= (1 << CTX_NR_BITS)) { new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); if (new_ctx >= ctx) { @@ -662,6 +665,7 @@ void get_new_mmu_context(struct mm_struct *mm) mmu_context_bmap[i + 2] = 0; mmu_context_bmap[i + 3] = 0; } + new_version = 1; goto out; } } @@ -671,6 +675,9 @@ out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; spin_unlock(&ctx_alloc_lock); + + if (unlikely(new_version)) + smp_new_mmu_context_version(); } void sparc_ultra_dump_itlb(void) -- cgit v1.1 From 77b838fa1ef0ab02f75afc09834c60d87b86772f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 23 Feb 2006 21:40:15 -0800 Subject: [SPARC64]: destroy_context() needs to disable interrupts. get_new_mmu_context() can be invoked from interrupt context now for the new SMP version wrap handling. 
So disable interrupt while taking ctx_alloc_lock in destroy_context() so we don't deadlock. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 353cb06..534ac28 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -373,6 +373,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) void destroy_context(struct mm_struct *mm) { unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb); + unsigned long flags; free_pages((unsigned long) mm->context.tsb, get_order(size)); @@ -383,12 +384,12 @@ void destroy_context(struct mm_struct *mm) mm->context.tsb = NULL; mm->context.tsb_reg_val = 0UL; - spin_lock(&ctx_alloc_lock); + spin_lock_irqsave(&ctx_alloc_lock, flags); if (CTX_VALID(mm->context)) { unsigned long nr = CTX_NRBITS(mm->context); mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63)); } - spin_unlock(&ctx_alloc_lock); + spin_unlock_irqrestore(&ctx_alloc_lock, flags); } -- cgit v1.1 From 2a3a5f5ddbefde498e87f10924d4bf741c5bf37f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 19:31:49 -0800 Subject: [SPARC64]: Bulletproof hypervisor TLB flushing. Check TLB flush hypervisor calls for errors and report them. Pass HV_MMU_ALL always for now, we can add back the optimization to avoid the I-TLB flush later. Always explicitly page align the virtual address arguments. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 94 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 68 insertions(+), 26 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index 725f8b3..bd8b0b4 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -257,17 +257,27 @@ __cheetah_flush_dcache_page: /* 11 insns */ #endif /* DCACHE_ALIASING_POSSIBLE */ /* Hypervisor specific versions, patched at boot time. 
*/ -__hypervisor_flush_tlb_mm: /* 8 insns */ +__hypervisor_tlb_tl0_error: + save %sp, -192, %sp + mov %i0, %o0 + call hypervisor_tlbop_error + mov %i1, %o1 + ret + restore + +__hypervisor_flush_tlb_mm: /* 10 insns */ mov %o0, %o2 /* ARG2: mmu context */ mov 0, %o0 /* ARG0: CPU lists unimplemented */ mov 0, %o1 /* ARG1: CPU lists unimplemented */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_FAST_MMU_DEMAP_CTX, %o1 retl nop -__hypervisor_flush_tlb_pending: /* 15 insns */ +__hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 mov %o2, %g2 @@ -275,17 +285,18 @@ __hypervisor_flush_tlb_pending: /* 15 insns */ 1: sub %g1, (1 << 3), %g1 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */ mov %g3, %o1 /* ARG1: mmu context */ - mov HV_MMU_DMMU, %o2 - andcc %o0, 1, %g0 - movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ - andn %o0, 1, %o0 + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g1, 1b nop retl nop -__hypervisor_flush_tlb_kernel_range: /* 14 insns */ +__hypervisor_flush_tlb_kernel_range: /* 16 insns */ /* %o0=start, %o1=end */ cmp %o0, %o1 be,pn %xcc, 2f @@ -297,6 +308,8 @@ __hypervisor_flush_tlb_kernel_range: /* 14 insns */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 brnz,pt %g2, 1b sub %g2, %g3, %g2 2: retl @@ -369,7 +382,7 @@ cheetah_patch_cachetlbops: */ .align 32 .globl xcall_flush_tlb_mm -xcall_flush_tlb_mm: /* 18 insns */ +xcall_flush_tlb_mm: /* 21 insns */ mov PRIMARY_CONTEXT, %g2 ldxa [%g2] ASI_DMMU, %g3 srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -388,9 +401,12 @@ xcall_flush_tlb_mm: /* 18 insns */ nop nop nop + nop + nop + nop .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 20 insns */ +xcall_flush_tlb_pending: /* 21 insns */ /* %g5=context, %g1=nr, %g7=vaddrs[] */ sllx %g1, 3, %g1 mov PRIMARY_CONTEXT, %g4 @@ -413,9 +429,10 @@ xcall_flush_tlb_pending: /* 20 insns */ nop stxa %g2, [%g4] ASI_DMMU retry + nop .globl xcall_flush_tlb_kernel_range -xcall_flush_tlb_kernel_range: /* 22 insns */ +xcall_flush_tlb_kernel_range: /* 25 insns */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -438,6 +455,9 @@ xcall_flush_tlb_kernel_range: /* 22 insns */ nop nop nop + nop + nop + nop /* This runs in a very controlled environment, so we do * not need to worry about BH races etc. 
@@ -545,8 +565,21 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop + /* %g5: error + * %g6: tlb op + */ +__hypervisor_tlb_xcall_error: + mov %g5, %g4 + mov %g6, %g5 + ba,pt %xcc, etrap + rd %pc, %g7 + mov %l4, %o0 + call hypervisor_tlbop_error_xcall + mov %l5, %o1 + ba,a,pt %xcc, rtrap_clr_l6 + .globl __hypervisor_xcall_flush_tlb_mm -__hypervisor_xcall_flush_tlb_mm: /* 18 insns */ +__hypervisor_xcall_flush_tlb_mm: /* 21 insns */ /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */ mov %o0, %g2 mov %o1, %g3 @@ -559,6 +592,9 @@ __hypervisor_xcall_flush_tlb_mm: /* 18 insns */ mov HV_MMU_ALL, %o3 /* ARG3: flags */ mov HV_FAST_MMU_DEMAP_CTX, %o5 ta HV_FAST_TRAP + mov HV_FAST_MMU_DEMAP_CTX, %g6 + brnz,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -568,8 +604,8 @@ __hypervisor_xcall_flush_tlb_mm: /* 18 insns */ retry .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 18 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */ +__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ + /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ sllx %g1, 3, %g1 mov %o0, %g2 mov %o1, %g3 @@ -577,10 +613,13 @@ __hypervisor_xcall_flush_tlb_pending: /* 18 insns */ 1: sub %g1, (1 << 3), %g1 ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ - mov HV_MMU_DMMU, %o2 - andcc %o0, 1, %g0 - movne %icc, HV_MMU_ALL, %o2 /* ARG2: flags */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 ta HV_MMU_UNMAP_ADDR_TRAP + mov HV_MMU_UNMAP_ADDR_TRAP, %g6 + brnz,a,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 brnz,pt %g1, 1b nop mov %g2, %o0 @@ -590,8 +629,8 @@ __hypervisor_xcall_flush_tlb_pending: /* 18 insns */ retry .globl __hypervisor_xcall_flush_tlb_kernel_range -__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ - /* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */ +__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */ + /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */ sethi %hi(PAGE_SIZE - 1), %g2 or %g2, %lo(PAGE_SIZE - 1), %g2 andn %g1, %g2, %g1 @@ -601,17 +640,20 @@ __hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */ sub %g3, %g2, %g3 mov %o0, %g2 mov %o1, %g4 - mov %o2, %g5 + mov %o2, %g7 1: add %g1, %g3, %o0 /* ARG0: virtual address */ mov 0, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ ta HV_MMU_UNMAP_ADDR_TRAP + mov HV_MMU_UNMAP_ADDR_TRAP, %g6 + brnz,pn %o0, __hypervisor_tlb_xcall_error + mov %o0, %g5 sethi %hi(PAGE_SIZE), %o2 brnz,pt %g3, 1b sub %g3, %o2, %g3 mov %g2, %o0 mov %g4, %o1 - mov %g5, %o2 + mov %g7, %o2 membar #Sync retry @@ -643,21 +685,21 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_flush_tlb_mm), %o1 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1 call tlb_patch_one - mov 8, %o2 + mov 10, %o2 sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1 call tlb_patch_one - mov 15, %o2 + mov 16, %o2 sethi %hi(__flush_tlb_kernel_range), %o0 or %o0, %lo(__flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 14, %o2 + mov 16, %o2 #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 @@ -674,21 +716,21 @@ hypervisor_patch_cachetlbops: sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1 or %o1, 
%lo(__hypervisor_xcall_flush_tlb_mm), %o1 call tlb_patch_one - mov 18, %o2 + mov 21, %o2 sethi %hi(xcall_flush_tlb_pending), %o0 or %o0, %lo(xcall_flush_tlb_pending), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 call tlb_patch_one - mov 18, %o2 + mov 21, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1 call tlb_patch_one - mov 22, %o2 + mov 25, %o2 #endif /* CONFIG_SMP */ ret -- cgit v1.1 From 7a591cfe4efef8a232e4938d44ae6693b319f6d7 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 26 Feb 2006 19:44:50 -0800 Subject: [SPARC64]: Avoid dcache-dirty page state management on sun4v. It is totally wasted work, since we have no D-cache aliasing issues on sun4v. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 50 ++++++++++++++++++++++++++++---------------------- arch/sparc64/mm/tlb.c | 3 ++- 2 files changed, 30 insertions(+), 23 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index ccf083a..87d5d1a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -188,8 +188,9 @@ atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); #endif #endif -__inline__ void flush_dcache_page_impl(struct page *page) +inline void flush_dcache_page_impl(struct page *page) { + BUG_ON(tlb_type == hypervisor); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); #endif @@ -279,29 +280,31 @@ unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; - struct page *page; - unsigned long pfn; - unsigned long pg_flags; - - pfn = pte_pfn(pte); - if (pfn_valid(pfn) && - (page = pfn_to_page(pfn), page_mapping(page)) && - ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { - int cpu = ((pg_flags >> PG_dcache_cpu_shift) & - PG_dcache_cpu_mask); - int this_cpu = get_cpu(); - - /* This is just to optimize away some function calls - * in the SMP case. - */ - if (cpu == this_cpu) - flush_dcache_page_impl(page); - else - smp_flush_dcache_page_impl(page, cpu); - clear_dcache_dirty_cpu(page, cpu); + if (tlb_type != hypervisor) { + unsigned long pfn = pte_pfn(pte); + unsigned long pg_flags; + struct page *page; + + if (pfn_valid(pfn) && + (page = pfn_to_page(pfn), page_mapping(page)) && + ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { + int cpu = ((pg_flags >> PG_dcache_cpu_shift) & + PG_dcache_cpu_mask); + int this_cpu = get_cpu(); + + /* This is just to optimize away some function calls + * in the SMP case. + */ + if (cpu == this_cpu) + flush_dcache_page_impl(page); + else + smp_flush_dcache_page_impl(page, cpu); + + clear_dcache_dirty_cpu(page, cpu); - put_cpu(); + put_cpu(); + } } mm = vma->vm_mm; @@ -321,6 +324,9 @@ void flush_dcache_page(struct page *page) struct address_space *mapping; int this_cpu; + if (tlb_type == hypervisor) + return; + /* Do not bother with the expensive D-cache flush if it * is merely the zero page. The 'bigcore' testcase in GDB * causes this case to run millions of times. 
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c index 78357cc..a079cf4 100644 --- a/arch/sparc64/mm/tlb.c +++ b/arch/sparc64/mm/tlb.c @@ -49,7 +49,8 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t if (pte_exec(orig)) vaddr |= 0x1UL; - if (pte_dirty(orig)) { + if (tlb_type != hypervisor && + pte_dirty(orig)) { unsigned long paddr, pfn = pte_pfn(orig); struct address_space *mapping; struct page *page; -- cgit v1.1 From 74ae998772041b62e9ad420d602e4f7dbb182cd6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 18:26:24 -0800 Subject: [SPARC64]: Simplify TSB insert checks. Don't try to avoid putting non-base page sized entries into the user TSB. It actually costs us more to check this than it helps. Eventually we'll have a multiple TSB scheme for user processes. Once a process starts using larger pages, we'll allocate and use such a TSB. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 87d5d1a..5930e87 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -280,6 +280,8 @@ unsigned long _PAGE_SZBITS __read_mostly; void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct mm_struct *mm; + struct tsb *tsb; + unsigned long tag; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); @@ -308,15 +310,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; - if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { - struct tsb *tsb; - unsigned long tag; - - tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & - (mm->context.tsb_nentries - 1UL)]; - tag = (address >> 22UL); - tsb_insert(tsb, tag, pte_val(pte)); - } + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & + (mm->context.tsb_nentries - 1UL)]; + tag = (address >> 22UL); + tsb_insert(tsb, tag, pte_val(pte)); } void flush_dcache_page(struct page *page) -- cgit v1.1 From f7c00338cfeef125032aa12aa8ebeacf9e117e81 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 5 Mar 2006 22:18:50 -0800 Subject: [SPARC64]: Fix loop termination in mark_kpte_bitmap() If we were aligned, but didn't have at least 256MB left to process, we would loop forever. Thanks to fabbione for the report and testing the fix. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5930e87..9bbd0bf6 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -973,12 +973,15 @@ static void __init mark_kpte_bitmap(unsigned long start, unsigned long end) while (start < end) { long remains; + remains = end - start; + if (remains < size_256MB) + break; + if (start & mask_256MB) { start = (start + size_256MB) & ~mask_256MB; continue; } - remains = end - start; while (remains >= size_256MB) { unsigned long index = start >> shift_256MB; -- cgit v1.1 From a77754b4d0731321db266c6c60ffcd7c62757da5 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 19:59:50 -0800 Subject: [SPARC64]: Bulletproof MMU context locking. 1) Always spin_lock_init() in init_context(). The caller essentially clears it out, or copies the mm info from the parent. In both cases we need to explicitly initialize the spinlock. 
2) Always do explicit IRQ disabling while taking mm->context.lock and ctx_alloc_lock. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 5 +++-- arch/sparc64/mm/tsb.c | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 9bbd0bf6..a639393 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -639,9 +639,10 @@ void get_new_mmu_context(struct mm_struct *mm) { unsigned long ctx, new_ctx; unsigned long orig_pgsz_bits; + unsigned long flags; int new_version; - spin_lock(&ctx_alloc_lock); + spin_lock_irqsave(&ctx_alloc_lock, flags); orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); ctx = (tlb_context_cache + 1) & CTX_NR_MASK; new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); @@ -677,7 +678,7 @@ void get_new_mmu_context(struct mm_struct *mm) out: tlb_context_cache = new_ctx; mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; - spin_unlock(&ctx_alloc_lock); + spin_unlock_irqrestore(&ctx_alloc_lock, flags); if (unlikely(new_version)) smp_new_mmu_context_version(); diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 534ac28..f36799b 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -354,6 +354,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + spin_lock_init(&mm->context.lock); mm->context.sparc64_ctx_val = 0UL; -- cgit v1.1 From ee29074d3bd23848905f52c515974e0cd0219faa Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 6 Mar 2006 22:50:44 -0800 Subject: [SPARC64]: Fix new context version SMP handling. Don't piggy back the SMP receive signal code to do the context version change handling. Instead allocate another fixed PIL number for this asynchronous cross-call. We can't use smp_call_function() because this thing is invoked with interrupts disabled and a few spinlocks held. Also, fix smp_call_function_mask() to count "cpus" correctly. There is no guarentee that the local cpu is in the mask yet that is exactly what this code was assuming. Signed-off-by: David S. Miller --- arch/sparc64/mm/ultra.S | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index bd8b0b4..f8479fa 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -673,6 +673,11 @@ xcall_capture: wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint retry + .globl xcall_new_mmu_context_version +xcall_new_mmu_context_version: + wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint + retry + #endif /* CONFIG_SMP */ -- cgit v1.1 From d1112018b4bc82adf5c8a9c15a08954328f023ae Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 02:16:07 -0800 Subject: [SPARC64]: Move over to sparsemem. This has been pending for a long time, and the fact that we waste a ton of ram on some configurations kind of pushed things over the edge. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 134 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 98 insertions(+), 36 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a639393..5f67b53 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -130,11 +130,9 @@ static void __init read_obp_memory(const char *property, unsigned long *sparc64_valid_addr_bitmap __read_mostly; -/* Ugly, but necessary... 
-DaveM */ -unsigned long phys_base __read_mostly; +/* Kernel physical address base and size in bytes. */ unsigned long kern_base __read_mostly; unsigned long kern_size __read_mostly; -unsigned long pfn_base __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -368,16 +366,6 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end) } } -unsigned long page_to_pfn(struct page *page) -{ - return (unsigned long) ((page - mem_map) + pfn_base); -} - -struct page *pfn_to_page(unsigned long pfn) -{ - return (mem_map + (pfn - pfn_base)); -} - void show_mem(void) { printk("Mem-info:\n"); @@ -773,9 +761,78 @@ void sparc_ultra_dump_dtlb(void) extern unsigned long cmdline_memory_size; -unsigned long __init bootmem_init(unsigned long *pages_avail) +/* Find a free area for the bootmem map, avoiding the kernel image + * and the initial ramdisk. + */ +static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn, + unsigned long end_pfn) { - unsigned long bootmap_size, start_pfn, end_pfn; + unsigned long avoid_start, avoid_end, bootmap_size; + int i; + + bootmap_size = ((end_pfn - start_pfn) + 7) / 8; + bootmap_size = ALIGN(bootmap_size, sizeof(long)); + + avoid_start = avoid_end = 0; +#ifdef CONFIG_BLK_DEV_INITRD + avoid_start = initrd_start; + avoid_end = PAGE_ALIGN(initrd_end); +#endif + +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n", + kern_base, PAGE_ALIGN(kern_base + kern_size), + avoid_start, avoid_end); +#endif + for (i = 0; i < pavail_ents; i++) { + unsigned long start, end; + + start = pavail[i].phys_addr; + end = start + pavail[i].reg_size; + + while (start < end) { + if (start >= kern_base && + start < PAGE_ALIGN(kern_base + kern_size)) { + start = PAGE_ALIGN(kern_base + kern_size); + continue; + } + if (start >= avoid_start && start < avoid_end) { + start = avoid_end; + continue; + } + + if ((end - start) < bootmap_size) + break; + + if (start < kern_base && + (start + bootmap_size) > kern_base) { + start = PAGE_ALIGN(kern_base + kern_size); + continue; + } + + if (start < avoid_start && + (start + bootmap_size) > avoid_start) { + start = avoid_end; + continue; + } + + /* OK, it doesn't overlap anything, use it. */ +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n", + start >> PAGE_SHIFT, start); +#endif + return start >> PAGE_SHIFT; + } + } + + prom_printf("Cannot find free area for bootmap, aborting.\n"); + prom_halt(); +} + +static unsigned long __init bootmem_init(unsigned long *pages_avail, + unsigned long phys_base) +{ + unsigned long bootmap_size, end_pfn; unsigned long end_of_phys_memory = 0UL; unsigned long bootmap_pfn, bytes_avail, size; int i; @@ -813,14 +870,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) *pages_avail = bytes_avail >> PAGE_SHIFT; - /* Start with page aligned address of last symbol in kernel - * image. The kernel is hard mapped below PAGE_OFFSET in a - * 4MB locked TLB translation. 
- */ - start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; - - bootmap_pfn = start_pfn; - end_pfn = end_of_phys_memory >> PAGE_SHIFT; #ifdef CONFIG_BLK_DEV_INITRD @@ -837,23 +886,23 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) "(0x%016lx > 0x%016lx)\ndisabling initrd\n", initrd_end, end_of_phys_memory); initrd_start = 0; - } - if (initrd_start) { - if (initrd_start >= (start_pfn << PAGE_SHIFT) && - initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) - bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; + initrd_end = 0; } } #endif /* Initialize the boot-time allocator. */ max_pfn = max_low_pfn = end_pfn; - min_low_pfn = pfn_base; + min_low_pfn = (phys_base >> PAGE_SHIFT); + + bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn); #ifdef CONFIG_DEBUG_BOOTMEM prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", min_low_pfn, bootmap_pfn, max_low_pfn); #endif - bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); + bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, + (phys_base >> PAGE_SHIFT), + end_pfn); /* Now register the available physical memory with the * allocator. @@ -901,6 +950,20 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; + for (i = 0; i < pavail_ents; i++) { + unsigned long start_pfn, end_pfn; + + start_pfn = pavail[i].phys_addr >> PAGE_SHIFT; + end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT)); +#ifdef CONFIG_DEBUG_BOOTMEM + prom_printf("memory_present(0, %lx, %lx)\n", + start_pfn, end_pfn); +#endif + memory_present(0, start_pfn, end_pfn); + } + + sparse_init(); + return end_pfn; } @@ -1180,7 +1243,7 @@ static void sun4v_pgprot_init(void); void __init paging_init(void) { - unsigned long end_pfn, pages_avail, shift; + unsigned long end_pfn, pages_avail, shift, phys_base; unsigned long real_end, i; kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; @@ -1211,8 +1274,6 @@ void __init paging_init(void) for (i = 0; i < pavail_ents; i++) phys_base = min(phys_base, pavail[i].phys_addr); - pfn_base = phys_base >> PAGE_SHIFT; - set_bit(0, mmu_context_bmap); shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); @@ -1248,7 +1309,9 @@ void __init paging_init(void) /* Setup bootmem... */ pages_avail = 0; - last_valid_pfn = end_pfn = bootmem_init(&pages_avail); + last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); + + max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); kernel_physical_mapping_init(); @@ -1261,7 +1324,7 @@ void __init paging_init(void) for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; - npages = end_pfn - pfn_base; + npages = end_pfn - (phys_base >> PAGE_SHIFT); zones_size[ZONE_DMA] = npages; zholes_size[ZONE_DMA] = npages - pages_avail; @@ -1336,7 +1399,6 @@ void __init mem_init(void) taint_real_pages(); - max_mapnr = last_valid_pfn - pfn_base; high_memory = __va(last_valid_pfn << PAGE_SHIFT); #ifdef CONFIG_DEBUG_BOOTMEM -- cgit v1.1 From 17b0e199a10184d8c5bbbd79a4cee993bb1fb257 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 8 Mar 2006 15:57:03 -0800 Subject: [SPARC64]: Fix 32-bit truncation which broke sparsemem. The page->flags manipulations done by the D-cache dirty state tracking was broken because the constants were not marked with "UL" to make them 64-bit, which means we were clobbering the upper 32-bits of page->flags all the time. 
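To see the truncation concretely, here is a tiny userspace demo; the value planted in the upper half of the flags word is made up, and only the int-versus-UL arithmetic matters:

#include <stdio.h>

#define SHIFT_INT   24            /* old style: plain int constants */
#define MASK_INT    (256 - 1)
#define SHIFT_UL    24UL          /* fixed style: unsigned long constants */
#define MASK_UL     (256UL - 1UL)

int main(void)
{
        /* pretend sparsemem keeps its section index in the upper 32 bits */
        unsigned long flags_a = 0x0000002a00000000UL;
        unsigned long flags_b = flags_a;

        /* (255 << 24) overflows a 32-bit int; its complement is a small
         * positive int, so the AND wipes the upper half of the word */
        flags_a &= ~(MASK_INT << SHIFT_INT);

        /* with UL constants the mask really is 64 bits wide */
        flags_b &= ~(MASK_UL << SHIFT_UL);

        printf("int constants: %016lx\n", flags_a);   /* upper bits lost */
        printf("UL  constants: %016lx\n", flags_b);   /* 0000002a00000000 */
        return 0;
}

With plain int constants the complement is computed in 32 bits and only then widened, which is exactly the clobbering the patch describes.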
This doesn't jive well with sparsemem which stores the section and indexing information in the top 32-bits of page->flags. This is yet another sparc64 bug which has been with us forever. While we're here, tidy up some things in bootmem_init() and paginig_init(): 1) Pass min_low_pfn to init_bootmem_node(), it's identical to (phys_base >> PAGE_SHIFT) but we should use consistent with the variable names we print in CONFIG_BOOTMEM_DEBUG 2) max_mapnr, although no longer used, was being set inaccurately, we shouldn't subtract pfn_base any more. 3) All the games with phys_base in the zones_*[] arrays we pass to free_area_init_node() are no longer necessary. Thanks to Josh Grebe and Fabbione for the bug reports and testing. Fix also verified locally on an SB2500 which had a memory layout that triggered the same problem. Signed-off-by: David S. Miller --- arch/sparc64/mm/init.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 5f67b53..b40f647 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -205,8 +205,8 @@ inline void flush_dcache_page_impl(struct page *page) } #define PG_dcache_dirty PG_arch_1 -#define PG_dcache_cpu_shift 24 -#define PG_dcache_cpu_mask (256 - 1) +#define PG_dcache_cpu_shift 24UL +#define PG_dcache_cpu_mask (256UL - 1UL) #if NR_CPUS > 256 #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus @@ -901,8 +901,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail, min_low_pfn, bootmap_pfn, max_low_pfn); #endif bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, - (phys_base >> PAGE_SHIFT), - end_pfn); + min_low_pfn, end_pfn); /* Now register the available physical memory with the * allocator. @@ -1311,25 +1310,24 @@ void __init paging_init(void) pages_avail = 0; last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); - max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); + max_mapnr = last_valid_pfn; kernel_physical_mapping_init(); { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; - unsigned long npages; int znum; for (znum = 0; znum < MAX_NR_ZONES; znum++) zones_size[znum] = zholes_size[znum] = 0; - npages = end_pfn - (phys_base >> PAGE_SHIFT); - zones_size[ZONE_DMA] = npages; - zholes_size[ZONE_DMA] = npages - pages_avail; + zones_size[ZONE_DMA] = end_pfn; + zholes_size[ZONE_DMA] = end_pfn - pages_avail; free_area_init_node(0, &contig_page_data, zones_size, - phys_base >> PAGE_SHIFT, zholes_size); + __pa(PAGE_OFFSET) >> PAGE_SHIFT, + zholes_size); } device_scan(); -- cgit v1.1 From 7a1ac5264108fc3ed22d17a3cdd76212ed1666d1 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 16 Mar 2006 02:02:32 -0800 Subject: [SPARC64]: Fix and re-enable dynamic TSB sizing. This is good for up to %50 performance improvement of some test cases. The problem has been the race conditions, and hopefully I've plugged them all up here. 1) There was a serious race in switch_mm() wrt. lazy TLB switching to and from kernel threads. We could erroneously skip a tsb_context_switch() and thus use a stale TSB across a TSB grow event. There is a big comment now in that function describing exactly how it can happen. 2) All code paths that do something with the TSB need to be guarded with the mm->context.lock spinlock. This makes page table flushing paths properly synchronize with both TSB growing and TLB context changes. 
3) TSB growing events are moved to the end of successful fault processing. Previously it was in update_mmu_cache() but that is deadlock prone. At the end of do_sparc64_fault() we hold no spinlocks that could deadlock the TSB grow sequence. We also have dropped the address space semaphore. While we're here, add prefetching to the copy_tsb() routine and put it in assembler into the tsb.S file. This piece of code is quite time critical. There are some small negative side effects to this code which can be improved upon. In particular we grab the mm->context.lock even for the tsb insert done by update_mmu_cache() now and that's a bit excessive. We can get rid of that locking, and the same lock taking in flush_tsb_user(), by disabling PSTATE_IE around the whole operation including the capturing of the tsb pointer and tsb_nentries value. That would work because anyone growing the TSB won't free up the old TSB until all cpus respond to the TSB change cross call. I'm not quite so confident in that optimization to put it in right now, but eventually we might be able to and the description is here for reference. This code seems very solid now. It passes several parallel GCC bootstrap builds, and our favorite "nut cruncher" stress test which is a full "make -j8192" build of a "make allmodconfig" kernel. That puts about 256 processes on each cpu's run queue, makes lots of process cpu migrations occur, causes lots of page table and TLB flushing activity, incurs many context version number changes, and it swaps the machine real far out to disk even though there is 16GB of ram on this test system. :-) Signed-off-by: David S. Miller --- arch/sparc64/mm/fault.c | 8 ++- arch/sparc64/mm/init.c | 7 +- arch/sparc64/mm/tsb.c | 185 +++++++++++++++++++++--------------------------- 3 files changed, 95 insertions(+), 105 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index b97bd05..63b6cc0 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -29,6 +29,7 @@ #include #include #include +#include /* * To debug kernel to catch accesses to certain virtual/physical addresses. 
@@ -258,7 +259,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) struct vm_area_struct *vma; unsigned int insn = 0; int si_code, fault_code; - unsigned long address; + unsigned long address, mm_rss; fault_code = get_thread_fault_code(); @@ -407,6 +408,11 @@ good_area: } up_read(&mm->mmap_sem); + + mm_rss = get_mm_rss(mm); + if (unlikely(mm_rss >= mm->context.tsb_rss_limit)) + tsb_grow(mm, mm_rss); + return; /* diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index b40f647..d703b67 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -279,7 +279,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p { struct mm_struct *mm; struct tsb *tsb; - unsigned long tag; + unsigned long tag, flags; if (tlb_type != hypervisor) { unsigned long pfn = pte_pfn(pte); @@ -308,10 +308,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p } mm = vma->vm_mm; + + spin_lock_irqsave(&mm->context.lock, flags); + tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & (mm->context.tsb_nentries - 1UL)]; tag = (address >> 22UL); tsb_insert(tsb, tag, pte_val(pte)); + + spin_unlock_irqrestore(&mm->context.lock, flags); } void flush_dcache_page(struct page *page) diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index f36799b..7fbe1e0 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -48,11 +48,15 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) void flush_tsb_user(struct mmu_gather *mp) { struct mm_struct *mm = mp->mm; - struct tsb *tsb = mm->context.tsb; - unsigned long nentries = mm->context.tsb_nentries; - unsigned long base; + unsigned long nentries, base, flags; + struct tsb *tsb; int i; + spin_lock_irqsave(&mm->context.lock, flags); + + tsb = mm->context.tsb; + nentries = mm->context.tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) base = __pa(tsb); else @@ -70,6 +74,8 @@ void flush_tsb_user(struct mmu_gather *mp) tsb_flush(ent, tag); } + + spin_unlock_irqrestore(&mm->context.lock, flags); } static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) @@ -201,86 +207,9 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) } } -/* The page tables are locked against modifications while this - * runs. - * - * XXX do some prefetching... - */ -static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, - struct tsb *new_tsb, unsigned long new_size) -{ - unsigned long old_nentries = old_size / sizeof(struct tsb); - unsigned long new_nentries = new_size / sizeof(struct tsb); - unsigned long i; - - for (i = 0; i < old_nentries; i++) { - register unsigned long tag asm("o4"); - register unsigned long pte asm("o5"); - unsigned long v, hash; - - if (tlb_type == hypervisor) { - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (__pa(&old_tsb[i])), - "i" (ASI_QUAD_LDD_PHYS_4V)); - } else if (tlb_type == cheetah_plus) { - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (__pa(&old_tsb[i])), - "i" (ASI_QUAD_LDD_PHYS)); - } else { - __asm__ __volatile__( - "ldda [%2] %3, %0" - : "=r" (tag), "=r" (pte) - : "r" (&old_tsb[i]), - "i" (ASI_NUCLEUS_QUAD_LDD)); - } - - if (tag & ((1UL << TSB_TAG_LOCK_BIT) | - (1UL << TSB_TAG_INVALID_BIT))) - continue; - - /* We only put base page size PTEs into the TSB, - * but that might change in the future. This code - * would need to be changed if we start putting larger - * page size PTEs into there. 
- */ - WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS); - - /* The tag holds bits 22 to 63 of the virtual address - * and the context. Clear out the context, and shift - * up to make a virtual address. - */ - v = (tag & ((1UL << 42UL) - 1UL)) << 22UL; - - /* The implied bits of the tag (bits 13 to 21) are - * determined by the TSB entry index, so fill that in. - */ - v |= (i & (512UL - 1UL)) << 13UL; - - hash = tsb_hash(v, new_nentries); - if (tlb_type == cheetah_plus || - tlb_type == hypervisor) { - __asm__ __volatile__( - "stxa %0, [%1] %2\n\t" - "stxa %3, [%4] %2" - : /* no outputs */ - : "r" (tag), - "r" (__pa(&new_tsb[hash].tag)), - "i" (ASI_PHYS_USE_EC), - "r" (pte), - "r" (__pa(&new_tsb[hash].pte))); - } else { - new_tsb[hash].tag = tag; - new_tsb[hash].pte = pte; - } - } -} - /* When the RSS of an address space exceeds mm->context.tsb_rss_limit, - * update_mmu_cache() invokes this routine to try and grow the TSB. + * do_sparc64_fault() invokes this routine to try and grow the TSB. + * * When we reach the maximum TSB size supported, we stick ~0UL into * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache() * will not trigger any longer. @@ -293,12 +222,12 @@ static void copy_tsb(struct tsb *old_tsb, unsigned long old_size, * the number of entries that the current TSB can hold at once. Currently, * we trigger when the RSS hits 3/4 of the TSB capacity. */ -void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) +void tsb_grow(struct mm_struct *mm, unsigned long rss) { unsigned long max_tsb_size = 1 * 1024 * 1024; - unsigned long size, old_size; + unsigned long size, old_size, flags; struct page *page; - struct tsb *old_tsb; + struct tsb *old_tsb, *new_tsb; if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) max_tsb_size = (PAGE_SIZE << MAX_ORDER); @@ -311,12 +240,51 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) break; } - page = alloc_pages(gfp_flags, get_order(size)); + page = alloc_pages(GFP_KERNEL, get_order(size)); if (unlikely(!page)) return; /* Mark all tags as invalid. */ - memset(page_address(page), 0x40, size); + new_tsb = page_address(page); + memset(new_tsb, 0x40, size); + + /* Ok, we are about to commit the changes. If we are + * growing an existing TSB the locking is very tricky, + * so WATCH OUT! + * + * We have to hold mm->context.lock while committing to the + * new TSB, this synchronizes us with processors in + * flush_tsb_user() and switch_mm() for this address space. + * + * But even with that lock held, processors run asynchronously + * accessing the old TSB via TLB miss handling. This is OK + * because those actions are just propagating state from the + * Linux page tables into the TSB, page table mappings are not + * being changed. If a real fault occurs, the processor will + * synchronize with us when it hits flush_tsb_user(), this is + * also true for the case where vmscan is modifying the page + * tables. The only thing we need to be careful with is to + * skip any locked TSB entries during copy_tsb(). + * + * When we finish committing to the new TSB, we have to drop + * the lock and ask all other cpus running this address space + * to run tsb_context_switch() to see the new TSB table. + */ + spin_lock_irqsave(&mm->context.lock, flags); + + old_tsb = mm->context.tsb; + old_size = mm->context.tsb_nentries * sizeof(struct tsb); + + /* Handle multiple threads trying to grow the TSB at the same time. + * One will get in here first, and bump the size and the RSS limit. 
+ * The others will get in here next and hit this check. + */ + if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) { + spin_unlock_irqrestore(&mm->context.lock, flags); + + free_pages((unsigned long) new_tsb, get_order(size)); + return; + } if (size == max_tsb_size) mm->context.tsb_rss_limit = ~0UL; @@ -324,30 +292,37 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags) mm->context.tsb_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4; - old_tsb = mm->context.tsb; - old_size = mm->context.tsb_nentries * sizeof(struct tsb); - - if (old_tsb) - copy_tsb(old_tsb, old_size, page_address(page), size); + if (old_tsb) { + extern void copy_tsb(unsigned long old_tsb_base, + unsigned long old_tsb_size, + unsigned long new_tsb_base, + unsigned long new_tsb_size); + unsigned long old_tsb_base = (unsigned long) old_tsb; + unsigned long new_tsb_base = (unsigned long) new_tsb; + + if (tlb_type == cheetah_plus || tlb_type == hypervisor) { + old_tsb_base = __pa(old_tsb_base); + new_tsb_base = __pa(new_tsb_base); + } + copy_tsb(old_tsb_base, old_size, new_tsb_base, size); + } - mm->context.tsb = page_address(page); + mm->context.tsb = new_tsb; setup_tsb_params(mm, size); + spin_unlock_irqrestore(&mm->context.lock, flags); + /* If old_tsb is NULL, we're being invoked for the first time * from init_new_context(). */ if (old_tsb) { - /* Now force all other processors to reload the new - * TSB state. - */ - smp_tsb_sync(mm); - - /* Finally reload it on the local cpu. No further - * references will remain to the old TSB and we can - * thus free it up. - */ + /* Reload it on the local cpu. */ tsb_context_switch(mm); + /* Now force other processors to do the same. */ + smp_tsb_sync(mm); + + /* Now it is safe to free the old tsb. */ free_pages((unsigned long) old_tsb, get_order(old_size)); } } @@ -363,7 +338,11 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) * will be confused and think there is an older TSB to free up. */ mm->context.tsb = NULL; - tsb_grow(mm, 0, GFP_KERNEL); + + /* If this is fork, inherit the parent's TSB size. We would + * grow it to that size on the first page fault anyways. + */ + tsb_grow(mm, get_mm_rss(mm)); if (unlikely(!mm->context.tsb)) return -ENOMEM; -- cgit v1.1 From b52439c22c63dbbefd5395f2151c0ef4f667e949 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 17 Mar 2006 23:40:47 -0800 Subject: [SPARC64]: Don't kill the page allocator when growing a TSB. Try only lightly on > 1 order allocations. If a grow fails, we are under memory pressure, so do not try to grow the TSB for this address space any more. If a > 0 order TSB allocation fails on a new fork, retry using a 0 order allocation. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 7fbe1e0..3eb8670 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -216,7 +216,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) * * The TSB can be anywhere from 8K to 1MB in size, in increasing powers * of two. The TSB must be aligned to it's size, so f.e. a 512K TSB - * must be 512K aligned. + * must be 512K aligned. It also must be physically contiguous, so we + * cannot use vmalloc(). * * The idea here is to grow the TSB when the RSS of the process approaches * the number of entries that the current TSB can hold at once. 
Currently, @@ -228,6 +229,8 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss) unsigned long size, old_size, flags; struct page *page; struct tsb *old_tsb, *new_tsb; + unsigned long order, new_rss_limit; + gfp_t gfp_flags; if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) max_tsb_size = (PAGE_SIZE << MAX_ORDER); @@ -240,9 +243,37 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss) break; } - page = alloc_pages(GFP_KERNEL, get_order(size)); - if (unlikely(!page)) + if (size == max_tsb_size) + new_rss_limit = ~0UL; + else + new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4; + +retry_page_alloc: + order = get_order(size); + gfp_flags = GFP_KERNEL; + if (order > 1) + gfp_flags = __GFP_NOWARN | __GFP_NORETRY; + + page = alloc_pages(gfp_flags, order); + if (unlikely(!page)) { + /* Not being able to fork due to a high-order TSB + * allocation failure is very bad behavior. Just back + * down to a 0-order allocation and force no TSB + * growing for this address space. + */ + if (mm->context.tsb == NULL && order > 0) { + size = PAGE_SIZE; + new_rss_limit = ~0UL; + goto retry_page_alloc; + } + + /* If we failed on a TSB grow, we are under serious + * memory pressure so don't try to grow any more. + */ + if (mm->context.tsb != NULL) + mm->context.tsb_rss_limit = ~0UL; return; + } /* Mark all tags as invalid. */ new_tsb = page_address(page); @@ -286,11 +317,7 @@ void tsb_grow(struct mm_struct *mm, unsigned long rss) return; } - if (size == max_tsb_size) - mm->context.tsb_rss_limit = ~0UL; - else - mm->context.tsb_rss_limit = - ((size / sizeof(struct tsb)) * 3) / 4; + mm->context.tsb_rss_limit = new_rss_limit; if (old_tsb) { extern void copy_tsb(unsigned long old_tsb_base, -- cgit v1.1 From 9b4006dcf6a8c43bd482b9c1ec576f0ed270ef23 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 18:12:42 -0800 Subject: [SPARC64]: Use SLAB caches for TSB tables. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 5 ++- arch/sparc64/mm/tsb.c | 86 ++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 66 insertions(+), 25 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index d703b67..a1a364e 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -165,6 +165,8 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) clear_page(addr); } +extern void tsb_cache_init(void); + void pgtable_cache_init(void) { pgtable_cache = kmem_cache_create("pgtable_cache", @@ -174,9 +176,10 @@ void pgtable_cache_init(void) zero_ctor, NULL); if (!pgtable_cache) { - prom_printf("pgtable_cache_init(): Could not create!\n"); + prom_printf("Could not create pgtable_cache\n"); prom_halt(); } + tsb_cache_init(); } #ifdef CONFIG_DEBUG_DCFLUSH diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 3eb8670..1af797a 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -11,6 +11,7 @@ #include #include #include +#include extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -207,6 +208,39 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) } } +static kmem_cache_t *tsb_caches[8] __read_mostly; + +static const char *tsb_cache_names[8] = { + "tsb_8KB", + "tsb_16KB", + "tsb_32KB", + "tsb_64KB", + "tsb_128KB", + "tsb_256KB", + "tsb_512KB", + "tsb_1MB", +}; + +void __init tsb_cache_init(void) +{ + unsigned long i; + + for (i = 0; i < 8; i++) { + unsigned long size = 8192 << i; + const char *name = tsb_cache_names[i]; + + tsb_caches[i] = kmem_cache_create(name, + size, size, + SLAB_HWCACHE_ALIGN | + SLAB_MUST_HWCACHE_ALIGN, + NULL, NULL); + if (!tsb_caches[i]) { + prom_printf("Could not create %s cache\n", name); + prom_halt(); + } + } +} + /* When the RSS of an address space exceeds mm->context.tsb_rss_limit, * do_sparc64_fault() invokes this routine to try and grow the TSB. * @@ -226,45 +260,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes) void tsb_grow(struct mm_struct *mm, unsigned long rss) { unsigned long max_tsb_size = 1 * 1024 * 1024; - unsigned long size, old_size, flags; - struct page *page; + unsigned long new_size, old_size, flags; struct tsb *old_tsb, *new_tsb; - unsigned long order, new_rss_limit; + unsigned long new_cache_index, old_cache_index; + unsigned long new_rss_limit; gfp_t gfp_flags; if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) max_tsb_size = (PAGE_SIZE << MAX_ORDER); - for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) { - unsigned long n_entries = size / sizeof(struct tsb); + new_cache_index = 0; + for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) { + unsigned long n_entries = new_size / sizeof(struct tsb); n_entries = (n_entries * 3) / 4; if (n_entries > rss) break; + + new_cache_index++; } - if (size == max_tsb_size) + if (new_size == max_tsb_size) new_rss_limit = ~0UL; else - new_rss_limit = ((size / sizeof(struct tsb)) * 3) / 4; + new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4; -retry_page_alloc: - order = get_order(size); +retry_tsb_alloc: gfp_flags = GFP_KERNEL; - if (order > 1) + if (new_size > (PAGE_SIZE * 2)) gfp_flags = __GFP_NOWARN | __GFP_NORETRY; - page = alloc_pages(gfp_flags, order); - if (unlikely(!page)) { + new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags); + if (unlikely(!new_tsb)) { /* Not being able to fork due to a high-order TSB * allocation failure is very bad behavior. 
Just back * down to a 0-order allocation and force no TSB * growing for this address space. */ - if (mm->context.tsb == NULL && order > 0) { - size = PAGE_SIZE; + if (mm->context.tsb == NULL && new_cache_index > 0) { + new_cache_index = 0; + new_size = 8192; new_rss_limit = ~0UL; - goto retry_page_alloc; + goto retry_tsb_alloc; } /* If we failed on a TSB grow, we are under serious @@ -276,8 +313,7 @@ retry_page_alloc: } /* Mark all tags as invalid. */ - new_tsb = page_address(page); - memset(new_tsb, 0x40, size); + memset(new_tsb, 0x40, new_size); /* Ok, we are about to commit the changes. If we are * growing an existing TSB the locking is very tricky, @@ -304,8 +340,10 @@ retry_page_alloc: spin_lock_irqsave(&mm->context.lock, flags); old_tsb = mm->context.tsb; + old_cache_index = (mm->context.tsb_reg_val & 0x7UL); old_size = mm->context.tsb_nentries * sizeof(struct tsb); + /* Handle multiple threads trying to grow the TSB at the same time. * One will get in here first, and bump the size and the RSS limit. * The others will get in here next and hit this check. @@ -313,7 +351,7 @@ retry_page_alloc: if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) { spin_unlock_irqrestore(&mm->context.lock, flags); - free_pages((unsigned long) new_tsb, get_order(size)); + kmem_cache_free(tsb_caches[new_cache_index], new_tsb); return; } @@ -331,11 +369,11 @@ retry_page_alloc: old_tsb_base = __pa(old_tsb_base); new_tsb_base = __pa(new_tsb_base); } - copy_tsb(old_tsb_base, old_size, new_tsb_base, size); + copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); } mm->context.tsb = new_tsb; - setup_tsb_params(mm, size); + setup_tsb_params(mm, new_size); spin_unlock_irqrestore(&mm->context.lock, flags); @@ -350,7 +388,7 @@ retry_page_alloc: smp_tsb_sync(mm); /* Now it is safe to free the old tsb. */ - free_pages((unsigned long) old_tsb, get_order(old_size)); + kmem_cache_free(tsb_caches[old_cache_index], old_tsb); } } @@ -379,10 +417,10 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) void destroy_context(struct mm_struct *mm) { - unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb); - unsigned long flags; + unsigned long flags, cache_index; - free_pages((unsigned long) mm->context.tsb, get_order(size)); + cache_index = (mm->context.tsb_reg_val & 0x7UL); + kmem_cache_free(tsb_caches[cache_index], mm->context.tsb); /* We can remove these later, but for now it's useful * to catch any bogus post-destroy_context() references -- cgit v1.1 From 88d7079458f87d6f2d2261b2f87b7b9416019f5e Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 19:16:23 -0800 Subject: [SPARC64]: Allow CONFIG_MEMORY_HOTPLUG to build. online_page() is straightforward, and then add a dummy remove_memory() that returns -EINVAL just like i386. There is no point in implementing remove_memory() since __remove_pages() has no implementation either. Signed-off-by: David S. 
Miller --- arch/sparc64/mm/init.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index a1a364e..c2b5561 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -1805,3 +1805,21 @@ void __flush_tlb_all(void) __asm__ __volatile__("wrpr %0, 0, %%pstate" : : "r" (pstate)); } + +#ifdef CONFIG_MEMORY_HOTPLUG + +void online_page(struct page *page) +{ + ClearPageReserved(page); + set_page_count(page, 0); + free_cold_page(page); + totalram_pages++; + num_physpages++; +} + +int remove_memory(u64 start, u64 size) +{ + return -EINVAL; +} + +#endif /* CONFIG_MEMORY_HOTPLUG */ -- cgit v1.1 From bb8646d8340fa7c1b66a037428e39f85f8738f0a Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 18 Mar 2006 23:55:11 -0800 Subject: [SPARC64]: Optimized TSB table initialization. We only need to write an invalid tag every 16 bytes, so taking advantage of this can save many instructions compared to the simple memset() call we make now. A prefetching implementation is implemented for sun4u and a block-init store version if implemented for Niagara. The next trick is to be able to perform an init and a copy_tsb() in parallel when growing a TSB table. Signed-off-by: David S. Miller --- arch/sparc64/mm/tsb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c index 1af797a..b2064e2 100644 --- a/arch/sparc64/mm/tsb.c +++ b/arch/sparc64/mm/tsb.c @@ -313,7 +313,7 @@ retry_tsb_alloc: } /* Mark all tags as invalid. */ - memset(new_tsb, 0x40, new_size); + tsb_init(new_tsb, new_size); /* Ok, we are about to commit the changes. If we are * growing an existing TSB the locking is very tricky, -- cgit v1.1 From f6b83f070e9b7ad9075f7cc5646260e56c7d0219 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 20 Mar 2006 01:17:17 -0800 Subject: [SPARC64]: Fix 2 bugs in huge page support. 1) huge_pte_offset() did not check the page table hierarchy elements as being empty correctly, resulting in an OOPS 2) Need platform specific hugetlb_get_unmapped_area() to handle the top-down vs. bottom-up address space allocation strategies. Signed-off-by: David S. Miller --- arch/sparc64/mm/hugetlbpage.c | 179 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 175 insertions(+), 4 deletions(-) (limited to 'arch/sparc64/mm') diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 625cbb3..a7a2486 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c @@ -1,7 +1,7 @@ /* * SPARC64 Huge TLB page support. * - * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) + * Copyright (C) 2002, 2003, 2006 David S. 
Miller (davem@davemloft.net) */ #include @@ -22,6 +22,175 @@ #include #include +/* Slightly simplified from the non-hugepage variant because by + * definition we don't have to worry about any page coloring stuff + */ +#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) +#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) + +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct * vma; + unsigned long task_size = TASK_SIZE; + unsigned long start_addr; + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; + if (unlikely(len >= VA_EXCLUDE_START)) + return -ENOMEM; + + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; + } else { + start_addr = addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + } + + task_size -= len; + +full_search: + addr = ALIGN(addr, HPAGE_SIZE); + + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (addr < VA_EXCLUDE_START && + (addr + len) >= VA_EXCLUDE_START) { + addr = VA_EXCLUDE_END; + vma = find_vma(mm, VA_EXCLUDE_END); + } + if (unlikely(task_size < addr)) { + if (start_addr != TASK_UNMAPPED_BASE) { + start_addr = addr = TASK_UNMAPPED_BASE; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } + if (likely(!vma || addr + len <= vma->vm_start)) { + /* + * Remember the place where we stopped the search: + */ + mm->free_area_cache = addr + len; + return addr; + } + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + addr = ALIGN(vma->vm_end, HPAGE_SIZE); + } +} + +static unsigned long +hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + const unsigned long len, + const unsigned long pgoff, + const unsigned long flags) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; + + /* This should only ever run for 32-bit processes. */ + BUG_ON(!test_thread_flag(TIF_32BIT)); + + /* check if free_area_cache is useful for us */ + if (len <= mm->cached_hole_size) { + mm->cached_hole_size = 0; + mm->free_area_cache = mm->mmap_base; + } + + /* either no address requested or can't fit in requested address hole */ + addr = mm->free_area_cache & HPAGE_MASK; + + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + } + + if (unlikely(mm->mmap_base < len)) + goto bottomup; + + addr = (mm->mmap_base-len) & HPAGE_MASK; + + do { + /* + * Lookup failure means no vma is above this address, + * else if new region fits below vma->vm_start, + * return with success: + */ + vma = find_vma(mm, addr); + if (likely(!vma || addr+len <= vma->vm_start)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } + + /* remember the largest hole we saw so far */ + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = (vma->vm_start-len) & HPAGE_MASK; + } while (likely(len < vma->vm_start)); + +bottomup: + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. 
This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + mm->cached_hole_size = ~0UL; + mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ + mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + + return addr; +} + +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long task_size = TASK_SIZE; + + if (test_thread_flag(TIF_32BIT)) + task_size = STACK_TOP32; + + if (len & ~HPAGE_MASK) + return -EINVAL; + if (len > task_size) + return -ENOMEM; + + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); + if (task_size - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} + pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; @@ -48,12 +217,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) pmd_t *pmd; pte_t *pte = NULL; + addr &= HPAGE_MASK; + pgd = pgd_offset(mm, addr); - if (pgd) { + if (!pgd_none(*pgd)) { pud = pud_offset(pgd, addr); - if (pud) { + if (!pud_none(*pud)) { pmd = pmd_offset(pud, addr); - if (pmd) + if (!pmd_none(*pmd)) pte = pte_offset_map(pmd, addr); } } -- cgit v1.1
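(Editorial appendix, not part of the patch series: a user-space sketch of the argument checks the new hugetlb_get_unmapped_area() performs. It assumes the 4MB huge page size sparc64 uses; HPAGE_SHIFT, HPAGE_SIZE, HPAGE_MASK and ALIGN() are redefined locally for the example rather than taken from kernel headers.)

    #include <stdio.h>

    /* Assumed values: sparc64 huge pages are 4MB (HPAGE_SHIFT == 22).
     * ALIGN() mirrors the kernel macro of the same name.
     */
    #define HPAGE_SHIFT     22
    #define HPAGE_SIZE      (1UL << HPAGE_SHIFT)
    #define HPAGE_MASK      (~(HPAGE_SIZE - 1UL))
    #define ALIGN(x, a)     (((x) + ((a) - 1)) & ~((a) - 1))

    int main(void)
    {
            unsigned long len  = 3UL * 1024 * 1024; /* not a multiple of HPAGE_SIZE */
            unsigned long addr = 0x70001234UL;      /* unaligned address hint */

            /* Lengths that are not a whole number of huge pages are
             * rejected with -EINVAL before any VMA searching happens.
             */
            if (len & ~HPAGE_MASK)
                    printf("len %#lx rejected: not HPAGE_SIZE aligned\n", len);

            /* An address hint is rounded up to a huge page boundary before
             * the bottom-up or top-down search walks the VMA list.
             */
            printf("hint %#lx becomes %#lx\n", addr, ALIGN(addr, HPAGE_SIZE));
            return 0;
    }

The same rounding also keeps every candidate returned by the bottom-up and top-down searches huge-page aligned, which is why both helpers mask with HPAGE_MASK before probing find_vma().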