From 9eb8f6743b076b67f00776cda4330c802e157b41 Mon Sep 17 00:00:00 2001
From: Grant Likely
Date: Thu, 28 Apr 2011 14:27:20 -0600
Subject: arm/dt: Allow CONFIG_OF on ARM

Add some basic empty infrastructure for DT support on ARM.

v5: - Fix off-by-one error in size calculation of initrd
    - Stop mucking with cmd_line, and load command line from dt into
      boot_command_line instead which matches the behaviour of ATAGS booting
v3: - moved cmd_line export and initrd setup to this patch to make the
      series bisectable.
    - switched to alloc_bootmem_align() for allocation when unflattening
      the device tree. memblock_alloc() was not the right interface.

Signed-off-by: Jeremy Kerr
Tested-by: Tony Lindgren
Acked-by: Nicolas Pitre
Acked-by: Russell King
Signed-off-by: Grant Likely
---
 arch/arm/mm/init.c | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5f6fc4..26c4054 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -71,6 +72,14 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+{
+	phys_initrd_start = start;
+	phys_initrd_size = end - start;
+}
+#endif /* CONFIG_OF_FLATTREE */
+
 /*
  * This keeps memory configuration data used by a couple memory
  * initialization functions, as well as show_mem() for the skipping
--
cgit v1.1
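
The hook above is the architecture half of the flattened-device-tree initrd
handling: the generic FDT code reads the standard linux,initrd-start and
linux,initrd-end properties and hands the physical range to the architecture.
A rough sketch of that caller is shown below; the property names are standard,
but the helper usage and flow are an approximation rather than the kernel's
verbatim code, and 32-bit address cells are assumed for brevity.

/* Approximate sketch of the generic FDT consumer that ends up calling the
 * arch hook added above; error handling and 64-bit cells are omitted. */
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	unsigned long start, end, len;
	const __be32 *prop;

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
	if (!prop)
		return;
	start = be32_to_cpu(prop[0]);

	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
	if (!prop)
		return;
	end = be32_to_cpu(prop[0]);

	/* The range is half-open, [start, end), hence size = end - start. */
	early_init_dt_setup_initrd_arch(start, end);
}
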
From 93c02ab40ae6e06cb24d14845d9f008fdd24f43d Mon Sep 17 00:00:00 2001
From: Grant Likely
Date: Thu, 28 Apr 2011 14:27:21 -0600
Subject: arm/dt: probe for platforms via the device tree

If a dtb is passed to the kernel, then the kernel needs to iterate
through the compiled-in mdescs looking for one that matches, and move
the dtb data to a safe location before it gets accidentally overwritten
by the kernel.

This patch creates a new function, setup_machine_fdt(), which is
analogous to the setup_machine_atags() created in the previous patch.
It does all the early setup needed to use a device tree machine
description.

v5: - Print warning when neither dtb nor atags are passed to the kernel
    - Fix bug in setting of __machine_arch_type to the selected machine,
      not just the last machine in the list.
      Reported-by: Tixy
    - Copy command line directly into boot_command_line instead of cmd_line
v4: - Dump some output when a matching machine_desc cannot be found
v3: - Added processing of reserved list.
    - Backed out the v2 change that copied instead of reserved the dtb.
      dtb is reserved again and the real problem was fixed by using
      alloc_bootmem_align() for early allocation of RAM for unflattening
      the tree.
    - Moved cmd_line and initrd changes to earlier patch to make series
      bisectable.
v2: Changed to save the dtb by copying into an allocated buffer.
    - Since the dtb will very likely be passed in the first 16k of ram
      where the interrupt vectors live, memblock_reserve() is
      insufficient to protect the dtb data.

[based on work originally written by Jeremy Kerr]
Tested-by: Tony Lindgren
Acked-by: Nicolas Pitre
Acked-by: Russell King
Signed-off-by: Grant Likely
---
 arch/arm/mm/init.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 26c4054..b265975 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -322,6 +323,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #endif
 
 	arm_mm_memblock_reserve();
+	arm_dt_memblock_reserve();
 
 	/* reserve any platform specific memblock areas */
 	if (mdesc->reserve)
--
cgit v1.1

From 7b7bf499f79de3f6c85a340c8453a78789523f85 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 May 2011 13:21:14 +0100
Subject: ARM: 6913/1: sparsemem: allow pfn_valid to be overridden when using SPARSEMEM

In commit eb33575c ("[ARM] Double check memmap is actually valid with a
memmap has unexpected holes V2"), a new function, memmap_valid_within,
was introduced to mmzone.h so that holes in the memmap which pass
pfn_valid in SPARSEMEM configurations can be detected and avoided.

The fix to this problem checks that the pfn <-> page linkages are
correct by calculating the page for the pfn and then checking that
page_to_pfn on that page returns the original pfn. Unfortunately, in
SPARSEMEM configurations, this results in reading from the page flags
to determine the correct section. Since the memmap here has been freed,
junk is read from memory and the check is no longer robust.

In the best case, reading from /proc/pagetypeinfo will give you the
wrong answer. In the worst case, you get SEGVs, Kernel OOPses and hung
CPUs. Furthermore, ioremap implementations that use pfn_valid to
disallow the remapping of normal memory will break.

This patch allows architectures to provide their own pfn_valid function
instead of using the default implementation used by sparsemem. The
architecture-specific version is aware of the memmap state and will
return false when passed a pfn for a freed page within a valid section.

Acked-by: Mel Gorman
Acked-by: Catalin Marinas
Tested-by: H Hartley Sweeten
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/init.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3f17ea1..bbc3346 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -273,13 +273,15 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
-#ifndef CONFIG_SPARSEMEM
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
 	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
+#endif
 
+#ifndef CONFIG_SPARSEMEM
 static void arm_memory_present(void)
 {
 }
--
cgit v1.1
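
To see why the generic check goes wrong, the pfn <-> page linkage test
described above reduces to something like the fragment below. This is an
illustrative reduction written for this note, not code taken from the mm
sources.

/* Illustrative reduction of the pfn <-> page linkage check described
 * above.  On SPARSEMEM, page_to_pfn() consults the section number encoded
 * in page->flags, so once the covering memmap has been freed this reads
 * stale memory and the comparison is meaningless. */
static bool memmap_looks_valid(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	return page_to_pfn(page) == pfn;	/* may read a freed memmap */
}
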
From 40f7bfe4f1c2761abeceb3b2b9dc1feec3c47ed9 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 19 May 2011 13:22:48 +0100
Subject: ARM: 6914/1: sparsemem: fix highmem detection when using SPARSEMEM

sanity_check_meminfo walks over the registered memory banks and attempts
to split banks across lowmem and highmem when they would otherwise
overlap with the vmalloc space. When SPARSEMEM is used, there are two
potential problems that occur when the virtual address of the start of a
bank is equal to vmalloc_min.

1.) The end of lowmem is calculated as __pa(vmalloc_min - 1) + 1. In the
    above scenario, this will give the end address of the previous bank,
    rather than the actual bank we are interested in. This value is later
    used as the memblock limit and artificially restricts the total
    amount of available memory.

2.) The checks to determine whether a bank belongs to highmem only test
    whether __va(bank->start) is greater than or less than vmalloc_min.
    In the case that it is equal, the bank is incorrectly treated as
    lowmem, which hoses the vmalloc area.

This patch fixes these two problems by checking whether the virtual
start address of a bank is >= vmalloc_min and then calculating
lowmem_end by finding the virtual end address of the highest lowmem
bank.

Acked-by: Catalin Marinas
Reviewed-by: Nicolas Pitre
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 08a9236..9d9e736 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -763,15 +763,12 @@ static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
-	lowmem_limit = __pa(vmalloc_min - 1) + 1;
-	memblock_set_current_limit(lowmem_limit);
-
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) > vmalloc_min ||
+		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
 			highmem = 1;
 
@@ -829,6 +826,9 @@ static void __init sanity_check_meminfo(void)
 			bank->size = newsize;
 		}
 #endif
+		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+			lowmem_limit = bank->start + bank->size;
+
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -852,6 +852,7 @@ static void __init sanity_check_meminfo(void)
 		}
 #endif
 	meminfo.nr_banks = j;
+	memblock_set_current_limit(lowmem_limit);
 }
 
 static inline void prepare_page_table(void)
--
cgit v1.1
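
To make the boundary case concrete, the small user-space model below
reproduces the old and new highmem checks for a bank whose virtual start
lands exactly on vmalloc_min. All constants, including the phys-to-virt
offset, are made up for illustration and do not describe a real platform.

#include <stdio.h>

#define PAGE_OFFSET	0xc0000000UL	/* hypothetical lowmem base */
#define VMALLOC_MIN	0xe0000000UL	/* hypothetical start of vmalloc space */
#define PHYS_OFFSET	0x80000000UL	/* hypothetical start of RAM */
#define __va(pa)	((pa) - PHYS_OFFSET + PAGE_OFFSET)

int main(void)
{
	unsigned long bank_start = 0xa0000000UL;	/* __va() == VMALLOC_MIN */

	int old_highmem = __va(bank_start) >  VMALLOC_MIN;	/* 0: mis-classified as lowmem */
	int new_highmem = __va(bank_start) >= VMALLOC_MIN;	/* 1: correctly highmem */

	printf("old check: %d, new check: %d\n", old_highmem, new_highmem);
	return 0;
}
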
From a248b13b21ae00b97638b4f435c8df3075808b5d Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 May 2011 11:20:19 +0100
Subject: ARM: 6941/1: cache: ensure MVA is cacheline aligned in flush_kern_dcache_area

The v6 and v7 implementations of flush_kern_dcache_area do not align the
passed MVA to the size of a cacheline in the data cache. If a misaligned
address is used, only a subset of the requested area will be flushed.
This has been observed to cause failures in SMP boot where the
secondary_data initialised by the primary CPU is not cacheline aligned,
causing the secondary CPUs to read incorrect values for their pgd and
stack pointers.

This patch ensures that the base address is cacheline aligned before
flushing the d-cache.

Cc:
Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/cache-v6.S | 1 +
 arch/arm/mm/cache-v7.S | 2 ++
 2 files changed, 3 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index c96fa1b..73b4a8b 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -176,6 +176,7 @@ ENDPROC(v6_coherent_kern_range)
  */
 ENTRY(v6_flush_kern_dcache_area)
 	add	r1, r0, r1
+	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc18d81..d32f02b 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -221,6 +221,8 @@ ENDPROC(v7_coherent_user_range)
 ENTRY(v7_flush_kern_dcache_area)
 	dcache_line_size r2, r3
 	add	r1, r0, r1
+	sub	r3, r2, #1
+	bic	r0, r0, r3
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
--
cgit v1.1

From d427958a46af24f75d0017c45eadd172273bbf33 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Thu, 26 May 2011 11:22:44 +0100
Subject: ARM: 6942/1: mm: make TTBR1 always point to swapper_pg_dir on ARMv6/7

This patch makes TTBR1 point to swapper_pg_dir so that global, kernel
mappings can be used exclusively on v6 and v7 cores where they are
needed.

Signed-off-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/proc-v6.S | 4 +++-
 arch/arm/mm/proc-v7.S | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index ab17cc0..1d2b845 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -213,7 +213,9 @@ __v6_setup:
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
 	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
-	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+	ALT_SMP(orr	r8, r8, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
+	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index babfba0..3c38678 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -368,7 +368,9 @@ __v7_setup:
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
 	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
-	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
+	ALT_SMP(orr	r8, r8, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r8, r8, #TTB_FLAGS_UP)
+	mcr	p15, 0, r8, c2, c0, 1		@ load TTB1
 	ldr	r5, =PRRR			@ PRRR
 	ldr	r6, =NMRR			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
--
cgit v1.1
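
Keeping swapper_pg_dir in TTBR1 gives every CPU a permanently valid set of
global-only page tables to fall back on, and the next patch in the series
relies on exactly that. The sketch below shows the idea in C; the
coprocessor accesses mirror the mrc/mcr pair used in the following hunks,
but the function itself is an illustration, not something lifted from the
kernel.

/* Sketch: point TTBR0 at the global-only tables that TTBR1 now always
 * holds (swapper_pg_dir), so the walker cannot pick up entries from a
 * user pgd while the ASID and TTBR0 are being changed. */
static inline void ttbr0_from_swapper(void)
{
	unsigned long ttb;

	asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"	/* read TTBR1 */
		     "mcr	p15, 0, %0, c2, c0, 0"		/* write TTBR0 */
		     : "=r" (ttb));
	isb();
}
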
From 52af9c6cd863fe37d1103035ec7ee22ac1296458 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 May 2011 11:23:43 +0100
Subject: ARM: 6943/1: mm: use TTBR1 instead of reserved context ID

On ARMv7 CPUs that cache first level page table entries (like the
Cortex-A15), using a reserved ASID while changing the TTBR or flushing
the TLB is unsafe.

This is because the CPU may cache the first level entry as the result of
a speculative memory access while the reserved ASID is assigned. After
the process owning the page tables dies, the memory will be reallocated
and may be written with junk values which can be interpreted as global,
valid PTEs by the processor. This will result in the TLB being populated
with bogus global entries.

This patch avoids the use of a reserved context ID in the v7 switch_mm
and ASID rollover code by temporarily using the swapper_pg_dir pointed
at by TTBR1, which contains only global entries that are not tagged
with ASIDs.

Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/context.c | 11 ++++++-----
 arch/arm/mm/proc-v7.S | 10 ++++------
 2 files changed, 10 insertions(+), 11 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b0ee9ba..0d86298 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,9 +24,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 
 /*
  * We fork()ed a process, and we need a new context for the child
- * to run in.  We reserve version 0 for initial tasks so we will
- * always allocate an ASID.  The ASID 0 is reserved for the TTBR
- * register changing sequence.
+ * to run in.
  */
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -36,8 +34,11 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 static void flush_context(void)
 {
-	/* set the reserved ASID before flushing the TLB */
-	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	u32 ttb;
+	/* Copy TTBR1 into TTBR0 */
+	asm volatile("mrc	p15, 0, %0, c2, c0, 1\n"
+		     "mcr	p15, 0, %0, c2, c0, 0"
+		     : "=r" (ttb));
 	isb();
 	local_flush_tlb_all();
 	if (icache_is_vivt_asid_tagged()) {
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3c38678..b3b566e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,18 +108,16 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
-#ifdef CONFIG_ARM_ERRATA_754322
-	dsb
-#endif
-	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
-	isb
-1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	mrc	p15, 0, r2, c2, c0, 1		@ load TTB 1
+	mcr	p15, 0, r2, c2, c0, 0		@ into TTB 0
 	isb
 #ifdef CONFIG_ARM_ERRATA_754322
 	dsb
 #endif
 	mcr	p15, 0, r1, c13, c0, 1		@ set context ID
 	isb
+	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
+	isb
 #endif
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
--
cgit v1.1

From 45b95235b0ac86cef2ad4480b0618b8778847479 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 26 May 2011 11:24:25 +0100
Subject: ARM: 6944/1: mm: allow ASID 0 to be allocated to tasks

Now that ASID 0 is no longer used as a reserved value, allow it to be
allocated to tasks.

Acked-by: Catalin Marinas
Signed-off-by: Will Deacon
Signed-off-by: Russell King
---
 arch/arm/mm/context.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 0d86298..8bfae96 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -94,7 +94,7 @@ static void reset_context(void *info)
 		return;
 
 	smp_rmb();
-	asid = cpu_last_asid + cpu + 1;
+	asid = cpu_last_asid + cpu;
 
 	flush_context();
 	set_mm_context(mm, asid);
@@ -144,13 +144,13 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = cpu_last_asid + smp_processor_id() + 1;
+		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
 #ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
 #endif
-		cpu_last_asid += NR_CPUS;
+		cpu_last_asid += NR_CPUS - 1;
 	}
 
 	set_mm_context(mm, asid);
--
cgit v1.1
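
With the reserved value gone, the allocator's arithmetic can be modelled in
user space as below. ASID_BITS, NR_CPUS and the starting generation are
assumptions chosen for illustration (the ARMv7 CONTEXTIDR ASID field is
8 bits), not values copied from the kernel sources.

#include <stdio.h>

#define ASID_BITS	8			/* assumed: ARMv7 has an 8-bit ASID */
#define ASID_MASK	(~0u << ASID_BITS)	/* generation (version) bits */
#define NR_CPUS		4			/* assumed for the model */

static unsigned int cpu_last_asid = 1u << ASID_BITS;	/* first generation */

/* Single-CPU model of the rollover path after this patch: when the low
 * ASID bits wrap to zero, the wrapped value itself (ASID 0 of the new
 * generation) is handed out, and ASIDs 1..NR_CPUS-1 are left for the
 * reset_context() IPIs on the other CPUs. */
static unsigned int new_context(void)
{
	unsigned int asid = ++cpu_last_asid;

	if ((asid & ~ASID_MASK) == 0) {
		asid = cpu_last_asid;		/* + smp_processor_id(), here 0 */
		/* flush_context() and the reset_context() IPI happen here */
		cpu_last_asid += NR_CPUS - 1;
	}
	return asid;
}

int main(void)
{
	for (int i = 0; i < 258; i++) {
		unsigned int asid = new_context();

		/* Only print the interesting allocations around the wrap. */
		if ((asid & ~ASID_MASK) <= NR_CPUS || (asid & ~ASID_MASK) >= 254)
			printf("context %3d -> asid %#x\n", i, asid);
	}
	return 0;
}
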