Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 233
1 file changed, 152 insertions(+), 81 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba7540..c0b9a78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
#endif
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+ .prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cached */
@@ -205,7 +205,13 @@ static struct mem_type mem_types[] = {
[MT_DEVICE_WC] = { /* ioremap_wc */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+ .prot_sect = PROT_SECT_DEVICE,
+ .domain = DOMAIN_IO,
+ },
+ [MT_UNCACHED] = {
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
@@ -273,22 +279,23 @@ static void __init build_mem_type_table(void)
#endif
/*
- * On non-Xscale3 ARMv5-and-older systems, use CB=01
- * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
- * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
- * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+ * Strip out features not present on earlier architectures.
+ * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
+ * without extended page tables don't have the 'Shared' bit.
*/
- if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
- mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
- }
+ if (cpu_arch < CPU_ARCH_ARMv5)
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+ if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_SECT_S;
/*
- * ARMv5 and lower, bit 4 must be set for page tables.
- * (was: cache "update-able on write" bit on ARM610)
- * However, Xscale cores require this bit to be cleared.
+ * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+ * "update-able on write" bit on ARM610). However, Xscale and
+ * Xscale3 require this bit to be cleared.
*/
- if (cpu_is_xscale()) {
+ if (cpu_is_xscale() || cpu_is_xsc3()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_sect &= ~PMD_BIT4;
mem_types[i].prot_l1 &= ~PMD_BIT4;
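
For reference, the masks cleared in this hunk can be exercised in isolation. Below is a minimal stand-alone sketch, assuming the short-descriptor section layout used by pgtable-hwdef.h (TEX field at bits 14:12, Shared at bit 16, the legacy page-table bit at bit 4); the macros are redefined locally and the sample descriptor is made up:

/* strip_features.c - illustrate the feature stripping done by
 * build_mem_type_table() for older CPUs.  Bit positions are assumed
 * to follow the ARM short-descriptor section format.
 */
#include <stdio.h>

#define PMD_SECT_TEX(x) ((unsigned long)(x) << 12) /* assumed: TEX[14:12]   */
#define PMD_SECT_S      (1ul << 16)                /* assumed: Shared bit   */
#define PMD_BIT4        (1ul << 4)                 /* assumed: legacy bit 4 */

int main(void)
{
	/* hypothetical descriptor with TEX=2, S and bit 4 set */
	unsigned long prot = PMD_SECT_TEX(2) | PMD_SECT_S | PMD_BIT4;

	printf("initial       : %#lx\n", prot);

	/* pre-ARMv5: no TEX bits, so clear the whole three-bit field */
	prot &= ~PMD_SECT_TEX(7);
	printf("TEX stripped  : %#lx\n", prot);

	/* pre-ARMv6 (or CR_XP clear) and not xsc3: no Shared bit */
	prot &= ~PMD_SECT_S;
	printf("S stripped    : %#lx\n", prot);

	/* XScale/XScale3: bit 4 must be clear in section and L1 entries */
	prot &= ~PMD_BIT4;
	printf("bit 4 cleared : %#lx\n", prot);
	return 0;
}
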
@@ -302,6 +309,64 @@ static void __init build_mem_type_table(void)
}
}
+ /*
+ * Mark the device areas according to the CPU/architecture.
+ */
+ if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+ if (!cpu_is_xsc3()) {
+ /*
+ * Mark device regions on ARMv6+ as execute-never
+ * to prevent speculative instruction fetches.
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+ }
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /*
+ * For ARMv7 with TEX remapping,
+ * - shared device is SXCB=1100
+ * - nonshared device is SXCB=0100
+ * - write combine device mem is SXCB=0001
+ * (Uncached Normal memory)
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ } else if (cpu_is_xsc3()) {
+ /*
+ * For Xscale3,
+ * - shared device is TEXCB=00101
+ * - nonshared device is TEXCB=01000
+ * - write combine device mem is TEXCB=00100
+ * (Inner/Outer Uncacheable in xsc3 parlance)
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ } else {
+ /*
+ * For ARMv6 and ARMv7 without TEX remapping,
+ * - shared device is TEXCB=00001
+ * - nonshared device is TEXCB=01000
+ * - write combine device mem is TEXCB=00100
+ * (Uncached Normal in ARMv6 parlance).
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ }
+ } else {
+ /*
+ * On others, write combining is "Uncached/Buffered"
+ */
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
+ /*
+ * Now deal with the memory-type mappings
+ */
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
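
The SXCB/TEXCB encodings quoted in the comments above can be cross-checked with a small stand-alone decoder. This is only an illustration: the bit positions (TEX at 14:12, C at 3, B at 2, S at 16, XN at 4) are assumed from the ARMv6+ short-descriptor section format, the macros are local stand-ins for the kernel's PMD_SECT_* definitions, and the section-type/AP bits carried by PROT_SECT_DEVICE are left out for clarity:

/* texcb_decode.c - print the TEX, C, B, S, XN bits of a section
 * descriptor, mirroring the "shared device is TEXCB=00001" style
 * comments above.  Bit positions are assumptions, not kernel headers.
 */
#include <stdio.h>

#define SECT_B       (1ul << 2)                  /* Bufferable    */
#define SECT_C       (1ul << 3)                  /* Cacheable     */
#define SECT_XN      (1ul << 4)                  /* eXecute Never */
#define SECT_TEX(x)  ((unsigned long)(x) << 12)  /* TEX[14:12]    */
#define SECT_S       (1ul << 16)                 /* Shared        */

static void decode(const char *name, unsigned long prot)
{
	unsigned int tex = (unsigned int)((prot >> 12) & 7);

	printf("%-20s TEX=%u%u%u C=%u B=%u S=%u XN=%u\n", name,
	       (tex >> 2) & 1, (tex >> 1) & 1, tex & 1,
	       (unsigned int)!!(prot & SECT_C), (unsigned int)!!(prot & SECT_B),
	       (unsigned int)!!(prot & SECT_S), (unsigned int)!!(prot & SECT_XN));
}

int main(void)
{
	/* ARMv6/v7 without TEX remapping, matching the final branch above */
	decode("MT_DEVICE",           SECT_B | SECT_S | SECT_XN); /* TEXCB=00001, shared    */
	decode("MT_DEVICE_NONSHARED", SECT_TEX(2) | SECT_XN);     /* TEXCB=01000            */
	decode("MT_DEVICE_WC",        SECT_TEX(1) | SECT_XN);     /* TEXCB=00100, WC/normal */
	return 0;
}
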
@@ -317,12 +382,8 @@ static void __init build_mem_type_table(void)
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent()) {
- if (cpu_is_xsc3()) {
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
- }
- }
+ if (arch_is_coherent() && cpu_is_xsc3())
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
/*
* ARMv6 and above have extended page tables.
@@ -336,11 +397,6 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
- /*
- * Mark the device area as "shared device"
- */
- mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
#ifdef CONFIG_SMP
/*
* Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +416,6 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
- if (cpu_arch < CPU_ARCH_ARMv5)
- mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | L_PTE_WRITE |
@@ -593,68 +646,86 @@ static void __init early_vmalloc(char **arg)
"vmalloc area too small, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
+
+ if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+ vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+ printk(KERN_WARNING
+ "vmalloc area is too big, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
}
__early_param("vmalloc=", early_vmalloc);
#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
-static int __init check_membank_valid(struct membank *mb)
+static void __init sanity_check_meminfo(void)
{
- /*
- * Check whether this memory region has non-zero size or
- * invalid node number.
- */
- if (mb->size == 0 || mb->node >= MAX_NUMNODES)
- return 0;
-
- /*
- * Check whether this memory region would entirely overlap
- * the vmalloc area.
- */
- if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
- printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
- "(vmalloc region overlap).\n",
- mb->start, mb->start + mb->size - 1);
- return 0;
- }
-
- /*
- * Check whether this memory region would partially overlap
- * the vmalloc area.
- */
- if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
- phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
- unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
-
- printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
- "to -%.8lx (vmalloc region overlap).\n",
- mb->start, mb->start + mb->size - 1,
- mb->start + newsize - 1);
- mb->size = newsize;
- }
+ int i, j;
- return 1;
-}
+ for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+ struct membank *bank = &meminfo.bank[j];
+ *bank = meminfo.bank[i];
-static void __init sanity_check_meminfo(struct meminfo *mi)
-{
- int i, j;
+#ifdef CONFIG_HIGHMEM
+ /*
+ * Split those memory banks which are partially overlapping
+ * the vmalloc area greatly simplifying things later.
+ */
+ if (__va(bank->start) < VMALLOC_MIN &&
+ bank->size > VMALLOC_MIN - __va(bank->start)) {
+ if (meminfo.nr_banks >= NR_BANKS) {
+ printk(KERN_CRIT "NR_BANKS too low, "
+ "ignoring high memory\n");
+ } else {
+ memmove(bank + 1, bank,
+ (meminfo.nr_banks - i) * sizeof(*bank));
+ meminfo.nr_banks++;
+ i++;
+ bank[1].size -= VMALLOC_MIN - __va(bank->start);
+ bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+ j++;
+ }
+ bank->size = VMALLOC_MIN - __va(bank->start);
+ }
+#else
+ /*
+ * Check whether this memory bank would entirely overlap
+ * the vmalloc area.
+ */
+ if (__va(bank->start) >= VMALLOC_MIN) {
+ printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+ "(vmalloc region overlap).\n",
+ bank->start, bank->start + bank->size - 1);
+ continue;
+ }
- for (i = 0, j = 0; i < mi->nr_banks; i++) {
- if (check_membank_valid(&mi->bank[i]))
- mi->bank[j++] = mi->bank[i];
+ /*
+ * Check whether this memory bank would partially overlap
+ * the vmalloc area.
+ */
+ if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+ __va(bank->start + bank->size) < __va(bank->start)) {
+ unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+ printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+ "to -%.8lx (vmalloc region overlap).\n",
+ bank->start, bank->start + bank->size - 1,
+ bank->start + newsize - 1);
+ bank->size = newsize;
+ }
+#endif
+ j++;
}
- mi->nr_banks = j;
+ meminfo.nr_banks = j;
}
-static inline void prepare_page_table(struct meminfo *mi)
+static inline void prepare_page_table(void)
{
unsigned long addr;
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
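
The CONFIG_HIGHMEM path of the new sanity_check_meminfo() splits a bank that straddles the lowmem/vmalloc boundary into a lowmem part and a highmem part. A toy stand-alone model of that split is sketched below; the membank type, the virtual-address constants and the sample bank are all illustrative assumptions, and the multi-bank memmove handling of the real function is omitted:

/* split_banks.c - toy model of splitting a memory bank that crosses
 * the lowmem limit, as sanity_check_meminfo() does above when
 * CONFIG_HIGHMEM is enabled.  Types and addresses are made up.
 */
#include <stdio.h>

#define PAGE_OFFSET_VA 0xc0000000ul   /* assumed start of lowmem (virtual) */
#define VMALLOC_MIN_VA 0xd0000000ul   /* assumed lowmem limit (virtual)    */
#define NR_BANKS 8

struct membank { unsigned long start, size; };   /* physical start/size */

/* toy __va(): physical 0x00000000 maps to PAGE_OFFSET_VA */
static unsigned long to_virt(unsigned long phys) { return phys + PAGE_OFFSET_VA; }

int main(void)
{
	struct membank bank[NR_BANKS] = { { 0x00000000, 0x20000000 } }; /* 512MB */
	int nr = 1;

	/* Does the first bank extend past the lowmem limit? */
	if (to_virt(bank[0].start) < VMALLOC_MIN_VA &&
	    bank[0].size > VMALLOC_MIN_VA - to_virt(bank[0].start) &&
	    nr < NR_BANKS) {
		unsigned long lowsize = VMALLOC_MIN_VA - to_virt(bank[0].start);

		/* the upper part becomes a separate highmem bank */
		bank[1].start = bank[0].start + lowsize;
		bank[1].size  = bank[0].size - lowsize;
		/* the lower part is truncated to end at the lowmem limit */
		bank[0].size  = lowsize;
		nr = 2;
	}

	for (int i = 0; i < nr; i++)
		printf("bank %d: phys %#010lx size %#010lx (%s)\n", i,
		       bank[i].start, bank[i].size,
		       to_virt(bank[i].start) < VMALLOC_MIN_VA ? "lowmem" : "highmem");
	return 0;
}
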
@@ -668,7 +739,7 @@ static inline void prepare_page_table(struct meminfo *mi)
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
- for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+ for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
@@ -766,7 +837,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
#ifdef CONFIG_XIP_KERNEL
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
- map.virtual = MODULE_START;
+ map.virtual = MODULES_VADDR;
map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
create_mapping(&map);
@@ -827,14 +898,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
build_mem_type_table();
- sanity_check_meminfo(mi);
- prepare_page_table(mi);
- bootmem_init(mi);
+ sanity_check_meminfo();
+ prepare_page_table();
+ bootmem_init();
devicemaps_init(mdesc);
top_pmd = pmd_off_k(0xffff0000);
@@ -843,7 +914,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
* allocate the zero page. Note that we count on this going ok.
*/
zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
- memzero(zero_page, PAGE_SIZE);
+ memset(zero_page, 0, PAGE_SIZE);
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}