From e3239ff92a17976ac5d26fa0fe40ef3a9daf2523 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 14:06:41 +1000 Subject: memblock: Rename memblock_region to memblock_type and memblock_property to memblock_region Signed-off-by: Benjamin Herrenschmidt --- arch/arm/mm/init.c | 2 +- arch/arm/plat-omap/fb.c | 2 +- arch/microblaze/mm/init.c | 4 ++-- arch/powerpc/mm/hash_utils_64.c | 2 +- arch/powerpc/mm/mem.c | 26 +++++++++++++------------- arch/powerpc/platforms/embedded6xx/wii.c | 2 +- arch/sparc/mm/init_64.c | 6 +++--- 7 files changed, 22 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7185b00..d1496e6 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -237,7 +237,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min, #ifndef CONFIG_SPARSEMEM int pfn_valid(unsigned long pfn) { - struct memblock_region *mem = &memblock.memory; + struct memblock_type *mem = &memblock.memory; unsigned int left = 0, right = mem->cnt; do { diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 0054b95..05bf228 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -173,7 +173,7 @@ static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg, static int valid_sdram(unsigned long addr, unsigned long size) { - struct memblock_property res; + struct memblock_region res; res.base = addr; res.size = size; diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index db59349..afd6494 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -77,8 +77,8 @@ void __init setup_memory(void) /* Find main memory where is the kernel */ for (i = 0; i < memblock.memory.cnt; i++) { - memory_start = (u32) memblock.memory.region[i].base; - memory_end = (u32) memblock.memory.region[i].base + memory_start = (u32) memblock.memory.regions[i].base; + memory_end = (u32) memblock.memory.regions[i].base + (u32) memblock.memory.region[i].size; if ((memory_start <= (u32)_text) && ((u32)_text <= memory_end)) { diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 09dffe6..b1a3784 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -660,7 +660,7 @@ static void __init htab_initialize(void) /* create bolted the linear mapping in the hash table */ for (i=0; i < memblock.memory.cnt; i++) { - base = (unsigned long)__va(memblock.memory.region[i].base); + base = (unsigned long)__va(memblock.memory.regions[i].base); size = memblock.memory.region[i].size; DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 1a84a8d..a33f5c1 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -86,10 +86,10 @@ int page_is_ram(unsigned long pfn) for (i=0; i < memblock.memory.cnt; i++) { unsigned long base; - base = memblock.memory.region[i].base; + base = memblock.memory.regions[i].base; if ((paddr >= base) && - (paddr < (base + memblock.memory.region[i].size))) { + (paddr < (base + memblock.memory.regions[i].size))) { return 1; } } @@ -149,7 +149,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { - struct memblock_property res; + struct memblock_region res; unsigned long pfn, len; u64 end; int ret = -1; @@ -206,7 +206,7 @@ void __init do_init_bootmem(void) /* Add active regions with valid PFNs */ for (i = 0; i < memblock.memory.cnt; i++) { 
unsigned long start_pfn, end_pfn; - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); add_active_range(0, start_pfn, end_pfn); } @@ -219,16 +219,16 @@ void __init do_init_bootmem(void) /* reserve the sections we're already using */ for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long addr = memblock.reserved.region[i].base + + unsigned long addr = memblock.reserved.regions[i].base + memblock_size_bytes(&memblock.reserved, i) - 1; if (addr < lowmem_end_addr) - reserve_bootmem(memblock.reserved.region[i].base, + reserve_bootmem(memblock.reserved.regions[i].base, memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); - else if (memblock.reserved.region[i].base < lowmem_end_addr) { + else if (memblock.reserved.regions[i].base < lowmem_end_addr) { unsigned long adjusted_size = lowmem_end_addr - - memblock.reserved.region[i].base; - reserve_bootmem(memblock.reserved.region[i].base, + memblock.reserved.regions[i].base; + reserve_bootmem(memblock.reserved.regions[i].base, adjusted_size, BOOTMEM_DEFAULT); } } @@ -237,7 +237,7 @@ void __init do_init_bootmem(void) /* reserve the sections we're already using */ for (i = 0; i < memblock.reserved.cnt; i++) - reserve_bootmem(memblock.reserved.region[i].base, + reserve_bootmem(memblock.reserved.regions[i].base, memblock_size_bytes(&memblock.reserved, i), BOOTMEM_DEFAULT); @@ -257,10 +257,10 @@ static int __init mark_nonram_nosave(void) for (i = 0; i < memblock.memory.cnt - 1; i++) { memblock_region_max_pfn = - (memblock.memory.region[i].base >> PAGE_SHIFT) + - (memblock.memory.region[i].size >> PAGE_SHIFT); + (memblock.memory.regions[i].base >> PAGE_SHIFT) + + (memblock.memory.regions[i].size >> PAGE_SHIFT); memblock_next_region_start_pfn = - memblock.memory.region[i+1].base >> PAGE_SHIFT; + memblock.memory.regions[i+1].base >> PAGE_SHIFT; if (memblock_region_max_pfn < memblock_next_region_start_pfn) register_nosave_region(memblock_region_max_pfn, diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 5cdcc7c..8450c29 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x) void __init wii_memory_fixups(void) { - struct memblock_property *p = memblock.memory.region; + struct memblock_region *p = memblock.memory.region; /* * This is part of a workaround to allow the use of two diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index f043451..16d8bee 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -978,7 +978,7 @@ static void __init add_node_ranges(void) unsigned long size = memblock_size_bytes(&memblock.memory, i); unsigned long start, end; - start = memblock.memory.region[i].base; + start = memblock.memory.regions[i].base; end = start + size; while (start < end) { unsigned long this_end; @@ -1299,7 +1299,7 @@ static void __init bootmem_init_nonnuma(void) if (!size) continue; - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; + start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); add_active_range(0, start_pfn, end_pfn); } @@ -1339,7 +1339,7 @@ static void __init trim_reserved_in_node(int nid) numadbg(" trim_reserved_in_node(%d)\n", nid); for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long start = memblock.reserved.region[i].base; + 
unsigned long start = memblock.reserved.regions[i].base; unsigned long size = memblock_size_bytes(&memblock.reserved, i); unsigned long end = start + size; -- cgit v1.1 From 5e6f6aa1c243fafeb2648cf4ebd5abd99ab2531b Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 13:23:02 +1000 Subject: memblock/arm: pfn_valid uses memblock_is_memory() The implementation is pretty much similar. There is a -small- added overhead by having another function call and the address shift. If that becomes a concern, I suppose we could actually have memblock itself expose a memblock_pfn_valid() which then ARM can use directly with an appropriate #define... Signed-off-by: Benjamin Herrenschmidt --- arch/arm/mm/init.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d1496e6..e739223 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -237,20 +237,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min, #ifndef CONFIG_SPARSEMEM int pfn_valid(unsigned long pfn) { - struct memblock_type *mem = &memblock.memory; - unsigned int left = 0, right = mem->cnt; - - do { - unsigned int mid = (right + left) / 2; - - if (pfn < memblock_start_pfn(mem, mid)) - right = mid; - else if (pfn >= memblock_end_pfn(mem, mid)) - left = mid + 1; - else - return 1; - } while (left < right); - return 0; + return memblock_is_memory(pfn << PAGE_SHIFT); } EXPORT_SYMBOL(pfn_valid); -- cgit v1.1 From dbe3039e64b1dd4cf26f782d45b524f85b444ad4 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 14:09:23 +1000 Subject: memblock/arm: Use memblock_region_is_memory() for omap fb Instead of the deprecated memblock_find() Signed-off-by: Benjamin Herrenschmidt --- arch/arm/plat-omap/fb.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 05bf228..441af2b 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -173,11 +173,7 @@ static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg, static int valid_sdram(unsigned long addr, unsigned long size) { - struct memblock_region res; - - res.base = addr; - res.size = size; - return !memblock_find(&res) && res.base == addr && res.size == size; + return memblock_region_is_memory(addr, size); } static int reserve_sdram(unsigned long addr, unsigned long size) -- cgit v1.1 From 76bfcc818f87d884e427908f5010128dae32d11a Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 14:13:06 +1000 Subject: memblock/microblaze: Use new accessors CC: Michal Simek Signed-off-by: Benjamin Herrenschmidt --- arch/microblaze/mm/init.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index afd6494..32a702b 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -70,16 +70,16 @@ static void __init paging_init(void) void __init setup_memory(void) { - int i; unsigned long map_size; + struct memblock_region *reg; + #ifndef CONFIG_MMU u32 kernel_align_start, kernel_align_size; /* Find main memory where is the kernel */ - for (i = 0; i < memblock.memory.cnt; i++) { - memory_start = (u32) memblock.memory.regions[i].base; - memory_end = (u32) memblock.memory.regions[i].base - + (u32) memblock.memory.region[i].size; + for_each_memblock(memory, reg) { + memory_start = (u32)reg->base; + memory_end 
= (u32) reg->base + reg->size; if ((memory_start <= (u32)_text) && ((u32)_text <= memory_end)) { memory_size = memory_end - memory_start; @@ -147,12 +147,10 @@ void __init setup_memory(void) free_bootmem(memory_start, memory_size); /* reserve allocate blocks */ - for (i = 0; i < memblock.reserved.cnt; i++) { - pr_debug("reserved %d - 0x%08x-0x%08x\n", i, - (u32) memblock.reserved.region[i].base, - (u32) memblock_size_bytes(&memblock.reserved, i)); - reserve_bootmem(memblock.reserved.region[i].base, - memblock_size_bytes(&memblock.reserved, i) - 1, BOOTMEM_DEFAULT); + for_each_memblock(reserved, reg) { + pr_debug("reserved - 0x%08x-0x%08x\n", + (u32) reg->base, (u32) reg->size); + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); } #ifdef CONFIG_MMU init_bootmem_done = 1; -- cgit v1.1 From 64106ca61cc73f3798497c76d2b7749b8e93ead4 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 14:11:04 +1000 Subject: memblock/sh: Use new accessors CC: Paul Mundt Signed-off-by: Benjamin Herrenschmidt --- arch/sh/mm/init.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index d0e2491..b977475 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -200,7 +200,6 @@ static void __init bootmem_init_one_node(unsigned int nid) unsigned long total_pages, paddr; unsigned long end_pfn; struct pglist_data *p; - int i; p = NODE_DATA(nid); @@ -226,11 +225,12 @@ static void __init bootmem_init_one_node(unsigned int nid) * reservations in other nodes. */ if (nid == 0) { + struct memblock_region *reg; + /* Reserve the sections we're already using. */ - for (i = 0; i < memblock.reserved.cnt; i++) - reserve_bootmem(memblock.reserved.region[i].base, - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); + for_each_memblock(reserved, reg) { + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + } } sparse_memory_present_with_active_regions(nid); @@ -238,13 +238,14 @@ static void __init bootmem_init_one_node(unsigned int nid) static void __init do_init_bootmem(void) { + struct memblock_region *reg; int i; /* Add active regions with valid PFNs. */ - for (i = 0; i < memblock.memory.cnt; i++) { + for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); + start_pfn = memblock_region_base_pfn(reg); + end_pfn = memblock_region_end_pfn(reg); __add_active_range(0, start_pfn, end_pfn); } -- cgit v1.1 From 08b84798819e54ba1d29be71a52afbb697c7b666 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 13:43:31 +1000 Subject: memblock/sparc: Use new accessors CC: David S. 
Miller Signed-off-by: Benjamin Herrenschmidt --- arch/sparc/mm/init_64.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 16d8bee..dd68025 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -972,13 +972,13 @@ int of_node_to_nid(struct device_node *dp) static void __init add_node_ranges(void) { - int i; + struct memblock_region *reg; - for (i = 0; i < memblock.memory.cnt; i++) { - unsigned long size = memblock_size_bytes(&memblock.memory, i); + for_each_memblock(memory, reg) { + unsigned long size = reg->size; unsigned long start, end; - start = memblock.memory.regions[i].base; + start = reg->base; end = start + size; while (start < end) { unsigned long this_end; @@ -1281,7 +1281,7 @@ static void __init bootmem_init_nonnuma(void) { unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); - unsigned int i; + struct memblock_region *reg; numadbg("bootmem_init_nonnuma()\n"); @@ -1292,15 +1292,14 @@ static void __init bootmem_init_nonnuma(void) init_node_masks_nonnuma(); - for (i = 0; i < memblock.memory.cnt; i++) { - unsigned long size = memblock_size_bytes(&memblock.memory, i); + for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - if (!size) + if (!reg->size) continue; - start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); + start_pfn = memblock_region_base_pfn(reg); + end_pfn = memblock_region_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } @@ -1334,17 +1333,12 @@ static void __init reserve_range_in_node(int nid, unsigned long start, static void __init trim_reserved_in_node(int nid) { - int i; + struct memblock_region *reg; numadbg(" trim_reserved_in_node(%d)\n", nid); - for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long start = memblock.reserved.regions[i].base; - unsigned long size = memblock_size_bytes(&memblock.reserved, i); - unsigned long end = start + size; - - reserve_range_in_node(nid, start, end); - } + for_each_memblock(reserved, reg) + reserve_range_in_node(nid, reg->base, reg->base + reg->size); } static void __init bootmem_init_one_node(int nid) -- cgit v1.1 From 28be7072ce54b82642ebff6a80d474d4c6a6a7fd Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 4 Aug 2010 13:43:53 +1000 Subject: memblock/powerpc: Use new accessors Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 8 ++-- arch/powerpc/mm/mem.c | 92 +++++++++++++++-------------------------- arch/powerpc/mm/numa.c | 17 ++++---- 3 files changed, 46 insertions(+), 71 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b1a3784..4072b87 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -588,7 +588,7 @@ static void __init htab_initialize(void) unsigned long pteg_count; unsigned long prot; unsigned long base = 0, size = 0, limit; - int i; + struct memblock_region *reg; DBG(" -> htab_initialize()\n"); @@ -659,9 +659,9 @@ static void __init htab_initialize(void) */ /* create bolted the linear mapping in the hash table */ - for (i=0; i < memblock.memory.cnt; i++) { - base = (unsigned long)__va(memblock.memory.regions[i].base); - size = memblock.memory.region[i].size; + for_each_memblock(memory, reg) { + base = (unsigned long)__va(reg->base); + size = reg->size; DBG("creating mapping for region: 
%lx..%lx (prot: %lx)\n", base, size, prot); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index a33f5c1..52df542 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn) return pfn < max_pfn; #else unsigned long paddr = (pfn << PAGE_SHIFT); - int i; - for (i=0; i < memblock.memory.cnt; i++) { - unsigned long base; + struct memblock_region *reg; - base = memblock.memory.regions[i].base; - - if ((paddr >= base) && - (paddr < (base + memblock.memory.regions[i].size))) { + for_each_memblock(memory, reg) + if (paddr >= reg->base && paddr < (reg->base + reg->size)) return 1; - } - } - return 0; #endif } @@ -149,23 +142,19 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, void *arg, int (*func)(unsigned long, unsigned long, void *)) { - struct memblock_region res; - unsigned long pfn, len; - u64 end; + struct memblock_region *reg; + unsigned long end_pfn = start_pfn + nr_pages; + unsigned long tstart, tend; int ret = -1; - res.base = (u64) start_pfn << PAGE_SHIFT; - res.size = (u64) nr_pages << PAGE_SHIFT; - - end = res.base + res.size - 1; - while ((res.base < end) && (memblock_find(&res) >= 0)) { - pfn = (unsigned long)(res.base >> PAGE_SHIFT); - len = (unsigned long)(res.size >> PAGE_SHIFT); - ret = (*func)(pfn, len, arg); + for_each_memblock(memory, reg) { + tstart = max(start_pfn, memblock_region_base_pfn(reg)); + tend = min(end_pfn, memblock_region_end_pfn(reg)); + if (tstart >= tend) + continue; + ret = (*func)(tstart, tend - tstart, arg); if (ret) break; - res.base += (res.size + 1); - res.size = (end - res.base + 1); } return ret; } @@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range); #ifndef CONFIG_NEED_MULTIPLE_NODES void __init do_init_bootmem(void) { - unsigned long i; unsigned long start, bootmap_pages; unsigned long total_pages; + struct memblock_region *reg; int boot_mapsize; max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; @@ -204,10 +193,10 @@ void __init do_init_bootmem(void) boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); /* Add active regions with valid PFNs */ - for (i = 0; i < memblock.memory.cnt; i++) { + for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock.memory.regions[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); + start_pfn = memblock_region_base_pfn(reg); + end_pfn = memblock_region_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } @@ -218,29 +207,21 @@ void __init do_init_bootmem(void) free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); /* reserve the sections we're already using */ - for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long addr = memblock.reserved.regions[i].base + - memblock_size_bytes(&memblock.reserved, i) - 1; - if (addr < lowmem_end_addr) - reserve_bootmem(memblock.reserved.regions[i].base, - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); - else if (memblock.reserved.regions[i].base < lowmem_end_addr) { - unsigned long adjusted_size = lowmem_end_addr - - memblock.reserved.regions[i].base; - reserve_bootmem(memblock.reserved.regions[i].base, - adjusted_size, BOOTMEM_DEFAULT); + for_each_memblock(reserved, reg) { + unsigned long top = reg->base + reg->size - 1; + if (top < lowmem_end_addr) + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); + else if (reg->base < lowmem_end_addr) { + unsigned long trunc_size = lowmem_end_addr - reg->base; + 
reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); } } #else free_bootmem_with_active_regions(0, max_pfn); /* reserve the sections we're already using */ - for (i = 0; i < memblock.reserved.cnt; i++) - reserve_bootmem(memblock.reserved.regions[i].base, - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); - + for_each_memblock(reserved, reg) + reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); #endif /* XXX need to clip this if using highmem? */ sparse_memory_present_with_active_regions(0); @@ -251,22 +232,15 @@ void __init do_init_bootmem(void) /* mark pages that don't exist as nosave */ static int __init mark_nonram_nosave(void) { - unsigned long memblock_next_region_start_pfn, - memblock_region_max_pfn; - int i; - - for (i = 0; i < memblock.memory.cnt - 1; i++) { - memblock_region_max_pfn = - (memblock.memory.regions[i].base >> PAGE_SHIFT) + - (memblock.memory.regions[i].size >> PAGE_SHIFT); - memblock_next_region_start_pfn = - memblock.memory.regions[i+1].base >> PAGE_SHIFT; - - if (memblock_region_max_pfn < memblock_next_region_start_pfn) - register_nosave_region(memblock_region_max_pfn, - memblock_next_region_start_pfn); + struct memblock_region *reg, *prev = NULL; + + for_each_memblock(memory, reg) { + if (prev && + memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg)) + register_nosave_region(memblock_region_end_pfn(prev), + memblock_region_base_pfn(reg)); + prev = reg; } - return 0; } diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index aa731af..9ba9ba1 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -746,16 +746,17 @@ static void __init setup_nonnuma(void) unsigned long top_of_ram = memblock_end_of_DRAM(); unsigned long total_ram = memblock_phys_mem_size(); unsigned long start_pfn, end_pfn; - unsigned int i, nid = 0; + unsigned int nid = 0; + struct memblock_region *reg; printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram); printk(KERN_DEBUG "Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20); - for (i = 0; i < memblock.memory.cnt; ++i) { - start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; - end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); + for_each_memblock(memory, reg) { + start_pfn = memblock_region_base_pfn(reg); + end_pfn = memblock_region_end_pfn(reg); fake_numa_create_new_node(end_pfn, &nid); add_active_range(nid, start_pfn, end_pfn); @@ -891,11 +892,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = { static void mark_reserved_regions_for_nid(int nid) { struct pglist_data *node = NODE_DATA(nid); - int i; + struct memblock_region *reg; - for (i = 0; i < memblock.reserved.cnt; i++) { - unsigned long physbase = memblock.reserved.region[i].base; - unsigned long size = memblock.reserved.region[i].size; + for_each_memblock(reserved, reg) { + unsigned long physbase = reg->base; + unsigned long size = reg->size; unsigned long start_pfn = physbase >> PAGE_SHIFT; unsigned long end_pfn = PFN_UP(physbase + size); struct node_active_region node_ar; -- cgit v1.1 From 719c1514f2fef5f01fcfa2bba81b7bb079c7c6a1 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 5 Aug 2010 12:55:55 +1000 Subject: memblock/arm: Use new accessors CC: Russell King Signed-off-by: Benjamin Herrenschmidt --- arch/arm/mm/init.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e739223..8504906 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ 
-150,6 +150,7 @@ static void __init find_limits(struct meminfo *mi, static void __init arm_bootmem_init(struct meminfo *mi, unsigned long start_pfn, unsigned long end_pfn) { + struct memblock_region *reg; unsigned int boot_pages; phys_addr_t bitmap; pg_data_t *pgdat; @@ -180,13 +181,13 @@ static void __init arm_bootmem_init(struct meminfo *mi, /* * Reserve the memblock reserved regions in bootmem. */ - for (i = 0; i < memblock.reserved.cnt; i++) { - phys_addr_t start = memblock_start_pfn(&memblock.reserved, i); - if (start >= start_pfn && - memblock_end_pfn(&memblock.reserved, i) <= end_pfn) + for_each_memblock(reserved, reg) { + phys_addr_t start = memblock_region_base_pfn(reg); + phys_addr_t end = memblock_region_end_pfn(reg); + if (start >= start_pfn && end <= end_pfn) reserve_bootmem_node(pgdat, __pfn_to_phys(start), - memblock_size_bytes(&memblock.reserved, i), - BOOTMEM_DEFAULT); + (end - start) << PAGE_SHIFT, + BOOTMEM_DEFAULT); } } @@ -247,10 +248,12 @@ static void arm_memory_present(void) #else static void arm_memory_present(void) { + struct memblock_region *reg; int i; - for (i = 0; i < memblock.memory.cnt; i++) - memory_present(0, memblock_start_pfn(&memblock.memory, i), - memblock_end_pfn(&memblock.memory, i)); + + for_each_memblock(memory, reg) { + memory_present(0, memblock_region_base_pfn(reg), + memblock_region_end_pfn(reg)); } #endif -- cgit v1.1 From 35a1f0bd07015dde66501b47cfb6ddc72ebe7346 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:38:58 -0700 Subject: memblock: Remove nid_range argument, arch provides memblock_nid_range() instead Signed-off-by: Benjamin Herrenschmidt --- arch/sparc/mm/init_64.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index dd68025..0883113 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -785,8 +785,7 @@ static int find_node(unsigned long addr) return -1; } -static unsigned long long nid_range(unsigned long long start, - unsigned long long end, int *nid) +u64 memblock_nid_range(u64 start, u64 end, int *nid) { *nid = find_node(start); start += PAGE_SIZE; @@ -804,8 +803,7 @@ static unsigned long long nid_range(unsigned long long start, return start; } #else -static unsigned long long nid_range(unsigned long long start, - unsigned long long end, int *nid) +u64 memblock_nid_range(u64 start, u64 end, int *nid) { *nid = 0; return end; @@ -822,8 +820,7 @@ static void __init allocate_node_data(int nid) struct pglist_data *p; #ifdef CONFIG_NEED_MULTIPLE_NODES - paddr = memblock_alloc_nid(sizeof(struct pglist_data), - SMP_CACHE_BYTES, nid, nid_range); + paddr = memblock_alloc_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); if (!paddr) { prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); prom_halt(); @@ -843,8 +840,7 @@ static void __init allocate_node_data(int nid) if (p->node_spanned_pages) { num_pages = bootmem_bootmap_pages(p->node_spanned_pages); - paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid, - nid_range); + paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid); if (!paddr) { prom_printf("Cannot allocate bootmap for nid[%d]\n", nid); @@ -984,7 +980,7 @@ static void __init add_node_ranges(void) unsigned long this_end; int nid; - this_end = nid_range(start, end, &nid); + this_end = memblock_nid_range(start, end, &nid); numadbg("Adding active range nid[%d] " "start[%lx] end[%lx]\n", @@ -1317,7 +1313,7 @@ static void __init 
reserve_range_in_node(int nid, unsigned long start, unsigned long this_end; int n; - this_end = nid_range(start, end, &n); + this_end = memblock_nid_range(start, end, &n); if (n == nid) { numadbg(" MATCH reserving range [%lx:%lx]\n", start, this_end); -- cgit v1.1 From 27f574c223d2c09610058b3ec7a29582d63a3e06 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:00 -0700 Subject: memblock: Expose MEMBLOCK_ALLOC_ANYWHERE Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/hash_utils_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 4072b87..a542ff5 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -625,7 +625,7 @@ static void __init htab_initialize(void) if (machine_is(cell)) limit = 0x80000000; else - limit = 0; + limit = MEMBLOCK_ALLOC_ANYWHERE; table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); -- cgit v1.1 From e63075a3c9377536d085bc013cd3fe6323162449 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:01 -0700 Subject: memblock: Introduce default allocation limit and use it to replace explicit ones This introduce memblock.current_limit which is used to limit allocations from memblock_alloc() or memblock_alloc_base(..., MEMBLOCK_ALLOC_ACCESSIBLE). The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can still be used with memblock_alloc_base() to allocate really anywhere. It is -no-longer- cropped to MEMBLOCK_REAL_LIMIT which disappears. Note to archs: I'm leaving the default limit to MEMBLOCK_ALLOC_ANYWHERE. I strongly recommend that you ensure that you set an appropriate limit during boot in order to guarantee that an memblock_alloc() at any time results in something that is accessible with a simple __va(). The reason is that a subsequent patch will introduce the ability for the array to resize itself by reallocating itself. The MEMBLOCK core will honor the current limit when performing those allocations. Signed-off-by: Benjamin Herrenschmidt --- arch/microblaze/include/asm/memblock.h | 3 --- arch/powerpc/include/asm/memblock.h | 7 ------- arch/powerpc/kernel/prom.c | 20 +++++++++++++++++++- arch/powerpc/kernel/setup_32.c | 2 +- arch/powerpc/mm/40x_mmu.c | 5 +++-- arch/powerpc/mm/fsl_booke_mmu.c | 3 ++- arch/powerpc/mm/hash_utils_64.c | 3 ++- arch/powerpc/mm/init_32.c | 29 +++++++---------------------- arch/powerpc/mm/ppc_mmu_32.c | 3 +-- arch/powerpc/mm/tlb_nohash.c | 2 ++ arch/sh/include/asm/memblock.h | 2 -- arch/sparc/include/asm/memblock.h | 2 -- 12 files changed, 37 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/include/asm/memblock.h b/arch/microblaze/include/asm/memblock.h index f9c2fa3..20a8e25 100644 --- a/arch/microblaze/include/asm/memblock.h +++ b/arch/microblaze/include/asm/memblock.h @@ -9,9 +9,6 @@ #ifndef _ASM_MICROBLAZE_MEMBLOCK_H #define _ASM_MICROBLAZE_MEMBLOCK_H -/* MEMBLOCK limit is OFF */ -#define MEMBLOCK_REAL_LIMIT 0xFFFFFFFF - #endif /* _ASM_MICROBLAZE_MEMBLOCK_H */ diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h index 3c29728..43efc34 100644 --- a/arch/powerpc/include/asm/memblock.h +++ b/arch/powerpc/include/asm/memblock.h @@ -5,11 +5,4 @@ #define MEMBLOCK_DBG(fmt...) 
udbg_printf(fmt) -#ifdef CONFIG_PPC32 -extern phys_addr_t lowmem_end_addr; -#define MEMBLOCK_REAL_LIMIT lowmem_end_addr -#else -#define MEMBLOCK_REAL_LIMIT 0 -#endif - #endif /* _ASM_POWERPC_MEMBLOCK_H */ diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index fed9bf6..3aec0b9 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -98,7 +98,7 @@ static void __init move_device_tree(void) if ((memory_limit && (start + size) > memory_limit) || overlaps_crashkernel(start, size)) { - p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size)); + p = __va(memblock_alloc(size, PAGE_SIZE)); memcpy(p, initial_boot_params, size); initial_boot_params = (struct boot_param_header *)p; DBG("Moved device tree to 0x%p\n", p); @@ -655,6 +655,21 @@ static void __init phyp_dump_reserve_mem(void) static inline void __init phyp_dump_reserve_mem(void) {} #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ +static void set_boot_memory_limit(void) +{ +#ifdef CONFIG_PPC32 + /* 601 can only access 16MB at the moment */ + if (PVR_VER(mfspr(SPRN_PVR)) == 1) + memblock_set_current_limit(0x01000000); + /* 8xx can only access 8MB at the moment */ + else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) + memblock_set_current_limit(0x00800000); + else + memblock_set_current_limit(0x10000000); +#else + memblock_set_current_limit(memblock.rmo_size); +#endif +} void __init early_init_devtree(void *params) { @@ -683,6 +698,7 @@ void __init early_init_devtree(void *params) /* Scan memory nodes and rebuild MEMBLOCKs */ memblock_init(); + of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); @@ -718,6 +734,8 @@ void __init early_init_devtree(void *params) DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); + set_boot_memory_limit(); + /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ move_device_tree(); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index a10ffc8..b7eb1de 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -246,7 +246,7 @@ static void __init irqstack_early_init(void) unsigned int i; /* interrupt stacks must be in lowmem, we get that for free on ppc32 - * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ + * as the memblock is limited to lowmem by default */ for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 1dc2fa5..58969b5 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,7 @@ #include #include #include + #include "mmu_decl.h" extern int __map_without_ltlbs; @@ -139,8 +141,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top) * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. 
*/ - - __initial_memory_limit_addr = memstart_addr + mapped; + memblock_set_current_limit(memstart_addr + mapped); return mapped; } diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index cdc7526..e525f86 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -212,5 +213,5 @@ void __init adjust_total_lowmem(void) pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, (unsigned int)((total_lowmem - __max_low_memory) >> 20)); - __initial_memory_limit_addr = memstart_addr + __max_low_memory; + memblock_set_current_limit(memstart_addr + __max_low_memory); } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index a542ff5..b05890e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -696,7 +696,8 @@ static void __init htab_initialize(void) #endif /* CONFIG_U3_DART */ BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), prot, mmu_linear_psize, mmu_kernel_ssize)); - } + } + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); /* * If we have a memory_limit and we've allocated TCEs then we need to diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 6a6975d..59b208b 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -92,12 +92,6 @@ int __allow_ioremap_reserved; unsigned long __max_low_memory = MAX_LOW_MEM; /* - * address of the limit of what is accessible with initial MMU setup - - * 256MB usually, but only 16MB on 601. - */ -phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000; - -/* * Check for command-line options that affect what MMU_init will do. */ void MMU_setup(void) @@ -126,13 +120,6 @@ void __init MMU_init(void) if (ppc_md.progress) ppc_md.progress("MMU:enter", 0x111); - /* 601 can only access 16MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 1) - __initial_memory_limit_addr = 0x01000000; - /* 8xx can only access 8MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) - __initial_memory_limit_addr = 0x00800000; - /* parse args from command line */ MMU_setup(); @@ -190,20 +177,18 @@ void __init MMU_init(void) #ifdef CONFIG_BOOTX_TEXT btext_unmap(); #endif + + /* Shortly after that, the entire linear mapping will be available */ + memblock_set_current_limit(lowmem_end_addr); } /* This is only called until mem_init is done. */ void __init *early_get_page(void) { - void *p; - - if (init_bootmem_done) { - p = alloc_bootmem_pages(PAGE_SIZE); - } else { - p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, - __initial_memory_limit_addr)); - } - return p; + if (init_bootmem_done) + return alloc_bootmem_pages(PAGE_SIZE); + else + return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); } /* Free up now-unused memory */ diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index f8a0182..7d34e17 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -223,8 +223,7 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. 
*/ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = __va(memblock_alloc_base(Hash_size, Hash_size, - __initial_memory_limit_addr)); + Hash = __va(memblock_alloc(Hash_size, Hash_size)); cacheable_memzero(Hash, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index d8695b0..7ba32e7 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -432,6 +432,8 @@ static void __early_init_mmu(int boot_cpu) * the MMU configuration */ mb(); + + memblock_set_current_limit(linear_map_top); } void __init early_init_mmu(void) diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h index dfe683b..e87063f 100644 --- a/arch/sh/include/asm/memblock.h +++ b/arch/sh/include/asm/memblock.h @@ -1,6 +1,4 @@ #ifndef __ASM_SH_MEMBLOCK_H #define __ASM_SH_MEMBLOCK_H -#define MEMBLOCK_REAL_LIMIT 0 - #endif /* __ASM_SH_MEMBLOCK_H */ diff --git a/arch/sparc/include/asm/memblock.h b/arch/sparc/include/asm/memblock.h index f12af88..c67b047 100644 --- a/arch/sparc/include/asm/memblock.h +++ b/arch/sparc/include/asm/memblock.h @@ -5,6 +5,4 @@ #define MEMBLOCK_DBG(fmt...) prom_printf(fmt) -#define MEMBLOCK_REAL_LIMIT 0 - #endif /* !(_SPARC64_MEMBLOCK_H) */ -- cgit v1.1 From cd3db0c4ca3d237e7ad20f7107216e575705d2b0 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:02 -0700 Subject: memblock: Remove rmo_size, bury it in arch/powerpc where it belongs The RMA (RMO is a misnomer) is a concept specific to ppc64 (in fact server ppc64, though I hijack it on embedded ppc64 for similar purposes) and represents the area of memory that can be accessed in real mode (aka with MMU off), or on embedded, from the exception vectors (which are bolted in the TLB) which pretty much boils down to the same thing. We take that out of the generic MEMBLOCK data structure and move it into arch/powerpc where it belongs, renaming it to "RMA" while at it. 
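For illustration, the server ppc64 side of this boils down to the shape below. This is a minimal sketch only; the hash-MMU hunk further down is the authoritative version, including the rationale for the 1G clamp:

	u64 ppc64_rma_size;

	void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					phys_addr_t first_memblock_size)
	{
		/* We don't currently support the first MEMBLOCK not mapping 0 */
		BUG_ON(first_memblock_base != 0);

		/* Clamp the RMA to 1G to avoid funky things such as RTAS bugs */
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

		/* Finally limit subsequent allocations */
		memblock_set_current_limit(ppc64_rma_size);
	}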
Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/mmu.h | 12 ++++++++++++ arch/powerpc/kernel/head_40x.S | 6 +----- arch/powerpc/kernel/paca.c | 2 +- arch/powerpc/kernel/prom.c | 29 ++++++++--------------------- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/setup_64.c | 2 +- arch/powerpc/mm/40x_mmu.c | 14 +++++++++++++- arch/powerpc/mm/44x_mmu.c | 14 ++++++++++++++ arch/powerpc/mm/fsl_booke_mmu.c | 9 +++++++++ arch/powerpc/mm/hash_utils_64.c | 22 +++++++++++++++++++++- arch/powerpc/mm/init_32.c | 14 ++++++++++++++ arch/powerpc/mm/init_64.c | 1 + arch/powerpc/mm/ppc_mmu_32.c | 15 +++++++++++++++ arch/powerpc/mm/tlb_nohash.c | 14 ++++++++++++++ 14 files changed, 125 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 7ebf42e..bb40a06 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -2,6 +2,8 @@ #define _ASM_POWERPC_MMU_H_ #ifdef __KERNEL__ +#include + #include #include @@ -82,6 +84,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; extern void early_init_mmu(void); extern void early_init_mmu_secondary(void); +extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size); + +#ifdef CONFIG_PPC64 +/* This is our real memory area size on ppc64 server, on embedded, we + * make it match the size our of bolted TLB area + */ +extern u64 ppc64_rma_size; +#endif /* CONFIG_PPC64 */ + #endif /* !__ASSEMBLY__ */ /* The kernel use the constants below to index in the page sizes array. diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a90625f..8278e8b 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -923,11 +923,7 @@ initial_mmu: mtspr SPRN_PID,r0 sync - /* Configure and load two entries into TLB slots 62 and 63. - * In case we are pinning TLBs, these are reserved in by the - * other TLB functions. If not reserving, then it doesn't - * matter where they are loaded. - */ + /* Configure and load one entry into TLB slots 63 */ clrrwi r4,r4,10 /* Mask off the real page number */ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 139a773..b9ffd7d 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -117,7 +117,7 @@ void __init allocate_pacas(void) * the first segment. On iSeries they must be within the area mapped * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. 
*/ - limit = min(0x10000000ULL, memblock.rmo_size); + limit = min(0x10000000ULL, ppc64_rma_size); if (firmware_has_feature(FW_FEATURE_ISERIES)) limit = min(limit, HvPagesToMap * HVPAGESIZE); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 3aec0b9..c3c6a88 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -66,6 +66,7 @@ int __initdata iommu_is_off; int __initdata iommu_force_on; unsigned long tce_alloc_start, tce_alloc_end; +u64 ppc64_rma_size; #endif static int __init early_parse_mem(char *p) @@ -492,7 +493,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node, void __init early_init_dt_add_memory_arch(u64 base, u64 size) { -#if defined(CONFIG_PPC64) +#ifdef CONFIG_PPC64 if (iommu_is_off) { if (base >= 0x80000000ul) return; @@ -501,9 +502,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) } #endif - memblock_add(base, size); - + /* First MEMBLOCK added, do some special initializations */ + if (memstart_addr == ~(phys_addr_t)0) + setup_initial_memory_limit(base, size); memstart_addr = min((u64)memstart_addr, base); + + /* Add the chunk to the MEMBLOCK list */ + memblock_add(base, size); } u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) @@ -655,22 +660,6 @@ static void __init phyp_dump_reserve_mem(void) static inline void __init phyp_dump_reserve_mem(void) {} #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ -static void set_boot_memory_limit(void) -{ -#ifdef CONFIG_PPC32 - /* 601 can only access 16MB at the moment */ - if (PVR_VER(mfspr(SPRN_PVR)) == 1) - memblock_set_current_limit(0x01000000); - /* 8xx can only access 8MB at the moment */ - else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) - memblock_set_current_limit(0x00800000); - else - memblock_set_current_limit(0x10000000); -#else - memblock_set_current_limit(memblock.rmo_size); -#endif -} - void __init early_init_devtree(void *params) { phys_addr_t limit; @@ -734,8 +723,6 @@ void __init early_init_devtree(void *params) DBG("Phys. mem: %llx\n", memblock_phys_mem_size()); - set_boot_memory_limit(); - /* We may need to relocate the flat tree, do it now. * FIXME .. and the initrd too? */ move_device_tree(); diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index d0516db..1662777 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -934,7 +934,7 @@ void __init rtas_initialize(void) */ #ifdef CONFIG_PPC64 if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { - rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); + rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); ibm_suspend_me_token = rtas_token("ibm,suspend-me"); } #endif diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index d135f93..4360944 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -487,7 +487,7 @@ static void __init emergency_stack_init(void) * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. */ - limit = min(slb0_limit(), memblock.rmo_size); + limit = min(slb0_limit(), ppc64_rma_size); for_each_possible_cpu(i) { unsigned long sp; diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 58969b5..5810967 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -141,7 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top) * coverage with normal-sized pages (or other reasons) do not * attempt to allocate outside the allowed range. 
*/ - memblock_set_current_limit(memstart_addr + mapped); + memblock_set_current_limit(mapped); return mapped; } + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 40x can only access 16MB at the moment (see head_40x.S) */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); +} diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index d8c6efb..024acab 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -24,6 +24,8 @@ */ #include +#include + #include #include #include @@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top) return total_lowmem; } +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 44x has a 256M TLB entry pinned at boot */ + memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE)); +} + #ifdef CONFIG_SMP void __cpuinit mmu_init_secondary(int cpu) { diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index e525f86..0be8fe2 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -215,3 +215,12 @@ void __init adjust_total_lowmem(void) memblock_set_current_limit(memstart_addr + __max_low_memory); } + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + phys_addr_t limit = first_memblock_base + first_memblock_size; + + /* 64M mapped initially according to head_fsl_booke.S */ + memblock_set_current_limit(min_t(u64, limit, 0x04000000)); +} diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index b05890e..83f534d 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -649,7 +649,7 @@ static void __init htab_initialize(void) #ifdef CONFIG_DEBUG_PAGEALLOC linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, - 1, memblock.rmo_size)); + 1, ppc64_rma_size)); memset(linear_map_hash_slots, 0, linear_map_hash_count); #endif /* CONFIG_DEBUG_PAGEALLOC */ @@ -1248,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable) local_irq_restore(flags); } #endif /* CONFIG_DEBUG_PAGEALLOC */ + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* On LPAR systems, the first entry is our RMA region, + * non-LPAR 64-bit hash MMU systems don't have a limitation + * on real mode access, but using the first entry works well + * enough. We also clamp it to 1G to avoid some funky things + * such as RTAS bugs etc... 
+ */ + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); + + /* Finally limit subsequent allocations */ + memblock_set_current_limit(ppc64_rma_size); +} diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 59b208b..742da43 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -237,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end) } #endif + +#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 8xx can only access 8MB at the moment */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); +} +#endif /* CONFIG_8xx */ diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 71f1415..9e081ff 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -328,3 +328,4 @@ int __meminit vmemmap_populate(struct page *start_page, return 0; } #endif /* CONFIG_SPARSEMEM_VMEMMAP */ + diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index 7d34e17..11571e1 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -271,3 +271,18 @@ void __init MMU_init_hw(void) if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); } + +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* We don't currently support the first MEMBLOCK not mapping 0 + * physical on those processors + */ + BUG_ON(first_memblock_base != 0); + + /* 601 can only access 16MB at the moment */ + if (PVR_VER(mfspr(SPRN_PVR)) == 1) + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); + else /* Anything else has 256M mapped */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); +} diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index 7ba32e7..a086ed5 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -446,4 +446,18 @@ void __cpuinit early_init_mmu_secondary(void) __early_init_mmu(0); } +void setup_initial_memory_limit(phys_addr_t first_memblock_base, + phys_addr_t first_memblock_size) +{ + /* On Embedded 64-bit, we adjust the RMA size to match + * the bolted TLB entry. We know for now that only 1G + * entries are supported though that may eventually + * change. We crop it to the size of the first MEMBLOCK to + * avoid going over total available memory just in case... + */ + ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); + + /* Finally limit subsequent allocations */ + memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size); +} #endif /* CONFIG_PPC64 */ -- cgit v1.1 From 4734b594c6ca1be796d30c82d93fdf5160f45124 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 28 Jul 2010 14:31:29 +1000 Subject: memblock: Remove memblock_type.size and add memblock.memory_size instead Right now, both the "memory" and "reserved" memblock_type structures have a "size" member. It represents the calculated memory size in the former case and is unused in the latter. 
This moves it out to the main memblock structure instead. Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/mm/mem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 52df542..f661f6c 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -301,7 +301,7 @@ void __init mem_init(void) swiotlb_init(1); #endif - num_physpages = memblock.memory.size >> PAGE_SHIFT; + num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); #ifdef CONFIG_NEED_MULTIPLE_NODES -- cgit v1.1 From 9d1e24928e6a0728d1c7c76818ccbd11b93e7ac9 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 6 Jul 2010 15:39:17 -0700 Subject: memblock: Separate memblock_alloc_nid() and memblock_alloc_try_nid() The former is now strict: it will fail if it cannot honor the allocation within the node, while the latter implements the previous semantic, which falls back to allocating anywhere. Signed-off-by: Benjamin Herrenschmidt --- arch/sparc/mm/init_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 0883113..dc584d2 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -820,7 +820,7 @@ static void __init allocate_node_data(int nid) struct pglist_data *p; #ifdef CONFIG_NEED_MULTIPLE_NODES - paddr = memblock_alloc_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); + paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid); if (!paddr) { prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); prom_halt(); @@ -840,7 +840,7 @@ static void __init allocate_node_data(int nid) if (p->node_spanned_pages) { num_pages = bootmem_bootmap_pages(p->node_spanned_pages); - paddr = memblock_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid); + paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid); if (!paddr) { prom_printf("Cannot allocate bootmap for nid[%d]\n", nid); -- cgit v1.1 From fb74fb6db91abc3c1ceeb9d2c17b44866a12c63e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: x86, memblock: Add memblock_x86_find_in_range_size() The size is returned according to the free range. Will be used to find free ranges for early_memtest and memory corruption check. Do not mess it up with lib/memblock.c yet. Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/memblock.h | 8 ++++ arch/x86/mm/Makefile | 2 + arch/x86/mm/memblock.c | 87 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) create mode 100644 arch/x86/include/asm/memblock.h create mode 100644 arch/x86/mm/memblock.c (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h new file mode 100644 index 0000000..c14219a --- /dev/null +++ b/arch/x86/include/asm/memblock.h @@ -0,0 +1,8 @@ +#ifndef _X86_MEMBLOCK_H +#define _X86_MEMBLOCK_H + +#define ARCH_DISCARD_MEMBLOCK + +u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); + +#endif diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index a4c7683..5554339 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -26,4 +26,6 @@ obj-$(CONFIG_NUMA) += numa.o numa_$(BITS).o obj-$(CONFIG_K8_NUMA) += k8topology_64.o obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o +obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o + obj-$(CONFIG_MEMTEST) += memtest.o diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c new file mode 100644 index 0000000..26ba462 --- /dev/null +++ b/arch/x86/mm/memblock.c @@ -0,0 +1,87 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +/* Check for already reserved areas */ +static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) +{ + struct memblock_region *r; + u64 addr = *addrp, last; + u64 size = *sizep; + bool changed = false; + +again: + last = addr + size; + for_each_memblock(reserved, r) { + if (last > r->base && addr < r->base) { + size = r->base - addr; + changed = true; + goto again; + } + if (last > (r->base + r->size) && addr < (r->base + r->size)) { + addr = round_up(r->base + r->size, align); + size = last - addr; + changed = true; + goto again; + } + if (last <= (r->base + r->size) && addr >= r->base) { + (*sizep)++; + return false; + } + } + if (changed) { + *addrp = addr; + *sizep = size; + } + return changed; +} + +static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start, + u64 *sizep, u64 align) +{ + u64 addr, last; + + addr = round_up(ei_start, align); + if (addr < start) + addr = round_up(start, align); + if (addr >= ei_last) + goto out; + *sizep = ei_last - addr; + while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) + ; + last = addr + *sizep; + if (last > ei_last) + goto out; + + return addr; + +out: + return MEMBLOCK_ERROR; +} + +/* + * Find next free range after start, and size is returned in *sizep + */ +u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) +{ + struct memblock_region *r; + + for_each_memblock(memory, r) { + u64 ei_start = r->base; + u64 ei_last = ei_start + r->size; + u64 addr; + + addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start, + sizep, align); + + if (addr != MEMBLOCK_ERROR) + return addr; + } + + return MEMBLOCK_ERROR; } -- cgit v1.1 From f88eff74aa848e58b1ea49768c0bbb874b31357f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: bootmem, x86: Add weak version of reserve_bootmem_generic It will be used for the memblock_x86_to_bootmem conversion. It is a wrapper for reserve_bootmem, and x86 64-bit is using a special one. Also clean up that version for x86_64. We don't need to take care of the NUMA path for that; bootmem can handle it anyhow. Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/mm/init_32.c | 6 ------ arch/x86/mm/init_64.c | 20 ++------------------ 2 files changed, 2 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index bca7909..90e0545 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -1069,9 +1069,3 @@ void mark_rodata_ro(void) #endif } #endif - -int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, - int flags) -{ - return reserve_bootmem(phys, len, flags); -} diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index ee41bba..634fa08 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -799,13 +799,10 @@ void mark_rodata_ro(void) #endif +#ifndef CONFIG_NO_BOOTMEM int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, int flags) { -#ifdef CONFIG_NUMA - int nid, next_nid; - int ret; -#endif unsigned long pfn = phys >> PAGE_SHIFT; if (pfn >= max_pfn) { @@ -821,21 +818,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, return -EFAULT; } - /* Should check here against the e820 map to avoid double free */ -#ifdef CONFIG_NUMA - nid = phys_to_nid(phys); - next_nid = phys_to_nid(phys + len - 1); - if (nid == next_nid) - ret = reserve_bootmem_node(NODE_DATA(nid), phys, len, flags); - else - ret = reserve_bootmem(phys, len, flags); - - if (ret != 0) - return ret; - -#else reserve_bootmem(phys, len, flags); -#endif if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { dma_reserve += len / PAGE_SIZE; @@ -844,6 +827,7 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, return 0; } +#endif int kern_addr_valid(unsigned long addr) { -- cgit v1.1 From 27de794365786b4cdc3461ed4e23af2a33f40612 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: x86, memblock: Add memblock_x86_to_bootmem() memblock_x86_to_bootmem() will reserve memblock.reserved.region in bootmem after bootmem is set up. We can use it with all arches that support memblock later. Signed-off-by: Yinghai Lu Signed-off-by: H.
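
The weak generic version this commit adds lives outside arch/ and so is not visible in the (arch-limited) diff above; presumably it is just the pass-through that was removed from init_32.c:

	/* assumed shape of the generic weak version (in mm/, not shown here) */
	int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
						  int flags)
	{
		return reserve_bootmem(phys, len, flags);
	}
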
Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index c14219a..69cf853 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -4,5 +4,6 @@ #define ARCH_DISCARD_MEMBLOCK u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); +void memblock_x86_to_bootmem(u64 start, u64 end); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 26ba462..8101084 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -85,3 +85,32 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) return MEMBLOCK_ERROR; } + +#ifndef CONFIG_NO_BOOTMEM +void __init memblock_x86_to_bootmem(u64 start, u64 end) +{ + int count; + u64 final_start, final_end; + struct memblock_region *r; + + /* Take out region array itself */ + memblock_free_reserved_regions(); + + count = memblock.reserved.cnt; + pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n", count, start, end - 1); + for_each_memblock(reserved, r) { + pr_info(" [%010llx-%010llx] ", (u64)r->base, (u64)r->base + r->size - 1); + final_start = max(start, r->base); + final_end = min(end, r->base + r->size); + if (final_start >= final_end) { + pr_cont("\n"); + continue; + } + pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1); + reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT); + } + + /* Put region array back ? */ + memblock_reserve_reserved_regions(); +} +#endif -- cgit v1.1 From 9dc5d569c133819c1ce069ebb1d771c62de32580 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:15 -0700 Subject: x86, memblock: Add memblock_x86_reserve_range/memblock_x86_free_range They are wrappers for the core versions, which take start/end/name instead of base/size. This will make the x86 conversion easier; more debug printouts could be added. -v2: change get_max_mapped() to memblock.default_alloc_limit, according to Michael Ellerman and Ben; change to memblock_x86_reserve_range and memblock_x86_free_range, according to Michael Ellerman -v3: call check_and_double after reserve/free, so we can avoid using find_memblock_area. Suggested by Michael Ellerman Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: H.
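
A hedged usage sketch for memblock_x86_to_bootmem(): after the bootmem bitmap has been set up, replay every memblock reservation into bootmem for the range bootmem manages. The call site and the low-memory limit below are assumptions, not part of this patch:

	/* after setup_bootmem_allocator() has created the bootmem bitmap */
	memblock_x86_to_bootmem(0, (u64)max_low_pfn << PAGE_SHIFT);
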
Peter Anvin --- arch/x86/include/asm/memblock.h | 3 +++ arch/x86/mm/memblock.c | 22 ++++++++++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 69cf853..e11ddf0 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -6,4 +6,7 @@ u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); void memblock_x86_to_bootmem(u64 start, u64 end); +void memblock_x86_reserve_range(u64 start, u64 end, char *name); +void memblock_x86_free_range(u64 start, u64 end); + #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 8101084..9829eaf1 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -114,3 +114,25 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) memblock_reserve_reserved_regions(); } #endif + +void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx]\n", start, end)) + return; + + memblock_reserve(start, end - start); +} + +void __init memblock_x86_free_range(u64 start, u64 end) +{ + if (start == end) + return; + + if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx]\n", start, end)) + return; + + memblock_free(start, end - start); +} -- cgit v1.1 From 4d5cf86ce187c0d3a4cdf233ab0cc6526ccbe01f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add get_free_all_memory_range() get_free_all_memory_range is for CONFIG_NO_BOOTMEM=y, and will be called by free_all_memory_core_early(). It will use early_node_map aka active ranges subtract memblock.reserved to get all free range, and those ranges will convert to slab pages. -v4: increase range size Signed-off-by: Yinghai Lu Cc: Jan Beulich Signed-off-by: H. 
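
The wrappers just added take a half-open [start, end) plus a label used only for debug output; a usage sketch in the shape the rest of this series applies, with the size and name illustrative:

	u64 size = 16 << 10;	/* illustrative */
	u64 mem = memblock_find_in_range(0, 1 << 20, size, PAGE_SIZE);

	if (mem == MEMBLOCK_ERROR)
		panic("cannot allocate low memory");
	memblock_x86_reserve_range(mem, mem + size, "EXAMPLE");
	/* ... and once the area is no longer needed ... */
	memblock_x86_free_range(mem, mem + size);

Taking start/end rather than base/size matches how most x86 call sites already compute their bounds, which is what keeps the later conversion mechanical.
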
Peter Anvin --- arch/x86/include/asm/memblock.h | 2 + arch/x86/mm/memblock.c | 98 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 99 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index e11ddf0..72639ce 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -8,5 +8,7 @@ void memblock_x86_to_bootmem(u64 start, u64 end); void memblock_x86_reserve_range(u64 start, u64 end, char *name); void memblock_x86_free_range(u64 start, u64 end); +struct range; +int get_free_all_memory_range(struct range **rangep, int nodeid); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 9829eaf1..b450060 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -86,7 +86,103 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) return MEMBLOCK_ERROR; } -#ifndef CONFIG_NO_BOOTMEM +static __init struct range *find_range_array(int count) +{ + u64 end, size, mem; + struct range *range; + + size = sizeof(struct range) * count; + end = memblock.current_limit; + + mem = memblock_find_in_range(0, end, size, sizeof(struct range)); + if (mem == MEMBLOCK_ERROR) + panic("can not find more space for range array"); + + /* + * This range is temporary, so don't reserve it; it will not be + * overlapped because we will not allocate a new buffer before + * we discard this one + */ + range = __va(mem); + memset(range, 0, size); + + return range; +} + +#ifdef CONFIG_NO_BOOTMEM +static void __init memblock_x86_subtract_reserved(struct range *range, int az) +{ + u64 final_start, final_end; + struct memblock_region *r; + + /* Take out region array itself at first */ + memblock_free_reserved_regions(); + + pr_info("Subtract (%ld early reservations)\n", memblock.reserved.cnt); + + for_each_memblock(reserved, r) { + pr_info(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); + final_start = PFN_DOWN(r->base); + final_end = PFN_UP(r->base + r->size); + if (final_start >= final_end) + continue; + subtract_range(range, az, final_start, final_end); + } + + /* Put region array back ? 
*/ + memblock_reserve_reserved_regions(); +} + +struct count_data { + int nr; +}; + +static int __init count_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + struct count_data *data = datax; + + data->nr++; + + return 0; +} + +static int __init count_early_node_map(int nodeid) +{ + struct count_data data; + + data.nr = 0; + work_with_active_regions(nodeid, count_work_fn, &data); + + return data.nr; +} + +int __init get_free_all_memory_range(struct range **rangep, int nodeid) +{ + int count; + struct range *range; + int nr_range; + + count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2; + + range = find_range_array(count); + nr_range = 0; + + /* + * Use early_node_map[] and memblock.reserved.region to get range array + * at first + */ + nr_range = add_from_early_node_map(range, count, nr_range, nodeid); +#ifdef CONFIG_X86_32 + subtract_range(range, count, max_low_pfn, -1ULL); +#endif + memblock_x86_subtract_reserved(range, count); + nr_range = clean_sort_range(range, count); + + *rangep = range; + return nr_range; +} +#else void __init memblock_x86_to_bootmem(u64 start, u64 end) { int count; -- cgit v1.1 From 88ba088c18457caaf8d2e5f8d36becc731a3d4f6 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add memblock_x86_register_active_regions() and memblock_x86_hole_size() memblock_x86_register_active_regions() will be used to fill early_node_map, the result will be memblock.memory.region AND numa data memblock_x86_hole_size will be used to find hole size on memblock.memory.region with specified range. Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 4 +++ arch/x86/mm/memblock.c | 66 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 72639ce..16af28d 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -11,4 +11,8 @@ void memblock_x86_free_range(u64 start, u64 end); struct range; int get_free_all_memory_range(struct range **rangep, int nodeid); +void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, + unsigned long last_pfn); +u64 memblock_x86_hole_size(u64 start, u64 end); + #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index b450060..53a7a5a 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -232,3 +232,69 @@ void __init memblock_x86_free_range(u64 start, u64 end) memblock_free(start, end - start); } + +/* + * Finds an active region in the address range from start_pfn to last_pfn and + * returns its range in ei_startpfn and ei_endpfn for the memblock entry. 
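
Per the commit message, get_free_all_memory_range() is consumed by free_all_memory_core_early() in mm/ when CONFIG_NO_BOOTMEM=y; a hedged sketch of that consumer, with the helper name and the page accounting invented for illustration:

	static u64 __init free_node_ranges_sketch(int nodeid)
	{
		struct range *range;
		int i, nr_range;
		u64 pages = 0;

		nr_range = get_free_all_memory_range(&range, nodeid);
		for (i = 0; i < nr_range; i++) {
			/* each span is a pfn range with no reservations left in it */
			pages += range[i].end - range[i].start;
			/* ... release [start, end) to the page allocator here ... */
		}
		return pages;
	}
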
+ */ +static int __init memblock_x86_find_active_region(const struct memblock_region *ei, + unsigned long start_pfn, + unsigned long last_pfn, + unsigned long *ei_startpfn, + unsigned long *ei_endpfn) +{ + u64 align = PAGE_SIZE; + + *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT; + *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT; + + /* Skip map entries smaller than a page */ + if (*ei_startpfn >= *ei_endpfn) + return 0; + + /* Skip if map is outside the node */ + if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn) + return 0; + + /* Check for overlaps */ + if (*ei_startpfn < start_pfn) + *ei_startpfn = start_pfn; + if (*ei_endpfn > last_pfn) + *ei_endpfn = last_pfn; + + return 1; +} + +/* Walk the memblock.memory map and register active regions within a node */ +void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn, + unsigned long last_pfn) +{ + unsigned long ei_startpfn; + unsigned long ei_endpfn; + struct memblock_region *r; + + for_each_memblock(memory, r) + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + add_active_range(nid, ei_startpfn, ei_endpfn); +} + +/* + * Find the hole size (in bytes) in the memory range. + * @start: starting address of the memory range to scan + * @end: ending address of the memory range to scan + */ +u64 __init memblock_x86_hole_size(u64 start, u64 end) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long last_pfn = end >> PAGE_SHIFT; + unsigned long ei_startpfn, ei_endpfn, ram = 0; + struct memblock_region *r; + + for_each_memblock(memory, r) + if (memblock_x86_find_active_region(r, start_pfn, last_pfn, + &ei_startpfn, &ei_endpfn)) + ram += ei_endpfn - ei_startpfn; + + return end - start - ((u64)ram << PAGE_SHIFT); +} -- cgit v1.1 From 6bcc8176d07f108da3b1af17fb2c0e82c80e948e Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add memblock_x86_find_in_range_node() It can be used to find NODE_DATA for numa. Need to make sure early_node_map[] is filled before it is called, otherwise it will fallback to memblock_find_in_range(), with node range. Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 16af28d..3a86b10 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -14,5 +14,6 @@ int get_free_all_memory_range(struct range **rangep, int nodeid); void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, unsigned long last_pfn); u64 memblock_x86_hole_size(u64 start, u64 end); +u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 53a7a5a..22ff0a3 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -234,6 +234,21 @@ void __init memblock_x86_free_range(u64 start, u64 end) } /* + * Need to call this function after memblock_x86_register_active_regions, + * so early_node_map[] is filled already. 
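
A hedged usage sketch tying the two helpers together — register a node's span from memblock.memory, then measure how much of an address window is not backed by RAM; the node and window below are illustrative:

	static void __init node0_ranges_sketch(void)
	{
		u64 hole;

		/* fill early_node_map[] for node 0 straight from memblock.memory */
		memblock_x86_register_active_regions(0, 0, max_pfn);

		/* bytes of [0, 4 GB) not covered by RAM, e.g. the PCI hole */
		hole = memblock_x86_hole_size(0, 4ULL << 30);
		pr_info("hole size: %llu bytes\n", (unsigned long long)hole);
	}
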
+ */ +u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align) +{ + u64 addr; + addr = find_memory_core_early(nid, size, align, start, end); + if (addr != MEMBLOCK_ERROR) + return addr; + + /* Fallback, should already have start end within node range */ + return memblock_find_in_range(start, end, size, align); +} + +/* * Finds an active region in the address range from start_pfn to last_pfn and * returns its range in ei_startpfn and ei_endpfn for the memblock entry. */ -- cgit v1.1 From b52c17ce854125700c4e19d4427d39bf2504ff63 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: x86, memblock: Add memblock_x86_free_memory_in_range() It will return free memory size in specified range. We can not use memory_size - reserved_size here, because some reserved area may not be in the scope of memblock.memory.region. Use memblock.memory.region subtracting memblock.reserved.region to get free range array. then count size of all free ranges. -v2: Ben insist on using _in_range Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 48 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 3a86b10..fc3c230 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -15,5 +15,6 @@ void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, unsigned long last_pfn); u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); +u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 22ff0a3..30d60cf 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -211,6 +211,54 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) } #endif +u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +{ + int i, count; + struct range *range; + int nr_range; + u64 final_start, final_end; + u64 free_size; + struct memblock_region *r; + + count = (memblock.reserved.cnt + memblock.memory.cnt) * 2; + + range = find_range_array(count); + nr_range = 0; + + addr = PFN_UP(addr); + limit = PFN_DOWN(limit); + + for_each_memblock(memory, r) { + final_start = PFN_UP(r->base); + final_end = PFN_DOWN(r->base + r->size); + if (final_start >= final_end) + continue; + if (final_start >= limit || final_end <= addr) + continue; + + nr_range = add_range(range, count, nr_range, final_start, final_end); + } + subtract_range(range, count, 0, addr); + subtract_range(range, count, limit, -1ULL); + for_each_memblock(reserved, r) { + final_start = PFN_DOWN(r->base); + final_end = PFN_UP(r->base + r->size); + if (final_start >= final_end) + continue; + if (final_start >= limit || final_end <= addr) + continue; + + subtract_range(range, count, final_start, final_end); + } + nr_range = clean_sort_range(range, count); + + free_size = 0; + for (i = 0; i < nr_range; i++) + free_size += range[i].end - range[i].start; + + return free_size << PAGE_SHIFT; +} + void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) { if (start == end) -- cgit v1.1 From e82d42be24bd5d75bf6f81045636e6ca95ab55f2 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86, memblock: Add memblock_x86_memory_in_range() It will 
return memory size in specified range according to memblock.memory.region Try to share some code with memblock_x86_free_memory_in_range() by passing get_free to __memblock_x86_memory_in_range(). -v2: Ben want _in_range in the name instead of size Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/memblock.h | 1 + arch/x86/mm/memblock.c | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index fc3c230..2c304bb 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -16,5 +16,6 @@ void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, u64 memblock_x86_hole_size(u64 start, u64 end); u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); +u64 memblock_x86_memory_in_range(u64 addr, u64 limit); #endif diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 30d60cf..32ddad5 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -211,7 +211,7 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) } #endif -u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) { int i, count; struct range *range; @@ -240,6 +240,10 @@ u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) } subtract_range(range, count, 0, addr); subtract_range(range, count, limit, -1ULL); + + /* Subtract memblock.reserved.region in range ? */ + if (!get_free) + goto sort_and_count_them; for_each_memblock(reserved, r) { final_start = PFN_DOWN(r->base); final_end = PFN_UP(r->base + r->size); @@ -250,6 +254,8 @@ u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) subtract_range(range, count, final_start, final_end); } + +sort_and_count_them: nr_range = clean_sort_range(range, count); free_size = 0; @@ -259,6 +265,16 @@ u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) return free_size << PAGE_SHIFT; } +u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit) +{ + return __memblock_x86_memory_in_range(addr, limit, true); +} + +u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit) +{ + return __memblock_x86_memory_in_range(addr, limit, false); +} + void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) { if (start == end) -- cgit v1.1 From 301ff3e88ef9ff4bdb92f36a3e6170fce4c9dd34 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86, memblock: Use memblock_debug to control debug message print out Also let memblock_x86_reserve_range/memblock_x86_free_range could print out name if memblock=debug is specified will also print ther name when reserve_memblock_area/free_memblock_area are called. -v2: according to Ingo, put " if (memblock_debug) " in one place Signed-off-by: Yinghai Lu Signed-off-by: H. 
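
With the refactor above, the two accounting helpers differ only in the get_free flag; a hedged usage sketch, window illustrative:

	u64 limit = (u64)max_pfn << PAGE_SHIFT;
	u64 total = memblock_x86_memory_in_range(0, limit);	/* all RAM */
	u64 free = memblock_x86_free_memory_in_range(0, limit);	/* RAM minus reservations */

Both round inward with PFN_UP()/PFN_DOWN(), so partial pages at range edges are never counted.
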
Peter Anvin --- arch/x86/mm/memblock.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 32ddad5..aaff393 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -118,10 +118,10 @@ static void __init memblock_x86_subtract_reserved(struct range *range, int az) /* Take out region array itself at first*/ memblock_free_reserved_regions(); - pr_info("Subtract (%ld early reservations)\n", memblock.reserved.cnt); + memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt); for_each_memblock(reserved, r) { - pr_info(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); + memblock_dbg(" [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1); final_start = PFN_DOWN(r->base); final_end = PFN_UP(r->base + r->size); if (final_start >= final_end) @@ -193,16 +193,16 @@ void __init memblock_x86_to_bootmem(u64 start, u64 end) memblock_free_reserved_regions(); count = memblock.reserved.cnt; - pr_info("(%d early reservations) ==> bootmem [%010llx-%010llx]\n", count, start, end - 1); + memblock_dbg("(%d early reservations) ==> bootmem [%#010llx-%#010llx]\n", count, start, end - 1); for_each_memblock(reserved, r) { - pr_info(" [%010llx-%010llx] ", (u64)r->base, (u64)r->base + r->size - 1); + memblock_dbg(" [%#010llx-%#010llx] ", (u64)r->base, (u64)r->base + r->size - 1); final_start = max(start, r->base); final_end = min(end, r->base + r->size); if (final_start >= final_end) { - pr_cont("\n"); + memblock_dbg("\n"); continue; } - pr_cont(" ==> [%010llx-%010llx]\n", final_start, final_end - 1); + memblock_dbg(" ==> [%#010llx-%#010llx]\n", final_start, final_end - 1); reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT); } @@ -280,9 +280,11 @@ void __init memblock_x86_reserve_range(u64 start, u64 end, char *name) if (start == end) return; - if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx]\n", start, end)) + if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end)) return; + memblock_dbg(" memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name); + memblock_reserve(start, end - start); } @@ -291,9 +293,11 @@ void __init memblock_x86_free_range(u64 start, u64 end) if (start == end) return; - if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx]\n", start, end)) + if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end)) return; + memblock_dbg(" memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1); + memblock_free(start, end - start); } -- cgit v1.1 From 72d7c3b33c980843e756681fb4867dc1efd62a76 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86: Use memblock to replace early_res 1. replace find_e820_area with memblock_find_in_range 2. replace reserve_early with memblock_x86_reserve_range 3. replace free_early with memblock_x86_free_range. 4. NO_BOOTMEM will switch to use memblock too. 5. use _e820, _early wrap in the patch, in following patch, will replace them all 6. because memblock_x86_free_range support partial free, we can remove some special care 7. 
Need to make sure that memblock_find_in_range() is called after memblock_x86_fill() so adjust some calling later in setup.c::setup_arch() -- corruption_check and mptable_update -v2: Move reserve_brk() early Before fill_memblock_area, to avoid overlap between brk and memblock_find_in_range() that could happen We have more then 128 RAM entry in E820 tables, and memblock_x86_fill() could use memblock_find_in_range() to find a new place for memblock.memory.region array. and We don't need to use extend_brk() after fill_memblock_area() So move reserve_brk() early before fill_memblock_area(). -v3: Move find_smp_config early To make sure memblock_find_in_range not find wrong place, if BIOS doesn't put mptable in right place. -v4: Treat RESERVED_KERN as RAM in memblock.memory. and they are already in memblock.reserved already.. use __NOT_KEEP_MEMBLOCK to make sure memblock related code could be freed later. -v5: Generic version __memblock_find_in_range() is going from high to low, and for 32bit active_region for 32bit does include high pages need to replace the limit with memblock.default_alloc_limit, aka get_max_mapped() -v6: Use current_limit instead -v7: check with MEMBLOCK_ERROR instead of -1ULL or -1L -v8: Set memblock_can_resize early to handle EFI with more RAM entries -v9: update after kmemleak changes in mainline Suggested-by: David S. Miller Suggested-by: Benjamin Herrenschmidt Suggested-by: Thomas Gleixner Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/Kconfig | 9 +-- arch/x86/include/asm/e820.h | 14 ++-- arch/x86/kernel/check.c | 16 +++-- arch/x86/kernel/e820.c | 159 ++++++++++++++--------------------------- arch/x86/kernel/head.c | 3 +- arch/x86/kernel/head32.c | 6 +- arch/x86/kernel/head64.c | 3 + arch/x86/kernel/mpparse.c | 5 +- arch/x86/kernel/setup.c | 46 ++++++++---- arch/x86/kernel/setup_percpu.c | 6 -- arch/x86/mm/numa_64.c | 9 +-- 11 files changed, 123 insertions(+), 153 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dcb0593..542bb26 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -27,6 +27,7 @@ config X86 select HAVE_PERF_EVENTS if (!M386 && !M486) select HAVE_IOREMAP_PROT select HAVE_KPROBES + select HAVE_MEMBLOCK select ARCH_WANT_OPTIONAL_GPIOLIB select ARCH_WANT_FRAME_POINTERS select HAVE_DMA_ATTRS @@ -195,9 +196,6 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y -config HAVE_EARLY_RES - def_bool y - config HAVE_INTEL_TXT def_bool y depends on EXPERIMENTAL && DMAR && ACPI @@ -590,14 +588,13 @@ config NO_BOOTMEM default y bool "Disable Bootmem code" ---help--- - Use early_res directly instead of bootmem before slab is ready. + Use memblock directly instead of bootmem before slab is ready. 
- allocator (buddy) [generic] - early allocator (bootmem) [generic] - - very early allocator (reserve_early*()) [x86] + - very early allocator (memblock) [some generic] - very very early allocator (early brk model) [x86] So reduce one layer between early allocator to final allocator - config MEMTEST bool "Memtest" ---help--- diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index ec8a52d..388fed2 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -117,24 +117,26 @@ extern unsigned long end_user_pfn; extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); -#include extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); -extern int e820_find_active_region(const struct e820entry *ei, - unsigned long start_pfn, - unsigned long last_pfn, - unsigned long *ei_startpfn, - unsigned long *ei_endpfn); extern void e820_register_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn); extern u64 e820_hole_size(u64 start, u64 end); + +extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); + +void memblock_x86_fill(void); + extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); extern void setup_memory_map(void); extern char *default_machine_specific_memory_setup(void); +void reserve_early(u64 start, u64 end, char *name); +void free_early(u64 start, u64 end); + /* * Returns true iff the specified range [s,e) is completely contained inside * the ISA region. diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index fc999e6..13a3891 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c @@ -2,7 +2,8 @@ #include #include #include -#include +#include + #include /* @@ -18,10 +19,12 @@ static int __read_mostly memory_corruption_check = -1; static unsigned __read_mostly corruption_check_size = 64*1024; static unsigned __read_mostly corruption_check_period = 60; /* seconds */ -static struct e820entry scan_areas[MAX_SCAN_AREAS]; +static struct scan_area { + u64 addr; + u64 size; +} scan_areas[MAX_SCAN_AREAS]; static int num_scan_areas; - static __init int set_corruption_check(char *arg) { char *end; @@ -81,9 +84,9 @@ void __init setup_bios_corruption_check(void) while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) { u64 size; - addr = find_e820_area_size(addr, &size, PAGE_SIZE); + addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE); - if (!(addr + 1)) + if (addr == MEMBLOCK_ERROR) break; if (addr >= corruption_check_size) @@ -92,7 +95,7 @@ void __init setup_bios_corruption_check(void) if ((addr + size) > corruption_check_size) size = corruption_check_size - addr; - e820_update_range(addr, size, E820_RAM, E820_RESERVED); + memblock_x86_reserve_range(addr, addr + size, "SCAN RAM"); scan_areas[num_scan_areas].addr = addr; scan_areas[num_scan_areas].size = size; num_scan_areas++; @@ -105,7 +108,6 @@ void __init setup_bios_corruption_check(void) printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); - update_e820(); } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0d6fc71..a9221d1 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -742,69 +743,29 @@ core_initcall(e820_mark_nvs_memory); */ u64 __init 
find_e820_area(u64 start, u64 end, u64 size, u64 align) { - int i; - - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - u64 addr; - u64 ei_start, ei_last; + u64 mem = memblock_find_in_range(start, end, size, align); - if (ei->type != E820_RAM) - continue; - - ei_last = ei->addr + ei->size; - ei_start = ei->addr; - addr = find_early_area(ei_start, ei_last, start, end, - size, align); - - if (addr != -1ULL) - return addr; - } - return -1ULL; -} + if (mem == MEMBLOCK_ERROR) + return -1ULL; -u64 __init find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align) -{ - return find_e820_area(start, end, size, align); + return mem; } -u64 __init get_max_mapped(void) -{ - u64 end = max_pfn_mapped; - - end <<= PAGE_SHIFT; - - return end; -} /* * Find next free range after *start */ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) { - int i; + u64 mem = memblock_x86_find_in_range_size(start, sizep, align); - for (i = 0; i < e820.nr_map; i++) { - struct e820entry *ei = &e820.map[i]; - u64 addr; - u64 ei_start, ei_last; - - if (ei->type != E820_RAM) - continue; - - ei_last = ei->addr + ei->size; - ei_start = ei->addr; - addr = find_early_area_size(ei_start, ei_last, start, - sizep, align); + if (mem == MEMBLOCK_ERROR) + return -1ULL - if (addr != -1ULL) - return addr; - } - - return -1ULL; + return mem; } /* - * pre allocated 4k and reserved it in e820 + * pre allocated 4k and reserved it in memblock and e820_saved */ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) { @@ -813,8 +774,8 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) u64 start; for (start = startt; ; start += size) { - start = find_e820_area_size(start, &size, align); - if (!(start + 1)) + start = memblock_x86_find_in_range_size(start, &size, align); + if (start == MEMBLOCK_ERROR) return 0; if (size >= sizet) break; @@ -830,10 +791,9 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) addr = round_down(start + size - sizet, align); if (addr < start) return 0; - e820_update_range(addr, sizet, E820_RAM, E820_RESERVED); + memblock_x86_reserve_range(addr, addr + sizet, "new next"); e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED); - printk(KERN_INFO "update e820 for early_reserve_e820\n"); - update_e820(); + printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); update_e820_saved(); return addr; @@ -895,52 +855,12 @@ unsigned long __init e820_end_of_low_ram_pfn(void) { return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); } -/* - * Finds an active region in the address range from start_pfn to last_pfn and - * returns its range in ei_startpfn and ei_endpfn for the e820 entry. 
- */ -int __init e820_find_active_region(const struct e820entry *ei, - unsigned long start_pfn, - unsigned long last_pfn, - unsigned long *ei_startpfn, - unsigned long *ei_endpfn) -{ - u64 align = PAGE_SIZE; - - *ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT; - *ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT; - - /* Skip map entries smaller than a page */ - if (*ei_startpfn >= *ei_endpfn) - return 0; - - /* Skip if map is outside the node */ - if (ei->type != E820_RAM || *ei_endpfn <= start_pfn || - *ei_startpfn >= last_pfn) - return 0; - - /* Check for overlaps */ - if (*ei_startpfn < start_pfn) - *ei_startpfn = start_pfn; - if (*ei_endpfn > last_pfn) - *ei_endpfn = last_pfn; - - return 1; -} /* Walk the e820 map and register active regions within a node */ void __init e820_register_active_regions(int nid, unsigned long start_pfn, unsigned long last_pfn) { - unsigned long ei_startpfn; - unsigned long ei_endpfn; - int i; - - for (i = 0; i < e820.nr_map; i++) - if (e820_find_active_region(&e820.map[i], - start_pfn, last_pfn, - &ei_startpfn, &ei_endpfn)) - add_active_range(nid, ei_startpfn, ei_endpfn); + memblock_x86_register_active_regions(nid, start_pfn, last_pfn); } /* @@ -950,18 +870,16 @@ void __init e820_register_active_regions(int nid, unsigned long start_pfn, */ u64 __init e820_hole_size(u64 start, u64 end) { - unsigned long start_pfn = start >> PAGE_SHIFT; - unsigned long last_pfn = end >> PAGE_SHIFT; - unsigned long ei_startpfn, ei_endpfn, ram = 0; - int i; + return memblock_x86_hole_size(start, end); +} - for (i = 0; i < e820.nr_map; i++) { - if (e820_find_active_region(&e820.map[i], - start_pfn, last_pfn, - &ei_startpfn, &ei_endpfn)) - ram += ei_endpfn - ei_startpfn; - } - return end - start - ((u64)ram << PAGE_SHIFT); +void reserve_early(u64 start, u64 end, char *name) +{ + memblock_x86_reserve_range(start, end, name); +} +void free_early(u64 start, u64 end) +{ + memblock_x86_free_range(start, end); } static void early_panic(char *msg) @@ -1210,3 +1128,32 @@ void __init setup_memory_map(void) printk(KERN_INFO "BIOS-provided physical RAM map:\n"); e820_print_map(who); } + +void __init memblock_x86_fill(void) +{ + int i; + u64 end; + + /* + * EFI may have more than 128 entries + * We are safe to enable resizing, beause memblock_x86_fill() + * is rather later for x86 + */ + memblock_can_resize = 1; + + for (i = 0; i < e820.nr_map; i++) { + struct e820entry *ei = &e820.map[i]; + + end = ei->addr + ei->size; + if (end != (resource_size_t)end) + continue; + + if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN) + continue; + + memblock_add(ei->addr, ei->size); + } + + memblock_analyze(); + memblock_dump_all(); +} diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c index 3e66bd3..af0699b 100644 --- a/arch/x86/kernel/head.c +++ b/arch/x86/kernel/head.c @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -51,5 +52,5 @@ void __init reserve_ebda_region(void) lowmem = 0x9f000; /* reserve all memory between lowmem and the 1MB mark */ - reserve_early_overlap_ok(lowmem, 0x100000, "BIOS reserved"); + memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved"); } diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index b2e2460..da60aa8 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -30,14 +31,15 @@ static void __init i386_default_early_setup(void) void __init i386_start_kernel(void) { + memblock_init(); + #ifdef 
CONFIG_X86_TRAMPOLINE /* * But first pinch a few for the stack/trampoline stuff * FIXME: Don't need the extra page at 4K, but need to fix * trampoline before removing it. (see the GDT stuff) */ - reserve_early_overlap_ok(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, - "EX TRAMPOLINE"); + memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); #endif reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7147143..8ee930f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -98,6 +99,8 @@ void __init x86_64_start_reservations(char *real_mode_data) { copy_bootdata(__va(real_mode_data)); + memblock_init(); + reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index d86dbf7..8252545 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -641,7 +642,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf) { unsigned long size = get_mpc_size(mpf->physptr); - reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); + memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc"); } static int __init smp_scan_config(unsigned long base, unsigned long length) @@ -670,7 +671,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) mpf, (u64)virt_to_phys(mpf)); mem = virt_to_phys(mpf); - reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); + memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf"); if (mpf->physptr) smp_reserve_memory(mpf); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b4ae4ac..bbe0aaf 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -614,7 +615,7 @@ static __init void reserve_ibft_region(void) addr = find_ibft_region(&size); if (size) - reserve_early_overlap_ok(addr, addr + size, "ibft"); + memblock_x86_reserve_range(addr, addr + size, "* ibft"); } #ifdef CONFIG_X86_RESERVE_LOW_64K @@ -708,6 +709,15 @@ static void __init trim_bios_range(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } +static u64 __init get_max_mapped(void) +{ + u64 end = max_pfn_mapped; + + end <<= PAGE_SHIFT; + + return end; +} + /* * Determine if we were loaded by an EFI loader. 
If so, then we have also been * passed the efi memmap, systab, etc., so we should use these data structures @@ -891,8 +901,6 @@ void __init setup_arch(char **cmdline_p) */ max_pfn = e820_end_of_ram_pfn(); - /* preallocate 4k for mptable mpc */ - early_reserve_e820_mpc_new(); /* update e820 for memory not covered by WB MTRRs */ mtrr_bp_init(); if (mtrr_trim_uncached_memory(max_pfn)) @@ -917,15 +925,6 @@ void __init setup_arch(char **cmdline_p) max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; #endif -#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION - setup_bios_corruption_check(); -#endif - - printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", - max_pfn_mapped< #include #include +#include #include #include #include @@ -171,8 +172,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start, if (start < (MAX_DMA32_PFN< (MAX_DMA32_PFN< Date: Wed, 25 Aug 2010 13:39:17 -0700 Subject: x86, memblock: Replace e820_/_early string with memblock_ 1.include linux/memblock.h directly. so later could reduce e820.h reference. 2 this patch is done by sed scripts mainly -v2: use MEMBLOCK_ERROR instead of -1ULL or -1UL Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/efi.h | 2 +- arch/x86/kernel/acpi/sleep.c | 9 +++++---- arch/x86/kernel/apic/numaq_32.c | 3 ++- arch/x86/kernel/efi.c | 5 +++-- arch/x86/kernel/head32.c | 4 ++-- arch/x86/kernel/head64.c | 4 ++-- arch/x86/kernel/setup.c | 29 ++++++++++++++--------------- arch/x86/kernel/trampoline.c | 10 +++++----- arch/x86/mm/init.c | 10 ++++++---- arch/x86/mm/init_32.c | 14 ++++++++------ arch/x86/mm/init_64.c | 11 ++++++----- arch/x86/mm/k8topology_64.c | 4 +++- arch/x86/mm/memtest.c | 7 +++---- arch/x86/mm/numa_32.c | 25 +++++++++++++------------ arch/x86/mm/numa_64.c | 34 +++++++++++++++++----------------- arch/x86/mm/srat_32.c | 3 ++- arch/x86/mm/srat_64.c | 11 ++++++----- arch/x86/xen/mmu.c | 5 +++-- arch/x86/xen/setup.c | 3 ++- 19 files changed, 103 insertions(+), 90 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 8406ed7..8e4a165 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -90,7 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, #endif /* CONFIG_X86_32 */ extern int add_efi_memmap; -extern void efi_reserve_early(void); +extern void efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); extern void efi_call_phys_epilog(void); diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index fcc3c61..d829e75 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -125,7 +126,7 @@ void acpi_restore_state_mem(void) */ void __init acpi_reserve_wakeup_memory(void) { - unsigned long mem; + phys_addr_t mem; if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) { printk(KERN_ERR @@ -133,15 +134,15 @@ void __init acpi_reserve_wakeup_memory(void) return; } - mem = find_e820_area(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); + mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE); - if (mem == -1L) { + if (mem == MEMBLOCK_ERROR) { printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n"); return; } acpi_realmode = (unsigned long) phys_to_virt(mem); acpi_wakeup_address = mem; - reserve_early(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); + memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP"); } diff --git a/arch/x86/kernel/apic/numaq_32.c 
b/arch/x86/kernel/apic/numaq_32.c index 3e28401..960f26a 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -88,7 +89,7 @@ static inline void numaq_register_node(int node, struct sys_cfg_data *scd) node_end_pfn[node] = MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size); - e820_register_active_regions(node, node_start_pfn[node], + memblock_x86_register_active_regions(node, node_start_pfn[node], node_end_pfn[node]); memory_present(node, node_start_pfn[node], node_end_pfn[node]); diff --git a/arch/x86/kernel/efi.c b/arch/x86/kernel/efi.c index c2fa9b8..0fe27d7 100644 --- a/arch/x86/kernel/efi.c +++ b/arch/x86/kernel/efi.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -275,7 +276,7 @@ static void __init do_add_efi_memmap(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); } -void __init efi_reserve_early(void) +void __init efi_memblock_x86_reserve_range(void) { unsigned long pmap; @@ -290,7 +291,7 @@ void __init efi_reserve_early(void) boot_params.efi_info.efi_memdesc_size; memmap.desc_version = boot_params.efi_info.efi_memdesc_version; memmap.desc_size = boot_params.efi_info.efi_memdesc_size; - reserve_early(pmap, pmap + memmap.nr_map * memmap.desc_size, + memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size, "EFI memmap"); } diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index da60aa8..74e4cf6 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -42,7 +42,7 @@ void __init i386_start_kernel(void) memblock_x86_reserve_range(PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE"); #endif - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ @@ -51,7 +51,7 @@ void __init i386_start_kernel(void) u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); + memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8ee930f..97adf98 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -101,7 +101,7 @@ void __init x86_64_start_reservations(char *real_mode_data) memblock_init(); - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); #ifdef CONFIG_BLK_DEV_INITRD /* Reserve INITRD */ @@ -110,7 +110,7 @@ void __init x86_64_start_reservations(char *real_mode_data) unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); - reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); + memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK"); } #endif diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index bbe0aaf..a4f0173 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -302,7 +302,7 @@ static inline void init_gbpages(void) static void __init reserve_brk(void) { if (_brk_end > _brk_start) - reserve_early(__pa(_brk_start), __pa(_brk_end), "BRK"); + 
memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK"); /* Mark brk area as locked down and no longer taking any new allocations */ @@ -324,17 +324,16 @@ static void __init relocate_initrd(void) char *p, *q; /* We need to move the initrd down into lowmem */ - ramdisk_here = find_e820_area(0, end_of_lowmem, area_size, + ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size, PAGE_SIZE); - if (ramdisk_here == -1ULL) + if (ramdisk_here == MEMBLOCK_ERROR) panic("Cannot find place for new RAMDISK of size %lld\n", ramdisk_size); /* Note: this includes all the lowmem currently occupied by the initrd, we rely on that fact to keep the data intact. */ - reserve_early(ramdisk_here, ramdisk_here + area_size, - "NEW RAMDISK"); + memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK"); initrd_start = ramdisk_here + PAGE_OFFSET; initrd_end = initrd_start + ramdisk_size; printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", @@ -390,7 +389,7 @@ static void __init reserve_initrd(void) initrd_start = 0; if (ramdisk_size >= (end_of_lowmem>>1)) { - free_early(ramdisk_image, ramdisk_end); + memblock_x86_free_range(ramdisk_image, ramdisk_end); printk(KERN_ERR "initrd too large to handle, " "disabling initrd\n"); return; @@ -413,7 +412,7 @@ static void __init reserve_initrd(void) relocate_initrd(); - free_early(ramdisk_image, ramdisk_end); + memblock_x86_free_range(ramdisk_image, ramdisk_end); } #else static void __init reserve_initrd(void) @@ -469,7 +468,7 @@ static void __init e820_reserve_setup_data(void) e820_print_map("reserve setup_data"); } -static void __init reserve_early_setup_data(void) +static void __init memblock_x86_reserve_range_setup_data(void) { struct setup_data *data; u64 pa_data; @@ -481,7 +480,7 @@ static void __init reserve_early_setup_data(void) while (pa_data) { data = early_memremap(pa_data, sizeof(*data)); sprintf(buf, "setup data %x", data->type); - reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf); + memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf); pa_data = data->next; early_iounmap(data, sizeof(*data)); } @@ -519,23 +518,23 @@ static void __init reserve_crashkernel(void) if (crash_base <= 0) { const unsigned long long alignment = 16<<20; /* 16M */ - crash_base = find_e820_area(alignment, ULONG_MAX, crash_size, + crash_base = memblock_find_in_range(alignment, ULONG_MAX, crash_size, alignment); - if (crash_base == -1ULL) { + if (crash_base == MEMBLOCK_ERROR) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; } } else { unsigned long long start; - start = find_e820_area(crash_base, ULONG_MAX, crash_size, + start = memblock_find_in_range(crash_base, ULONG_MAX, crash_size, 1<<20); if (start != crash_base) { pr_info("crashkernel reservation failed - memory is in use.\n"); return; } } - reserve_early(crash_base, crash_base + crash_size, "CRASH KERNEL"); + memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL"); printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", @@ -786,7 +785,7 @@ void __init setup_arch(char **cmdline_p) #endif 4)) { efi_enabled = 1; - efi_reserve_early(); + efi_memblock_x86_reserve_range(); } #endif @@ -846,7 +845,7 @@ void __init setup_arch(char **cmdline_p) vmi_activate(); /* after early param, so could get panic from serial */ - reserve_early_setup_data(); + memblock_x86_reserve_range_setup_data(); if (acpi_mps_check()) { #ifdef CONFIG_X86_LOCAL_APIC diff --git 
a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index c652ef6..7c2102c 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -1,7 +1,7 @@ #include +#include #include -#include #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) #define __trampinit @@ -16,15 +16,15 @@ unsigned char *__trampinitdata trampoline_base; void __init reserve_trampoline_memory(void) { - unsigned long mem; + phys_addr_t mem; /* Has to be in very low memory so we can execute real-mode AP code. */ - mem = find_e820_area(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); - if (mem == -1L) + mem = memblock_find_in_range(0, 1<<20, TRAMPOLINE_SIZE, PAGE_SIZE); + if (mem == MEMBLOCK_ERROR) panic("Cannot allocate trampoline\n"); trampoline_base = __va(mem); - reserve_early(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); + memblock_x86_reserve_range(mem, mem + TRAMPOLINE_SIZE, "TRAMPOLINE"); } /* diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index b278535..c0e28a1 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -33,6 +34,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse, int use_gbpages) { unsigned long puds, pmds, ptes, tables, start; + phys_addr_t base; puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); @@ -75,12 +77,12 @@ static void __init find_early_table_space(unsigned long end, int use_pse, #else start = 0x8000; #endif - e820_table_start = find_e820_area(start, max_pfn_mapped<>= PAGE_SHIFT; + e820_table_start = base >> PAGE_SHIFT; e820_table_end = e820_table_start; e820_table_top = e820_table_start + (tables >> PAGE_SHIFT); @@ -299,7 +301,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, __flush_tlb_all(); if (!after_bootmem && e820_table_end > e820_table_start) - reserve_early(e820_table_start << PAGE_SHIFT, + memblock_x86_reserve_range(e820_table_start << PAGE_SHIFT, e820_table_end << PAGE_SHIFT, "PGTABLE"); if (!after_bootmem) diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 90e0545..63b09ba 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -712,14 +713,14 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; - e820_register_active_regions(0, 0, highend_pfn); + memblock_x86_register_active_regions(0, 0, highend_pfn); sparse_memory_present_with_active_regions(0); printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else - e820_register_active_regions(0, 0, max_low_pfn); + memblock_x86_register_active_regions(0, 0, max_low_pfn); sparse_memory_present_with_active_regions(0); num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; @@ -776,16 +777,16 @@ void __init setup_bootmem_allocator(void) { #ifndef CONFIG_NO_BOOTMEM int nodeid; - unsigned long bootmap_size, bootmap; + phys_addr_t bootmap_size, bootmap; /* * Initialize the boot-time allocator (with low memory only): */ bootmap_size = bootmem_bootmap_pages(max_low_pfn)< #include #include +#include #include #include #include @@ -577,18 +578,18 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, unsigned long bootmap_size, bootmap; bootmap_size = 
bootmem_bootmap_pages(end_pfn)<> PAGE_SHIFT, 0, end_pfn); - e820_register_active_regions(0, start_pfn, end_pfn); + memblock_x86_register_active_regions(0, start_pfn, end_pfn); free_bootmem_with_active_regions(0, end_pfn); #else - e820_register_active_regions(0, start_pfn, end_pfn); + memblock_x86_register_active_regions(0, start_pfn, end_pfn); #endif } #endif diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 970ed57..966de93 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -11,6 +11,8 @@ #include #include #include +#include + #include #include #include @@ -222,7 +224,7 @@ int __init k8_scan_nodes(void) for_each_node_mask(i, node_possible_map) { int j; - e820_register_active_regions(i, + memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); for (j = apicid_base; j < cores + apicid_base; j++) diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c index 18d244f..92faf3a 100644 --- a/arch/x86/mm/memtest.c +++ b/arch/x86/mm/memtest.c @@ -6,8 +6,7 @@ #include #include #include - -#include +#include static u64 patterns[] __initdata = { 0, @@ -35,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad) (unsigned long long) pattern, (unsigned long long) start_bad, (unsigned long long) end_bad); - reserve_early(start_bad, end_bad, "BAD RAM"); + memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM"); } static void __init memtest(u64 pattern, u64 start_phys, u64 size) @@ -74,7 +73,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end) u64 size = 0; while (start < end) { - start = find_e820_area_size(start, &size, 1); + start = memblock_x86_find_in_range_size(start, &size, 1); /* done ? */ if (start >= end) diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 809baaa..ddf9730 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -120,7 +121,7 @@ int __init get_memcfg_numa_flat(void) node_start_pfn[0] = 0; node_end_pfn[0] = max_pfn; - e820_register_active_regions(0, 0, max_pfn); + memblock_x86_register_active_regions(0, 0, max_pfn); memory_present(0, 0, max_pfn); node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn); @@ -161,14 +162,14 @@ static void __init allocate_pgdat(int nid) NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid]; else { unsigned long pgdat_phys; - pgdat_phys = find_e820_area(min_low_pfn<>PAGE_SHIFT)); memset(buf, 0, sizeof(buf)); sprintf(buf, "NODE_DATA %d", nid); - reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); + memblock_x86_reserve_range(pgdat_phys, pgdat_phys + sizeof(pg_data_t), buf); } printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n", nid, (unsigned long)NODE_DATA(nid)); @@ -291,15 +292,15 @@ static __init unsigned long calculate_numa_remap_pages(void) PTRS_PER_PTE); node_kva_target <<= PAGE_SHIFT; do { - node_kva_final = find_e820_area(node_kva_target, + node_kva_final = memblock_find_in_range(node_kva_target, ((u64)node_end_pfn[nid])<>PAGE_SHIFT) > (node_start_pfn[nid])); - if (node_kva_final == -1ULL) + if (node_kva_final == MEMBLOCK_ERROR) panic("Can not get kva ram\n"); node_remap_size[nid] = size; @@ -318,9 +319,9 @@ static __init unsigned long calculate_numa_remap_pages(void) * but we could have some hole in high memory, and it will only * check page_is_ram(pfn) && !page_is_reserved_early(pfn) to decide * to use it as free. 
- * So reserve_early here, hope we don't run out of that array + * So memblock_x86_reserve_range here, hope we don't run out of that array */ - reserve_early(node_kva_final, + memblock_x86_reserve_range(node_kva_final, node_kva_final+(((u64)size)<> PAGE_SHIFT; kva_target_pfn -= PTRS_PER_PTE; - } while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn); + } while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn); - if (kva_start_pfn == -1UL) + if (kva_start_pfn == MEMBLOCK_ERROR) panic("Can not get kva space\n"); printk(KERN_INFO "kva_start_pfn ~ %lx max_low_pfn ~ %lx\n", @@ -382,7 +383,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, printk(KERN_INFO "max_pfn = %lx\n", max_pfn); /* avoid clash with initrd */ - reserve_early(kva_start_pfn< physnodes[i].end) { end = physnodes[i].end; @@ -467,7 +467,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, * this one must extend to the boundary. */ if (end < dma32_end && dma32_end - end - - e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* @@ -476,7 +476,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, * physical node. */ if (physnodes[i].end - end - - e820_hole_size(end, physnodes[i].end) < size) + memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; /* @@ -504,7 +504,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size) { u64 end = start + size; - while (end - start - e820_hole_size(start, end) < size) { + while (end - start - memblock_x86_hole_size(start, end) < size) { end += FAKE_NODE_MIN_SIZE; if (end > max_addr) { end = max_addr; @@ -533,7 +533,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * creates a uniform distribution of node sizes across the entire * machine (but not necessarily over physical nodes). */ - min_size = (max_addr - addr - e820_hole_size(addr, max_addr)) / + min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) / MAX_NUMNODES; min_size = max(min_size, FAKE_NODE_MIN_SIZE); if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size) @@ -566,7 +566,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * this one must extend to the boundary. */ if (end < dma32_end && dma32_end - end - - e820_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) + memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE) end = dma32_end; /* @@ -575,7 +575,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size) * physical node. 
*/ if (physnodes[i].end - end - - e820_hole_size(end, physnodes[i].end) < size) + memblock_x86_hole_size(end, physnodes[i].end) < size) end = physnodes[i].end; /* @@ -639,7 +639,7 @@ static int __init numa_emulation(unsigned long start_pfn, */ remove_all_active_ranges(); for_each_node_mask(i, node_possible_map) { - e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, + memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); setup_node_bootmem(i, nodes[i].start, nodes[i].end); } @@ -692,7 +692,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn, node_set(0, node_possible_map); for (i = 0; i < nr_cpu_ids; i++) numa_set_node(i, 0); - e820_register_active_regions(0, start_pfn, last_pfn); + memblock_x86_register_active_regions(0, start_pfn, last_pfn); setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT); } diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c index 9324f13..a17dffd 100644 --- a/arch/x86/mm/srat_32.c +++ b/arch/x86/mm/srat_32.c @@ -25,6 +25,7 @@ */ #include #include +#include #include #include #include @@ -264,7 +265,7 @@ int __init get_memcfg_from_srat(void) if (node_read_chunk(chunk->nid, chunk)) continue; - e820_register_active_regions(chunk->nid, chunk->start_pfn, + memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn, min(chunk->end_pfn, max_pfn)); } /* for out of order entries in SRAT */ diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index f9897f7..7f44eb6 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -98,15 +99,15 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) unsigned long phys; length = slit->header.length; - phys = find_e820_area(0, max_pfn_mapped< x2APIC mapping */ @@ -324,7 +325,7 @@ static int __init nodes_cover_memory(const struct bootnode *nodes) pxmram = 0; } - e820ram = max_pfn - (e820_hole_size(0, max_pfn<>PAGE_SHIFT); + e820ram = max_pfn - (memblock_x86_hole_size(0, max_pfn<>PAGE_SHIFT); /* We seem to lose 3 pages somewhere. Allow 1M of slack. 
*/ if ((long)(e820ram - pxmram) >= (1<<(20 - PAGE_SHIFT))) { printk(KERN_ERR @@ -421,7 +422,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) } for_each_node_mask(i, nodes_parsed) - e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, + memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, nodes[i].end >> PAGE_SHIFT); /* for out of order entries in SRAT */ sort_node_map(); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 914f046..b511f19 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -1735,7 +1736,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, __xen_write_cr3(true, __pa(pgd)); xen_mc_issue(PARAVIRT_LAZY_CPU); - reserve_early(__pa(xen_start_info->pt_base), + memblock_x86_reserve_range(__pa(xen_start_info->pt_base), __pa(xen_start_info->pt_base + xen_start_info->nr_pt_frames * PAGE_SIZE), "XEN PAGETABLES"); @@ -1773,7 +1774,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); - reserve_early(__pa(xen_start_info->pt_base), + memblock_x86_reserve_range(__pa(xen_start_info->pt_base), __pa(xen_start_info->pt_base + xen_start_info->nr_pt_frames * PAGE_SIZE), "XEN PAGETABLES"); diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index ad0047f..2ac8f29 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include @@ -61,7 +62,7 @@ char * __init xen_memory_setup(void) * - xen_start_info * See comment above "struct start_info" in */ - reserve_early(__pa(xen_start_info->mfn_list), + memblock_x86_reserve_range(__pa(xen_start_info->mfn_list), __pa(xen_start_info->pt_base), "XEN START INFO"); -- cgit v1.1 From a587d2daebcd2bc159d4348b6a7b028950a6d803 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:18 -0700 Subject: x86: Remove not used early_res code and some functions in e820.c that are not used anymore Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/e820.h | 14 ------------ arch/x86/kernel/e820.c | 52 --------------------------------------------- 2 files changed, 66 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 388fed2..7186463 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -112,31 +112,17 @@ static inline void early_memtest(unsigned long start, unsigned long end) } #endif -extern unsigned long end_user_pfn; - -extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align); -extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align); -extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); - extern unsigned long e820_end_of_ram_pfn(void); extern unsigned long e820_end_of_low_ram_pfn(void); -extern void e820_register_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn); -extern u64 e820_hole_size(u64 start, u64 end); - extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); void memblock_x86_fill(void); - extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); extern void setup_memory_map(void); extern char *default_machine_specific_memory_setup(void); -void reserve_early(u64 start, u64 end, char *name); -void free_early(u64 start, u64 end); - /* * Returns true iff the specified range [s,e) is completely contained inside * the ISA region. diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index a9221d1..d5fd894 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -739,32 +739,6 @@ core_initcall(e820_mark_nvs_memory); #endif /* - * Find a free area with specified alignment in a specific range. - */ -u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align) -{ - u64 mem = memblock_find_in_range(start, end, size, align); - - if (mem == MEMBLOCK_ERROR) - return -1ULL; - - return mem; -} - -/* - * Find next free range after *start - */ -u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align) -{ - u64 mem = memblock_x86_find_in_range_size(start, sizep, align); - - if (mem == MEMBLOCK_ERROR) - return -1ULL - - return mem; -} - -/* * pre allocated 4k and reserved it in memblock and e820_saved */ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align) @@ -856,32 +830,6 @@ unsigned long __init e820_end_of_low_ram_pfn(void) return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM); } -/* Walk the e820 map and register active regions within a node */ -void __init e820_register_active_regions(int nid, unsigned long start_pfn, - unsigned long last_pfn) -{ - memblock_x86_register_active_regions(nid, start_pfn, last_pfn); -} - -/* - * Find the hole size (in bytes) in the memory range. - * @start: starting address of the memory range to scan - * @end: ending address of the memory range to scan - */ -u64 __init e820_hole_size(u64 start, u64 end) -{ - return memblock_x86_hole_size(start, end); -} - -void reserve_early(u64 start, u64 end, char *name) -{ - memblock_x86_reserve_range(start, end, name); -} -void free_early(u64 start, u64 end) -{ - memblock_x86_free_range(start, end); -} - static void early_panic(char *msg) { early_printk(msg); -- cgit v1.1 From 6f2a75369e7561e800d86927ecd83c970996b21f Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:18 -0700 Subject: x86, memblock: Use memblock_memory_size()/memblock_free_memory_size() to get correct dma_reserve memblock_memory_size() will return memory size in memblock.memory.region. 
memblock_free_memory_size() will return free memory size in memblock.memory.region. So we can get the exact reserved size in a specified range. Set the size right after initmem_init(), because the bootmem API will later allocate from the area above 16M (except for some fallbacks). Later, after we remove bootmem, we could call it just before paging_init(). Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/e820.h | 2 ++ arch/x86/kernel/e820.c | 16 ++++++++++++++++ arch/x86/kernel/setup.c | 1 + arch/x86/mm/init_64.c | 7 ------- 4 files changed, 19 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h index 7186463..5be1542 100644 --- a/arch/x86/include/asm/e820.h +++ b/arch/x86/include/asm/e820.h @@ -117,6 +117,8 @@ extern unsigned long e820_end_of_low_ram_pfn(void); extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align); void memblock_x86_fill(void); +void memblock_find_dma_reserve(void); + extern void finish_e820_parsing(void); extern void e820_reserve_resources(void); extern void e820_reserve_resources_late(void); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index d5fd894..0c2b7ef 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1105,3 +1105,19 @@ void __init memblock_x86_fill(void) memblock_analyze(); memblock_dump_all(); } + +void __init memblock_find_dma_reserve(void) +{ +#ifdef CONFIG_X86_64 + u64 free_size_pfn; + u64 mem_size_pfn; + /* + * need to find out used area below MAX_DMA_PFN + * need to use memblock to get free size in [0, MAX_DMA_PFN] + * at first, and assume boot_mem will not take below MAX_DMA_PFN + */ + mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; + free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT; + set_dma_reserve(mem_size_pfn - free_size_pfn); +#endif +} diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index a4f0173..924c8f7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1013,6 +1013,7 @@ void __init setup_arch(char **cmdline_p) #endif initmem_init(0, max_pfn, acpi, k8); + memblock_find_dma_reserve(); #ifndef CONFIG_NO_BOOTMEM memblock_x86_to_bootmem(0, max_low_pfn<<PAGE_SHIFT); #endif diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ ... @@ #include #include -static unsigned long dma_reserve __initdata; - static int __init parse_direct_gbpages_off(char *arg) { direct_gbpages = 0; @@ -821,11 +819,6 @@ int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, reserve_bootmem(phys, len, flags); - if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) { - dma_reserve += len / PAGE_SIZE; - set_dma_reserve(dma_reserve); - } - return 0; } #endif -- cgit v1.1 From 774ea0bcb27f57b6fd521b3b6c43237782fed4b9 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:18 -0700 Subject: x86: Remove old bootmem code Requested by Ingo, Thomas and HPA. The old bootmem code is no longer necessary, and the transition is complete. Remove it. Signed-off-by: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/Kconfig | 10 +-------- arch/x86/kernel/setup.c | 4 ---- arch/x86/mm/init_32.c | 56 ------------------------------------------------- arch/x86/mm/init_64.c | 41 ------------------------------------ arch/x86/mm/memblock.c | 29 ------------------------- arch/x86/mm/numa_32.c | 3 --- arch/x86/mm/numa_64.c | 47 ----------------------------------------- 7 files changed, 1 insertion(+), 189 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 542bb26..ce07615 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -585,15 +585,7 @@ config PARAVIRT_DEBUG a paravirt_op is missing when it is called. config NO_BOOTMEM - default y - bool "Disable Bootmem code" - ---help--- - Use memblock directly instead of bootmem before slab is ready. - - allocator (buddy) [generic] - - early allocator (bootmem) [generic] - - very early allocator (memblock) [some generic] - - very very early allocator (early brk model) [x86] - So reduce one layer between early allocator to final allocator + def_bool y config MEMTEST bool "Memtest" diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 924c8f7..1d114ff 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1014,10 +1014,6 @@ void __init setup_arch(char **cmdline_p) initmem_init(0, max_pfn, acpi, k8); memblock_find_dma_reserve(); -#ifndef CONFIG_NO_BOOTMEM - memblock_x86_to_bootmem(0, max_low_pfn<> PAGE_SHIFT, - start_pfn, end_pfn); - printk(KERN_INFO " node %d low ram: %08lx - %08lx\n", - nodeid, start_pfn< max_low_pfn) - continue; - if (end_pfn > max_low_pfn) - end_pfn = max_low_pfn; -#else - start_pfn = 0; - end_pfn = max_low_pfn; -#endif - bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn, - bootmap); - } -#endif - after_bootmem = 1; } diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index d6d4084..690b8d1 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -572,23 +572,7 @@ kernel_physical_mapping_init(unsigned long start, void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, int acpi, int k8) { -#ifndef CONFIG_NO_BOOTMEM - unsigned long bootmap_size, bootmap; - - bootmap_size = bootmem_bootmap_pages(end_pfn)<> PAGE_SHIFT, - 0, end_pfn); memblock_x86_register_active_regions(0, start_pfn, end_pfn); - free_bootmem_with_active_regions(0, end_pfn); -#else - memblock_x86_register_active_regions(0, start_pfn, end_pfn); -#endif } #endif @@ -798,31 +782,6 @@ void mark_rodata_ro(void) #endif -#ifndef CONFIG_NO_BOOTMEM -int __init reserve_bootmem_generic(unsigned long phys, unsigned long len, - int flags) -{ - unsigned long pfn = phys >> PAGE_SHIFT; - - if (pfn >= max_pfn) { - /* - * This can happen with kdump kernels when accessing - * firmware tables: - */ - if (pfn < max_pfn_mapped) - return -EFAULT; - - printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %lu\n", - phys, len); - return -EFAULT; - } - - reserve_bootmem(phys, len, flags); - - return 0; -} -#endif - int kern_addr_valid(unsigned long addr) { unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index aaff393..50ecbc5 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -109,7 +109,6 @@ static __init struct range *find_range_array(int count) return range; } -#ifdef CONFIG_NO_BOOTMEM static void __init memblock_x86_subtract_reserved(struct range *range, int az) { u64 final_start, final_end; @@ -182,34 +181,6 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid) *rangep 
= range; return nr_range; } -#else -void __init memblock_x86_to_bootmem(u64 start, u64 end) -{ - int count; - u64 final_start, final_end; - struct memblock_region *r; - - /* Take out region array itself */ - memblock_free_reserved_regions(); - - count = memblock.reserved.cnt; - memblock_dbg("(%d early reservations) ==> bootmem [%#010llx-%#010llx]\n", count, start, end - 1); - for_each_memblock(reserved, r) { - memblock_dbg(" [%#010llx-%#010llx] ", (u64)r->base, (u64)r->base + r->size - 1); - final_start = max(start, r->base); - final_end = min(end, r->base + r->size); - if (final_start >= final_end) { - memblock_dbg("\n"); - continue; - } - memblock_dbg(" ==> [%#010llx-%#010llx]\n", final_start, final_end - 1); - reserve_bootmem_generic(final_start, final_end - final_start, BOOTMEM_DEFAULT); - } - - /* Put region array back ? */ - memblock_reserve_reserved_regions(); -} -#endif static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) { diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index ddf9730..70ddeb7 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -420,9 +420,6 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn, for_each_online_node(nid) { memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); NODE_DATA(nid)->node_id = nid; -#ifndef CONFIG_NO_BOOTMEM - NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; -#endif } setup_bootmem_allocator(); diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 984b1ff..aef0ff7 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c @@ -199,10 +199,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) unsigned long start_pfn, last_pfn, nodedata_phys; const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE); int nid; -#ifndef CONFIG_NO_BOOTMEM - unsigned long bootmap_start, bootmap_pages, bootmap_size; - void *bootmap; -#endif if (!end) return; @@ -239,47 +235,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end) NODE_DATA(nodeid)->node_start_pfn = start_pfn; NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn; -#ifndef CONFIG_NO_BOOTMEM - NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid]; - - /* - * Find a place for the bootmem map - * nodedata_phys could be on other nodes by alloc_bootmem, - * so need to sure bootmap_start not to be small, otherwise - * early_node_mem will get that with memblock_find_in_range instead - * of alloc_bootmem, that could clash with reserved range - */ - bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn); - bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE); - /* - * SMP_CACHE_BYTES could be enough, but init_bootmem_node like - * to use that to align to PAGE_SIZE - */ - bootmap = early_node_mem(nodeid, bootmap_start, end, - bootmap_pages<> PAGE_SHIFT, - start_pfn, last_pfn); - - printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n", - bootmap_start, bootmap_start + bootmap_size - 1, - bootmap_pages); - nid = phys_to_nid(bootmap_start); - if (nid != nodeid) - printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid); - - free_bootmem_with_active_regions(nodeid, end); -#endif - node_set_online(nodeid); } @@ -704,9 +659,7 @@ unsigned long __init numa_free_all_bootmem(void) for_each_online_node(i) pages += free_all_bootmem_node(NODE_DATA(i)); -#ifdef CONFIG_NO_BOOTMEM pages += free_all_memory_core_early(MAX_NUMNODES); -#endif return pages; } -- cgit v1.1 From da5ab11cdfdf496448e0e9cdbbc2dfe207a96c94 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: 
Sat, 11 Sep 2010 00:07:06 -0700 Subject: memblock, microblaze: Fix memblock API change fallout Adopt Microblaze to the memblock API changes, to fix this build failure: CC arch/microblaze/mm/init.o arch/microblaze/mm/init.c: In function 'mm_cmdline_setup': arch/microblaze/mm/init.c:236: error: 'struct memblock_type' has no member named 'region' ... Signed-off-by: Yinghai Lu Cc: linux-mm@kvack.org Cc: Stephen Rothwell Cc: Benjamin Herrenschmidt LKML-Reference: <4C8B2A9A.1040303@kernel.org> Signed-off-by: Ingo Molnar --- arch/microblaze/mm/init.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 840026c..c843786 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -228,7 +228,7 @@ static void mm_cmdline_setup(void) if (maxmem && memory_size > maxmem) { memory_size = maxmem; memory_end = memory_start + memory_size; - memblock.memory.region[0].size = memory_size; + memblock.memory.regions[0].size = memory_size; } } } @@ -271,14 +271,14 @@ asmlinkage void __init mmu_init(void) machine_restart(NULL); } - if ((u32) memblock.memory.region[0].size < 0x1000000) { + if ((u32) memblock.memory.regions[0].size < 0x1000000) { printk(KERN_EMERG "Memory must be greater than 16MB\n"); machine_restart(NULL); } /* Find main memory where the kernel is */ - memory_start = (u32) memblock.memory.region[0].base; - memory_end = (u32) memblock.memory.region[0].base + - (u32) memblock.memory.region[0].size; + memory_start = (u32) memblock.memory.regions[0].base; + memory_end = (u32) memblock.memory.regions[0].base + + (u32) memblock.memory.regions[0].size; memory_size = memory_end - memory_start; mm_cmdline_setup(); /* FIXME parse args from command line - not used */ -- cgit v1.1 From 823108a056c52a83c32ca199a57566a36fad4d19 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Sat, 11 Sep 2010 00:08:42 -0700 Subject: powerpc, memblock: Fix memblock API change fallout Fix memblock API change fallout in the WII code. Signed-off-by: Yinghai Lu Cc: linux-mm@kvack.org Cc: Benjamin Herrenschmidt LKML-Reference: <4C8B2AFA.2000705@kernel.org> Signed-off-by: Ingo Molnar --- arch/powerpc/platforms/embedded6xx/wii.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 8450c29..649473a 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x) void __init wii_memory_fixups(void) { - struct memblock_region *p = memblock.memory.region; + struct memblock_region *p = memblock.memory.regions; /* * This is part of a workaround to allow the use of two -- cgit v1.1 From 7c996361ef0d02ef8c1435902c909d14195adcdc Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Thu, 16 Sep 2010 00:20:36 -0700 Subject: arm, memblock: Fix the sparsemem build Stephen Rothwell reported this build failure: arch/arm/mm/init.c: In function 'arm_memory_present': arch/arm/mm/init.c:260: warning: ISO C90 forbids mixed declarations and code Caused by commit 719c1514f2 ("memblock/arm: Use new accessors") which forgot a closing brace on a new for_each_memblock() in arm_memory_present(). 
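For context, for_each_memblock() is a bare for statement over the region array of the given memblock type, so it supplies no braces of its own. A minimal sketch of the iterator as it stood in this period — the definition lives in include/linux/memblock.h, not in this patch, so the exact text here is a reconstruction:

	#define for_each_memblock(memblock_type, region)	\
		for (region = memblock.memblock_type.regions;	\
		     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
		     region++)

Since the loop body in arm_memory_present() is a single memory_present() call, the fix below simply drops the unmatched opening brace rather than adding a closing one.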
Reported-by: Stephen Rothwell Signed-off-by: Yinghai Lu Cc: Peter Zijlstra Cc: Benjamin Herrenschmidt Cc: Russell King LKML-Reference: <4C91C544.5050907@kernel.org> Signed-off-by: Ingo Molnar --- arch/arm/mm/init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 8504906..d6022d1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -249,9 +249,8 @@ static void arm_memory_present(void) static void arm_memory_present(void) { struct memblock_region *reg; - int i; - for_each_memblock(memory, reg) { + for_each_memblock(memory, reg) memory_present(0, memblock_region_base_pfn(reg), memblock_region_end_pfn(reg)); } -- cgit v1.1 From 9f4c13964b58608fbce05540743281ea3146c0e8 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 5 Oct 2010 16:05:14 -0700 Subject: x86, memblock: Fix crashkernel allocation Cai Qian found that crashkernel is broken with the x86 memblock changes. 1. crashkernel=128M@32M always reported that range is used, even if the first kernel is small and does not use that range 2. we always got the following report when using "kexec -p": Could not find a free area of memory of a000 bytes... locate_hole failed The root cause is that the generic memblock_find_in_range() will try to allocate from the top of the range, whereas the kexec code was written assuming that allocation was always near the bottom and that it could blindly extend memory upward. Unfortunately the kexec code doesn't have a way to request the range that it really needs, so this is subject to probabilistic failures. This patch hacks around the problem by heuristically limiting the target range to below the traditional bzImage max range. This number is arbitrary and not always correct, and a much better result would be obtained by having kexec communicate this number based on the kernel header information and any appropriate command line options. Reported-and-Bisected-by: CAI Qian Signed-off-by: Yinghai Lu LKML-Reference: <4CABAF2A.5090501@kernel.org> Cc: Vivek Goyal Signed-off-by: H. Peter Anvin
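Before the diff, a sketch of the behavioural difference described above; the numbers are illustrative only and not taken from the patch:

	/*
	 * Suppose the only free area is [16M, 4G) and crash_size is 128M.
	 *
	 *   find_e820_area(16M, -1UL, 128M, 16M)          -> 16M
	 *     bottom-up first fit; kexec could then grow the region upward
	 *
	 *   memblock_find_in_range(16M, -1UL, 128M, 16M)  -> ~(4G - 128M)
	 *     top-down last fit; growing upward from here runs off the end
	 *
	 * Capping the search at DEFAULT_BZIMAGE_ADDR_MAX (0x37FFFFFF, i.e.
	 * 896M - 1, the traditional bzImage ceiling) keeps the top-down
	 * result low enough for kexec's assumptions.
	 */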
--- arch/x86/kernel/setup.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index bf89e0a..b11a238 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -502,6 +502,7 @@ static inline unsigned long long get_total_mem(void) return total << PAGE_SHIFT; } +#define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF static void __init reserve_crashkernel(void) { unsigned long long total_mem; @@ -519,8 +520,12 @@ static void __init reserve_crashkernel(void) if (crash_base <= 0) { const unsigned long long alignment = 16<<20; /* 16M */ - crash_base = memblock_find_in_range(alignment, ULONG_MAX, crash_size, - alignment); + /* + * kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX + */ + crash_base = memblock_find_in_range(alignment, + DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment); + if (crash_base == MEMBLOCK_ERROR) { pr_info("crashkernel reservation failed - No suitable area found.\n"); return; @@ -528,8 +533,8 @@ static void __init reserve_crashkernel(void) } else { unsigned long long start; - start = memblock_find_in_range(crash_base, ULONG_MAX, crash_size, - 1<<20); + start = memblock_find_in_range(crash_base, + crash_base + crash_size, crash_size, 1<<20); if (start != crash_base) { pr_info("crashkernel reservation failed - memory is in use.\n"); return; -- cgit v1.1 From 1d931264af0f10649b35afa8fbd2e169da51ac08 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 5 Oct 2010 16:15:15 -0700 Subject: x86-32, memblock: Make add_highpages honor early reserved ranges Originally, the only early reserved range that overlapped with high pages was "KVA RAM", and we already remove that from the active ranges. However, it turns out Xen can have that kind of overlap, to support memory ballooning. So we need to make add_highpages_with_active_regions() subtract the memblock-reserved ranges, just like low RAM; this is the proper design anyway. This patch refactors get_free_all_memory_range() so that it can be used by add_highpages_with_active_regions(). With that, we no longer need to remove "KVA RAM" from the active ranges. Signed-off-by: Yinghai Lu LKML-Reference: <4CABB183.1040607@kernel.org> Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/memblock.h | 2 ++ arch/x86/mm/init_32.c | 53 +++++++++++++---------------------------- arch/x86/mm/memblock.c | 19 +++++++++++---- arch/x86/mm/numa_32.c | 2 -- 4 files changed, 33 insertions(+), 43 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h index 2c304bb..19ae14b 100644 --- a/arch/x86/include/asm/memblock.h +++ b/arch/x86/include/asm/memblock.h @@ -9,6 +9,8 @@ void memblock_x86_to_bootmem(u64 start, u64 end); void memblock_x86_reserve_range(u64 start, u64 end, char *name); void memblock_x86_free_range(u64 start, u64 end); struct range; +int __get_free_all_memory_range(struct range **range, int nodeid, + unsigned long start_pfn, unsigned long end_pfn); int get_free_all_memory_range(struct range **rangep, int nodeid); void memblock_x86_register_active_regions(int nid, unsigned long start_pfn, diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index c2385d7..8546709 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -423,49 +423,28 @@ static void __init add_one_highpage_init(struct page *page) totalhigh_pages++; } -struct add_highpages_data { - unsigned long start_pfn; - unsigned long end_pfn; -}; - -static int __init add_highpages_work_fn(unsigned long start_pfn, - unsigned long end_pfn, void *datax) +void __init add_highpages_with_active_regions(int nid, + unsigned long start_pfn, unsigned long end_pfn) { - int node_pfn; - struct page *page; - unsigned long final_start_pfn, final_end_pfn; - struct add_highpages_data *data; + struct range *range; + int nr_range; + int i; - data = (struct add_highpages_data *)datax; + nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn); - final_start_pfn = max(start_pfn, data->start_pfn); - final_end_pfn = min(end_pfn, data->end_pfn); - if (final_start_pfn >= final_end_pfn) - return 0; + for (i = 0; i < nr_range; i++) { + struct page *page; + int node_pfn; - for (node_pfn = final_start_pfn; node_pfn < final_end_pfn; - node_pfn++) { - if (!pfn_valid(node_pfn)) - continue; - page = pfn_to_page(node_pfn); - add_one_highpage_init(page); + for (node_pfn = range[i].start; node_pfn < range[i].end; + node_pfn++) { + if (!pfn_valid(node_pfn)) + continue; + page = pfn_to_page(node_pfn); + add_one_highpage_init(page); + } } - - return 0; - } - -void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, - unsigned long end_pfn) -{ - struct add_highpages_data data; - - data.start_pfn = start_pfn; - data.end_pfn = end_pfn; - - work_with_active_regions(nid, add_highpages_work_fn, &data); -} - #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index 50ecbc5..fd7a040 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -156,7 +156,8 @@ static int __init count_early_node_map(int nodeid) return data.nr; } -int __init get_free_all_memory_range(struct range **rangep, int nodeid) +int __init __get_free_all_memory_range(struct range **rangep, int nodeid, + unsigned long start_pfn, unsigned long end_pfn) { int count; struct range *range; @@ -172,9 +173,9 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid) * at first */ nr_range = add_from_early_node_map(range, count, nr_range, nodeid); -#ifdef CONFIG_X86_32 - subtract_range(range, count, max_low_pfn, -1ULL); -#endif + subtract_range(range, count, 0, start_pfn); + subtract_range(range, count, end_pfn, -1ULL); + memblock_x86_subtract_reserved(range, count); 
nr_range = clean_sort_range(range, count); @@ -182,6 +183,16 @@ int __init get_free_all_memory_range(struct range **rangep, int nodeid) return nr_range; } +int __init get_free_all_memory_range(struct range **rangep, int nodeid) +{ + unsigned long end_pfn = -1UL; + +#ifdef CONFIG_X86_32 + end_pfn = max_low_pfn; +#endif + return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn); +} + static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free) { int i, count; diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 70ddeb7..84a3e4c 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -326,8 +326,6 @@ static __init unsigned long calculate_numa_remap_pages(void) "KVA RAM"); node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT; - remove_active_range(nid, node_remap_start_pfn[nid], - node_remap_start_pfn[nid] + size); } printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n", reserve_pages); -- cgit v1.1 From 16c36f743bf8481d0ba40a6de0af11736095d7cf Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 4 Oct 2010 14:58:04 -0700 Subject: x86, memblock: Remove __memblock_x86_find_in_range_size() Fold it into memblock_x86_find_in_range(), and change bad_addr_size() to check_reserve_memblock(). So whole memblock_x86_find_in_range_size() code is more readable. Signed-off-by: Yinghai Lu LKML-Reference: <4CAA4DEC.4000401@kernel.org> Signed-off-by: H. Peter Anvin --- arch/x86/mm/memblock.c | 39 +++++++++++---------------------------- 1 file changed, 11 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c index fd7a040..aa11693 100644 --- a/arch/x86/mm/memblock.c +++ b/arch/x86/mm/memblock.c @@ -8,7 +8,7 @@ #include /* Check for already reserved areas */ -static inline bool __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align) +static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align) { struct memblock_region *r; u64 addr = *addrp, last; @@ -30,7 +30,7 @@ again: goto again; } if (last <= (r->base + r->size) && addr >= r->base) { - (*sizep)++; + *sizep = 0; return false; } } @@ -41,29 +41,6 @@ again: return changed; } -static u64 __init __memblock_x86_find_in_range_size(u64 ei_start, u64 ei_last, u64 start, - u64 *sizep, u64 align) -{ - u64 addr, last; - - addr = round_up(ei_start, align); - if (addr < start) - addr = round_up(start, align); - if (addr >= ei_last) - goto out; - *sizep = ei_last - addr; - while (bad_addr_size(&addr, sizep, align) && addr + *sizep <= ei_last) - ; - last = addr + *sizep; - if (last > ei_last) - goto out; - - return addr; - -out: - return MEMBLOCK_ERROR; -} - /* * Find next free range after start, and size is returned in *sizep */ @@ -76,10 +53,16 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align) u64 ei_last = ei_start + r->size; u64 addr; - addr = __memblock_x86_find_in_range_size(ei_start, ei_last, start, - sizep, align); + addr = round_up(ei_start, align); + if (addr < start) + addr = round_up(start, align); + if (addr >= ei_last) + continue; + *sizep = ei_last - addr; + while (check_with_memblock_reserved_size(&addr, sizep, align)) + ; - if (addr != MEMBLOCK_ERROR) + if (*sizep) return addr; } -- cgit v1.1 From 5fd03ddab7fdbc44bfb2d183a4531c26a8dbca5a Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 5 Oct 2010 21:28:10 -0700 Subject: memblock/arm: Fix memblock_region_is_memory() typo Fix typo in commit dbe3039 ("memblock/arm: Use memblock_region_is_memory() for omap fb") - it should be 
memblock_is_region_memory(). Reported-by: Tomi Valkeinen Signed-off-by: Yinghai Lu Cc: Benjamin Herrenschmidt Cc: Felipe Balbi Cc: Kevin Hilman Cc: ext Grazvydas Ignotas LKML-Reference: <4CABFADA.9020305@kernel.org> Signed-off-by: Ingo Molnar --- arch/arm/plat-omap/fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c index 441af2b..7193481 100644 --- a/arch/arm/plat-omap/fb.c +++ b/arch/arm/plat-omap/fb.c @@ -173,7 +173,7 @@ static int check_fbmem_region(int region_idx, struct omapfb_mem_region *rg, static int valid_sdram(unsigned long addr, unsigned long size) { - return memblock_region_is_memory(addr, size); + return memblock_is_region_memory(addr, size); } static int reserve_sdram(unsigned long addr, unsigned long size) -- cgit v1.1 From 236260b90dd94516982ad67aa6f5449c4c37db7b Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 6 Oct 2010 15:52:29 -0700 Subject: memblock: Allow memblock_init to be called early The Xen setup code needs to call memblock_x86_reserve_range() very early, so allow it to initialize the memblock subsystem before doing so. The second memblock_init() is ignored. Signed-off-by: Jeremy Fitzhardinge Cc: Yinghai Lu Cc: Benjamin Herrenschmidt LKML-Reference: <4CACFDAD.3090900@goop.org> Signed-off-by: H. Peter Anvin --- arch/x86/xen/enlighten.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 7d46c84..63b83ce 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -1183,6 +1184,8 @@ asmlinkage void __init xen_start_kernel(void) local_irq_disable(); early_boot_irqs_off(); + memblock_init(); + xen_raw_console_write("mapping kernel into physical memory\n"); pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); -- cgit v1.1 From c7fc2de0c83dbd2eaf759c5cd0e2b9cf1eb4df3a Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 12 Oct 2010 14:07:09 -0700 Subject: memblock, bootmem: Round pfn properly for memory and reserved regions We need to round memory regions correctly -- specifically, we need to round reserved region in the more expansive direction (lower limit down, upper limit up) whereas usable memory regions need to be rounded in the more restrictive direction (lower limit up, upper limit down). This introduces two set of inlines: memblock_region_memory_base_pfn() memblock_region_memory_end_pfn() memblock_region_reserved_base_pfn() memblock_region_reserved_end_pfn() Although they are antisymmetric (and therefore are technically duplicates) the use of the different inlines explicitly documents the programmer's intention. The lack of proper rounding caused a bug on ARM, which was then found to also affect other architectures. Reported-by: Russell King Signed-off-by: Yinghai Lu LKML-Reference: <4CB4CDFD.4020105@kernel.org> Cc: Jeremy Fitzhardinge Signed-off-by: H. Peter Anvin --- arch/arm/mm/init.c | 8 ++++---- arch/powerpc/mm/mem.c | 14 +++++++------- arch/powerpc/mm/numa.c | 4 ++-- arch/sh/mm/init.c | 4 ++-- arch/sparc/mm/init_64.c | 4 ++-- 5 files changed, 17 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d6022d1..63f4417 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -182,8 +182,8 @@ static void __init arm_bootmem_init(struct meminfo *mi, * Reserve the memblock reserved regions in bootmem. 
*/ for_each_memblock(reserved, reg) { - phys_addr_t start = memblock_region_base_pfn(reg); - phys_addr_t end = memblock_region_end_pfn(reg); + phys_addr_t start = memblock_region_reserved_base_pfn(reg); + phys_addr_t end = memblock_region_reserved_end_pfn(reg); if (start >= start_pfn && end <= end_pfn) reserve_bootmem_node(pgdat, __pfn_to_phys(start), (end - start) << PAGE_SHIFT, @@ -251,8 +251,8 @@ static void arm_memory_present(void) struct memblock_region *reg; for_each_memblock(memory, reg) - memory_present(0, memblock_region_base_pfn(reg), - memblock_region_end_pfn(reg)); + memory_present(0, memblock_region_memory_base_pfn(reg), + memblock_region_memory_end_pfn(reg)); } #endif diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index f661f6c..a664996 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -148,8 +148,8 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, int ret = -1; for_each_memblock(memory, reg) { - tstart = max(start_pfn, memblock_region_base_pfn(reg)); - tend = min(end_pfn, memblock_region_end_pfn(reg)); + tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); + tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); if (tstart >= tend) continue; ret = (*func)(tstart, tend - tstart, arg); @@ -195,8 +195,8 @@ void __init do_init_bootmem(void) /* Add active regions with valid PFNs */ for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } @@ -236,9 +236,9 @@ static int __init mark_nonram_nosave(void) for_each_memblock(memory, reg) { if (prev && - memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg)) - register_nosave_region(memblock_region_end_pfn(prev), - memblock_region_base_pfn(reg)); + memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) + register_nosave_region(memblock_region_memory_end_pfn(prev), + memblock_region_memory_base_pfn(reg)); prev = reg; } return 0; diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 066fb44..74505b2 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -811,8 +811,8 @@ static void __init setup_nonnuma(void) (top_of_ram - total_ram) >> 20); for_each_memblock(memory, reg) { - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); fake_numa_create_new_node(end_pfn, &nid); add_active_range(nid, start_pfn, end_pfn); diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index b977475..552bea5 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -244,8 +244,8 @@ static void __init do_init_bootmem(void) /* Add active regions with valid PFNs. 
*/ for_each_memblock(memory, reg) { unsigned long start_pfn, end_pfn; - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); __add_active_range(0, start_pfn, end_pfn); } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index dc584d2..4c25727 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1294,8 +1294,8 @@ static void __init bootmem_init_nonnuma(void) if (!reg->size) continue; - start_pfn = memblock_region_base_pfn(reg); - end_pfn = memblock_region_end_pfn(reg); + start_pfn = memblock_region_memory_base_pfn(reg); + end_pfn = memblock_region_memory_end_pfn(reg); add_active_range(0, start_pfn, end_pfn); } -- cgit v1.1 From fef5ba797991f9335bcfc295942b684f9bf613a1 Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 13 Oct 2010 16:02:24 -0700 Subject: xen: Cope with unmapped pages when initializing kernel pagetable Xen requires that all pages containing pagetable entries to be mapped read-only. If pages used for the initial pagetable are already mapped then we can change the mapping to RO. However, if they are initially unmapped, we need to make sure that when they are later mapped, they are also mapped RO. We do this by knowing that the kernel pagetable memory is pre-allocated in the range e820_table_start - e820_table_end, so any pfn within this range should be mapped read-only. However, the pagetable setup code early_ioremaps the pages to write their entries, so we must make sure that mappings created in the early_ioremap fixmap area are mapped RW. (Those mappings are removed before the pages are presented to Xen as pagetable pages.) Signed-off-by: Jeremy Fitzhardinge LKML-Reference: <4CB63A80.8060702@goop.org> Cc: Yinghai Lu Signed-off-by: H. 
Peter Anvin --- arch/x86/include/asm/io.h | 1 + arch/x86/mm/ioremap.c | 5 +++++ arch/x86/xen/mmu.c | 26 ++++++++++++++++++-------- 3 files changed, 24 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 30a3e97..66aee6c 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -348,6 +348,7 @@ extern void __iomem *early_memremap(resource_size_t phys_addr, unsigned long size); extern void early_iounmap(void __iomem *addr, unsigned long size); extern void fixup_early_ioremap(void); +extern bool is_early_ioremap_ptep(pte_t *ptep); #define IO_SPACE_LIMIT 0xffff diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 3ba6e06..0369843 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -362,6 +362,11 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr) return &bm_pte[pte_index(addr)]; } +bool __init is_early_ioremap_ptep(pte_t *ptep) +{ + return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)]; +} + static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata; void __init early_ioremap_init(void) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 4fe04ac..7d55e9e 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -360,7 +361,8 @@ void make_lowmem_page_readonly(void *vaddr) unsigned int level; pte = lookup_address(address, &level); - BUG_ON(pte == NULL); + if (pte == NULL) + return; /* vaddr missing */ ptev = pte_wrprotect(*pte); @@ -375,7 +377,8 @@ void make_lowmem_page_readwrite(void *vaddr) unsigned int level; pte = lookup_address(address, &level); - BUG_ON(pte == NULL); + if (pte == NULL) + return; /* vaddr missing */ ptev = pte_mkwrite(*pte); @@ -1509,13 +1512,25 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) #endif } -#ifdef CONFIG_X86_32 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) { + unsigned long pfn = pte_pfn(pte); + +#ifdef CONFIG_X86_32 /* If there's an existing pte, then don't allow _PAGE_RW to be set */ if (pte_val_ma(*ptep) & _PAGE_PRESENT) pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & pte_val_ma(pte)); +#endif + + /* + * If the new pfn is within the range of the newly allocated + * kernel pagetable, and it isn't being mapped into an + * early_ioremap fixmap slot, make sure it is RO. + */ + if (!is_early_ioremap_ptep(ptep) && + pfn >= e820_table_start && pfn < e820_table_end) + pte = pte_wrprotect(pte); return pte; } @@ -1528,7 +1543,6 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) xen_set_pte(ptep, pte); } -#endif static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) { @@ -1973,11 +1987,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { .alloc_pmd_clone = paravirt_nop, .release_pmd = xen_release_pmd_init, -#ifdef CONFIG_X86_64 - .set_pte = xen_set_pte, -#else .set_pte = xen_set_pte_init, -#endif .set_pte_at = xen_set_pte_at, .set_pmd = xen_set_pmd_hyper, -- cgit v1.1 From 67e87f0a1c5cbc750f81ebf6a128e8ff6f4376cc Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Wed, 13 Oct 2010 16:34:15 -0700 Subject: x86-64: Only set max_pfn_mapped to 512 MiB if we enter via head_64.S head_64.S maps up to 512 MiB, but that is not necessarity true for other entry paths, such as Xen. Thus, co-locate the setting of max_pfn_mapped with the code to actually set up the page tables in head_64.S. The 32-bit code is already so co-located. 
(The Xen code already sets max_pfn_mapped correctly for its own use case.) -v2: Yinghai fixed the following bug in this patch: | | max_pfn_mapped is in .bss section, so we need to set that | after bss get cleared. Without that we crash on bootup. | | That is safe because Xen does not call x86_64_start_kernel(). | Signed-off-by: Jeremy Fitzhardinge Fixed-by: Yinghai Lu Signed-off-by: H. Peter Anvin LKML-Reference: <4CB6AB24.9020504@kernel.org> Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 2 ++ arch/x86/kernel/setup.c | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 97adf98..2d2673c 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -80,6 +80,8 @@ void __init x86_64_start_kernel(char * real_mode_data) /* Cleanup the over mapped high alias */ cleanup_highmap(); + max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; + for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { #ifdef CONFIG_EARLY_PRINTK set_intr_gate(i, &early_idt_handlers[i]); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b11a238..c3cebfe 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -932,7 +932,6 @@ void __init setup_arch(char **cmdline_p) max_low_pfn = max_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; - max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; #endif /* -- cgit v1.1
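The pfn-rounding commit above introduces four accessors whose definitions land in include/linux/memblock.h and are therefore outside the 'arch' filter of this log. A minimal sketch of them, assuming the PFN_UP()/PFN_DOWN() helpers from <linux/pfn.h>; the bodies are reconstructed here, not quoted from the tree:

	/* usable memory: round inward (base up, end down) */
	static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
	{
		return PFN_UP(reg->base);
	}

	static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
	{
		return PFN_DOWN(reg->base + reg->size);
	}

	/* reserved memory: round outward (base down, end up) */
	static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
	{
		return PFN_DOWN(reg->base);
	}

	static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
	{
		return PFN_UP(reg->base + reg->size);
	}

A partial page of usable RAM must not be handed to the page allocator, while a partial page of a reservation must not be freed; hence the opposite rounding directions.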