author		Tejun Heo <tj@kernel.org>	2011-05-02 14:18:54 +0200
committer	Tejun Heo <tj@kernel.org>	2011-05-02 14:18:54 +0200
commit		7888e96b264fad27f97f58c0f3a4d20326eaf181 (patch)
tree		8ef77da9bd9b5491fadd6b01dd7de8fdf9d6eec5 /arch/x86/mm
parent		99cca492ea8ced305bfd687521ed69fb9e0147aa (diff)
x86, NUMA: Initialize and use remap allocator from setup_node_bootmem()
setup_node_bootmem() was taken from the 64bit code and doesn't use the
remap allocator. It's about to be shared with 32bit, so add support for
it.

If NODE_DATA is remapped, this is noted in the debug message and the
node locality check is skipped, as the __pa() of a remapped address
doesn't reflect the actual physical address.

On 64bit, the remap allocator becomes a noop and doesn't affect the
behavior.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/numa.c		41
-rw-r--r--	arch/x86/mm/numa_32.c		2
-rw-r--r--	arch/x86/mm/numa_internal.h	6
3 files changed, 34 insertions, 15 deletions
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index b45caa3..a72317a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -197,7 +197,9 @@ static void __init setup_node_bootmem(int nid, u64 start, u64 end)
 	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
 	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
+	bool remapped = false;
 	u64 nd_pa;
+	void *nd;
 	int tnid;
 
 	/*
@@ -207,34 +209,45 @@ static void __init setup_node_bootmem(int nid, u64 start, u64 end)
 	if (end && (end - start) < NODE_MIN_SIZE)
 		return;
 
+	/* initialize remap allocator before aligning to ZONE_ALIGN */
+	init_alloc_remap(nid, start, end);
+
 	start = roundup(start, ZONE_ALIGN);
 
 	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
 	       nid, start, end);
 
 	/*
-	 * Try to allocate node data on local node and then fall back to
-	 * all nodes.  Never allocate in DMA zone.
+	 * Allocate node data.  Try remap allocator first, node-local
+	 * memory and then any node.  Never allocate in DMA zone.
 	 */
-	nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
+	nd = alloc_remap(nid, nd_size);
+	if (nd) {
+		nd_pa = __pa(nd);
+		remapped = true;
+	} else {
+		nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
 						nd_size, SMP_CACHE_BYTES);
-	if (nd_pa == MEMBLOCK_ERROR)
-		nd_pa = memblock_find_in_range(nd_low, nd_high,
-					       nd_size, SMP_CACHE_BYTES);
-	if (nd_pa == MEMBLOCK_ERROR) {
-		pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid);
-		return;
+		if (nd_pa == MEMBLOCK_ERROR)
+			nd_pa = memblock_find_in_range(nd_low, nd_high,
						nd_size, SMP_CACHE_BYTES);
+		if (nd_pa == MEMBLOCK_ERROR) {
+			pr_err("Cannot find %zu bytes in node %d\n",
+			       nd_size, nid);
+			return;
+		}
+		memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
+		nd = __va(nd_pa);
 	}
-	memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
 
 	/* report and initialize */
-	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]\n",
-	       nd_pa, nd_pa + nd_size - 1);
+	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]%s\n",
+	       nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
 	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
-	if (tnid != nid)
+	if (!remapped && tnid != nid)
 		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);
 
-	node_data[nid] = __va(nd_pa);
+	node_data[nid] = nd;
 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
 	NODE_DATA(nid)->node_id = nid;
 	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 9008632..fbd558f 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -265,7 +265,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
  * opportunistically and the callers will fall back to other memory
  * allocation mechanisms on failure.
  */
-static __init void init_alloc_remap(int nid, u64 start, u64 end)
+void __init init_alloc_remap(int nid, u64 start, u64 end)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long end_pfn = end >> PAGE_SHIFT;
diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h
index ad86ec9..7178c3a 100644
--- a/arch/x86/mm/numa_internal.h
+++ b/arch/x86/mm/numa_internal.h
@@ -21,6 +21,12 @@ void __init numa_reset_distance(void);
 
 void __init x86_numa_init(void);
 
+#ifdef CONFIG_X86_64
+static inline void init_alloc_remap(int nid, u64 start, u64 end) { }
+#else
+void __init init_alloc_remap(int nid, u64 start, u64 end);
+#endif
+
 #ifdef CONFIG_NUMA_EMU
 void __init numa_emulation(struct numa_meminfo *numa_meminfo,
 			   int numa_dist_cnt);
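
For readers skimming the change, the patch boils down to a three-tier
allocation order for NODE_DATA: remap allocator first, then node-local
memblock, then any node. The sketch below distills that order into a
standalone userspace program; the stub_* allocators are hypothetical
stand-ins for alloc_remap(), memblock_x86_find_in_range_node() and
memblock_find_in_range(), and only the fallback logic and the skipped
locality check mirror the diff above.

/*
 * Standalone sketch of the NODE_DATA allocation order added above.
 * All stub_* functions are hypothetical stand-ins; only the
 * three-tier fallback and the "remapped" bookkeeping mirror the patch.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for alloc_remap(): always fails here, like the 64bit noop */
static void *stub_alloc_remap(int nid, size_t size)
{
	(void)nid;
	(void)size;
	return NULL;
}

/* stand-in for the node-local memblock_x86_find_in_range_node() */
static void *stub_memblock_node(int nid, size_t size)
{
	(void)nid;
	return malloc(size);
}

/* stand-in for the any-node memblock_find_in_range() last resort */
static void *stub_memblock_any(size_t size)
{
	return malloc(size);
}

static void *alloc_node_data(int nid, size_t size, bool *remapped)
{
	void *nd;

	*remapped = false;

	/* 1. remap allocator: a noop on 64bit, may succeed on 32bit */
	nd = stub_alloc_remap(nid, size);
	if (nd) {
		*remapped = true;
		return nd;
	}

	/* 2. node-local memory */
	nd = stub_memblock_node(nid, size);
	if (nd)
		return nd;

	/* 3. any node */
	return stub_memblock_any(size);
}

int main(void)
{
	bool remapped;
	void *nd = alloc_node_data(0, 4096, &remapped);

	/*
	 * When the remap allocator served the request, __pa(nd) would
	 * not be the true physical address, so the kernel skips the
	 * node locality check and tags the message "(remapped)".
	 */
	printf("NODE_DATA at %p%s\n", nd, remapped ? " (remapped)" : "");
	free(nd);
	return 0;
}

The remapped flag is the crux of the design: a remap-served pointer
lives in a kernel virtual remapping, so deriving a node from __pa(nd)
would be meaningless, and skipping the check avoids a spurious
"NODE_DATA(%d) on node %d" message.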