author	Yinghai Lu <yinghai@kernel.org>	2010-12-17 16:59:07 -0800
committer	H. Peter Anvin <hpa@linux.intel.com>	2010-12-29 14:46:55 -0800
commit	1a4a678b12c84db9ae5dce424e0e97f0559bb57c (patch)
tree	c272694389a0b035f198be679eb9355788fc0e5c /mm/page_alloc.c
parent	32e3f2b00c529477d26895c5428ed95bba537443 (diff)
memblock: Make find_memory_core_early() find from top-down
find_memory_core_early() is used to find RAM in a node or a bootmem region. Make it search top-down so that it is consistent with memblock_find(), and so that it avoids allocating potentially valuable low memory before it is actually needed.

Suggested-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4D0C075B.3040501@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
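For illustration only (not part of the patch): the sketch below shows the top-down idea with a hypothetical struct range array and a find_range_top_down() helper standing in for early_node_map[] and find_memory_core_early(). Walking the array from its last entry downward returns the highest-addressed block that fits, which is the behaviour the patch introduces.

struct range {
	unsigned long start_pfn;
	unsigned long end_pfn;	/* exclusive */
	int nid;
};

/*
 * Return the starting PFN of the highest block of size_pfn pages that
 * fits inside a range belonging to nid (any node if nid < 0), or
 * (unsigned long)-1 if nothing fits.  Scanning from the end of the
 * array is what makes the search top-down.
 */
static unsigned long find_range_top_down(const struct range *map, int nr,
					 int nid, unsigned long size_pfn)
{
	int i;

	for (i = nr - 1; i >= 0; i--) {
		if (nid >= 0 && map[i].nid != nid)
			continue;
		if (map[i].end_pfn - map[i].start_pfn >= size_pfn)
			return map[i].end_pfn - size_pfn;
	}
	return (unsigned long)-1;
}

With two ranges covering PFNs [0, 100) and [200, 300), a request for 50 pages comes back at PFN 250 rather than PFN 0, leaving low memory free for callers that genuinely need it.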
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07a6544..19413bf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3555,6 +3555,34 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
 	return -1;
 }
 
+/*
+ * Basic iterator support. Return the last range of PFNs for a node
+ * Note: nid == MAX_NUMNODES returns last region regardless of node
+ */
+static int __meminit last_active_region_index_in_nid(int nid)
+{
+	int i;
+
+	for (i = nr_nodemap_entries - 1; i >= 0; i--)
+		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
+			return i;
+
+	return -1;
+}
+
+/*
+ * Basic iterator support. Return the previous active range of PFNs for a node
+ * Note: nid == MAX_NUMNODES returns previous region regardless of node
+ */
+static int __meminit previous_active_region_index_in_nid(int index, int nid)
+{
+	for (index = index - 1; index >= 0; index--)
+		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
+			return index;
+
+	return -1;
+}
+
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3606,6 +3634,10 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 	for (i = first_active_region_index_in_nid(nid); i != -1; \
 		i = next_active_region_index_in_nid(i, nid))
 
+#define for_each_active_range_index_in_nid_reverse(i, nid) \
+	for (i = last_active_region_index_in_nid(nid); i != -1; \
+		i = previous_active_region_index_in_nid(i, nid))
+
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
  * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3644,7 +3676,7 @@ u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 	int i;
 
 	/* Need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid(i, nid) {
+	for_each_active_range_index_in_nid_reverse(i, nid) {
 		u64 addr;
 		u64 ei_start, ei_last;
 		u64 final_start, final_end;