about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2007-10-16 01:25:36 -0700
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 09:42:58 -0700
commit37c0708dbee5825df3bd9ce6ef2199c6c1713970 (patch)
tree747551aa58484e7f872da118b864c8f3ca6e892d
parent56bbd65df0e92a4a8eb70c5f2b416ae2b6c5fb31 (diff)
downloadkernel_samsung_smdk4412-37c0708dbee5825df3bd9ce6ef2199c6c1713970.zip
kernel_samsung_smdk4412-37c0708dbee5825df3bd9ce6ef2199c6c1713970.tar.gz
kernel_samsung_smdk4412-37c0708dbee5825df3bd9ce6ef2199c6c1713970.tar.bz2
Memoryless nodes: Add N_CPU node state
We need the check for a node with cpu in zone reclaim. Zone reclaim will not allow remote zone reclaim if a node has a cpu.

[Lee.Schermerhorn@hp.com: Move setup of N_CPU node state mask]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/nodemask.h1
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/vmscan.c4
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index ccee962..905e18f 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -349,6 +349,7 @@ enum node_states {
#else
N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
+ N_CPU, /* The node has one or more cpus */
NR_NODE_STATES
};
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07dfd89..161bcb7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2353,6 +2353,9 @@ static struct per_cpu_pageset boot_pageset[NR_CPUS];
static int __cpuinit process_zones(int cpu)
{
struct zone *zone, *dzone;
+ int node = cpu_to_node(cpu);
+
+ node_set_state(node, N_CPU); /* this node has a cpu */
for_each_zone(zone) {
@@ -2360,7 +2363,7 @@ static int __cpuinit process_zones(int cpu)
continue;
zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
- GFP_KERNEL, cpu_to_node(cpu));
+ GFP_KERNEL, node);
if (!zone_pcp(zone, cpu))
goto bad;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8765688..8fd8ba1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1853,7 +1853,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
- cpumask_t mask;
int node_id;
/*
@@ -1890,8 +1889,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
* as wide as possible.
*/
node_id = zone_to_nid(zone);
- mask = node_to_cpumask(node_id);
- if (!cpus_empty(mask) && node_id != numa_node_id())
+ if (node_state(node_id, N_CPU) && node_id != numa_node_id())
return 0;
return __zone_reclaim(zone, gfp_mask, order);
}