Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03fef8d..676aec9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -159,8 +159,13 @@ EXPORT_SYMBOL(nr_node_ids);
 #endif
 #ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
+int page_group_by_mobility_disabled __read_mostly;
+
 static inline int get_pageblock_migratetype(struct page *page)
 {
+	if (unlikely(page_group_by_mobility_disabled))
+		return MIGRATE_UNMOVABLE;
+
 	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
 }
@@ -174,6 +179,9 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 {
 	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+	if (unlikely(page_group_by_mobility_disabled))
+		return MIGRATE_UNMOVABLE;
+
 	/* Cluster high-order atomic allocations together */
 	if (unlikely(order > 0) &&
 			(!(gfp_flags & __GFP_WAIT) || in_interrupt()))
@@ -2375,9 +2383,23 @@ void build_all_zonelists(void)
 		/* cpuset refresh routine should be here */
 	}
 	vm_total_pages = nr_free_pagecache_pages();
-	printk("Built %i zonelists in %s order. Total pages: %ld\n",
+	/*
+	 * Disable grouping by mobility if the number of pages in the
+	 * system is too low to allow the mechanism to work. It would be
+	 * more accurate, but expensive to check per-zone. This check is
+	 * made on memory-hotadd so a system can start with mobility
+	 * disabled and enable it later
+	 */
+	if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES))
+		page_group_by_mobility_disabled = 1;
+	else
+		page_group_by_mobility_disabled = 0;
+
+	printk("Built %i zonelists in %s order, mobility grouping %s. "
+		"Total pages: %ld\n",
 			num_online_nodes(),
 			zonelist_order_name[current_zonelist_order],
+			page_group_by_mobility_disabled ? "off" : "on",
 			vm_total_pages);
 #ifdef CONFIG_NUMA
 	printk("Policy zone: %s\n", zone_names[policy_zone]);