author	Daniel Hillenbrand <daniel.hillenbrand@codeworkx.de>	2012-07-21 23:04:45 +0200
committer	Daniel Hillenbrand <daniel.hillenbrand@codeworkx.de>	2012-07-21 23:04:45 +0200
commit	0a1182796f6475b8cb2ff1781dad873a744b3197 (patch)
tree	e15b5256dac226c49a25b5e24594cd638e2fec2c /mm
parent	633018c13fe06461d9c60692fbb114734aa37802 (diff)
samsung opensource update3
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	75
1 file changed, 75 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4d8f48c..3606bec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -101,6 +101,14 @@ unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
+#ifdef CONFIG_COMPACTION_RETRY_DEBUG
+void show_buddy_info(void);
+#else
+static inline void show_buddy_info(void)
+{
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
/*
* The following functions are used by the suspend/hibernate code to temporarily
@@ -1864,6 +1872,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
int migratetype)
{
struct page *page;
+#ifdef CONFIG_COMPACTION_RETRY
+ struct zoneref *z;
+ struct zone *zone;
+#endif
/* Acquire the OOM killer lock for the zones in zonelist */
if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
@@ -1883,6 +1895,32 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
if (page)
goto out;
+#ifdef CONFIG_COMPACTION_RETRY
+ /*
+ * When we reach here, we already tried direct reclaim.
+ * Therefore it might be possible that we have enough
+ * free memory but extremely fragmented.
+ * So we give it a last chance to try memory compaction and get pages.
+ */
+ if (order) {
+ pr_info("reclaim before oom : retry compaction.\n");
+ show_buddy_info();
+
+ for_each_zone_zonelist_nodemask(zone, z, zonelist,
+ high_zoneidx, nodemask)
+ compact_zone_order(zone, -1, gfp_mask, true);
+
+ show_buddy_info();
+ pr_info("reclaim :end\n");
+ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL,
+ nodemask, order, zonelist, high_zoneidx,
+ ALLOC_WMARK_HIGH|ALLOC_CPUSET,
+ preferred_zone, migratetype);
+ if (page)
+ goto out;
+ }
+#endif
+
if (!(gfp_mask & __GFP_NOFAIL)) {
/* The OOM killer will not help higher order allocs */
if (order > PAGE_ALLOC_COSTLY_ORDER)
@@ -2115,6 +2153,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
#ifdef CONFIG_ANDROID_WIP
unsigned long start_tick = jiffies;
#endif
+
/*
* In the slowpath, we sanity check order to avoid ever trying to
* reclaim >= MAX_ORDER areas which will never succeed. Callers may
@@ -2226,6 +2265,11 @@ rebalance:
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
if (oom_killer_disabled)
goto nopage;
+#ifdef CONFIG_ANDROID_WIP
+ if (did_some_progress)
+ pr_info("time's up : calling "
+ "__alloc_pages_may_oom\n");
+#endif
page = __alloc_pages_may_oom(gfp_mask, order,
zonelist, high_zoneidx,
nodemask, preferred_zone,
@@ -2731,6 +2775,37 @@ void show_free_areas(unsigned int filter)
show_swap_cache_info();
}
+#ifdef CONFIG_COMPACTION_RETRY_DEBUG
+void show_buddy_info(void)
+{
+ struct zone *zone;
+ unsigned long nr[MAX_ORDER], flags, order, total = 0;
+ char buf[256];
+ int len;
+
+ for_each_populated_zone(zone) {
+
+ if (skip_free_areas_node(SHOW_MEM_FILTER_NODES,
+ zone_to_nid(zone)))
+ continue;
+ show_node(zone);
+ len = sprintf(buf, "%s: ", zone->name);
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
+ nr[order] = zone->free_area[order].nr_free;
+ total += nr[order] << order;
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++)
+ len += sprintf(buf + len, "%lu*%lukB ",
+ nr[order], K(1UL) << order);
+ len += sprintf(buf + len, "= %lukB", K(total));
+ pr_err("%s\n", buf);
+ }
+}
+#endif
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
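
For readers unfamiliar with the buddy statistics that show_buddy_info() prints, the standalone userspace sketch below reproduces the same arithmetic outside the kernel: each order's nr_free counts blocks of 2^order pages, and K() converts pages to kB. PAGE_SHIFT, MAX_ORDER, and the sample counts here are illustrative assumptions, not values taken from the patch.

	#include <stdio.h>

	#define PAGE_SHIFT 12                    /* assumed 4 KiB pages */
	#define MAX_ORDER  11                    /* assumed typical default */
	#define K(x) ((x) << (PAGE_SHIFT - 10))  /* pages -> kB, as in page_alloc.c */

	int main(void)
	{
		/* Hypothetical per-order free-list counts for one zone. */
		unsigned long nr[MAX_ORDER] = { 120, 64, 32, 16, 8, 4, 2, 1, 0, 0, 1 };
		unsigned long total = 0;
		unsigned long order;

		for (order = 0; order < MAX_ORDER; order++) {
			total += nr[order] << order;     /* blocks of 2^order pages */
			printf("%lu*%lukB ", nr[order], K(1UL) << order);
		}
		printf("= %lukB\n", K(total));
		return 0;
	}

Built with any C compiler, this prints one line in the same "N*MkB ... = totalkB" format that the patch emits per zone via pr_err(), which is useful when reading the compaction-retry debug output this commit adds.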