Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  102
1 file changed, 99 insertions, 3 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d14eda6..f6979dc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -683,9 +683,19 @@ static enum page_references page_check_references(struct page *page,
*/
SetPageReferenced(page);
+#ifndef CONFIG_DMA_CMA
if (referenced_page)
return PAGEREF_ACTIVATE;
+#else
+ if (referenced_page || referenced_ptes > 1)
+ return PAGEREF_ACTIVATE;
+ /*
+ * Activate file-backed executable pages after first usage.
+ */
+ if (vm_flags & VM_EXEC)
+ return PAGEREF_ACTIVATE;
+#endif
return PAGEREF_KEEP;
}
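Taken together, the CONFIG_DMA_CMA branch above activates a page on a referenced-page flag hit, on more than one referencing PTE, or on the first use of a file-backed executable mapping. A minimal stand-alone sketch of that policy (the function name and user-space framing are illustrative, not part of the patch):

#include <stdbool.h>

#define VM_EXEC	0x00000004UL	/* same bit value the kernel uses */

enum page_references {
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

/* Mirrors the CONFIG_DMA_CMA decision in page_check_references(). */
static enum page_references
dma_cma_references(bool referenced_page, int referenced_ptes,
		   unsigned long vm_flags)
{
	/* Activate on a flag hit or on a second mapping reference. */
	if (referenced_page || referenced_ptes > 1)
		return PAGEREF_ACTIVATE;
	/* Activate file-backed executable pages after first usage. */
	if (vm_flags & VM_EXEC)
		return PAGEREF_ACTIVATE;
	return PAGEREF_KEEP;
}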
@@ -717,7 +727,10 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
/*
* shrink_page_list() returns the number of reclaimed pages
*/
-static unsigned long shrink_page_list(struct list_head *page_list,
+#ifndef CONFIG_ZRAM_FOR_ANDROID
+static
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+unsigned long shrink_page_list(struct list_head *page_list,
struct zone *zone,
struct scan_control *sc)
{
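Dropping static here (and on clear_active_flags() below) exposes the symbol to zram-for-Android code in another translation unit, which would also need matching prototypes. A hedged sketch of the declarations such a shared header might carry; the patch does not show the header, and struct scan_control is normally private to mm/vmscan.c, so this only illustrates the shapes:

#ifdef CONFIG_ZRAM_FOR_ANDROID
unsigned long shrink_page_list(struct list_head *page_list,
			       struct zone *zone,
			       struct scan_control *sc);
unsigned long clear_active_flags(struct list_head *page_list,
				 unsigned int *count);
#endif /* CONFIG_ZRAM_FOR_ANDROID */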
@@ -997,8 +1010,12 @@ int __isolate_lru_page(struct page *page, int mode, int file)
* unevictable; only give shrink_page_list evictable pages.
*/
if (PageUnevictable(page))
+#ifndef CONFIG_DMA_CMA
return ret;
-
+#else
+ printk(KERN_ERR "%s[%d] Unevictable page %p\n",
+ __func__, __LINE__, page);
+#endif
ret = -EBUSY;
if (likely(get_page_unless_zero(page))) {
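Note the behavioral change this buys: with CONFIG_DMA_CMA set, an unevictable page is only logged, not rejected, so control falls through to ret = -EBUSY and the isolation attempt below still runs, presumably so CMA can migrate pages out of its region even when they are unevictable.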
@@ -1177,7 +1194,10 @@ static unsigned long isolate_pages_global(unsigned long nr,
* clear_active_flags() is a helper for shrink_active_list(), clearing
* any active bits from the pages in the list.
*/
-static unsigned long clear_active_flags(struct list_head *page_list,
+#ifndef CONFIG_ZRAM_FOR_ANDROID
+static
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+unsigned long clear_active_flags(struct list_head *page_list,
unsigned int *count)
{
int nr_active = 0;
@@ -1247,6 +1267,40 @@ int isolate_lru_page(struct page *page)
return ret;
}
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+/**
+ * isolate_lru_page_compcache - tries to isolate a page for compcache
+ * @page: page to isolate from its LRU list
+ *
+ * Isolates @page from its LRU list and clears PageLRU, but
+ * does not adjust the vmstat statistics.
+ * Returns 0 if the page was removed from an LRU list.
+ * Returns -EBUSY if the page was not on an LRU list.
+ */
+int isolate_lru_page_compcache(struct page *page)
+{
+ int ret = -EBUSY;
+
+ VM_BUG_ON(!page_count(page));
+
+ if (PageLRU(page)) {
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irq(&zone->lru_lock);
+ if (PageLRU(page)) {
+ int lru = page_lru(page);
+ ret = 0;
+ get_page(page);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ mem_cgroup_del_lru_list(page, lru);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ }
+ return ret;
+}
+#endif
+
/*
* Are there way too many processes in the direct reclaim path already?
*/
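Compared with isolate_lru_page(), the compcache variant above skips the per-page zone LRU counter update that del_page_from_lru_list() would perform; presumably the batch accounting is instead done once per list by update_isolated_counts() inside zone_id_shrink_pagelist(), added below, rather than once per page.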
@@ -1479,6 +1533,44 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
return nr_reclaimed;
}
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+unsigned long
+zone_id_shrink_pagelist(struct zone *zone, struct list_head *page_list)
+{
+ unsigned long nr_reclaimed = 0;
+ unsigned long nr_anon;
+ unsigned long nr_file;
+
+ struct scan_control sc = {
+ .gfp_mask = GFP_USER,
+ .may_writepage = 1,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
+ .may_unmap = 1,
+ .may_swap = 1,
+ .swappiness = vm_swappiness,
+ .order = 0,
+ .mem_cgroup = NULL,
+ .nodemask = NULL,
+ };
+
+ spin_lock_irq(&zone->lru_lock);
+
+ update_isolated_counts(zone, &sc, &nr_anon, &nr_file, page_list);
+
+ spin_unlock_irq(&zone->lru_lock);
+
+ nr_reclaimed = shrink_page_list(page_list, zone, &sc);
+
+ __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+
+ putback_lru_pages(zone, &sc, nr_anon, nr_file, page_list);
+
+ return nr_reclaimed;
+}
+
+EXPORT_SYMBOL(zone_id_shrink_pagelist);
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+
/*
* This moves pages from the active list to the inactive list.
*
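A hedged sketch of how a driver might pair the two CONFIG_ZRAM_FOR_ANDROID helpers; compcache_next_victim() and the surrounding function are invented for illustration, the extern declarations stand in for a header the patch does not show, and all candidate pages are assumed to sit in the same zone:

#include <linux/mm.h>
#include <linux/swap.h>

/* Hypothetical driver helper: yields candidate pages from @zone, or NULL. */
static struct page *compcache_next_victim(struct zone *zone);

extern int isolate_lru_page_compcache(struct page *page);
extern unsigned long zone_id_shrink_pagelist(struct zone *zone,
					     struct list_head *page_list);

static unsigned long compcache_reclaim_zone(struct zone *zone)
{
	LIST_HEAD(page_list);
	struct page *page;

	while ((page = compcache_next_victim(zone)) != NULL) {
		/* on success this takes a reference and clears PageLRU */
		if (isolate_lru_page_compcache(page) == 0)
			list_add(&page->lru, &page_list);
	}

	/* accounts the batch via update_isolated_counts(), reclaims it,
	 * and puts any unreclaimed pages back on the LRU */
	return zone_id_shrink_pagelist(zone, &page_list);
}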
@@ -2878,7 +2970,11 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
struct reclaim_state reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_HIGHUSER_MOVABLE,
+#if defined(CONFIG_SLP) && defined(CONFIG_FULL_PAGE_RECLAIM)
+ .may_swap = 0,
+#else
.may_swap = 1,
+#endif
.may_unmap = 1,
.may_writepage = 1,
.nr_to_reclaim = nr_to_reclaim,
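With both CONFIG_SLP and CONFIG_FULL_PAGE_RECLAIM set, shrink_all_memory() (used on the hibernation path) reclaims file-backed pages only instead of swapping anonymous memory out. On trees that have the IS_ENABLED() helper, which may postdate this one, the #if/#else pair could collapse into a single initializer; a sketch:

	.may_swap = !(IS_ENABLED(CONFIG_SLP) &&
		      IS_ENABLED(CONFIG_FULL_PAGE_RECLAIM)),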