Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  103
1 file changed, 98 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2174733..99082fa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -726,15 +726,19 @@ static enum page_references page_check_references(struct page *page,
*/
SetPageReferenced(page);
+#ifndef CONFIG_DMA_CMA
+ if (referenced_page)
+ return PAGEREF_ACTIVATE;
+#else
if (referenced_page || referenced_ptes > 1)
return PAGEREF_ACTIVATE;
/*
* Activate file-backed executable pages after first usage.
- */
+ */
if (vm_flags & VM_EXEC)
return PAGEREF_ACTIVATE;
-
+#endif
return PAGEREF_KEEP;
}
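
For reference, the two shapes this #ifdef produces, restated as a standalone model (not kernel code; types are reduced to ints so it compiles on its own, with or without -DCONFIG_DMA_CMA):

enum page_references { PAGEREF_ACTIVATE, PAGEREF_KEEP };

/* Mirrors the tail of page_check_references() after this patch. */
static enum page_references model_check_references(int referenced_page,
						   int referenced_ptes,
						   int vm_exec)
{
#ifndef CONFIG_DMA_CMA
	/* CMA disabled: only the referenced-page flag activates */
	if (referenced_page)
		return PAGEREF_ACTIVATE;
#else
	/* CMA enabled: the stock heuristics are kept */
	if (referenced_page || referenced_ptes > 1)
		return PAGEREF_ACTIVATE;
	/* activate executable file pages after first use */
	if (vm_exec)
		return PAGEREF_ACTIVATE;
#endif
	return PAGEREF_KEEP;
}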
@@ -766,7 +770,10 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
/*
* shrink_page_list() returns the number of reclaimed pages
*/
-static unsigned long shrink_page_list(struct list_head *page_list,
+#ifndef CONFIG_ZRAM_FOR_ANDROID
+static
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+unsigned long shrink_page_list(struct list_head *page_list,
struct zone *zone,
struct scan_control *sc)
{
@@ -1050,8 +1057,12 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
* unevictable; only give shrink_page_list evictable pages.
*/
if (PageUnevictable(page))
+#ifndef CONFIG_DMA_CMA
return ret;
-
+#else
+ printk(KERN_ERR "%s[%d] Unevictable page %p\n",
+ __func__, __LINE__, page);
+#endif
ret = -EBUSY;
/*
@@ -1268,7 +1279,10 @@ static unsigned long isolate_pages_global(unsigned long nr,
* clear_active_flags() is a helper for shrink_active_list(), clearing
* any active bits from the pages in the list.
*/
-static unsigned long clear_active_flags(struct list_head *page_list,
+#ifndef CONFIG_ZRAM_FOR_ANDROID
+static
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+unsigned long clear_active_flags(struct list_head *page_list,
unsigned int *count)
{
int nr_active = 0;
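
Both shrink_page_list() and clear_active_flags() lose their static qualifier when CONFIG_ZRAM_FOR_ANDROID is set, so the zram/compcache path can call straight into the reclaim core. A caller outside mm/vmscan.c then needs matching declarations, sketched below; the header placement is hypothetical, the signatures are the ones in this diff, and note that mainline defines struct scan_control privately inside mm/vmscan.c, so the vendor tree presumably exposes it in a shared header as well (an assumption):

#ifdef CONFIG_ZRAM_FOR_ANDROID
/* defined in mm/vmscan.c; no longer static with this patch applied */
unsigned long shrink_page_list(struct list_head *page_list,
			       struct zone *zone,
			       struct scan_control *sc);
unsigned long clear_active_flags(struct list_head *page_list,
				 unsigned int *count);
#endif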
@@ -1338,6 +1352,40 @@ int isolate_lru_page(struct page *page)
return ret;
}
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+/**
+ * isolate_lru_page_compcache - tries to isolate a page for compcache
+ * @page: page to isolate from its LRU list
+ *
+ * Isolates a @page from an LRU list and clears PageLRU, but
+ * does not adjust the vmstat statistics.
+ * Returns 0 if the page was removed from an LRU list.
+ * Returns -EBUSY if the page was not on an LRU list.
+ */
+int isolate_lru_page_compcache(struct page *page)
+{
+ int ret = -EBUSY;
+
+ VM_BUG_ON(!page_count(page));
+
+ if (PageLRU(page)) {
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irq(&zone->lru_lock);
+ if (PageLRU(page)) {
+ int lru = page_lru(page);
+ ret = 0;
+ get_page(page);
+ ClearPageLRU(page);
+ list_del(&page->lru);
+ mem_cgroup_del_lru_list(page, lru);
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ }
+ return ret;
+}
+#endif
+
/*
* Are there way too many processes in the direct reclaim path already?
*/
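
isolate_lru_page_compcache() mirrors isolate_lru_page() but deliberately skips the NR_ISOLATED_* vmstat accounting. A hedged sketch of a caller (the wrapper itself is hypothetical; isolate_lru_page_compcache() comes from this patch, and putback_lru_page() is the standard mm helper that re-adds a page to an LRU and drops the extra reference):

/* hypothetical compcache-side caller */
static int compcache_grab_page(struct page *page)
{
	if (isolate_lru_page_compcache(page))
		return -EBUSY;	/* page was not on an LRU list */

	/*
	 * The page is now off its LRU and holds an extra reference,
	 * but no NR_ISOLATED_ANON/NR_ISOLATED_FILE counter was bumped,
	 * so none may be decremented when the page is given back.
	 */
	/* ... compress or otherwise consume the page here ... */

	putback_lru_page(page);
	return 0;
}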
@@ -1574,6 +1622,44 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
return nr_reclaimed;
}
+#ifdef CONFIG_ZRAM_FOR_ANDROID
+unsigned long
+zone_id_shrink_pagelist(struct zone *zone, struct list_head *page_list)
+{
+ unsigned long nr_reclaimed = 0;
+ unsigned long nr_anon;
+ unsigned long nr_file;
+
+ struct scan_control sc = {
+ .gfp_mask = GFP_USER,
+ .may_writepage = 1,
+ .nr_to_reclaim = SWAP_CLUSTER_MAX,
+ .may_unmap = 1,
+ .may_swap = 1,
+ .swappiness = vm_swappiness,
+ .order = 0,
+ .mem_cgroup = NULL,
+ .nodemask = NULL,
+ };
+
+ spin_lock_irq(&zone->lru_lock);
+
+ update_isolated_counts(zone, &sc, &nr_anon, &nr_file, page_list);
+
+ spin_unlock_irq(&zone->lru_lock);
+
+ nr_reclaimed = shrink_page_list(page_list, zone, &sc);
+
+ __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+
+ putback_lru_pages(zone, &sc, nr_anon, nr_file, page_list);
+
+ return nr_reclaimed;
+}
+EXPORT_SYMBOL(zone_id_shrink_pagelist);
+
+#endif /* CONFIG_ZRAM_FOR_ANDROID */
+
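
zone_id_shrink_pagelist() packages the middle of shrink_inactive_list() — update the isolated counts, reclaim through shrink_page_list(), put failures back — for a caller that has already isolated its pages, and the EXPORT_SYMBOL makes it reachable from a module. A sketch of such a caller (hypothetical; it assumes every page on the list was taken off its LRU first, e.g. with isolate_lru_page_compcache() above):

/* hypothetical module-side reclaim of a privately gathered list */
static unsigned long compcache_reclaim_list(struct zone *zone,
					    struct list_head *page_list)
{
	return zone_id_shrink_pagelist(zone, page_list);
}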
/*
* This moves pages from the active list to the inactive list.
*
@@ -2040,12 +2126,14 @@ static void shrink_zone(int priority, struct zone *zone,
enum lru_list l;
unsigned long nr_reclaimed, nr_scanned;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+ struct blk_plug plug;
restart:
nr_reclaimed = 0;
nr_scanned = sc->nr_scanned;
get_scan_count(zone, sc, nr, priority);
+ blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(l) {
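
The new plug brackets the whole LRU walk, so any writeback that shrink_page_list() starts is queued on the task's plug list and dispatched as one merged batch when blk_finish_plug() runs at the end of the loop (next hunk). The pattern in isolation:

#include <linux/blkdev.h>

/* generic block plugging: I/O submitted between start and finish is
 * held per-task and merged, then issued in one batch at finish time */
static void batched_submit_example(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	/* ... submit_bio()/pageout() calls accumulate here ... */
	blk_finish_plug(&plug);
}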
@@ -2069,6 +2157,7 @@ restart:
if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
break;
}
+ blk_finish_plug(&plug);
sc->nr_reclaimed += nr_reclaimed;
/*
@@ -3057,7 +3146,11 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
struct reclaim_state reclaim_state;
struct scan_control sc = {
.gfp_mask = GFP_HIGHUSER_MOVABLE,
+#if defined(CONFIG_SLP) && defined(CONFIG_FULL_PAGE_RECLAIM)
+ .may_swap = 0,
+#else
.may_swap = 1,
+#endif
.may_unmap = 1,
.may_writepage = 1,
.nr_to_reclaim = nr_to_reclaim,
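
shrink_all_memory() is the reclaim pass the hibernation code runs to free memory before taking a snapshot. With both CONFIG_SLP and CONFIG_FULL_PAGE_RECLAIM set (vendor options; SLP looks like Samsung's platform config, an assumption), may_swap = 0 restricts that pass to dropping file-backed pages instead of swapping anonymous memory out. A standalone model of the config effect (mock type, not the kernel's struct scan_control):

struct mock_scan_control { int may_swap, may_unmap, may_writepage; };

static struct mock_scan_control hibernate_reclaim_control(void)
{
	struct mock_scan_control sc = {
#if defined(CONFIG_SLP) && defined(CONFIG_FULL_PAGE_RECLAIM)
		.may_swap = 0,	/* file cache only, never swap out */
#else
		.may_swap = 1,
#endif
		.may_unmap = 1,
		.may_writepage = 1,
	};
	return sc;
}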