author     Andrew Dodd <atd7@cornell.edu>    2013-02-16 19:32:40 -0500
committer  Andrew Dodd <atd7@cornell.edu>    2013-02-27 09:19:13 -0500
commit     5843001eb8e58acecf32f794189193eb82d963b7 (patch)
tree       40ca872628e9f8b006ffe32db9aabb6361603079 /mm
parent     19dd8724942d4998ccaa090daa2a69b33648d2bb (diff)
mm: Update Samsung's -cma shit to match 3.0.64
Samsung forked compaction and page_alloc to compaction-cma and page_alloc-cma - fix stuff that breaks. We may need to update more later.

Change-Id: I8325611e4e41af22688553a835dbc490c70793e1
Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction-cma.c    6
-rw-r--r--  mm/migrate-cma.c     203
-rw-r--r--  mm/page_alloc-cma.c   31
3 files changed, 152 insertions(+), 88 deletions(-)
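
The common thread across all three files is the replacement of the old boolean "sync" migration flag with the three-valued enum migrate_mode that the 3.0.x stable series picked up from upstream. For reference, the enum as defined upstream in include/linux/migrate_mode.h is sketched below; it is not part of this diff, and the comments are a paraphrase of the upstream header rather than a quote of whatever this tree actually backported.

/*
 * Reference sketch (not part of this diff) of the migrate_mode values the
 * -cma files are converted to; paraphrased from upstream
 * include/linux/migrate_mode.h and assumed to match this tree's header.
 *
 * MIGRATE_ASYNC       never block; give up rather than sleep
 * MIGRATE_SYNC_LIGHT  allow most blocking, but do not wait on writeback
 * MIGRATE_SYNC        allow blocking, including waiting for writeback
 */
enum migrate_mode {
	MIGRATE_ASYNC,
	MIGRATE_SYNC_LIGHT,
	MIGRATE_SYNC,
};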
diff --git a/mm/compaction-cma.c b/mm/compaction-cma.c
index b5dced6..f3ce63a 100644
--- a/mm/compaction-cma.c
+++ b/mm/compaction-cma.c
@@ -303,7 +303,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
}
/* Try isolate the page */
- if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
+ if (__isolate_lru_page(page,
+ ISOLATE_ACTIVE|ISOLATE_INACTIVE, 0) != 0)
continue;
VM_BUG_ON(PageTransCompound(page));
@@ -659,7 +660,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
(unsigned long)cc, false,
- cc->sync, 0);
+ cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC
+ , 0);
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;
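
The compaction caller keeps its existing cc->sync flag and only maps it onto the new enum at the migrate_pages() call site: async compaction stays non-blocking, while "sync" compaction is downgraded to sync-light and no longer waits on writeback. A minimal sketch of that convention, written as a hypothetical helper rather than code from this tree:

/* Hypothetical helper showing the call-site convention above: a legacy
 * boolean 'sync' flag is translated into a migrate_mode. */
static enum migrate_mode compact_migrate_mode(bool sync)
{
	/* Full MIGRATE_SYNC is left to callers that can afford to wait on
	 * PageWriteback (e.g. memory offlining); compaction only ever asks
	 * for sync-light. */
	return sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC;
}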
diff --git a/mm/migrate-cma.c b/mm/migrate-cma.c
index c224481..449efb2 100644
--- a/mm/migrate-cma.c
+++ b/mm/migrate-cma.c
@@ -168,6 +168,7 @@ out:
return SWAP_AGAIN;
}
+
/*
* Get rid of all migration entries and replace them by
* references to the indicated page.
@@ -241,6 +242,55 @@ static int is_failed_page(struct page *page, int pass, int tries)
return 0;
}
+#ifdef CONFIG_BLOCK
+/* Returns true if all buffers are successfully locked */
+static bool buffer_migrate_lock_buffers(struct buffer_head *head, enum migrate_mode mode)
+{
+ struct buffer_head *bh = head;
+
+ /* Simple case, sync compaction */
+ if (mode != MIGRATE_ASYNC) {
+ do {
+ get_bh(bh);
+ lock_buffer(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ return true;
+ }
+
+ /* async case, we cannot block on lock_buffer so use trylock_buffer */
+ do {
+ get_bh(bh);
+ if (!trylock_buffer(bh)) {
+ /*
+ * We failed to lock the buffer and cannot stall in
+ * async migration. Release the taken locks
+ */
+ struct buffer_head *failed_bh = bh;
+ put_bh(failed_bh);
+ bh = head;
+ while (bh != failed_bh) {
+ unlock_buffer(bh);
+ put_bh(bh);
+ bh = bh->b_this_page;
+ }
+ return false;
+ }
+
+ bh = bh->b_this_page;
+ } while (bh != head);
+ return true;
+}
+#else
+static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
+ enum migrate_mode mode)
+{
+ return true;
+}
+#endif /* CONFIG_BLOCK */
+
/*
* Replace the page in the mapping.
*
@@ -251,6 +301,7 @@ static int is_failed_page(struct page *page, int pass, int tries)
*/
static int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
+ struct buffer_head *head, enum migrate_mode mode,
int pass, int tries)
{
int expected_count;
@@ -295,6 +346,19 @@ static int migrate_page_move_mapping(struct address_space *mapping,
}
/*
+ * In the async migration case of moving a page with buffers, lock the
+ * buffers using trylock before the mapping is moved. If the mapping
+ * were moved first and we then failed to lock the buffers, we could
+ * not move the mapping back due to an elevated page count and would
+ * have to block waiting on other references to be dropped.
+ */
+ if (mode == MIGRATE_ASYNC && head && !buffer_migrate_lock_buffers(head, mode)) {
+ page_unfreeze_refs(page, expected_count);
+ spin_unlock_irq(&mapping->tree_lock);
+ return -EAGAIN;
+ }
+
+ /*
* Now we know that no one else is looking at the page.
*/
get_page(newpage); /* add cache reference */
@@ -453,13 +517,13 @@ EXPORT_SYMBOL(fail_migrate_page);
*/
static int __migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page,
- int pass, int tries)
+ enum migrate_mode mode, int pass, int tries)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(mapping, newpage, page, pass, tries);
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, pass, tries);
if (rc) {
if (is_failed_page(page, pass, tries)) {
@@ -474,9 +538,9 @@ static int __migrate_page(struct address_space *mapping,
}
int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page, enum migrate_mode mode)
{
- return __migrate_page(mapping, newpage, page, 0, 0);
+ return __migrate_page(mapping, newpage, page, mode, 0, 0);
}
EXPORT_SYMBOL(migrate_page);
@@ -487,28 +551,28 @@ EXPORT_SYMBOL(migrate_page);
* exist.
*/
int buffer_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct buffer_head *bh, *head;
int rc;
if (!page_has_buffers(page))
- return migrate_page(mapping, newpage, page);
+ return migrate_page(mapping, newpage, page, mode);
head = page_buffers(page);
- rc = migrate_page_move_mapping(mapping, newpage, page, 0, 0);
+ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0, 0);
if (rc)
return rc;
- bh = head;
- do {
- get_bh(bh);
- lock_buffer(bh);
- bh = bh->b_this_page;
-
- } while (bh != head);
+ /*
+ * In the async case, migrate_page_move_mapping locked the buffers
+ * with an IRQ-safe spinlock held. In the sync case, the buffers
+ * need to be locked now
+ */
+ if (mode != MIGRATE_ASYNC)
+ BUG_ON(!buffer_migrate_lock_buffers(head, mode));
ClearPagePrivate(page);
set_page_private(newpage, page_private(page));
@@ -585,11 +649,13 @@ static int writeout(struct address_space *mapping, struct page *page)
* Default handling if a filesystem does not provide a migration function.
*/
static int fallback_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page, int pass, int tries)
+ struct page *newpage, struct page *page, enum migrate_mode mode, int pass, int tries)
{
int rc;
if (PageDirty(page)) {
+ if (mode != MIGRATE_SYNC)
+ return -EBUSY;
rc = writeout(mapping, page);
if (is_failed_page(page, pass, tries)) {
printk("%s[%d] 1 ", __func__, __LINE__);
@@ -611,7 +677,7 @@ static int fallback_migrate_page(struct address_space *mapping,
return -EAGAIN;
}
- rc = __migrate_page(mapping, newpage, page, pass, tries);
+ rc = __migrate_page(mapping, newpage, page, mode, pass, tries);
if (rc) {
if (is_failed_page(page, pass, tries)) {
printk("%s[%d] 3 ", __func__, __LINE__);
@@ -633,7 +699,7 @@ static int fallback_migrate_page(struct address_space *mapping,
* == 0 - success
*/
static int move_to_new_page(struct page *newpage, struct page *page,
- int remap_swapcache, bool sync,
+ int remap_swapcache, enum migrate_mode mode,
int pass, int tries)
{
struct address_space *mapping;
@@ -655,57 +721,40 @@ static int move_to_new_page(struct page *newpage, struct page *page,
mapping = page_mapping(page);
if (!mapping) {
- rc = __migrate_page(mapping, newpage, page, pass, tries);
+ rc = __migrate_page(mapping, newpage, page, mode, pass, tries);
if (rc) {
if (is_failed_page(page, pass, tries)) {
printk("%s[%d]: 1 ", __func__, __LINE__);
dump_page(page);
}
}
- } else {
- /*
- * Do not writeback pages if !sync and migratepage is
- * not pointing to migrate_page() which is nonblocking
- * (swapcache/tmpfs uses migratepage = migrate_page).
- */
- if (PageDirty(page) && !sync &&
- mapping->a_ops->migratepage != migrate_page) {
- rc = -EBUSY;
- if (rc) {
- if (is_failed_page(page, pass, tries)) {
- printk(KERN_ERR "%s[%d]: 2 ",
- __func__, __LINE__);
- dump_page(page);
- }
- }
- } else if (mapping->a_ops->migratepage) {
- /*
- * Most pages have a mapping and most filesystems
- * should provide a migration function. Anonymous
- * pages are part of swap space which also has its
- * own migration function. This is the most common
- * path for page migration.
- */
- rc = mapping->a_ops->migratepage(mapping,
- newpage, page);
- if (rc) {
- if (is_failed_page(page, pass, tries)) {
- printk(KERN_ERR "%s[%d]: 3 ",
- __func__, __LINE__);
- dump_page(page);
- }
- }
- } else {
- rc = fallback_migrate_page(mapping, newpage, page,
- pass, tries);
- if (rc) {
- if (is_failed_page(page, pass, tries)) {
- printk(KERN_ERR "%s[%d]: 4 ",
- __func__, __LINE__);
- dump_page(page);
- }
- }
- }
+ } else if (mapping->a_ops->migratepage) {
+ /*
+ * Most pages have a mapping and most filesystems
+ * should provide a migration function. Anonymous
+ * pages are part of swap space which also has its
+ * own migration function. This is the most common
+ * path for page migration.
+ */
+ rc = mapping->a_ops->migratepage(mapping,
+ newpage, page, mode);
+ if (rc) {
+ if (is_failed_page(page, pass, tries)) {
+ printk(KERN_ERR "%s[%d]: 3 ",
+ __func__, __LINE__);
+ dump_page(page);
+ }
+ }
+ } else {
+ rc = fallback_migrate_page(mapping, newpage, page, mode,
+ pass, tries);
+ if (rc) {
+ if (is_failed_page(page, pass, tries)) {
+ printk(KERN_ERR "%s[%d]: 4 ",
+ __func__, __LINE__);
+ dump_page(page);
+ }
+ }
}
if (rc) {
@@ -725,7 +774,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
* to the newly allocated page in newpage.
*/
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force, bool offlining, bool sync,
+ struct page *page, int force, bool offlining, enum migrate_mode mode,
int pass, int tries)
{
int rc = 0;
@@ -751,7 +800,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
rc = -EAGAIN;
if (!trylock_page(page)) {
- if (!force || !sync) {
+ if (!force || mode == MIGRATE_ASYNC) {
if (is_failed_page(page, pass, tries)) {
printk("%s[%d] 1 ", __func__, __LINE__);
dump_page(page);
@@ -812,10 +861,12 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (PageWriteback(page)) {
/*
- * For !sync, there is no point retrying as the retry loop
- * is expected to be too short for PageWriteback to be cleared
+ * Only in the case of a full synchronous migration is it
+ * necessary to wait for PageWriteback. In the async case,
+ * the retry loop is too short and in the sync-light case,
+ * the overhead of stalling is too much
*/
- if (!sync) {
+ if (mode != MIGRATE_SYNC) {
rc = -EBUSY;
goto uncharge;
}
@@ -894,7 +945,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
skip_unmap:
if (!page_mapped(page)) {
- rc = move_to_new_page(newpage, page, remap_swapcache, sync,
+ rc = move_to_new_page(newpage, page, remap_swapcache, mode,
pass, tries);
if (rc) {
if (is_failed_page(page, pass, tries)) {
@@ -966,7 +1017,7 @@ move_newpage:
*/
static int unmap_and_move_huge_page(new_page_t get_new_page,
unsigned long private, struct page *hpage,
- int force, bool offlining, bool sync)
+ int force, bool offlining, enum migrate_mode mode)
{
int rc = 0;
int *result = NULL;
@@ -979,7 +1030,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
rc = -EAGAIN;
if (!trylock_page(hpage)) {
- if (!force || !sync)
+ if (!force || mode != MIGRATE_SYNC)
goto out;
lock_page(hpage);
}
@@ -990,7 +1041,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
if (!page_mapped(hpage))
- rc = move_to_new_page(new_hpage, hpage, 1, sync, 0, 0);
+ rc = move_to_new_page(new_hpage, hpage, 1, mode, 0, 0);
if (rc)
remove_migration_ptes(hpage, hpage);
@@ -1035,7 +1086,7 @@ struct page *migrate_pages_current = NULL;
*/
int migrate_pages(struct list_head *from,
new_page_t get_new_page, unsigned long private, bool offlining,
- bool sync, int tries)
+ enum migrate_mode mode, int tries)
{
int retry = 1;
int nr_failed = 0;
@@ -1056,7 +1107,7 @@ int migrate_pages(struct list_head *from,
rc = unmap_and_move(get_new_page, private,
page, pass > 2, offlining,
- sync, pass, tries);
+ mode, pass, tries);
if (rc)
failed_pages[tries][pass] = page;
@@ -1094,7 +1145,7 @@ out:
int migrate_huge_pages(struct list_head *from,
new_page_t get_new_page, unsigned long private, bool offlining,
- bool sync)
+ enum migrate_mode mode)
{
int retry = 1;
int nr_failed = 0;
@@ -1111,7 +1162,7 @@ int migrate_huge_pages(struct list_head *from,
rc = unmap_and_move_huge_page(get_new_page,
private, page, pass > 2, offlining,
- sync);
+ mode);
switch (rc) {
case -ENOMEM:
@@ -1310,7 +1361,7 @@ set_status:
err = 0;
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm, 0, true, 0);
+ (unsigned long)pm, 0, MIGRATE_SYNC, 0);
if (err)
putback_lru_pages(&pagelist);
}
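
Two patterns in migrate-cma.c are worth calling out. First, buffer_migrate_lock_buffers() is a classic "trylock everything or roll back": in async mode it must not sleep, so it trylocks each buffer_head in the page's circular list and, on the first failure, releases every buffer already taken and reports failure so the caller can retry the migration later. The same pattern in a self-contained user-space form, using pthread mutexes purely as an illustration (none of these names exist in the kernel):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: take an array of locks without blocking, or release
 * everything acquired so far and report failure, mirroring the async branch
 * of buffer_migrate_lock_buffers() in this patch. */
static bool trylock_all_or_rollback(pthread_mutex_t *locks, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (pthread_mutex_trylock(&locks[i]) != 0) {
			/* Roll back: drop every lock acquired before i. */
			while (i-- > 0)
				pthread_mutex_unlock(&locks[i]);
			return false;
		}
	}
	return true;
}

Second, the mode-aware dispatch: fallback_migrate_page() now refuses to write out dirty pages unless the mode is full MIGRATE_SYNC, which is what allows move_to_new_page() to drop its old PageDirty/!sync special case.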
diff --git a/mm/page_alloc-cma.c b/mm/page_alloc-cma.c
index 88e26b6..74cdf7d 100644
--- a/mm/page_alloc-cma.c
+++ b/mm/page_alloc-cma.c
@@ -2567,6 +2567,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zone *preferred_zone;
struct page *page;
int migratetype = allocflags_to_migratetype(gfp_mask);
+ unsigned int cpuset_mems_cookie;
gfp_mask &= gfp_allowed_mask;
@@ -2585,15 +2586,14 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
- get_mems_allowed();
+ retry_cpuset:
+ cpuset_mems_cookie = get_mems_allowed();
/* The preferred zone is used for statistics later */
first_zones_zonelist(zonelist, high_zoneidx,
nodemask ? : &cpuset_current_mems_allowed,
&preferred_zone);
- if (!preferred_zone) {
- put_mems_allowed();
- return NULL;
- }
+ if (!preferred_zone)
+ goto out;
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -2603,9 +2603,18 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
- put_mems_allowed();
-
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+
+out:
+ /*
+ * When updating a task's mems_allowed, it is possible to race with
+ * parallel threads in such a way that an allocation can fail while
+ * the mask is being updated. If a page allocation is about to fail,
+ * check if the cpuset changed during allocation and if so, retry.
+ */
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
+
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2826,13 +2835,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
bool skip_free_areas_node(unsigned int flags, int nid)
{
bool ret = false;
+ unsigned int cpuset_mems_cookie;
if (!(flags & SHOW_MEM_FILTER_NODES))
goto out;
- get_mems_allowed();
- ret = !node_isset(nid, cpuset_current_mems_allowed);
- put_mems_allowed();
+ do {
+ cpuset_mems_cookie = get_mems_allowed();
+ ret = !node_isset(nid, cpuset_current_mems_allowed);
+ } while (!put_mems_allowed(cpuset_mems_cookie));
out:
return ret;
}
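
The page_alloc-cma.c hunks adopt the upstream cpuset "mems cookie" scheme: get_mems_allowed() now returns a cookie taken from a sequence counter, and put_mems_allowed(cookie) returns false if the task's mems_allowed changed while the cookie was held, in which case a failing allocation is retried rather than reported as a spurious failure. Upstream implements this with a seqcount; the sketch below is a generic user-space rendition of the same read-side begin/retry idiom using C11 atomics, with made-up names rather than the kernel API:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative seqcount-style begin/retry, mirroring how the
 * get_mems_allowed()/put_mems_allowed() cookie is used in this patch.
 * A writer makes 'seq' odd, updates the protected data, then makes it
 * even again; readers retry if the value changed underneath them. */
static _Atomic unsigned int seq;

static unsigned int read_begin(void)
{
	unsigned int s;

	/* Spin past in-progress updates (odd sequence numbers). */
	while ((s = atomic_load(&seq)) & 1)
		;
	return s;
}

static bool read_retry(unsigned int cookie)
{
	/* True if a writer ran since read_begin(), i.e. the data read under
	 * this cookie may be stale and the operation should be retried. */
	return atomic_load(&seq) != cookie;
}

static void write_begin(void)
{
	atomic_fetch_add(&seq, 1);	/* odd: update in progress */
}

static void write_end(void)
{
	atomic_fetch_add(&seq, 1);	/* even: update complete */
}

Usage then looks like the loop in skip_free_areas_node(): do { cookie = read_begin(); ...read the protected state...; } while (read_retry(cookie));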