diff options
author | Andrew Dodd <atd7@cornell.edu> | 2013-02-16 19:32:40 -0500 |
---|---|---|
committer | Andrew Dodd <atd7@cornell.edu> | 2013-02-27 09:19:13 -0500 |
commit | 5843001eb8e58acecf32f794189193eb82d963b7 (patch) | |
tree | 40ca872628e9f8b006ffe32db9aabb6361603079 /mm/page_alloc-cma.c | |
parent | 19dd8724942d4998ccaa090daa2a69b33648d2bb (diff) | |
download | kernel_samsung_smdk4412-5843001eb8e58acecf32f794189193eb82d963b7.zip kernel_samsung_smdk4412-5843001eb8e58acecf32f794189193eb82d963b7.tar.gz kernel_samsung_smdk4412-5843001eb8e58acecf32f794189193eb82d963b7.tar.bz2 |
mm: Update Samsung's -cma shit to match 3.0.64
Samsung forked compaction and page_alloc to
compaction-cma and page_alloc-cma - fix stuff
that breaks. We may need to update more later.
Change-Id: I8325611e4e41af22688553a835dbc490c70793e1
Diffstat (limited to 'mm/page_alloc-cma.c')
-rw-r--r-- | mm/page_alloc-cma.c | 31 |
1 file changed, 21 insertions, 10 deletions
diff --git a/mm/page_alloc-cma.c b/mm/page_alloc-cma.c index 88e26b6..74cdf7d 100644 --- a/mm/page_alloc-cma.c +++ b/mm/page_alloc-cma.c @@ -2567,6 +2567,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zone *preferred_zone; struct page *page; int migratetype = allocflags_to_migratetype(gfp_mask); + unsigned int cpuset_mems_cookie; gfp_mask &= gfp_allowed_mask; @@ -2585,15 +2586,14 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, if (unlikely(!zonelist->_zonerefs->zone)) return NULL; - get_mems_allowed(); + retry_cpuset: + cpuset_mems_cookie = get_mems_allowed(); /* The preferred zone is used for statistics later */ first_zones_zonelist(zonelist, high_zoneidx, nodemask ? : &cpuset_current_mems_allowed, &preferred_zone); - if (!preferred_zone) { - put_mems_allowed(); - return NULL; - } + if (!preferred_zone) + goto out; /* First allocation attempt */ page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, @@ -2603,9 +2603,18 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, page = __alloc_pages_slowpath(gfp_mask, order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); - put_mems_allowed(); - trace_mm_page_alloc(page, order, gfp_mask, migratetype); + +out: + /* + * When updating a task's mems_allowed, it is possible to race with + * parallel threads in such a way that an allocation can fail while + * the mask is being updated. If a page allocation is about to fail, + * check if the cpuset changed during allocation and if so, retry. 
+ */ + if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page)) + goto retry_cpuset; + return page; } EXPORT_SYMBOL(__alloc_pages_nodemask); @@ -2826,13 +2835,15 @@ void si_meminfo_node(struct sysinfo *val, int nid) bool skip_free_areas_node(unsigned int flags, int nid) { bool ret = false; + unsigned int cpuset_mems_cookie; if (!(flags & SHOW_MEM_FILTER_NODES)) goto out; - get_mems_allowed(); - ret = !node_isset(nid, cpuset_current_mems_allowed); - put_mems_allowed(); + do { + cpuset_mems_cookie = get_mems_allowed(); + ret = !node_isset(nid, cpuset_current_mems_allowed); + } while (!put_mems_allowed(cpuset_mems_cookie)); out: return ret; } |